| code | apis | extract_api |
|---|---|---|
''' Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
Created on Feb 24, 2014
@author: dfleck
'''
import math


class FingerEntry:
    '''Represents an entry in the finger table.

    Note: Finger indexes go from 0-->m-1, which is different from the
    Chord paper, which goes from 1-->m.
    '''

    m = 128  # Number of bits in entry set

    def __init__(self, k, n, nodeLocation):
        '''k is the finger table entry.
        n is the node ID of the node holding this entry.
        '''
        # print("DEBUG: fingerINIT: %d %d " % (k-1, n))
        # Caveat: math.pow works in floats, so precision is lost for a full
        # 128-bit identifier space; integer 2 ** k would be exact.
        twoToTheM = math.pow(2, FingerEntry.m)
        # Parenthesize (n + 2^(k-1)) before the modulo: % binds tighter than +,
        # so without the parentheses the sum never wraps around the ring as the
        # Chord paper requires.
        self.start = (n + math.pow(2, k - 1)) % twoToTheM
        self.intervalStart = self.start
        self.intervalEnd = (n + math.pow(2, k)) % twoToTheM
        self.nodeLocation = nodeLocation  # This is the succ on the tables in the Chord paper

    def __str__(self):
        if self.nodeLocation is None:
            nodeId = -999
        else:
            nodeId = self.nodeLocation.id
        return "Start:%d End:%d NodeLocation:%d" % (self.start, self.intervalEnd, nodeId)
|
[
"math.pow"
] |
[((652, 678), 'math.pow', 'math.pow', (['(2)', 'FingerEntry.m'], {}), '(2, FingerEntry.m)\n', (660, 678), False, 'import math\n'), ((704, 722), 'math.pow', 'math.pow', (['(2)', '(k - 1)'], {}), '(2, k - 1)\n', (712, 722), False, 'import math\n'), ((805, 819), 'math.pow', 'math.pow', (['(2)', 'k'], {}), '(2, k)\n', (813, 819), False, 'import math\n')]
|
from leapp.actors import Actor
from leapp.libraries.actor import readopensshconfig
from leapp.models import OpenSshConfig
from leapp.tags import FactsPhaseTag, IPUWorkflowTag


class OpenSshConfigScanner(Actor):
    """
    Collect information about the OpenSSH configuration.

    Currently supporting the following options:

    * PermitRootLogin
    * UsePrivilegeSeparation
    * Protocol
    * Ciphers
    * MACs
    """

    name = 'read_openssh_config'
    consumes = ()
    produces = (OpenSshConfig, )
    tags = (FactsPhaseTag, IPUWorkflowTag, )

    def process(self):
        readopensshconfig.scan_sshd(self.produce)
|
[
"leapp.libraries.actor.readopensshconfig.scan_sshd"
] |
[((594, 635), 'leapp.libraries.actor.readopensshconfig.scan_sshd', 'readopensshconfig.scan_sshd', (['self.produce'], {}), '(self.produce)\n', (621, 635), False, 'from leapp.libraries.actor import readopensshconfig\n')]
|
from subprocess import Popen, PIPE


def shell(cmd, shell=False):
    if shell:
        p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    else:
        cmd = cmd.split()
        p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate()
    return output
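# Illustrative usage (not part of the original module): without shell=True the
# command string is split on whitespace and executed directly; with shell=True
# it is handed to the shell, so pipes and globbing work (with the usual
# injection caveats). Both calls return the child's stdout as bytes:
#
#     shell('uname -r')
#     shell('echo a b | wc -w', shell=True)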
|
[
"subprocess.Popen"
] |
[((93, 153), 'subprocess.Popen', 'Popen', (['cmd'], {'shell': '(True)', 'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n', (98, 153), False, 'from subprocess import Popen, PIPE\n'), ((202, 250), 'subprocess.Popen', 'Popen', (['cmd'], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n', (207, 250), False, 'from subprocess import Popen, PIPE\n')]
|
# Generated by Django 3.1.4 on 2020-12-22 07:46
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import question.models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.CharField(max_length=10, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=30)),
                ('description', models.TextField()),
                ('score', models.IntegerField()),
                ('input_format', models.TextField(default='')),
                ('output_format', models.TextField(default='')),
                ('constraints', models.TextField(default='')),
                ('correct_code', models.TextField(blank=True, null=True)),
                ('correct_code_lang', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.language')),
            ],
        ),
        migrations.CreateModel(
            name='Testcase',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('input', models.FileField(upload_to=question.models.upload_input_rename)),
                ('output', models.FileField(upload_to=question.models.upload_output_rename)),
                ('is_public', models.BooleanField(default=False)),
                ('weightage', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)])),
                ('que_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_cases', to='question.question')),
            ],
        ),
    ]
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.IntegerField"
] |
[((424, 490), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'primary_key': '(True)', 'serialize': '(False)'}), '(max_length=10, primary_key=True, serialize=False)\n', (440, 490), False, 'from django.db import migrations, models\n'), ((518, 549), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (534, 549), False, 'from django.db import migrations, models\n'), ((584, 602), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (600, 602), False, 'from django.db import migrations, models\n'), ((631, 652), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (650, 652), False, 'from django.db import migrations, models\n'), ((688, 716), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (704, 716), False, 'from django.db import migrations, models\n'), ((753, 781), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (769, 781), False, 'from django.db import migrations, models\n'), ((816, 844), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (832, 844), False, 'from django.db import migrations, models\n'), ((880, 919), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (896, 919), False, 'from django.db import migrations, models\n'), ((960, 1058), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""core.language"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='core.language')\n", (977, 1058), False, 'from django.db import migrations, models\n'), ((1188, 1239), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (1204, 1239), False, 'from django.db import migrations, models\n'), ((1268, 1331), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'question.models.upload_input_rename'}), '(upload_to=question.models.upload_input_rename)\n', (1284, 1331), False, 'from django.db import migrations, models\n'), ((1361, 1425), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'question.models.upload_output_rename'}), '(upload_to=question.models.upload_output_rename)\n', (1377, 1425), False, 'from django.db import migrations, models\n'), ((1458, 1492), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1477, 1492), False, 'from django.db import migrations, models\n'), ((1689, 1807), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""test_cases"""', 'to': '"""question.question"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='test_cases', to='question.question')\n", (1706, 1807), False, 'from django.db import migrations, models\n')]
|
"""
Deletes files in a directory that is not a photo.
"""
import logging
from PIL import Image
import pathlib
def check_image_with_pil(path: pathlib.Path) -> bool:
"""
Checks if the path is an image.
:param pathlib.Path path: path to check the image of
:return: true if the path is an image.
:rtype: bool
"""
try:
with Image.open(path) as image:
pass
except IOError:
return False
return True
def remove_file_if_image(path: pathlib.Path, test: bool = False) -> None:
"""
Removes files in the path recursively, if they are not images.
:param pathlib.Path path: the path to the file or directory
:param bool test: if true, does not delete the files.
:return: None
:rtype: None
"""
if path.is_dir():
for file in path.iterdir():
remove_file_if_image(file, test=test)
else:
if not check_image_with_pil(path):
if test:
logging.info("Would remove {}.".format(path))
else:
logging.info("Removing {}.".format(path))
path.unlink()
def main():
import argparse
# setup command line parsing
parser = argparse.ArgumentParser(description="Deletes files which cannot be parsed by EXIF.")
parser.add_argument("src_dir", type=str, help="source directory")
parser.add_argument("-t", "--test", action="store_true", help="run a test of the removal", dest="test")
parser.add_argument("-v", "--verbose", action="store_true", help="output logging information", dest="verbose")
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(20)
else:
logging.getLogger().setLevel(40)
path = pathlib.Path(args.src_dir)
if not path.exists():
raise IOError("Path does not exist at {}.".format(path))
remove_file_if_image(path, test=args.test)
if __name__ == "__main__":
main()
|
[
"pathlib.Path",
"argparse.ArgumentParser",
"logging.getLogger",
"PIL.Image.open"
] |
[((1210, 1299), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Deletes files which cannot be parsed by EXIF."""'}), "(description=\n 'Deletes files which cannot be parsed by EXIF.')\n", (1233, 1299), False, 'import argparse\n'), ((1743, 1769), 'pathlib.Path', 'pathlib.Path', (['args.src_dir'], {}), '(args.src_dir)\n', (1755, 1769), False, 'import pathlib\n'), ((360, 376), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (370, 376), False, 'from PIL import Image\n'), ((1648, 1667), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1665, 1667), False, 'import logging\n'), ((1699, 1718), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1716, 1718), False, 'import logging\n')]
|
# Copyright 2015 Observable Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, unicode_literals

# python builtins
from os import environ as os_environ
from json import dumps
from logging import getLogger, DEBUG, Formatter
from logging.handlers import SysLogHandler
from socket import gethostname, SOCK_DGRAM, SOCK_STREAM
from time import gmtime, sleep

# local
from service import Service
from snmp_handler import SnmpHandler, SNMP_TRAP_PORT, V2 as SNMPV2
from utils import utc, utcnow, persistent_dict

logger = getLogger(__name__)

ENV_NOTIFICATION_TYPES = 'OBSRVBL_NOTIFICATION_TYPES'
DEFAULT_NOTIFICATION_TYPES = 'alerts observations'
POST_PUBLISH_WAIT_SECONDS = 0.020
UPDATE_INTERVAL_SECONDS = 60
STATE_FILE = '.notifications.state'

MESSAGE_MAP = {
    'alerts': {'endpoint': 'alerts', 'priority': 'error'},
    'observations': {'endpoint': 'observations', 'priority': 'info'},
    'alerts-detail': {'endpoint': 'alert-notifications', 'priority': 'error'},
}

CONFIG_DEFAULTS = {
    'syslog_enabled': 'false',
    'syslog_facility': 'user',
    'syslog_format': ('{time} {sensor_hostname} OBSRVBL '
                      '[{facility}.{priority}] {message}'),
    'syslog_server': None,
    'syslog_server_port': 162,
    'syslog_server_protocol': 'udp',
    'snmp_enabled': 'false',
    'snmp_objectid': None,
    'snmp_server': None,
    'snmp_server_port': SNMP_TRAP_PORT,
    'snmp_user': None,
    'snmp_version': SNMPV2,
    'snmpv3_engineid': None,
    'snmpv3_passphrase': None,
}


# translate from human readable config key names to what's in the env
def cfg_format(key):
    return 'OBSRVBL_{}'.format(key.upper())
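# For example (illustrative, not in the original source):
# cfg_format('syslog_server') returns 'OBSRVBL_SYSLOG_SERVER', the variable
# actually looked up in the environment.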
# application config
_CONFIG = {}


# how we actually read the config
def config(key):
    return _CONFIG[cfg_format(key)]


# how we reload the config
def _reload_config():
    global _CONFIG
    # .items() rather than the Python-2-only .iteritems(), so the module
    # imports under both Python 2 and 3
    _CONFIG = {cfg_format(k): v for k, v in CONFIG_DEFAULTS.items()}
    _CONFIG.update(os_environ)


def create_logger():
    _reload_config()
    log = getLogger('obsrvbl')
    log.setLevel(DEBUG)
    log.propagate = False

    # set up handlers
    log.handlers = []
    if config('snmp_enabled').lower() == 'true':
        log.addHandler(_snmp_log_handler(config))
    if config('syslog_enabled').lower() == 'true':
        log.addHandler(_syslog_log_handler(config, gethostname()))

    return log


def _snmp_log_handler(config):
    snmp_config = {
        'host': config('snmp_server'),
        'port': int(config('snmp_server_port')),
        'objectID': config('snmp_objectid'),
        'user': config('snmp_user'),
        'version': config('snmp_version'),
        'engineID': config('snmpv3_engineid'),
        'passcode': config('snmpv3_passphrase'),
    }
    return SnmpHandler(**snmp_config)


def _syslog_log_handler(config, hostname):
    host = config('syslog_server')
    port = int(config('syslog_server_port'))
    if config('syslog_server_protocol').lower() == 'tcp':
        socktype = SOCK_STREAM
    else:
        socktype = SOCK_DGRAM
    log_format = config('syslog_format')
    facility = config('syslog_facility')
    handler = SysLogHandler(
        (host, port),
        SysLogHandler.facility_names[facility],
        socktype=socktype,
    )
    log_format = log_format.format(
        time='%(asctime)s.%(msecs)d+00:00',
        sensor_hostname=hostname,
        facility=facility,
        priority='%(levelname)s',
        message='%(message)s'
    )
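    # Illustrative result (not in the original source): with hostname 'sensor1'
    # and the default facility, log_format is now
    # '%(asctime)s.%(msecs)d+00:00 sensor1 OBSRVBL [user.%(levelname)s] %(message)s'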
    SYSLOG_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
    handler.formatter = Formatter(log_format, datefmt=SYSLOG_DATE_FORMAT)
    handler.formatter.converter = gmtime  # UTC
    return handler


class NotificationPublisher(Service):
    """
    Routinely queries Observation infrastructure for new notification events.
    These are then forwarded to the configured syslog or snmp service.
    """

    def __init__(self, *args, **kwargs):
        kwargs.update({
            'poll_seconds': UPDATE_INTERVAL_SECONDS,
        })
        super(NotificationPublisher, self).__init__(*args, **kwargs)
        self.state = persistent_dict(STATE_FILE)
        self.logger = create_logger()
        notification_types = os_environ.get(
            ENV_NOTIFICATION_TYPES, DEFAULT_NOTIFICATION_TYPES
        )
        self.notification_types = set(notification_types.split())

    def get_data(self, endpoint, params):
        try:
            result = self.api.get_data(endpoint, params).json()
        except ValueError:
            return None
        if 'error' in result:
            return None
        return result['objects']

    def _publish(self, message, priority):
        log_func = getattr(self.logger, priority)
        formatted = dumps(message)
        try:
            log_func(formatted)
        except Exception as ex:
            logger.warning(
                "Got error='%s' when trying to publish "
                "priority='%s', message='%s'",
                ex,
                priority,
                message
            )
        else:
            logger.info(
                "Published message, priority='%s', message='%s'",
                priority,
                formatted
            )

    def publish(self, messages, priority):
        for m in messages:
            self._publish(m, priority)
            # Rest a bit before sending the next message
            sleep(POST_PUBLISH_WAIT_SECONDS)

    def execute(self, now=None):
        if not self.logger.handlers:
            return
        for data_type in self.notification_types:
            if data_type not in MESSAGE_MAP:
                continue
            endpoint = MESSAGE_MAP[data_type]['endpoint']
            priority = MESSAGE_MAP[data_type]['priority']
            try:
                params = self.state[data_type]
            except KeyError:
                params = {'time__gt': utcnow().replace(tzinfo=utc).isoformat()}
                self.state[data_type] = params
            messages = self.get_data(endpoint, params)
            if not messages:
                continue
            max_time = max(msg['time'] for msg in messages)
            self.state[data_type] = {'time__gt': max_time}
            self.publish(messages, priority)


if __name__ == '__main__':
    watcher = NotificationPublisher()
    watcher.run()
|
[
"utils.utcnow",
"snmp_handler.SnmpHandler",
"utils.persistent_dict",
"json.dumps",
"time.sleep",
"logging.Formatter",
"os.environ.get",
"socket.gethostname",
"logging.handlers.SysLogHandler",
"logging.getLogger"
] |
[((1061, 1080), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (1070, 1080), False, 'from logging import getLogger, DEBUG, Formatter\n'), ((2536, 2556), 'logging.getLogger', 'getLogger', (['"""obsrvbl"""'], {}), "('obsrvbl')\n", (2545, 2556), False, 'from logging import getLogger, DEBUG, Formatter\n'), ((3264, 3290), 'snmp_handler.SnmpHandler', 'SnmpHandler', ([], {}), '(**snmp_config)\n', (3275, 3290), False, 'from snmp_handler import SnmpHandler, SNMP_TRAP_PORT, V2 as SNMPV2\n'), ((3644, 3734), 'logging.handlers.SysLogHandler', 'SysLogHandler', (['(host, port)', 'SysLogHandler.facility_names[facility]'], {'socktype': 'socktype'}), '((host, port), SysLogHandler.facility_names[facility],\n socktype=socktype)\n', (3657, 3734), False, 'from logging.handlers import SysLogHandler\n'), ((4042, 4091), 'logging.Formatter', 'Formatter', (['log_format'], {'datefmt': 'SYSLOG_DATE_FORMAT'}), '(log_format, datefmt=SYSLOG_DATE_FORMAT)\n', (4051, 4091), False, 'from logging import getLogger, DEBUG, Formatter\n'), ((4583, 4610), 'utils.persistent_dict', 'persistent_dict', (['STATE_FILE'], {}), '(STATE_FILE)\n', (4598, 4610), False, 'from utils import utc, utcnow, persistent_dict\n'), ((4679, 4745), 'os.environ.get', 'os_environ.get', (['ENV_NOTIFICATION_TYPES', 'DEFAULT_NOTIFICATION_TYPES'], {}), '(ENV_NOTIFICATION_TYPES, DEFAULT_NOTIFICATION_TYPES)\n', (4693, 4745), True, 'from os import environ as os_environ\n'), ((5206, 5220), 'json.dumps', 'dumps', (['message'], {}), '(message)\n', (5211, 5220), False, 'from json import dumps\n'), ((5863, 5895), 'time.sleep', 'sleep', (['POST_PUBLISH_WAIT_SECONDS'], {}), '(POST_PUBLISH_WAIT_SECONDS)\n', (5868, 5895), False, 'from time import gmtime, sleep\n'), ((2853, 2866), 'socket.gethostname', 'gethostname', ([], {}), '()\n', (2864, 2866), False, 'from socket import gethostname, SOCK_DGRAM, SOCK_STREAM\n'), ((6356, 6364), 'utils.utcnow', 'utcnow', ([], {}), '()\n', (6362, 6364), False, 'from utils import utc, utcnow, persistent_dict\n')]
|
from django.urls import include, path
from catalog.admin import admin_site
from django.contrib.auth import logout
from django.conf import settings

from . import views

app_name = "catalog"

urlpatterns = [
    path('', views.HomeView.as_view(), name='home'),
    path('accounts/login/', views.LoginView.as_view(), name='login'),
    path('accounts/logout/', views.LogoutView.logout_user, name='logout'),
    path('admin/', admin_site.urls),
    path('panorama/', views.PanoramaView.as_view(), name='panorama'),
    path('resources/upload/getfile/', views.MassiveUpload.uploadData, name='getUploadFile'),
    path('resources/data/converter/', views.DataConverter.as_view(), name='converter'),
    path('vulnerability/add/', views.AddVulnerability.as_view(), name='addVulnerability'),
    path('vulnerability/data/deleteall/', views.RemoveAllVulnerabilities.removeData),
    path('vulnerability/data/json/export/', views.JsonExportView.export_database, name='exportData'),
    path('vulnerability/data/json/filter/', views.JsonFilterView.get_data, name='jsonfilter'),
    path('vulnerability/data/index', views.IndexView.as_view(), name='index'),
    path('vulnerability/data/json/<int:num>/', views.JsonDetailView.result),
    path('vulnerability/data/json/massiveupload/', views.MassiveUpload.as_view(), name='massiveUpload'),
    path('vulnerability/data/panorama/json/', views.PanoramaJsonView.result),
    path('vulnerability/delete/<int:pk>/', views.DeleteVulnerability.as_view(), name='deleteVulnerability'),
    path('vulnerability/data/delete/', views.DeleteByList.as_view(), name='deleteByList'),
    path('vulnerability/detail/<int:pk>/', views.DetailedView.as_view(), name='detail'),
    path('vulnerability/detail/json/<int:num>/', views.JsonDetailView.result, name='json_detail'),
    path('vulnerability/search/', views.SearchView.search, name='search'),
    path('vulnerability/update/fastupdate/<int:pk>/', views.FastUpdateVulnerability.as_view(), name='fastUpdateVulnerability'),
    path('vulnerability/update/<int:pk>/', views.UpdateVulnerability.as_view(), name='updateVulnerability'),
    path('vulnerability/tinymce/', include('tinymce.urls')),
]
|
[
"django.urls.path",
"django.urls.include"
] |
[((332, 401), 'django.urls.path', 'path', (['"""accounts/logout/"""', 'views.LogoutView.logout_user'], {'name': '"""logout"""'}), "('accounts/logout/', views.LogoutView.logout_user, name='logout')\n", (336, 401), False, 'from django.urls import include, path\n'), ((407, 438), 'django.urls.path', 'path', (['"""admin/"""', 'admin_site.urls'], {}), "('admin/', admin_site.urls)\n", (411, 438), False, 'from django.urls import include, path\n'), ((514, 606), 'django.urls.path', 'path', (['"""resources/upload/getfile/"""', 'views.MassiveUpload.uploadData'], {'name': '"""getUploadFile"""'}), "('resources/upload/getfile/', views.MassiveUpload.uploadData, name=\n 'getUploadFile')\n", (518, 606), False, 'from django.urls import include, path\n'), ((786, 871), 'django.urls.path', 'path', (['"""vulnerability/data/deleteall/"""', 'views.RemoveAllVulnerabilities.removeData'], {}), "('vulnerability/data/deleteall/', views.RemoveAllVulnerabilities.removeData\n )\n", (790, 871), False, 'from django.urls import include, path\n'), ((872, 973), 'django.urls.path', 'path', (['"""vulnerability/data/json/export/"""', 'views.JsonExportView.export_database'], {'name': '"""exportData"""'}), "('vulnerability/data/json/export/', views.JsonExportView.\n export_database, name='exportData')\n", (876, 973), False, 'from django.urls import include, path\n'), ((974, 1068), 'django.urls.path', 'path', (['"""vulnerability/data/json/filter/"""', 'views.JsonFilterView.get_data'], {'name': '"""jsonfilter"""'}), "('vulnerability/data/json/filter/', views.JsonFilterView.get_data, name\n ='jsonfilter')\n", (978, 1068), False, 'from django.urls import include, path\n'), ((1148, 1219), 'django.urls.path', 'path', (['"""vulnerability/data/json/<int:num>/"""', 'views.JsonDetailView.result'], {}), "('vulnerability/data/json/<int:num>/', views.JsonDetailView.result)\n", (1152, 1219), False, 'from django.urls import include, path\n'), ((1330, 1402), 'django.urls.path', 'path', (['"""vulnerability/data/panorama/json/"""', 'views.PanoramaJsonView.result'], {}), "('vulnerability/data/panorama/json/', views.PanoramaJsonView.result)\n", (1334, 1402), False, 'from django.urls import include, path\n'), ((1697, 1794), 'django.urls.path', 'path', (['"""vulnerability/detail/json/<int:num>/"""', 'views.JsonDetailView.result'], {'name': '"""json_detail"""'}), "('vulnerability/detail/json/<int:num>/', views.JsonDetailView.result,\n name='json_detail')\n", (1701, 1794), False, 'from django.urls import include, path\n'), ((1796, 1865), 'django.urls.path', 'path', (['"""vulnerability/search/"""', 'views.SearchView.search'], {'name': '"""search"""'}), "('vulnerability/search/', views.SearchView.search, name='search')\n", (1800, 1865), False, 'from django.urls import include, path\n'), ((2139, 2162), 'django.urls.include', 'include', (['"""tinymce.urls"""'], {}), "('tinymce.urls')\n", (2146, 2162), False, 'from django.urls import include, path\n')]
|
"""
Copyright (c) 2011, <NAME>.
License: MIT (see http://www.opensource.org/licenses/mit-license.php for details)
URL: http://www.gtsystem.eu/blog/2011/11/bottle-decorator-for-validate-query-parameters/
"""
from bottle import request
import functools
import inspect
def checkParams(**types):
def decorate(f):
farg, _, _, def_params = inspect.getargspec(f)
if def_params is None:
def_params = []
farg = farg[:len(farg) - len(def_params)]
param_info = [(par, ptype, par in farg)
for par, ptype in types.items()]
@functools.wraps(f)
def wrapper(*args, **kargs):
getparam = request.GET.get
for par, ptype, required in param_info:
value = getparam(par)
if not value: # None or empty str
if required:
error = "%s() requires the parameter %s" % (wrapper.__name__, par)
raise TypeError(error)
continue
try:
kargs[par] = ptype(value)
except:
error = "Cannot convert parameter %s to %s" % (
par, ptype.__name__)
raise ValueError(error)
return f(*args, **kargs)
return wrapper
return decorate
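# Illustrative usage (not part of the original snippet). Parameters whose name
# has a default in the wrapped function are optional; the rest are required:
#
#     from bottle import route
#
#     @route('/search')
#     @checkParams(q=str, page=int)
#     def search(q, page=1):
#         return {'q': q, 'page': page}
#
# /search?q=abc         -> q='abc', page keeps its default of 1
# /search?q=abc&page=2  -> page converted to int 2
# /search?page=2        -> TypeError: search() requires the parameter q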
|
[
"inspect.getargspec",
"functools.wraps"
] |
[((349, 370), 'inspect.getargspec', 'inspect.getargspec', (['f'], {}), '(f)\n', (367, 370), False, 'import inspect\n'), ((594, 612), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (609, 612), False, 'import functools\n')]
|
import datetime
import enum
import typing
import uuid

import pydantic

from .color import Color
from .number import Number
from .parent import DatabaseParents
from .rich_text import RichText


class NumberProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["number"] = "number"
    number: Number

    def get_value(self):
        return self.number.get_value()


class SelectOption(pydantic.BaseModel):
    id: str
    name: str
    color: Color

    def get_value(self):
        return self.color.value


class Select(pydantic.BaseModel):
    options: typing.List[SelectOption]

    def get_value(self):
        return [o.get_value() for o in self.options]


class SelectProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["select"] = "select"
    select: Select

    def get_value(self):
        return self.select.get_value()


class CreatedTimeProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["created_time"] = "created_time"
    created_time: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.created_time


class CreatedByProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["created_by"] = "created_by"
    created_by: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.created_by


class LastEditedTimeProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["last_edited_time"] = "last_edited_time"
    last_edited_time: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.last_edited_time


class LastEditedByProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["last_edited_by"] = "last_edited_by"
    last_edited_by: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.last_edited_by


class URLProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["url"] = "url"
    url: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.url


class TitleProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["title"] = "title"
    title: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.title


class RichTextProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["rich_text"] = "rich_text"
    rich_text: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.rich_text


class DateProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["date"] = "date"
    date: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.date


class FilesProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["files"] = "files"
    files: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.files


class PeopleProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["people"] = "people"
    people: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.people


class CheckboxProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["checkbox"] = "checkbox"
    checkbox: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.checkbox


class EmailProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["email"] = "email"
    email: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.email


class PhoneNumberProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["phone_number"] = "phone_number"
    phone_number: dict = pydantic.Field(default_factory=dict)

    def get_value(self):
        return self.phone_number


class MultiSelectOption(pydantic.BaseModel):
    id: str
    name: str
    color: Color

    def get_value(self):
        return self.color.value


class MultiSelectOptions(pydantic.BaseModel):
    options: typing.List[MultiSelectOption]

    def get_value(self):
        return [o.get_value() for o in self.options]


class MultiSelectProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["multi_select"] = "multi_select"
    multi_select: MultiSelectOptions

    def get_value(self):
        return self.multi_select.get_value()


class Formula(pydantic.BaseModel):
    expression: str

    def get_value(self):
        return self.expression


class FormulaProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["formula"] = "formula"
    formula: Formula

    def get_value(self):
        return self.formula.get_value()


class Rollup(pydantic.BaseModel):
    relation_property_name: str
    relation_property_id: str
    rollup_property_name: str
    rollup_property_id: str
    function: str  # TODO: change to an enum

    def get_value(self):
        return {
            "relation_property_name": self.relation_property_name,
            "relation_property_id": self.relation_property_id,
            "rollup_property_name": self.rollup_property_name,
            "rollup_property_id": self.rollup_property_id,
            "function": self.function,
        }


class RollupProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["rollup"] = "rollup"
    rollup: Rollup

    def get_value(self):
        return self.rollup.get_value()


class Relation(pydantic.BaseModel):
    database_id: uuid.UUID
    synced_property_name: typing.Optional[str]
    synced_property_id: typing.Optional[str]

    def get_value(self):
        return {
            "database_id": self.database_id,
            "synced_property_name": self.synced_property_name,
            "synced_property_id": self.synced_property_id,
        }


class RelationProperty(pydantic.BaseModel):
    id: str
    name: str
    type: typing.Literal["relation"] = "relation"
    relation: Relation

    def get_value(self):
        return self.relation.get_value()


Property = typing.Union[
    NumberProperty,
    SelectProperty,
    CreatedTimeProperty,
    URLProperty,
    TitleProperty,
    RichTextProperty,
    DateProperty,
    FilesProperty,
    PeopleProperty,
    CheckboxProperty,
    EmailProperty,
    PhoneNumberProperty,
    MultiSelectProperty,
    FormulaProperty,
    RollupProperty,
    CreatedByProperty,
    LastEditedTimeProperty,
    LastEditedByProperty,
    RelationProperty,
]
Properties = typing.Dict[str, Property]
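# Illustrative (not in the original module; assumes pydantic v1): the "type"
# Literal on each class lets pydantic pick the right member of the Property
# union when parsing raw API data, e.g.
#
#     prop = pydantic.parse_obj_as(
#         Property,
#         {"id": "abc", "name": "Done", "type": "checkbox", "checkbox": {}},
#     )
#     assert isinstance(prop, CheckboxProperty)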
class Database(pydantic.BaseModel):
    object: typing.Literal["database"] = "database"
    id: uuid.UUID
    created_time: datetime.datetime
    last_edited_time: datetime.datetime
    title: typing.List[RichText]
    parent: DatabaseParents
    properties: Properties
|
[
"pydantic.Field"
] |
[((1047, 1083), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1061, 1083), False, 'import pydantic\n'), ((1293, 1329), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1307, 1329), False, 'import pydantic\n'), ((1560, 1596), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1574, 1596), False, 'import pydantic\n'), ((1825, 1861), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1839, 1861), False, 'import pydantic\n'), ((2046, 2082), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2060, 2082), False, 'import pydantic\n'), ((2264, 2300), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2278, 2300), False, 'import pydantic\n'), ((2499, 2535), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2513, 2535), False, 'import pydantic\n'), ((2719, 2755), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2733, 2755), False, 'import pydantic\n'), ((2938, 2974), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2952, 2974), False, 'import pydantic\n'), ((3162, 3198), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (3176, 3198), False, 'import pydantic\n'), ((3395, 3431), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (3409, 3431), False, 'import pydantic\n'), ((3618, 3654), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (3632, 3654), False, 'import pydantic\n'), ((3865, 3901), 'pydantic.Field', 'pydantic.Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (3879, 3901), False, 'import pydantic\n')]
|
import uuid

from calm.dsl.builtins import Job, JobScheduler

start_date_time = "2050-10-08 16:17:15"
expiry_date_time = "2050-10-09 00:17:00"
cron = "15 1 32 * *"  # day-of-month 32 is deliberately out of range
time_zone = "America/Jamaica"

RUNBOOK_NAME = "invalid_cron_recurring"


class JobInvalidRecurringSpec(Job):
    """Recurring Job for Executing a Runbook with an invalid cron"""

    name = "test_job_invalid_cron_recurring_" + str(uuid.uuid4())[:8]
    schedule_info = JobScheduler.ScheduleInfo.recurring(
        cron, start_date_time, expiry_date_time, time_zone
    )
    executable = JobScheduler.Exec.runbook(RUNBOOK_NAME, False)
|
[
"calm.dsl.builtins.JobScheduler.Exec.runbook",
"uuid.uuid4",
"calm.dsl.builtins.JobScheduler.ScheduleInfo.recurring"
] |
[((430, 521), 'calm.dsl.builtins.JobScheduler.ScheduleInfo.recurring', 'JobScheduler.ScheduleInfo.recurring', (['cron', 'start_date_time', 'expiry_date_time', 'time_zone'], {}), '(cron, start_date_time, expiry_date_time,\n time_zone)\n', (465, 521), False, 'from calm.dsl.builtins import Job, JobScheduler\n'), ((549, 595), 'calm.dsl.builtins.JobScheduler.Exec.runbook', 'JobScheduler.Exec.runbook', (['RUNBOOK_NAME', '(False)'], {}), '(RUNBOOK_NAME, False)\n', (574, 595), False, 'from calm.dsl.builtins import Job, JobScheduler\n'), ((392, 404), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (402, 404), False, 'import uuid\n')]
|
import sys

from PyQt5.uic import loadUi
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, \
    QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem
from transformers import AutoTokenizer
import re
import emoji
from soynlp.normalizer import repeat_normalize

Height = 400
Width = 600

emojis = ''.join(emoji.UNICODE_EMOJI.keys())
pattern = re.compile(f'[^ .,?!/@$%~%·∼()\x00-\x7Fㄱ-ㅣ가-힣{emojis}]+')
url_pattern = re.compile(
    r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)')


def clean(x):
    x = pattern.sub(' ', x)
    x = url_pattern.sub('', x)
    x = x.strip()
    x = repeat_normalize(x, num_repeats=2)
    return x
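# Illustrative behaviour (not in the original source): clean() keeps ASCII,
# Korean and emoji, drops URLs, and collapses repeated-character runs to
# length 2, e.g.
#
#     clean("좋아요ㅋㅋㅋㅋ https://example.com")  # -> "좋아요ㅋㅋ"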
class SelectForm(QDialog):
    def __init__(self):
        super(SelectForm, self).__init__()
        loadUi('select-form.ui', self)
        self.selectFile.clicked.connect(self.select_file_clicked)

    def select_file_clicked(self):
        file_list = QFileDialog.getOpenFileName(self)
        self.open_process_form(file_list[0])

    def open_process_form(self, path):
        process_form = ProcessForm(path)
        widget.addWidget(process_form)
        widget.setCurrentIndex(widget.currentIndex() + 1)


class ProcessForm(QDialog):
    POS = 'T-POS'
    NEG = 'T-NEG'
    NEU = 'T-NEU'
    NATURAL = 'O'

    def __init__(self, path):
        super(ProcessForm, self).__init__()
        loadUi('process-form.ui', self)
        self.review_size = 0
        self.reviews = []
        self.original = []
        self.output = []
        self.cur_index = 0
        self.load_file(path)

        self.pbar = QProgressBar(self)
        self.pbar.setGeometry(650, 200, 300, 40)
        self.pbar.setMaximum(self.review_size - 1)
        self.pbar.setValue(self.cur_index)
        self.pbar.setFormat("%i/%d" % (self.pbar.value() + 1, self.pbar.maximum() + 1))

        self.tableWidget = QTableWidget(self)
        self.tableWidget.move(50, 50)
        self.tableWidget.resize(1500, 130)
        self.tableWidget.setRowCount(2)
        self.tableWidget.setColumnCount(len(self.reviews[0]))
        self.tableWidget.setSelectionMode(QAbstractItemView.SingleSelection)
        self.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.tableWidget.cellClicked.connect(self.__mycell_clicked)
        self.setTableWidgetData()

        prevBtn = QPushButton('Prev', self)
        prevBtn.move(500, 205)
        prevBtn.clicked.connect(self.getPrevReview)

        passBtn = QPushButton('Pass', self)
        passBtn.move(1450, 180)
        passBtn.clicked.connect(self.passReview)

        nextBtn = QPushButton('Next', self)
        nextBtn.move(1000, 205)
        nextBtn.clicked.connect(self.getNextReview)

        saveBtn = QPushButton('Save', self)
        saveBtn.move(1450, 300)
        saveBtn.clicked.connect(self.saveResult)

        self.setWindowTitle('Cap11 LabelingTool')
        self.resize(1600, 350)
        self.center()
        widget.setFixedHeight(350)
        widget.setFixedWidth(1600)

    def __mycell_clicked(self, row, col):
        # Clicking a cell cycles its label: O -> T-POS -> T-NEG -> O
        before = self.output[self.cur_index][col]
        if before == self.NATURAL:
            self.output[self.cur_index][col] = self.POS
        elif before == self.POS:
            self.output[self.cur_index][col] = self.NEG
        else:
            self.output[self.cur_index][col] = self.NATURAL
        self.setTableWidgetData()

    def getNextReview(self):
        self.tableWidget.scrollTo(self.tableWidget.model().index(0, 0))
        self.cur_index += 1
        self.cur_index = self.cur_index % self.review_size
        self.pbar.setFormat("%i/%d" % (self.cur_index + 1, self.pbar.maximum() + 1))
        self.setTableWidgetData()

    def getPrevReview(self):
        self.tableWidget.scrollTo(self.tableWidget.model().index(0, 0))
        self.cur_index -= 1
        self.cur_index = self.cur_index % self.review_size
        self.pbar.setFormat("%i/%d" % (self.cur_index + 1, self.pbar.maximum() + 1))
        self.setTableWidgetData()

    def passReview(self):
        del self.original[self.cur_index]
        del self.reviews[self.cur_index]
        del self.output[self.cur_index]
        self.review_size = len(self.reviews)
        self.pbar.setMaximum(self.review_size - 1)
        self.cur_index = self.cur_index % self.review_size
        self.pbar.setFormat("%i/%d" % (self.cur_index + 1, self.pbar.maximum() + 1))
        self.tableWidget.scrollTo(self.tableWidget.model().index(0, 0))
        self.setTableWidgetData()

    def saveResult(self):
        with open("./output.txt", 'w') as outputFile:
            for i in range(self.cur_index + 1):
                outputFile.write(self.original[i])
                outputFile.write('####')
                for label in range(len(self.output[i])):
                    outputFile.write("%s=%s" % (self.reviews[i][label], self.output[i][label]))
                outputFile.write('\n')

    def setTableWidgetData(self):
        self.tableWidget.setColumnCount(len(self.reviews[self.cur_index]))
        self.pbar.setValue(self.cur_index)
        for idx, word in enumerate(self.reviews[self.cur_index]):
            status = self.output[self.cur_index][idx]
            newItem = QTableWidgetItem(word)
            color = QtCore.Qt.white
            if status == self.NEU:
                color = QtCore.Qt.gray
            elif status == self.POS:
                color = QtCore.Qt.green
            elif status == self.NEG:
                color = QtCore.Qt.red
            newItem.setBackground(color)
            self.tableWidget.setItem(0, idx, newItem)
            self.tableWidget.setItem(1, idx, QTableWidgetItem(status))

    def center(self):
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        # Apply the centered geometry; the original computed qr but never
        # actually moved the window.
        self.move(qr.topLeft())

    def load_file(self, path):
        with open(path, 'r', encoding="utf-8-sig") as f:
            for line in f.readlines():
                line = clean(line.replace("\n", ""))
                words = tokenizer.tokenize(line)
                words = [word.replace("#", "") for word in words]
                self.original.append(line)
                self.reviews.append(words)
                self.output.append([self.NATURAL] * len(words))
        self.review_size = len(self.reviews)


tokenizer = AutoTokenizer.from_pretrained("./bert/")

app = QApplication(sys.argv)
select_form = SelectForm()
widget = QStackedWidget()
widget.addWidget(select_form)
widget.setFixedHeight(Height)
widget.setFixedWidth(Width)
widget.show()
sys.exit(app.exec_())
|
[
"PyQt5.QtWidgets.QProgressBar",
"PyQt5.QtWidgets.QDesktopWidget",
"PyQt5.QtWidgets.QTableWidget",
"soynlp.normalizer.repeat_normalize",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.uic.loadUi",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"transformers.AutoTokenizer.from_pretrained",
"PyQt5.QtWidgets.QTableWidgetItem",
"emoji.UNICODE_EMOJI.keys",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QStackedWidget",
"re.compile"
] |
[((440, 497), 're.compile', 're.compile', (["f'[^ .,?!/@$%~%·∼()\\x00-\\x7fㄱ-ㅣ가-힣{emojis}]+'"], {}), "(f'[^ .,?!/@$%~%·∼()\\x00-\\x7fㄱ-ㅣ가-힣{emojis}]+')\n", (450, 497), False, 'import re\n'), ((512, 644), 're.compile', 're.compile', (['"""https?:\\\\/\\\\/(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{1,256}\\\\.[a-zA-Z0-9()]{1,6}\\\\b([-a-zA-Z0-9()@:%_\\\\+.~#?&//=]*)"""'], {}), "(\n 'https?:\\\\/\\\\/(www\\\\.)?[-a-zA-Z0-9@:%._\\\\+~#=]{1,256}\\\\.[a-zA-Z0-9()]{1,6}\\\\b([-a-zA-Z0-9()@:%_\\\\+.~#?&//=]*)'\n )\n", (522, 644), False, 'import re\n'), ((6421, 6461), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""./bert/"""'], {}), "('./bert/')\n", (6450, 6461), False, 'from transformers import AutoTokenizer\n'), ((6468, 6490), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6480, 6490), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((6528, 6544), 'PyQt5.QtWidgets.QStackedWidget', 'QStackedWidget', ([], {}), '()\n', (6542, 6544), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((402, 428), 'emoji.UNICODE_EMOJI.keys', 'emoji.UNICODE_EMOJI.keys', ([], {}), '()\n', (426, 428), False, 'import emoji\n'), ((735, 769), 'soynlp.normalizer.repeat_normalize', 'repeat_normalize', (['x'], {'num_repeats': '(2)'}), '(x, num_repeats=2)\n', (751, 769), False, 'from soynlp.normalizer import repeat_normalize\n'), ((888, 918), 'PyQt5.uic.loadUi', 'loadUi', (['"""select-form.ui"""', 'self'], {}), "('select-form.ui', self)\n", (894, 918), False, 'from PyQt5.uic import loadUi\n'), ((1041, 1074), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self'], {}), '(self)\n', (1068, 1074), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((1483, 1514), 'PyQt5.uic.loadUi', 'loadUi', (['"""process-form.ui"""', 'self'], {}), "('process-form.ui', self)\n", (1489, 1514), False, 'from PyQt5.uic import loadUi\n'), ((1698, 1716), 'PyQt5.QtWidgets.QProgressBar', 'QProgressBar', (['self'], {}), '(self)\n', (1710, 1716), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((1975, 1993), 'PyQt5.QtWidgets.QTableWidget', 'QTableWidget', (['self'], {}), '(self)\n', (1987, 1993), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((2449, 2474), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Prev"""', 'self'], {}), "('Prev', self)\n", (2460, 2474), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((2576, 2601), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Pass"""', 'self'], {}), "('Pass', self)\n", (2587, 2601), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((2701, 2726), 
'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Next"""', 'self'], {}), "('Next', self)\n", (2712, 2726), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((2829, 2854), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Save"""', 'self'], {}), "('Save', self)\n", (2840, 2854), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((5320, 5342), 'PyQt5.QtWidgets.QTableWidgetItem', 'QTableWidgetItem', (['word'], {}), '(word)\n', (5336, 5342), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((5747, 5771), 'PyQt5.QtWidgets.QTableWidgetItem', 'QTableWidgetItem', (['status'], {}), '(status)\n', (5763, 5771), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n'), ((5843, 5859), 'PyQt5.QtWidgets.QDesktopWidget', 'QDesktopWidget', ([], {}), '()\n', (5857, 5859), False, 'from PyQt5.QtWidgets import QDialog, QApplication, QStackedWidget, QFileDialog, QProgressBar, QTableWidget, QAbstractItemView, QPushButton, QDesktopWidget, QTableWidgetItem\n')]
|
from keras.models import load_model
from sklearn.externals import joblib  # removed in modern scikit-learn; use the standalone 'joblib' package there
from keras.preprocessing.sequence import pad_sequences
import os

current_directory = os.getcwd()
file_name = "CNN__31_05_2020__20_33.h5"
tokenizer_name = "tokenizer_31_05_2020__20_45.pkl"
input_path = "\\".join([current_directory, "models", file_name])
tokenizer_path = "\\".join([current_directory, "transformer", tokenizer_name])

tokenizer = joblib.load(tokenizer_path)
model = load_model(input_path)

reviews = ["Posto non dei migliori, abbiamo trovato un sacco di polvere per terra, orrendo!",
           "Luogo al centro di Pisa, abbastanza carino e con personale gentile",
           "Personale scortese!"]

X = tokenizer.texts_to_sequences(reviews)
maxlen = 80
X = pad_sequences(X, padding='post', maxlen=maxlen)
result = model.predict_classes(X)

# 'review' rather than 'reviews', so the loop variable does not shadow the list
for review, predict in zip(reviews, result):
    if predict == 1:
        predict = 'POSITIVO'
    elif predict == 2:
        predict = 'NEGATIVO'
    else:
        predict = 'NEUTRO'
    print("{} --> {}".format(review, predict))
|
[
"os.getcwd",
"keras.models.load_model",
"keras.preprocessing.sequence.pad_sequences",
"sklearn.externals.joblib.load"
] |
[((160, 171), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (169, 171), False, 'import os\n'), ((420, 447), 'sklearn.externals.joblib.load', 'joblib.load', (['tokenizer_path'], {}), '(tokenizer_path)\n', (431, 447), False, 'from sklearn.externals import joblib\n'), ((456, 478), 'keras.models.load_model', 'load_model', (['input_path'], {}), '(input_path)\n', (466, 478), False, 'from keras.models import load_model\n'), ((749, 796), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X'], {'padding': '"""post"""', 'maxlen': 'maxlen'}), "(X, padding='post', maxlen=maxlen)\n", (762, 796), False, 'from keras.preprocessing.sequence import pad_sequences\n')]
|
#! /usr/bin/python3
from flask import abort, jsonify
from json import loads

carreras = ("Sistemas", "Derecho", "Actuaría", "Arquitectura", "Administración")
orden = ('nombre', 'primer_apellido', 'segundo_apellido', 'carrera', 'semestre', 'promedio', 'al_corriente')
campos = {'cuenta': (int, True), 'nombre': (str, True), 'primer_apellido': (str, True), 'segundo_apellido': (str, False), 'carrera': (str, True), 'semestre': (int, True), 'promedio': (float, True), 'al_corriente': (bool, True)}


def carga_base(ruta):
    # Note: eval() trusts the file contents completely; only use with a
    # database file the application itself wrote.
    with open(ruta, 'tr') as base:
        return eval(base.read())


def escribe_base(lista, ruta):
    with open(ruta, 'wt') as base:
        base.write(str(lista))


def busca_base(cuenta, base):
    for alumno in base:
        try:
            if alumno['cuenta'] == int(cuenta):
                return alumno
        except (KeyError, TypeError, ValueError):
            return False
    return False


def es_tipo(dato, tipo):
    if tipo == str:
        return True
    else:
        # Strict type check. The original `tipo(dato) is dato` compared object
        # identity, which only works because CPython happens to return the
        # same object from int()/float()/bool() on already-converted values.
        return type(dato) is tipo


def reglas(dato, campo):
    if campo == "carrera" and dato not in carreras:
        return False
    elif campo == "semestre" and dato < 1:
        return False
    elif campo == "promedio" and (dato < 0 or dato > 10):
        return False
    elif campo in ("nombre", "primer_apellido") and dato == "":
        return False
    else:
        return True


def valida(dato, campo):
    return es_tipo(dato, campos[campo][0]) and reglas(dato, campo)
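# Illustrative checks (not part of the original module):
#   valida(9.5, 'promedio')  -> True   (float within the 0..10 range)
#   valida(11.0, 'promedio') -> False  (out of range)
#   valida("", 'nombre')     -> False  (a required name may not be empty)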
def recurso_completo(base, ruta, cuenta, peticion):
    try:
        candidato = {'cuenta': int(cuenta)}
        peticion = loads(peticion)
        if set(peticion).issubset(set(orden)):
            for campo in orden:
                if not campos[campo][1] and campo not in peticion:
                    candidato[campo] = ''
                elif valida(peticion[campo], campo):
                    candidato[campo] = peticion[campo]
                else:
                    abort(400)
        else:
            abort(400)
    except Exception:
        abort(400)
    base.append(candidato)
    escribe_base(base, ruta)
    return jsonify(candidato)
|
[
"flask.jsonify",
"flask.abort",
"json.loads"
] |
[((2186, 2204), 'flask.jsonify', 'jsonify', (['candidato'], {}), '(candidato)\n', (2193, 2204), False, 'from flask import abort, jsonify\n'), ((1658, 1673), 'json.loads', 'loads', (['peticion'], {}), '(peticion)\n', (1663, 1673), False, 'from json import loads\n'), ((2077, 2087), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2082, 2087), False, 'from flask import abort, jsonify\n'), ((2108, 2118), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2113, 2118), False, 'from flask import abort, jsonify\n'), ((2040, 2050), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2045, 2050), False, 'from flask import abort, jsonify\n')]
|
import contextlib
import logging
from time import time_ns
from types import SimpleNamespace
import warnings

import ipywidgets as widgets
from IPython.display import display

import lightkurve_ext as lke
import lightkurve_ext_tls as lke_tls
import lightkurve_ext_pg as lke_pg


def _current_time_millis():
    return time_ns() / 1000000


def _flatten(lc, flatten_kwargs):
    if flatten_kwargs is None:
        return lc
    flatten_kwargs = flatten_kwargs.copy()
    window_length_in_days = flatten_kwargs.pop("window_length_in_days", None)
    if window_length_in_days is not None:
        window_length = lke.to_window_length_for_2min_cadence(window_length_in_days)
        flatten_kwargs["window_length"] = window_length
    return lc.flatten(**flatten_kwargs)


def _remove_fig_title(*ax_args):
    # Used to remove the extra title in %matplotlib widget mode.
    # The alternative would be to disable them globally, see
    # https://github.com/matplotlib/ipympl/issues/229#issuecomment-633430427
    for ax in ax_args:
        if ax is not None:
            ax.get_figure().canvas.header_visible = False
            ax.get_figure().canvas.footer_visible = False
            # ax.get_figure().canvas.toolbar_visible = False
            # ax.get_figure().canvas.resizable = False


def run_tls(
    lc, pg_kwargs={}, flatten_kwargs=None, plot_pg=True, plot_lc_model=True, plot_transit_depth=True, display_context=None
):
    if display_context is None:
        # note: nullcontext() requires Python 3.7
        ctx_validate, ctx_plot = contextlib.nullcontext(), contextlib.nullcontext()
    else:
        ctx_validate, ctx_plot = display_context["validate"], display_context["plot"]

    with ctx_validate:
        lc = lc.remove_nans().normalize()
        lc = _flatten(lc, flatten_kwargs)
        time_b = _current_time_millis()
        pg = lke_tls.TransitLeastSquaresPeriodogram.from_lightcurve(lc, **pg_kwargs)
        time_e = _current_time_millis()
        pg.elapsed_time = time_e - time_b
        lke_pg.validate_tls_n_report(pg)

    with ctx_plot:
        ax_pg = None
        if plot_pg:
            ax_pg = lke_pg.plot_pg_n_mark_max(pg)
            _remove_fig_title(ax_pg)
        ax_lc_model_1, ax_lc_model_2, ax_lc_model_f = None, None, None
        if plot_lc_model:
            ax_lc_model_1, ax_lc_model_2, ax_lc_model_f = lke_pg.plot_lc_with_model(lc, pg)
            _remove_fig_title(ax_lc_model_1, ax_lc_model_2, ax_lc_model_f)
        ax_tt_depth = None
        if plot_transit_depth:
            ax_tt_depth = lke_pg.errorbar_transit_depth(pg)
            _remove_fig_title(ax_tt_depth)

    return SimpleNamespace(
        pg=pg,
        lc=lc,
        ax_pg=ax_pg,
        ax_lc_model_1=ax_lc_model_1,
        ax_lc_model_2=ax_lc_model_2,
        ax_lc_model_f=ax_lc_model_f,
        ax_tt_depth=ax_tt_depth,
    )


def run_bls(
    lc,
    use_stellar_specific_search_grid=False,
    pg_kwargs={},
    flatten_kwargs=None,
    plot_pg=True,
    plot_lc_model=True,
    display_context=None,
):
    if display_context is None:
        ctx_validate, ctx_plot = contextlib.nullcontext(), contextlib.nullcontext()
    else:
        ctx_validate, ctx_plot = display_context["validate"], display_context["plot"]

    with ctx_validate:
        lc = lc.remove_nans().normalize()
        lc = _flatten(lc, flatten_kwargs)
        time_b = _current_time_millis()
        if use_stellar_specific_search_grid:
            pg = lke_tls.create_bls_pg_with_stellar_specific_search_grid(lc, **pg_kwargs)
        else:
            pg = lc.to_periodogram(method="bls", **pg_kwargs)
        time_e = _current_time_millis()
        pg.elapsed_time = time_e - time_b
        lke_pg.validate_bls_n_report(pg)

    with ctx_plot:
        ax_pg = None
        if plot_pg:
            ax_pg = lke_pg.plot_pg_n_mark_max(pg)
            _remove_fig_title(ax_pg)
        ax_lc_model_1, ax_lc_model_2, ax_lc_model_f = None, None, None
        if plot_lc_model:
            with warnings.catch_warnings():
                # avoid warnings about using max power values
                warnings.filterwarnings("ignore", message=".*Using.*")
                logger = logging.getLogger("lightkurve.periodogram")
                logger.setLevel(logging.ERROR)
                ax_lc_model_1, ax_lc_model_2, ax_lc_model_f = lke_pg.plot_lc_with_model(lc, pg)
            _remove_fig_title(ax_lc_model_1, ax_lc_model_2, ax_lc_model_f)
        ax_tt_depth = None
        # ax_tt_depth = lke_pg.errorbar_transit_depth(pg)  # bls has no info directly

    return SimpleNamespace(
        pg=pg,
        lc=lc,
        ax_pg=ax_pg,
        ax_lc_model_1=ax_lc_model_1,
        ax_lc_model_2=ax_lc_model_2,
        ax_lc_model_f=ax_lc_model_f,
        ax_tt_depth=ax_tt_depth,
    )


def run_bls_n_tls(
    lc,
    use_stellar_specific_search_grid_for_bls=False,
    plot_pg=True,
    plot_lc_model=True,
    plot_transit_depth=True,
    bls_pg_kwargs={},
    tls_pg_kwargs={},
):
    # Run TLS and BLS and have their results displayed side-by-side.
    #
    # For the matplotlib figures to be displayed inside the respective boxes
    # in Jupyter, the magic
    #     %matplotlib widget
    # is needed (requiring the ipympl package).
    #
    # Sometimes it crashes the browser (possibly too many interactive figures?!)
    out_bls_validate = widgets.Output(layout={"border": "0px solid lightgray"})
    out_bls_plot = widgets.Output(layout={"border": "0px solid lightgray"})
    out_tls_validate = widgets.Output(layout={"border": "0px solid lightgray"})
    out_tls_plot = widgets.Output(layout={"border": "0px solid lightgray"})
    ctr = widgets.GridBox(
        children=[out_bls_validate, out_tls_validate, out_bls_plot, out_tls_plot],
        layout=widgets.Layout(width="auto", grid_template_rows="auto", grid_template_columns="50% 50%", grid_gap="5px 10px"),
    )
    run_bls(
        lc,
        use_stellar_specific_search_grid=use_stellar_specific_search_grid_for_bls,
        pg_kwargs=bls_pg_kwargs,
        plot_pg=plot_pg,
        plot_lc_model=plot_lc_model,
        display_context=dict(validate=out_bls_validate, plot=out_bls_plot),
    )
    run_tls(
        lc,
        tls_pg_kwargs,
        plot_pg=plot_pg,
        plot_lc_model=plot_lc_model,
        plot_transit_depth=plot_transit_depth,
        display_context=dict(validate=out_tls_validate, plot=out_tls_plot),
    )
    # with out_bls:
    #     run_bls(lc, bls_pg_kwargs, plot_pg=plot_pg, plot_lc_model=plot_lc_model)
    # with out_tls:
    #     run_tls(lc, tls_pg_kwargs, plot_pg=plot_pg, plot_lc_model=plot_lc_model, plot_transit_depth=plot_transit_depth)
    return display(ctr)
|
[
"lightkurve_ext_pg.errorbar_transit_depth",
"lightkurve_ext_pg.validate_tls_n_report",
"lightkurve_ext_pg.validate_bls_n_report",
"lightkurve_ext_pg.plot_lc_with_model",
"warnings.filterwarnings",
"IPython.display.display",
"ipywidgets.Output",
"lightkurve_ext_pg.plot_pg_n_mark_max",
"lightkurve_ext_tls.create_bls_pg_with_stellar_specific_search_grid",
"warnings.catch_warnings",
"time.time_ns",
"lightkurve_ext_tls.TransitLeastSquaresPeriodogram.from_lightcurve",
"ipywidgets.Layout",
"contextlib.nullcontext",
"lightkurve_ext.to_window_length_for_2min_cadence",
"types.SimpleNamespace",
"logging.getLogger"
] |
[((2627, 2790), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'pg': 'pg', 'lc': 'lc', 'ax_pg': 'ax_pg', 'ax_lc_model_1': 'ax_lc_model_1', 'ax_lc_model_2': 'ax_lc_model_2', 'ax_lc_model_f': 'ax_lc_model_f', 'ax_tt_depth': 'ax_tt_depth'}), '(pg=pg, lc=lc, ax_pg=ax_pg, ax_lc_model_1=ax_lc_model_1,\n ax_lc_model_2=ax_lc_model_2, ax_lc_model_f=ax_lc_model_f, ax_tt_depth=\n ax_tt_depth)\n', (2642, 2790), False, 'from types import SimpleNamespace\n'), ((4560, 4723), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'pg': 'pg', 'lc': 'lc', 'ax_pg': 'ax_pg', 'ax_lc_model_1': 'ax_lc_model_1', 'ax_lc_model_2': 'ax_lc_model_2', 'ax_lc_model_f': 'ax_lc_model_f', 'ax_tt_depth': 'ax_tt_depth'}), '(pg=pg, lc=lc, ax_pg=ax_pg, ax_lc_model_1=ax_lc_model_1,\n ax_lc_model_2=ax_lc_model_2, ax_lc_model_f=ax_lc_model_f, ax_tt_depth=\n ax_tt_depth)\n', (4575, 4723), False, 'from types import SimpleNamespace\n'), ((5327, 5383), 'ipywidgets.Output', 'widgets.Output', ([], {'layout': "{'border': '0px solid lightgray'}"}), "(layout={'border': '0px solid lightgray'})\n", (5341, 5383), True, 'import ipywidgets as widgets\n'), ((5403, 5459), 'ipywidgets.Output', 'widgets.Output', ([], {'layout': "{'border': '0px solid lightgray'}"}), "(layout={'border': '0px solid lightgray'})\n", (5417, 5459), True, 'import ipywidgets as widgets\n'), ((5483, 5539), 'ipywidgets.Output', 'widgets.Output', ([], {'layout': "{'border': '0px solid lightgray'}"}), "(layout={'border': '0px solid lightgray'})\n", (5497, 5539), True, 'import ipywidgets as widgets\n'), ((5559, 5615), 'ipywidgets.Output', 'widgets.Output', ([], {'layout': "{'border': '0px solid lightgray'}"}), "(layout={'border': '0px solid lightgray'})\n", (5573, 5615), True, 'import ipywidgets as widgets\n'), ((6641, 6653), 'IPython.display.display', 'display', (['ctr'], {}), '(ctr)\n', (6648, 6653), False, 'from IPython.display import display\n'), ((318, 327), 'time.time_ns', 'time_ns', ([], {}), '()\n', (325, 327), False, 'from time import time_ns\n'), ((610, 670), 'lightkurve_ext.to_window_length_for_2min_cadence', 'lke.to_window_length_for_2min_cadence', (['window_length_in_days'], {}), '(window_length_in_days)\n', (647, 670), True, 'import lightkurve_ext as lke\n'), ((1844, 1915), 'lightkurve_ext_tls.TransitLeastSquaresPeriodogram.from_lightcurve', 'lke_tls.TransitLeastSquaresPeriodogram.from_lightcurve', (['lc'], {}), '(lc, **pg_kwargs)\n', (1898, 1915), True, 'import lightkurve_ext_tls as lke_tls\n'), ((2007, 2039), 'lightkurve_ext_pg.validate_tls_n_report', 'lke_pg.validate_tls_n_report', (['pg'], {}), '(pg)\n', (2035, 2039), True, 'import lightkurve_ext_pg as lke_pg\n'), ((3688, 3720), 'lightkurve_ext_pg.validate_bls_n_report', 'lke_pg.validate_bls_n_report', (['pg'], {}), '(pg)\n', (3716, 3720), True, 'import lightkurve_ext_pg as lke_pg\n'), ((1536, 1560), 'contextlib.nullcontext', 'contextlib.nullcontext', ([], {}), '()\n', (1558, 1560), False, 'import contextlib\n'), ((1562, 1586), 'contextlib.nullcontext', 'contextlib.nullcontext', ([], {}), '()\n', (1584, 1586), False, 'import contextlib\n'), ((2121, 2150), 'lightkurve_ext_pg.plot_pg_n_mark_max', 'lke_pg.plot_pg_n_mark_max', (['pg'], {}), '(pg)\n', (2146, 2150), True, 'import lightkurve_ext_pg as lke_pg\n'), ((2344, 2377), 'lightkurve_ext_pg.plot_lc_with_model', 'lke_pg.plot_lc_with_model', (['lc', 'pg'], {}), '(lc, pg)\n', (2369, 2377), True, 'import lightkurve_ext_pg as lke_pg\n'), ((2538, 2571), 'lightkurve_ext_pg.errorbar_transit_depth', 'lke_pg.errorbar_transit_depth', (['pg'], {}), '(pg)\n', (2567, 2571), True, 'import lightkurve_ext_pg as lke_pg\n'), ((3091, 3115), 'contextlib.nullcontext', 'contextlib.nullcontext', ([], {}), '()\n', (3113, 3115), False, 'import contextlib\n'), ((3117, 3141), 'contextlib.nullcontext', 'contextlib.nullcontext', ([], {}), '()\n', (3139, 3141), False, 'import contextlib\n'), ((3448, 3520), 'lightkurve_ext_tls.create_bls_pg_with_stellar_specific_search_grid', 'lke_tls.create_bls_pg_with_stellar_specific_search_grid', (['lc'], {}), '(lc, **pg_kwargs)\n', (3503, 3520), True, 'import lightkurve_ext_tls as lke_tls\n'), ((3802, 3831), 'lightkurve_ext_pg.plot_pg_n_mark_max', 'lke_pg.plot_pg_n_mark_max', (['pg'], {}), '(pg)\n', (3827, 3831), True, 'import lightkurve_ext_pg as lke_pg\n'), ((5741, 5854), 'ipywidgets.Layout', 'widgets.Layout', ([], {'width': '"""auto"""', 'grid_template_rows': '"""auto"""', 'grid_template_columns': '"""50% 50%"""', 'grid_gap': '"""5px 10px"""'}), "(width='auto', grid_template_rows='auto',\n grid_template_columns='50% 50%', grid_gap='5px 10px')\n", (5755, 5854), True, 'import ipywidgets as widgets\n'), ((3984, 4009), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4007, 4009), False, 'import warnings\n'), ((4089, 4143), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '""".*Using.*"""'}), "('ignore', message='.*Using.*')\n", (4112, 4143), False, 'import warnings\n'), ((4169, 4212), 'logging.getLogger', 'logging.getLogger', (['"""lightkurve.periodogram"""'], {}), "('lightkurve.periodogram')\n", (4186, 4212), False, 'import logging\n'), ((4322, 4355), 'lightkurve_ext_pg.plot_lc_with_model', 'lke_pg.plot_lc_with_model', (['lc', 'pg'], {}), '(lc, pg)\n', (4347, 4355), True, 'import lightkurve_ext_pg as lke_pg\n')]
|
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def start_action(payload, channels, ws):
# extract all of the data that we need
channel = payload.get("channel")
action = payload.get("action")
rate = payload.get("rate")
cutoff_voltage = payload.get("cutoffVoltage")
# start the relevant action
if action == "charge":
log.info("Starting CHARGE from startAction command.")
elif action == "discharge":
channels[channel-1].start_discharge()
elif action == "dcResistance":
log.info("Starting DC RESISTANCE from startAction command.")
def stop_action(payload, channels, ws):
# extract all of the data that we need
channel = payload.get("channel")
channels[channel-1].stop_action()
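# Hedged usage sketch (not part of the original module; "DemoChannel" is a
# hypothetical stand-in for the real channel controller). Note that channel
# numbers in the payload are 1-indexed and only "discharge" is wired up above.
# class DemoChannel:
#     def start_discharge(self): log.info("discharging")
#     def stop_action(self): log.info("stopped")
# channels = [DemoChannel()]
# start_action({"channel": 1, "action": "discharge",
#               "rate": 0.5, "cutoffVoltage": 3.0}, channels, ws=None)
# stop_action({"channel": 1}, channels, ws=None)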
|
[
"logging.getLogger"
] |
[((22, 49), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (39, 49), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
"""S3 utils."""
import gzip
from typing import Optional
import boto3
def get_s3(aws_access_key, aws_secret_access_key):
"""Get S3 connections."""
s3 = boto3.client('s3', aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key)
return s3
def get_s3_resource(aws_access_key, aws_secret_access_key):
"""Get S3 resource."""
s3_resource = boto3.resource('s3', aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key)
return s3_resource
def write_s3(env: str, source_system: str, source_subsystem: str, object_name: str, s3,
s3_bucket: str, content, date_valid: str, page: int = 1,
extension: str = 'json', compress: bool = True, s3_path: Optional[str] = None):
"""Write to S3."""
s3_path = s3_path or ('{env}/{source_system}/{source_subsystem}/'
'{object_name}{date_valid}/data-{page}.{extension}')
if compress:
content = gzip.compress(content.encode('utf-8'))
extension = extension + '.gz'
s3_key = s3_path.format(
env=env,
source_system=source_system,
source_subsystem=source_subsystem,
object_name=object_name,
date_valid='/{0}'.format(date_valid) if date_valid else '',
page=page,
extension=extension
)
print('Writing to {}'.format(s3_key))
s3.put_object(Body=content, Bucket=s3_bucket, Key=s3_key)
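# Hedged worked example (argument values are illustrative, not from the
# source): with the default template above,
#   write_s3('prod', 'shop', 'ebay', 'orders', s3, 'my-bucket', '{"a": 1}',
#            date_valid='2020-01-01')
# compresses the body and writes it under the key
#   prod/shop/ebay/orders/2020-01-01/data-1.json.gz
# while an empty date_valid drops the '/2020-01-01' segment entirely.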
|
[
"boto3.resource",
"boto3.client"
] |
[((187, 289), 'boto3.client', 'boto3.client', (['"""s3"""'], {'aws_access_key_id': 'aws_access_key', 'aws_secret_access_key': 'aws_secret_access_key'}), "('s3', aws_access_key_id=aws_access_key, aws_secret_access_key=\n aws_secret_access_key)\n", (199, 289), False, 'import boto3\n'), ((428, 531), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {'aws_access_key_id': 'aws_access_key', 'aws_secret_access_key': 'aws_secret_access_key'}), "('s3', aws_access_key_id=aws_access_key,\n aws_secret_access_key=aws_secret_access_key)\n", (442, 531), False, 'import boto3\n')]
|
#! python
#-*- coding: utf-8 -*-
import requests
import json
class Vendo:
def __init__(self, url_api):
self.setHeader({'Content-Type' : 'application/json', "Content-Length" : "length"})
self.setApi(url_api)
def setApi(self,api_url):
self.API_URL = api_url
def setHeader(self, api_header):
self.API_HEADER = api_header
def getJson(self,request_url, request_data):
req_url = self.API_URL + request_url
json_data = requests.post(req_url, json=request_data, headers=self.API_HEADER)
return json_data.json()
def logInApi(self, api_login, api_pswd):
jsonData = self.getJson(
"/json/reply/Autoryzacja_Zaloguj",
{"Model":{"Login":api_login,"Haslo":api_pswd}})
self.VENDO_TOKEN = jsonData["Wynik"]["Token"]
def logOutApi(self):
jsonData = self.getJson(
"/json/reply/Autoryzacja_Wyloguj",
{"Token":self.VENDO_TOKEN})
def loginUser(self,user_login, user_pswd):
jsonData = self.getJson(
"/json/reply/Autoryzacja_ZalogujUzytkownikaVendo",
{"Token":self.VENDO_TOKEN,"Model":{"Login":user_login,"Haslo":user_pswd}})
self.USER_TOKEN = jsonData["Wynik"]["Token"]
def logOutUser(self):
jsonData = self.getJson(
"/json/reply/WylogujUzytkownikaVendo",
{"Token": self.USER_TOKEN})
|
[
"requests.post"
] |
[((482, 548), 'requests.post', 'requests.post', (['req_url'], {'json': 'request_data', 'headers': 'self.API_HEADER'}), '(req_url, json=request_data, headers=self.API_HEADER)\n', (495, 548), False, 'import requests\n')]
|
"""
Http Server for our API
"""
from flask import Flask, jsonify, request
from controller import product_controller
app = Flask(__name__)
def serialize(products):
return list(map(lambda p: p.serialize(), products))
@app.route("/product/<name>")
def get_product(name: str):
return jsonify({"product": product_controller.get_by_name(name).serialize()})
@app.route("/products")
def get_all_products():
return jsonify({"products": serialize(product_controller.get())})
@app.route("/product", methods=["POST"])
def insert_product():
name = request.json.get("name")
desc = request.json.get("desc")
value = float(request.json.get("value"))
p = product_controller.save(name, desc, value)
return jsonify({"product": p.serialize()})
@app.route("/update_product/<p_name>", methods=["PUT"])
def update_product(p_name: str):
name = request.json.get("name")
desc = request.json.get("desc")
value = request.json.get("value")
return jsonify(
{
"product": product_controller.change(
p_name, name=name, desc=desc, value=value
).serialize()
}
)
@app.route("/delete_product/<name>", methods=["DELETE"])
def delete_product(name: str):
return jsonify({"product_deleted": product_controller.delete_by_id(id).serialize()})
if __name__ == "__main__":
app.run()
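# Hedged usage sketch via Flask's test client (request shapes follow the
# routes above; the product values are illustrative):
# with app.test_client() as client:
#     client.post("/product", json={"name": "pen", "desc": "blue ink", "value": 1.5})
#     client.get("/products")
#     client.put("/update_product/pen", json={"name": "pen", "desc": "black ink", "value": 2.0})
#     client.delete("/delete_product/pen")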
|
[
"controller.product_controller.change",
"controller.product_controller.get",
"controller.product_controller.delete_by_id",
"flask.Flask",
"controller.product_controller.save",
"flask.request.json.get",
"controller.product_controller.get_by_name"
] |
[((125, 140), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (130, 140), False, 'from flask import Flask, jsonify, request\n'), ((562, 586), 'flask.request.json.get', 'request.json.get', (['"""name"""'], {}), "('name')\n", (578, 586), False, 'from flask import Flask, jsonify, request\n'), ((598, 622), 'flask.request.json.get', 'request.json.get', (['"""desc"""'], {}), "('desc')\n", (614, 622), False, 'from flask import Flask, jsonify, request\n'), ((677, 719), 'controller.product_controller.save', 'product_controller.save', (['name', 'desc', 'value'], {}), '(name, desc, value)\n', (700, 719), False, 'from controller import product_controller\n'), ((869, 893), 'flask.request.json.get', 'request.json.get', (['"""name"""'], {}), "('name')\n", (885, 893), False, 'from flask import Flask, jsonify, request\n'), ((905, 929), 'flask.request.json.get', 'request.json.get', (['"""desc"""'], {}), "('desc')\n", (921, 929), False, 'from flask import Flask, jsonify, request\n'), ((942, 967), 'flask.request.json.get', 'request.json.get', (['"""value"""'], {}), "('value')\n", (958, 967), False, 'from flask import Flask, jsonify, request\n'), ((641, 666), 'flask.request.json.get', 'request.json.get', (['"""value"""'], {}), "('value')\n", (657, 666), False, 'from flask import Flask, jsonify, request\n'), ((458, 482), 'controller.product_controller.get', 'product_controller.get', ([], {}), '()\n', (480, 482), False, 'from controller import product_controller\n'), ((315, 351), 'controller.product_controller.get_by_name', 'product_controller.get_by_name', (['name'], {}), '(name)\n', (345, 351), False, 'from controller import product_controller\n'), ((1022, 1090), 'controller.product_controller.change', 'product_controller.change', (['p_name'], {'name': 'name', 'desc': 'desc', 'value': 'value'}), '(p_name, name=name, desc=desc, value=value)\n', (1047, 1090), False, 'from controller import product_controller\n'), ((1278, 1313), 'controller.product_controller.delete_by_id', 'product_controller.delete_by_id', (['id'], {}), '(id)\n', (1309, 1313), False, 'from controller import product_controller\n')]
|
from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import torch
import time
import numpy as np
import torch.nn as nn
import random
import copy
import math
CEloss = nn.CrossEntropyLoss()
def loss_calculation(semantic, target):
bs = semantic.size()[0]
pix_num = 480 * 640
target = target.view(bs, -1).view(-1).contiguous()
semantic = semantic.view(bs, 2, pix_num).transpose(1, 2).contiguous().view(bs * pix_num, 2).contiguous()
semantic_loss = CEloss(semantic, target)
return semantic_loss
class Loss(_Loss):
def __init__(self):
super(Loss, self).__init__(True)
def forward(self, semantic, target):
return loss_calculation(semantic, target)
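# Hedged usage sketch (shapes inferred from the hard-coded 480*640 pixel count
# above, not stated explicitly by the author): per-pixel logits over 2 classes.
if __name__ == "__main__":
    example_semantic = torch.randn(2, 2, 480, 640)         # (bs, classes, H, W)
    example_target = torch.randint(0, 2, (2, 480, 640))  # (bs, H, W) labels
    print(Loss()(example_semantic, example_target))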
|
[
"torch.nn.CrossEntropyLoss"
] |
[((200, 221), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (219, 221), True, 'import torch.nn as nn\n')]
|
from hdlConvertorAst.hdlAst import HdlOp, HdlValueId, HdlFunctionDef, HdlOpType
from hdlConvertorAst.to.hdl_ast_modifier import HdlAstModifier
from hdlConvertorAst.translate.verilog_to_basic_hdl_sim_model.utils import hdl_call
class AddCallOperatorForCallWithoutParenthesis(HdlAstModifier):
"""
    A Verilog function call does not need to have (); the function can be called just by its id.
    To simplify handling, we decorate each such call with a call operator in this transformation.
"""
def __init__(self):
HdlAstModifier.__init__(self)
self._parentExpr = None
def visit_iHdlExpr(self, o):
"""
:type o: iHdlExpr
:return: iHdlExpr
"""
if isinstance(o, HdlOp):
prev_par_expr = self._parentExpr
self._parentExpr = o
try:
self.visit_HdlOp(o)
finally:
self._parentExpr = prev_par_expr
else:
if isinstance(o, HdlValueId) and\
isinstance(o.obj, HdlFunctionDef) and \
( not isinstance(self._parentExpr, HdlOp) or \
self._parentExpr.fn != HdlOpType.CALL or \
self._parentExpr.ops[0] is not o
):
# wrap function id in a call operator if parent is not a call operator
return hdl_call(o, [])
return o
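# Hedged illustration (the Verilog fragment is mine, not from the source):
# given a zero-argument function "f", a bare use such as "assign y = f;" is
# treated as if it were "assign y = f();" by wrapping the HdlValueId in
# hdl_call.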
|
[
"hdlConvertorAst.to.hdl_ast_modifier.HdlAstModifier.__init__",
"hdlConvertorAst.translate.verilog_to_basic_hdl_sim_model.utils.hdl_call"
] |
[((529, 558), 'hdlConvertorAst.to.hdl_ast_modifier.HdlAstModifier.__init__', 'HdlAstModifier.__init__', (['self'], {}), '(self)\n', (552, 558), False, 'from hdlConvertorAst.to.hdl_ast_modifier import HdlAstModifier\n'), ((1377, 1392), 'hdlConvertorAst.translate.verilog_to_basic_hdl_sim_model.utils.hdl_call', 'hdl_call', (['o', '[]'], {}), '(o, [])\n', (1385, 1392), False, 'from hdlConvertorAst.translate.verilog_to_basic_hdl_sim_model.utils import hdl_call\n')]
|
import copy
import itertools
input = """###..#..
.#######
#####...
#..##.#.
###..##.
##...#..
..#...#.
.#....##"""
# input = """.#.
# ..#
# ###"""
cycles_count = 6
def step(world):
size = len(world[0][0])
new_size = size + 2
new_world = copy.deepcopy(world)
# RESIZE PART:
# Add new planes and empty world to make sure we have enough canvas to draw on:
new_world.append([[['.'] * size] * size] * size)
new_world.insert(0, [[['.'] * size] * size] * size)
for z, cube in enumerate(new_world):
cube.append([['.'] * size] * size)
cube.insert(0, [['.'] * size] * size)
for i, plane in enumerate(cube):
new_plane = [['.'] * new_size]
for line in plane:
new_plane += [['.'] + line + ['.']]
new_plane += [['.'] * new_size]
cube[i] = new_plane
# Now we have enough room to grow, actually grow:
directions = list(itertools.product((-1, 0, 1), repeat=4))
directions.remove((0, 0, 0, 0))
newer_world = copy.deepcopy(new_world)
for w, cube in enumerate(new_world):
for z, plane in enumerate(cube):
for y, line in enumerate(plane):
for x, cell in enumerate(line):
n_count = 0
for dz, dy, dx, dw in directions:
try:
friend = new_world[w + dw][z + dz][y + dy][x + dx]
if friend == "#":
n_count += 1
except IndexError:
pass
if cell == '.' and n_count == 3:
newer_world[w][z][y][x] = '#'
elif cell == '#' and n_count not in (2, 3):
newer_world[w][z][y][x] = '.'
return newer_world
def print_world(world):
for w, cube in enumerate(world):
for i, z in enumerate(cube):
print("z=%s" % i, ' w=%s' % w)
for y in z:
print("".join(y))
print()
cur_world = []
for line in input.split('\n'):
cur_line = [i for i in line]
cur_world.append(cur_line)
cur_world = [cur_world]
cur_world = [cur_world]
for i in range(cycles_count):
print("Cycle:", i)
# print_world(cur_world)
cur_world = step(cur_world)
alive = 0
for cube in cur_world:
for plane in cube :
for line in plane:
alive += line.count('#')
print("Alive:", alive)
|
[
"copy.deepcopy",
"itertools.product"
] |
[((255, 275), 'copy.deepcopy', 'copy.deepcopy', (['world'], {}), '(world)\n', (268, 275), False, 'import copy\n'), ((1039, 1063), 'copy.deepcopy', 'copy.deepcopy', (['new_world'], {}), '(new_world)\n', (1052, 1063), False, 'import copy\n'), ((943, 982), 'itertools.product', 'itertools.product', (['(-1, 0, 1)'], {'repeat': '(4)'}), '((-1, 0, 1), repeat=4)\n', (960, 982), False, 'import itertools\n')]
|
from django.contrib import admin
from tweets.models import Tweet, Comment, Likes
admin.site.register(Tweet)
admin.site.register(Comment)
admin.site.register(Likes)
|
[
"django.contrib.admin.site.register"
] |
[((82, 108), 'django.contrib.admin.site.register', 'admin.site.register', (['Tweet'], {}), '(Tweet)\n', (101, 108), False, 'from django.contrib import admin\n'), ((109, 137), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment'], {}), '(Comment)\n', (128, 137), False, 'from django.contrib import admin\n'), ((138, 164), 'django.contrib.admin.site.register', 'admin.site.register', (['Likes'], {}), '(Likes)\n', (157, 164), False, 'from django.contrib import admin\n')]
|
"""
This module creates a scatterplot of a specified team's shot attempt rates versus the league median, from down 3 to up 3.
"""
import matplotlib.pyplot as plt
import math
import pandas as pd
import scrapenhl2.scrape.team_info as team_info
import scrapenhl2.manipulate.manipulate as manip
import scrapenhl2.plot.visualization_helper as vhelper
def team_score_shot_rate_parallel(team, startseason, endseason=None, save_file=None):
"""
:param team:
:param startseason:
:param endseason:
:param save_file:
:return:
"""
if endseason is None:
endseason = startseason
df = pd.concat([manip.team_5v5_shot_rates_by_score(season) for season in range(startseason, endseason + 1)])
df.loc[:, 'ScoreState'] = df.ScoreState.apply(lambda x: max(min(3, x), -3)) # reduce to +/- 3
df = df.drop('Game', axis=1) \
.groupby(['Team', 'ScoreState'], as_index=False) \
.sum()
df.loc[:, 'CF%'] = df.CF / (df.CF + df.CA)
df = df[['Team', 'ScoreState', 'CF%']] \
.sort_values('ScoreState')
statelabels = {x: 'Lead{0:d}'.format(x) if x >= 1 else 'Trail{0:d}'.format(abs(x)) for x in range(-3, 4)}
statelabels[0] = 'Tied'
df.loc[:, 'ScoreState'] = df.ScoreState.apply(lambda x: statelabels[x])
# Go to wide
df = df.pivot_table(index='Team', columns='ScoreState', values='CF%').reset_index()
# Reorder columns
df = df[['Team', 'Trail3', 'Trail2', 'Trail1', 'Tied', 'Lead1', 'Lead2', 'Lead3']]
# Teams to strings
df.loc[:, 'Team'] = df.Team.apply(team_info.team_as_str)
# filter for own team
teamdf = df.query('Team == "{0:s}"'.format(team_info.team_as_str(team)))
# Make parallel coords
vhelper.parallel_coords(df, teamdf, 'Team')
# Set yticklabels
ys = (0.4, 0.5, 0.6)
plt.yticks(ys, ['{0:d}%'.format(int(y * 100)) for y in ys])
plt.ylim(0.35, 0.65)
plt.title(_team_score_shot_rate_parallel_title(team, startseason, endseason))
for direction in ['right', 'top', 'bottom', 'left']:
plt.gca().spines[direction].set_visible(False)
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
def team_score_shot_rate_scatter(team, startseason, endseason=None, save_file=None):
"""
:param team: str or int, team
:param startseason: int, the starting season (inclusive)
:param endseason: int, the ending season (inclusive)
:return: nothing
"""
if endseason is None:
endseason = startseason
df = pd.concat([manip.team_5v5_shot_rates_by_score(season) for season in range(startseason, endseason + 1)])
df.loc[:, 'ScoreState'] = df.ScoreState.apply(lambda x: max(min(3, x), -3)) # reduce to +/- 3
df = df.drop('Game', axis=1) \
.groupby(['Team', 'ScoreState'], as_index=False) \
.sum()
df.loc[:, 'CF60'] = df.CF * 3600 / df.Secs
df.loc[:, 'CA60'] = df.CA * 3600 / df.Secs
# get medians
medians = df[['ScoreState', 'CF60', 'CA60', 'Secs']].groupby('ScoreState', as_index=False).median()
# filter for own team
teamdf = df.query('Team == {0:d}'.format(int(team_info.team_as_id(team))))
statelabels = {x: 'Lead {0:d}'.format(x) if x >= 1 else 'Trail {0:d}'.format(abs(x)) for x in range(-3, 4)}
statelabels[0] = 'Tied'
for state in range(-3, 4):
teamxy = teamdf.query('ScoreState == {0:d}'.format(state))
teamx = teamxy.CF60.iloc[0]
teamy = teamxy.CA60.iloc[0]
leaguexy = medians.query('ScoreState == {0:d}'.format(state))
leaguex = leaguexy.CF60.iloc[0]
leaguey = leaguexy.CA60.iloc[0]
midx = (leaguex + teamx) / 2
midy = (leaguey + teamy) / 2
rot = _calculate_label_rotation(leaguex, leaguey, teamx, teamy)
plt.annotate('', xy=(teamx, teamy), xytext=(leaguex, leaguey), xycoords='data',
arrowprops={'arrowstyle': '-|>'})
plt.annotate(statelabels[state], xy=(midx, midy), ha="center", va="center", xycoords='data', size=8,
rotation=rot, bbox=dict(boxstyle="round", fc="w", alpha=0.9))
plt.scatter(medians.CF60.values, medians.CA60.values, s=100, color='w')
plt.scatter(teamdf.CF60.values, teamdf.CA60.values, s=100, color='w')
#bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
#plt.annotate('Fast', xy=(0.95, 0.95), xycoords='axes fraction', bbox=bbox_props, ha='center', va='center')
#plt.annotate('Slow', xy=(0.05, 0.05), xycoords='axes fraction', bbox=bbox_props, ha='center', va='center')
#plt.annotate('Good', xy=(0.95, 0.05), xycoords='axes fraction', bbox=bbox_props, ha='center', va='center')
#plt.annotate('Bad', xy=(0.05, 0.95), xycoords='axes fraction', bbox=bbox_props, ha='center', va='center')
vhelper.add_good_bad_fast_slow()
plt.xlabel('CF60')
plt.ylabel('CA60')
plt.title(_team_score_shot_rate_scatter_title(team, startseason, endseason))
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
def _team_score_shot_rate_scatter_title(team, startseason, endseason):
"""
:param team:
:param startseason:
:param endseason:
:return:
"""
return '{0:s} shot rate by score state, {1:s} to {2:s}'.format(team_info.team_as_str(team),
*vhelper.get_startdate_enddate_from_kwargs(
startseason=startseason,
endseason=endseason))
def _team_score_shot_rate_parallel_title(team, startseason, endseason):
"""
:param team:
:param startseason:
:param endseason:
:return:
"""
return '{0:s} CF% by score state\n{1:s} to {2:s}'.format(team_info.team_as_str(team),
*vhelper.get_startdate_enddate_from_kwargs(
startseason=startseason,
endseason=endseason))
def _calculate_label_rotation(startx, starty, endx, endy):
"""
    Calculates the appropriate rotation angle for a label on an arrow (matches the line's slope and stays between -90 and 90 degrees)
:param startx: start of arrow (x)
:param starty: start of arrow (y)
:param endx: end of arrow (x)
:param endy: end of arrow (y)
:return: rotation angle.
"""
return math.degrees(math.atan((endy - starty)/(endx - startx)))
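# Hedged usage sketch (team and seasons are illustrative; scrapenhl2 refers to
# a season by its starting year, which is an assumption here):
# team_score_shot_rate_scatter('WSH', 2016, 2017)
# team_score_shot_rate_parallel('WSH', 2016, 2017, save_file='wsh_cf_pct.png')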
|
[
"math.atan",
"matplotlib.pyplot.show",
"scrapenhl2.scrape.team_info.team_as_str",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"scrapenhl2.scrape.team_info.team_as_id",
"scrapenhl2.plot.visualization_helper.parallel_coords",
"scrapenhl2.plot.visualization_helper.add_good_bad_fast_slow",
"scrapenhl2.manipulate.manipulate.team_5v5_shot_rates_by_score",
"matplotlib.pyplot.gca",
"scrapenhl2.plot.visualization_helper.get_startdate_enddate_from_kwargs",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1707, 1750), 'scrapenhl2.plot.visualization_helper.parallel_coords', 'vhelper.parallel_coords', (['df', 'teamdf', '"""Team"""'], {}), "(df, teamdf, 'Team')\n", (1730, 1750), True, 'import scrapenhl2.plot.visualization_helper as vhelper\n'), ((1867, 1887), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.35)', '(0.65)'], {}), '(0.35, 0.65)\n', (1875, 1887), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4176), 'matplotlib.pyplot.scatter', 'plt.scatter', (['medians.CF60.values', 'medians.CA60.values'], {'s': '(100)', 'color': '"""w"""'}), "(medians.CF60.values, medians.CA60.values, s=100, color='w')\n", (4116, 4176), True, 'import matplotlib.pyplot as plt\n'), ((4181, 4250), 'matplotlib.pyplot.scatter', 'plt.scatter', (['teamdf.CF60.values', 'teamdf.CA60.values'], {'s': '(100)', 'color': '"""w"""'}), "(teamdf.CF60.values, teamdf.CA60.values, s=100, color='w')\n", (4192, 4250), True, 'import matplotlib.pyplot as plt\n'), ((4773, 4805), 'scrapenhl2.plot.visualization_helper.add_good_bad_fast_slow', 'vhelper.add_good_bad_fast_slow', ([], {}), '()\n', (4803, 4805), True, 'import scrapenhl2.plot.visualization_helper as vhelper\n'), ((4811, 4829), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""CF60"""'], {}), "('CF60')\n", (4821, 4829), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4852), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CA60"""'], {}), "('CA60')\n", (4844, 4852), True, 'import matplotlib.pyplot as plt\n'), ((2119, 2129), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2127, 2129), True, 'import matplotlib.pyplot as plt\n'), ((2148, 2170), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_file'], {}), '(save_file)\n', (2159, 2170), True, 'import matplotlib.pyplot as plt\n'), ((3773, 3891), 'matplotlib.pyplot.annotate', 'plt.annotate', (['""""""'], {'xy': '(teamx, teamy)', 'xytext': '(leaguex, leaguey)', 'xycoords': '"""data"""', 'arrowprops': "{'arrowstyle': '-|>'}"}), "('', xy=(teamx, teamy), xytext=(leaguex, leaguey), xycoords=\n 'data', arrowprops={'arrowstyle': '-|>'})\n", (3785, 3891), True, 'import matplotlib.pyplot as plt\n'), ((4970, 4980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4978, 4980), True, 'import matplotlib.pyplot as plt\n'), ((4999, 5021), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_file'], {}), '(save_file)\n', (5010, 5021), True, 'import matplotlib.pyplot as plt\n'), ((5255, 5282), 'scrapenhl2.scrape.team_info.team_as_str', 'team_info.team_as_str', (['team'], {}), '(team)\n', (5276, 5282), True, 'import scrapenhl2.scrape.team_info as team_info\n'), ((5812, 5839), 'scrapenhl2.scrape.team_info.team_as_str', 'team_info.team_as_str', (['team'], {}), '(team)\n', (5833, 5839), True, 'import scrapenhl2.scrape.team_info as team_info\n'), ((6514, 6558), 'math.atan', 'math.atan', (['((endy - starty) / (endx - startx))'], {}), '((endy - starty) / (endx - startx))\n', (6523, 6558), False, 'import math\n'), ((626, 668), 'scrapenhl2.manipulate.manipulate.team_5v5_shot_rates_by_score', 'manip.team_5v5_shot_rates_by_score', (['season'], {}), '(season)\n', (660, 668), True, 'import scrapenhl2.manipulate.manipulate as manip\n'), ((1645, 1672), 'scrapenhl2.scrape.team_info.team_as_str', 'team_info.team_as_str', (['team'], {}), '(team)\n', (1666, 1672), True, 'import scrapenhl2.scrape.team_info as team_info\n'), ((2529, 2571), 'scrapenhl2.manipulate.manipulate.team_5v5_shot_rates_by_score', 'manip.team_5v5_shot_rates_by_score', (['season'], {}), '(season)\n', (2563, 2571), True, 'import scrapenhl2.manipulate.manipulate as manip\n'), ((5352, 5443), 'scrapenhl2.plot.visualization_helper.get_startdate_enddate_from_kwargs', 'vhelper.get_startdate_enddate_from_kwargs', ([], {'startseason': 'startseason', 'endseason': 'endseason'}), '(startseason=startseason,\n endseason=endseason)\n', (5393, 5443), True, 'import scrapenhl2.plot.visualization_helper as vhelper\n'), ((5903, 5994), 'scrapenhl2.plot.visualization_helper.get_startdate_enddate_from_kwargs', 'vhelper.get_startdate_enddate_from_kwargs', ([], {'startseason': 'startseason', 'endseason': 'endseason'}), '(startseason=startseason,\n endseason=endseason)\n', (5944, 5994), True, 'import scrapenhl2.plot.visualization_helper as vhelper\n'), ((3124, 3150), 'scrapenhl2.scrape.team_info.team_as_id', 'team_info.team_as_id', (['team'], {}), '(team)\n', (3144, 3150), True, 'import scrapenhl2.scrape.team_info as team_info\n'), ((2037, 2046), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2044, 2046), True, 'import matplotlib.pyplot as plt\n')]
|
import torch
import os
from Lib.Nets.utils.generic.image2tensorboard import reconstruct_tile
import pickle as pkl
path = '/home/ale/Documents/Python/13_Tesi_2/runs/agan/10_32_idt/checkpoints/args.pkl'
opt = pkl.load(open(path, "rb"))
posx = pkl.load(open(os.path.join(opt.data_dir_train, 'posx.pkl'), "rb"))
posy = pkl.load(open(os.path.join(opt.data_dir_train, 'posy.pkl'), "rb"))
file_list = os.listdir(opt.tb_dir)
tile_list = list(filter(lambda x: '.pt' in x, file_list))
name = 'RT'
par_path = '/home/ale/Documents/Python/13_Tesi_2/Data/Datasets/EUSAR/Train/'
for i in tile_list:
epoch = i.split('.')[0]
trans = torch.load(os.path.join(opt.tb_dir, epoch + '.pt'))
reconstruct_tile(name, opt.patch_size, posx, posy, opt.tb_dir, [8736, 13984], epoch, trans)#, parameter_path=par_path)
|
[
"Lib.Nets.utils.generic.image2tensorboard.reconstruct_tile",
"os.path.join",
"os.listdir"
] |
[((396, 418), 'os.listdir', 'os.listdir', (['opt.tb_dir'], {}), '(opt.tb_dir)\n', (406, 418), False, 'import os\n'), ((683, 779), 'Lib.Nets.utils.generic.image2tensorboard.reconstruct_tile', 'reconstruct_tile', (['name', 'opt.patch_size', 'posx', 'posy', 'opt.tb_dir', '[8736, 13984]', 'epoch', 'trans'], {}), '(name, opt.patch_size, posx, posy, opt.tb_dir, [8736, 13984\n ], epoch, trans)\n', (699, 779), False, 'from Lib.Nets.utils.generic.image2tensorboard import reconstruct_tile\n'), ((256, 300), 'os.path.join', 'os.path.join', (['opt.data_dir_train', '"""posx.pkl"""'], {}), "(opt.data_dir_train, 'posx.pkl')\n", (268, 300), False, 'import os\n'), ((330, 374), 'os.path.join', 'os.path.join', (['opt.data_dir_train', '"""posy.pkl"""'], {}), "(opt.data_dir_train, 'posy.pkl')\n", (342, 374), False, 'import os\n'), ((638, 677), 'os.path.join', 'os.path.join', (['opt.tb_dir', "(epoch + '.pt')"], {}), "(opt.tb_dir, epoch + '.pt')\n", (650, 677), False, 'import os\n')]
|
"""
Dataloaders for CUB200-2011, CARS196 and Stanford Online Products.
"""
"""==================================================================================================="""
################### LIBRARIES ###################
import warnings
warnings.filterwarnings("ignore")
import numpy as np, os, sys, pandas as pd, csv, copy
import torch, torch.nn as nn, matplotlib.pyplot as plt, random
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
from tqdm import tqdm
import pretrainedmodels.utils as utils
import auxiliaries as aux
"""==================================================================================================="""
################ FUNCTION TO RETURN ALL DATALOADERS NECESSARY ####################
def give_dataloaders(dataset, opt):
### ImageNet Properties
opt.mean, opt.std, opt.input_space, opt.input_range = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225], 'RGB', [0,1]
if 'class_samples_per_class' in vars(opt).keys():
opt.samples_per_class = opt.class_samples_per_class
if opt.dataset=='cub200':
datasets = give_CUB200_datasets(opt)
elif opt.dataset=='cars196':
datasets = give_CARS196_datasets(opt)
elif opt.dataset=='online_products':
datasets = give_OnlineProducts_datasets(opt)
else:
raise Exception('No Dataset >{}< available!'.format(dataset))
dataloaders = {}
for key,dataset in datasets.items():
if dataset is not None:
is_val = dataset.is_validation
dataloaders[key] = torch.utils.data.DataLoader(dataset, batch_size=opt.bs, num_workers=opt.kernels, shuffle=not is_val, pin_memory=True, drop_last=not is_val)
return dataloaders
"""==================================================================================================="""
################# FUNCTIONS TO RETURN TRAIN/VAL PYTORCH DATASETS FOR CUB200, CARS196 AND STANFORD ONLINE PRODUCTS ####################################
def give_CUB200_datasets(opt):
"""
This function generates a training and testing dataloader for Metric Learning on the CUB-200-2011 dataset.
    For Metric Learning, the dataset is sorted by name, and the first half is used for training while the last half is used for testing.
So no random shuffling of classes.
"""
image_sourcepath = opt.source_path+'/images'
image_classes = sorted([x for x in os.listdir(image_sourcepath) if '._' not in x], key=lambda x: int(x.split('.')[0]))
conversion = {int(x.split('.')[0]):x.split('.')[-1] for x in image_classes}
image_list = {int(key.split('.')[0]):sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key) if '._' not in x]) for key in image_classes}
image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]
image_list = [x for y in image_list for x in y]
image_dict = {}
for key, img_path in image_list:
key = key-1
if not key in image_dict.keys():
image_dict[key] = []
image_dict[key].append(img_path)
keys = sorted(list(image_dict.keys()))
# random.shuffle(keys)
#Following "Deep Metric Learning via Lifted Structured Feature Embedding", we use the first half of classes for training.
train,test = keys[:len(keys)//2], keys[len(keys)//2:]
if opt.sampling=='learned':
if opt.train_val_split_by_class:
train_val_split = int(len(train)*opt.train_val_split)
train, val = train[:train_val_split], train[train_val_split:]
train_image_dict, val_image_dict, test_image_dict = {key:image_dict[key] for key in train}, {key:image_dict[key] for key in val}, {key:image_dict[key] for key in test}
else:
train_image_dict, val_image_dict = {},{}
for key in train:
# train_ixs = np.random.choice(len(image_dict[key]), int(len(image_dict[key])*opt.train_val_split), replace=False)
train_ixs = np.array(list(set(np.round(np.linspace(0,len(image_dict[key])-1,int(len(image_dict[key])*opt.train_val_split)))))).astype(int)
val_ixs = np.array([x for x in range(len(image_dict[key])) if x not in train_ixs])
train_image_dict[key] = np.array(image_dict[key])[train_ixs]
val_image_dict[key] = np.array(image_dict[key])[val_ixs]
else:
train_image_dict = {key:image_dict[key] for key in train}
test_image_dict = {key:image_dict[key] for key in test}
train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
test_dataset = BaseTripletDataset(test_image_dict, opt, is_validation=True)
eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
train_dataset.conversion = conversion
test_dataset.conversion = conversion
eval_dataset.conversion = conversion
if opt.sampling!='learned':
return {'training':train_dataset, 'testing':test_dataset, 'evaluation':eval_dataset}
else:
val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
val_dataset.conversion = conversion
return {'training':train_dataset, 'validation':val_dataset, 'testing':test_dataset, 'evaluation':eval_dataset}
def give_CARS196_datasets(opt):
"""
This function generates a training and testing dataloader for Metric Learning on the CARS-196 dataset.
    For Metric Learning, the dataset is sorted by name, and the first half is used for training while the last half is used for testing.
So no random shuffling of classes.
"""
image_sourcepath = opt.source_path+'/images'
image_classes = sorted([x for x in os.listdir(image_sourcepath)])
conversion = {i:x for i,x in enumerate(image_classes)}
image_list = {i:sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key)]) for i,key in enumerate(image_classes)}
image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]
image_list = [x for y in image_list for x in y]
image_dict = {}
for key, img_path in image_list:
if not key in image_dict.keys():
image_dict[key] = []
image_dict[key].append(img_path)
keys = sorted(list(image_dict.keys()))
# random.shuffle(keys)
#Following "Deep Metric Learning via Lifted Structured Feature Embedding", we use the first half of classes for training.
train,test = keys[:len(keys)//2], keys[len(keys)//2:]
if opt.sampling=='learned':
if opt.train_val_split_by_class:
train_val_split = int(len(train)*opt.train_val_split)
train, val = train[:train_val_split], train[train_val_split:]
train_image_dict, val_image_dict, test_image_dict = {key:image_dict[key] for key in train}, {key:image_dict[key] for key in val}, {key:image_dict[key] for key in test}
else:
train_image_dict, val_image_dict = {},{}
for key in train:
train_ixs = np.random.choice(len(image_dict[key]), int(len(image_dict[key])*opt.train_val_split), replace=False)
val_ixs = np.array([x for x in range(len(image_dict[key])) if x not in train_ixs])
train_image_dict[key] = np.array(image_dict[key])[train_ixs]
val_image_dict[key] = np.array(image_dict[key])[val_ixs]
test_image_dict = {key:image_dict[key] for key in test}
val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
val_dataset.conversion = conversion
else:
train_image_dict, test_image_dict = {key:image_dict[key] for key in train}, {key:image_dict[key] for key in test}
val_dataset = None
train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
test_dataset = BaseTripletDataset(test_image_dict, opt, is_validation=True)
eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
train_dataset.conversion = conversion
test_dataset.conversion = conversion
eval_dataset.conversion = conversion
return {'training':train_dataset, 'validation':val_dataset, 'testing':test_dataset, 'evaluation':eval_dataset}
def give_OnlineProducts_datasets(opt):
image_sourcepath = opt.source_path+'/images'
training_files = pd.read_table(opt.source_path+'/Info_Files/Ebay_train.txt', header=0, delimiter=' ')
test_files = pd.read_table(opt.source_path+'/Info_Files/Ebay_test.txt', header=0, delimiter=' ')
conversion, super_conversion = {},{}
for class_id, path in zip(training_files['class_id'],training_files['path']):
conversion[class_id] = path.split('/')[0]
for super_class_id, path in zip(training_files['super_class_id'],training_files['path']):
        super_conversion[super_class_id] = path.split('/')[0]
for class_id, path in zip(test_files['class_id'],test_files['path']):
conversion[class_id] = path.split('/')[0]
train_image_dict, test_image_dict, super_train_image_dict = {},{},{}
for key, img_path in zip(training_files['class_id'],training_files['path']):
key = key-1
if not key in train_image_dict.keys():
train_image_dict[key] = []
train_image_dict[key].append(image_sourcepath+'/'+img_path)
for key, img_path in zip(test_files['class_id'],test_files['path']):
key = key-1
if not key in test_image_dict.keys():
test_image_dict[key] = []
test_image_dict[key].append(image_sourcepath+'/'+img_path)
for key, img_path in zip(training_files['super_class_id'],training_files['path']):
key = key-1
if not key in super_train_image_dict.keys():
super_train_image_dict[key] = []
super_train_image_dict[key].append(image_sourcepath+'/'+img_path)
train_keys = list(train_image_dict.keys())
# if opt.train_val_split_by_class:
if opt.sampling=='learned':
train_val_split = int(len(train_keys)*opt.train_val_split)
train, val = train_keys[:train_val_split], train_keys[train_val_split:]
train_image_dict, val_image_dict = {key:train_image_dict[key] for key in train}, {key:train_image_dict[key] for key in val}
val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
val_dataset.conversion = conversion
else:
val_dataset = None
# else:
# train_image_dict_temp, val_image_dict_temp = {},{}
# for key in train_keys:
# print(len(train_image_dict[key]))
# train_ixs = np.random.choice(len(train_image_dict[key]), int(len(train_image_dict[key])*opt.train_val_split), replace=False)
# val_ixs = np.array([x for x in range(len(train_image_dict[key])) if x not in train_ixs])
# train_image_dict_temp[key] = np.array(image_dict[key])[train_ixs]
# val_image_dict_temp[key] = np.array(image_dict[key])[val_ixs]
super_train_dataset = BaseTripletDataset(super_train_image_dict, opt, is_validation=True)
train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
test_dataset = BaseTripletDataset(test_image_dict, opt, is_validation=True)
eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
super_train_dataset.conversion = super_conversion
train_dataset.conversion = conversion
test_dataset.conversion = conversion
eval_dataset.conversion = conversion
return {'training':train_dataset, 'validation':val_dataset, 'testing':test_dataset, 'evaluation':eval_dataset, 'super_evaluation':super_train_dataset}
"""==================================================================================================="""
################## BASIC PYTORCH DATASET USED FOR ALL DATASETS ##################################
class BaseTripletDataset(Dataset):
def __init__(self, image_dict, opt, samples_per_class=8, is_validation=False):
self.is_validation = is_validation
self.pars = opt
self.image_dict = image_dict
self.samples_per_class = samples_per_class
#####
self.init_setup()
##### Option 2: Use Mean/Stds on which the networks were trained
if 'bninception' in opt.arch:
normalize = transforms.Normalize(mean=[0.502, 0.4588, 0.4078],std=[0.0039, 0.0039, 0.0039])
else:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
transf_list = []
if not self.is_validation:
transf_list.extend([transforms.RandomResizedCrop(size=224), transforms.RandomHorizontalFlip(0.5)])
else:
transf_list.extend([transforms.Resize(256), transforms.CenterCrop(224)])
transf_list.extend([transforms.ToTensor(),
normalize])
self.transform = transforms.Compose(transf_list)
def init_setup(self):
self.n_files = np.sum([len(self.image_dict[key]) for key in self.image_dict.keys()])
self.avail_classes = sorted(list(self.image_dict.keys()))
self.image_dict = {i:self.image_dict[key] for i,key in enumerate(self.avail_classes)}
self.avail_classes = sorted(list(self.image_dict.keys()))
if not self.is_validation:
#Select current class to sample images from up to <samples_per_class>
self.current_class = np.random.randint(len(self.avail_classes))
self.classes_visited = [self.current_class, self.current_class]
self.n_samples_drawn = 0
# if self.is_validation or self.samples_per_class==1:
self.image_list = [[(x,key) for x in self.image_dict[key]] for key in self.image_dict.keys()]
self.image_list = [x for y in self.image_list for x in y]
# self.sample_probs = np.ones(len(self.image_list))/len(self.image_list)
self.is_init = True
def ensure_3dim(self, img):
if len(img.size)==2:
img = img.convert('RGB')
return img
def __getitem__(self, idx):
if self.is_init:
self.current_class = self.avail_classes[idx%len(self.avail_classes)]
self.is_init = False
if not self.is_validation:
if self.samples_per_class==1:
return (self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0]))))
if self.n_samples_drawn==self.samples_per_class:
#Once enough samples per class have been drawn, we choose another class to draw samples from.
#Note that we ensure with self.classes_visited that no class is chosen if it had been chosen
#previously or one before that.
counter = copy.deepcopy(self.avail_classes)
for prev_class in self.classes_visited:
if prev_class in counter: counter.remove(prev_class)
self.current_class = counter[idx%len(counter)]
self.classes_visited = self.classes_visited[1:]+[self.current_class]
self.n_samples_drawn = 0
class_sample_idx = idx%len(self.image_dict[self.current_class])
self.n_samples_drawn += 1
out_img = self.transform(self.ensure_3dim(Image.open(self.image_dict[self.current_class][class_sample_idx])))
if 'bninception' in self.pars.arch:
out_img = out_img[range(3)[::-1],:]
return (self.current_class,out_img)
else:
out_img = self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
if 'bninception' in self.pars.arch:
out_img = out_img[range(3)[::-1],:]
return (self.image_list[idx][-1], out_img)
def __len__(self):
return self.n_files
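# Hedged behaviour note (derived from the sampling logic above): during
# training, __getitem__ returns (class_label, image_tensor) tuples and draws
# samples_per_class consecutive items from one class before switching to a
# class not among the last two visited; in validation mode it simply walks
# image_list in order.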
|
[
"copy.deepcopy",
"torch.utils.data.DataLoader",
"warnings.filterwarnings",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomResizedCrop",
"PIL.Image.open",
"torchvision.transforms.Compose",
"numpy.array",
"pandas.read_table",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"os.listdir",
"torchvision.transforms.ToTensor"
] |
[((248, 281), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (271, 281), False, 'import warnings\n'), ((8460, 8550), 'pandas.read_table', 'pd.read_table', (["(opt.source_path + '/Info_Files/Ebay_train.txt')"], {'header': '(0)', 'delimiter': '""" """'}), "(opt.source_path + '/Info_Files/Ebay_train.txt', header=0,\n delimiter=' ')\n", (8473, 8550), True, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((8566, 8655), 'pandas.read_table', 'pd.read_table', (["(opt.source_path + '/Info_Files/Ebay_test.txt')"], {'header': '(0)', 'delimiter': '""" """'}), "(opt.source_path + '/Info_Files/Ebay_test.txt', header=0,\n delimiter=' ')\n", (8579, 8655), True, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((13075, 13106), 'torchvision.transforms.Compose', 'transforms.Compose', (['transf_list'], {}), '(transf_list)\n', (13093, 13106), False, 'from torchvision import transforms\n'), ((1568, 1712), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'opt.bs', 'num_workers': 'opt.kernels', 'shuffle': '(not is_val)', 'pin_memory': '(True)', 'drop_last': '(not is_val)'}), '(dataset, batch_size=opt.bs, num_workers=opt.\n kernels, shuffle=not is_val, pin_memory=True, drop_last=not is_val)\n', (1595, 1712), False, 'import torch, torch.nn as nn, matplotlib.pyplot as plt, random\n'), ((12495, 12580), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.502, 0.4588, 0.4078]', 'std': '[0.0039, 0.0039, 0.0039]'}), '(mean=[0.502, 0.4588, 0.4078], std=[0.0039, 0.0039, 0.0039]\n )\n', (12515, 12580), False, 'from torchvision import transforms\n'), ((12613, 12688), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (12633, 12688), False, 'from torchvision import transforms\n'), ((2417, 2445), 'os.listdir', 'os.listdir', (['image_sourcepath'], {}), '(image_sourcepath)\n', (2427, 2445), False, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((5765, 5793), 'os.listdir', 'os.listdir', (['image_sourcepath'], {}), '(image_sourcepath)\n', (5775, 5793), False, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((12987, 13008), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (13006, 13008), False, 'from torchvision import transforms\n'), ((14963, 14996), 'copy.deepcopy', 'copy.deepcopy', (['self.avail_classes'], {}), '(self.avail_classes)\n', (14976, 14996), False, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((2684, 2724), 'os.listdir', 'os.listdir', (["(image_sourcepath + '/' + key)"], {}), "(image_sourcepath + '/' + key)\n", (2694, 2724), False, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((4313, 4338), 'numpy.array', 'np.array', (['image_dict[key]'], {}), '(image_dict[key])\n', (4321, 4338), True, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((4390, 4415), 'numpy.array', 'np.array', (['image_dict[key]'], {}), '(image_dict[key])\n', (4398, 4415), True, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((5929, 5969), 'os.listdir', 'os.listdir', (["(image_sourcepath + '/' + key)"], {}), "(image_sourcepath + '/' + key)\n", (5939, 5969), False, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((7365, 7390), 'numpy.array', 'np.array', (['image_dict[key]'], {}), '(image_dict[key])\n', (7373, 7390), True, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((7442, 7467), 'numpy.array', 'np.array', (['image_dict[key]'], {}), '(image_dict[key])\n', (7450, 7467), True, 'import numpy as np, os, sys, pandas as pd, csv, copy\n'), ((12780, 12818), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', ([], {'size': '(224)'}), '(size=224)\n', (12808, 12818), False, 'from torchvision import transforms\n'), ((12820, 12856), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (12851, 12856), False, 'from torchvision import transforms\n'), ((12905, 12927), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (12922, 12927), False, 'from torchvision import transforms\n'), ((12929, 12955), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (12950, 12955), False, 'from torchvision import transforms\n'), ((15489, 15554), 'PIL.Image.open', 'Image.open', (['self.image_dict[self.current_class][class_sample_idx]'], {}), '(self.image_dict[self.current_class][class_sample_idx])\n', (15499, 15554), False, 'from PIL import Image\n'), ((15773, 15808), 'PIL.Image.open', 'Image.open', (['self.image_list[idx][0]'], {}), '(self.image_list[idx][0])\n', (15783, 15808), False, 'from PIL import Image\n'), ((14569, 14604), 'PIL.Image.open', 'Image.open', (['self.image_list[idx][0]'], {}), '(self.image_list[idx][0])\n', (14579, 14604), False, 'from PIL import Image\n')]
|
from typing import NewType
RPCEndpoint = NewType("RPCEndpoint", str)
|
[
"typing.NewType"
] |
[((44, 71), 'typing.NewType', 'NewType', (['"""RPCEndpoint"""', 'str'], {}), "('RPCEndpoint', str)\n", (51, 71), False, 'from typing import NewType\n')]
|
from rpeakdetection.Utility import Utility
util = Utility()
class Evaluation:
def evaluate(self, rpeaks, name, evaluation_width, rule_based, test_index=None):
real_locations = util.remove_non_beat(name, rule_based)[0]
if test_index is not None:
real_locations = list(filter(lambda x: x >= test_index, real_locations))
window_size = int(evaluation_width / 2)
Y = list()
for y in real_locations:
Y.extend([y + q for q in range(-window_size, window_size)])
filtered_peaks = list()
prev = 0
for peak in rpeaks:
if peak - prev > evaluation_width:
filtered_peaks.append(peak)
prev = peak
correct_detected = set(filtered_peaks).intersection(set(Y))
recall = len(correct_detected) / len(real_locations)
if len(rpeaks) != 0:
precision = len(correct_detected) / len(rpeaks)
else:
precision = 0
return recall, precision
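# Hedged worked example (numbers are illustrative): with evaluation_width=36
# samples, a detected peak counts as correct when it falls in [r - 18, r + 17]
# around an annotated location r (range() excludes the upper bound), after
# detections within evaluation_width samples of the previously kept peak have
# been discarded; recall and precision are then computed over the matched set.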
|
[
"rpeakdetection.Utility.Utility"
] |
[((51, 60), 'rpeakdetection.Utility.Utility', 'Utility', ([], {}), '()\n', (58, 60), False, 'from rpeakdetection.Utility import Utility\n')]
|
# Generated by Django 2.1.2 on 2018-12-28 16:58
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('api', '0010_auto_20181228_1625'),
]
operations = [
migrations.CreateModel(
name='Annotation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotation_set', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.PositiveIntegerField",
"django.db.models.AutoField"
] |
[((499, 592), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (515, 592), False, 'from django.db import migrations, models\n'), ((621, 650), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (648, 650), False, 'from django.db import migrations, models\n'), ((757, 796), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (777, 796), False, 'from django.db import migrations, models\n'), ((827, 862), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (847, 862), False, 'from django.db import migrations, models\n'), ((898, 996), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""contenttypes.ContentType"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'contenttypes.ContentType')\n", (915, 996), False, 'from django.db import migrations, models\n'), ((1020, 1147), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""annotation_set"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='annotation_set', to=settings.AUTH_USER_MODEL)\n", (1037, 1147), False, 'from django.db import migrations, models\n')]
|
import sys
import matplotlib
matplotlib.use('Qt5Agg')
from PyQt5 import QtCore, QtGui, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MplCanvas(FigureCanvasQTAgg):
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
super(MplCanvas, self).__init__(fig)
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
sc = MplCanvas(self, width=5, height=4, dpi=100)
sc.axes.plot([0,1,2,3,4], [10,1,20,3,40])
        # Create toolbar, passing canvas as first parameter, parent (self, the MainWindow) as second.
toolbar = NavigationToolbar(sc, self)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(toolbar)
layout.addWidget(sc)
# Create a placeholder widget to hold our toolbar and canvas.
widget = QtWidgets.QWidget()
widget.setLayout(layout)
self.setCentralWidget(widget)
self.show()
app = QtWidgets.QApplication(sys.argv)
w = MainWindow()
app.exec_()
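# Hedged note: FigureCanvasQTAgg is itself a QWidget, which is why both the
# canvas and its NavigationToolbar can be placed in an ordinary QVBoxLayout,
# as done in MainWindow above.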
|
[
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.figure.Figure",
"matplotlib.use",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QApplication"
] |
[((29, 53), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (43, 53), False, 'import matplotlib\n'), ((1200, 1232), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1222, 1232), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((361, 401), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(width, height)', 'dpi': 'dpi'}), '(figsize=(width, height), dpi=dpi)\n', (367, 401), False, 'from matplotlib.figure import Figure\n'), ((859, 886), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['sc', 'self'], {}), '(sc, self)\n', (876, 886), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar\n'), ((905, 928), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (926, 928), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1080, 1099), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1097, 1099), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
import os
import sqlite3
from fridgeai import camera
from PyQt5 import QtCore, QtGui, QtWidgets
from fridgeai.gui.manual import Ui_Manual
from fridgeai.gui.predict import Ui_predict
from fridgeai.gui.testing import Ui_List
from fridgeai.gui.learn import Ui_Learn
from datetime import date
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1024, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.wallpaper = QtWidgets.QLabel(self.centralwidget)
self.wallpaper.setGeometry(QtCore.QRect(-7, -5, 1931, 1080))
self.wallpaper.setStyleSheet("QLabel{\n"
" background-color:\"#1D283D\"\n"
"}")
self.wallpaper.setText("")
self.wallpaper.setObjectName("wallpaper")
self.inventory_wallpaper = QtWidgets.QLabel(self.centralwidget)
self.inventory_wallpaper.setGeometry(QtCore.QRect(60, 40, 401, 531))
self.inventory_wallpaper.setText("")
self.inventory_wallpaper.setPixmap(QtGui.QPixmap("98adaa-2048x1536.png"))
self.inventory_wallpaper.setObjectName("inventory_wallpaper")
self.inventor_title = QtWidgets.QLabel(self.centralwidget)
self.inventor_title.setGeometry(QtCore.QRect(170, 50, 171, 91))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
self.inventor_title.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(22)
font.setBold(False)
font.setWeight(50)
self.inventor_title.setFont(font)
self.inventor_title.setObjectName("inventor_title")
self.vector = QtWidgets.QLabel(self.centralwidget)
self.vector.setGeometry(QtCore.QRect(90, 140, 341, 2))
self.vector.setStyleSheet("QLabel{\n"
"background-color:black\n"
"}")
self.vector.setText("")
self.vector.setObjectName("vector")
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
self.tableWidget.setGeometry(QtCore.QRect(90, 160, 341, 411))
font = QtGui.QFont()
font.setPointSize(22)
self.tableWidget.setFont(font)
self.tableWidget.setStyleSheet("QTableWidget {background: #98ADAA; color: #FFFFFF; }")
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(4)
self.tableWidget.setRowCount(10)
self.tableWidget.horizontalHeader().setVisible(False)
self.tableWidget.horizontalHeader().setHighlightSections(False)
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.verticalHeader().setHighlightSections(False)
self.tableWidget.setShowGrid(False)
self.Time = QtWidgets.QLabel(self.centralwidget)
self.Time.setGeometry(QtCore.QRect(600, 20, 291, 151))
font = QtGui.QFont()
font.setPointSize(48)
self.Time.setFont(font)
self.Time.setObjectName("Time")
self.temperature = QtWidgets.QLabel(self.centralwidget)
self.temperature.setGeometry(QtCore.QRect(660, 190, 201, 91))
font = QtGui.QFont()
font.setPointSize(48)
self.temperature.setFont(font)
self.temperature.setObjectName("temperature")
self.gas = QtWidgets.QLabel(self.centralwidget)
self.gas.setGeometry(QtCore.QRect(660, 260, 541, 191))
font = QtGui.QFont()
font.setPointSize(48)
self.gas.setFont(font)
self.gas.setObjectName("gas")
self.Add = QtWidgets.QPushButton(self.centralwidget)
self.Add.setGeometry(QtCore.QRect(540, 460, 100, 100))
font = QtGui.QFont()
font.setPointSize(14)
self.Add.setFont(font)
self.Add.setStyleSheet("QPushButton {\n"
" color: #FFFFFF;\n"
" border: 4px solid #FFFFFF;\n"
" border-radius: 50;\n"
" }\n"
"")
self.ListButton = QtWidgets.QPushButton(self.centralwidget)
self.ListButton.setGeometry(QtCore.QRect(90, 160, 341, 411))
font = QtGui.QFont()
font.setPointSize(14)
self.ListButton.setFont(font)
self.ListButton.setStyleSheet("QPushButton {\n"
" \n"
" border: 0.1px solid #FFFFFF;\n"
" \n"
" }\n"
"")
self.ListButton.setText("")
self.ListButton.setObjectName("ListButton")
self.temperature_icon = QtWidgets.QLabel(self.centralwidget)
self.temperature_icon.setGeometry(QtCore.QRect(550, 210, 101, 71))
self.temperature_icon.setText("")
self.temperature_icon.setPixmap(QtGui.QPixmap(os.path.join("data", "temperature-2-64.png")))
self.temperature_icon.setObjectName("temperature_icon")
self.pressure_icon = QtWidgets.QLabel(self.centralwidget)
self.pressure_icon.setGeometry(QtCore.QRect(550, 320, 71, 71))
self.pressure_icon.setText("")
self.pressure_icon.setPixmap(QtGui.QPixmap(os.path.join("data", "pressure-64.png")))
self.pressure_icon.setObjectName("pressure_icon")
self.Water = QtWidgets.QPushButton(self.centralwidget)
self.Water.setGeometry(QtCore.QRect(10, 10, 41, 41))
font = QtGui.QFont()
font.setPointSize(14)
self.Water.setFont(font)
        self.Water.setStyleSheet("QPushButton {\n"
"   color: #FFFFFF;\n"
"\n"
"   border: 4px solid #FFFFFF;\n"
"   border-radius: 20;\n"
"   }\n"
"")
self.Water.setText("")
self.Water.setObjectName("Water")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(60, 20, 81, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label.setFont(font)
self.label.setObjectName("label")
self.learn = QtWidgets.QPushButton(self.centralwidget)
self.learn.setGeometry(QtCore.QRect(700, 460, 100, 100))
font = QtGui.QFont()
font.setPointSize(12)
self.learn.setFont(font)
self.learn.setStyleSheet("QPushButton {\n"
" color: #FFFFFF;\n"
" border: 4px solid #FFFFFF;\n"
" border-radius: 50;\n"
" }\n"
"")
self.learn.setObjectName("Learn")
self.Manual = QtWidgets.QPushButton(self.centralwidget)
self.Manual.setGeometry(QtCore.QRect(860, 460, 100, 100))
font = QtGui.QFont()
font.setPointSize(14)
self.Manual.setFont(font)
self.Manual.setStyleSheet("QPushButton {\n"
" color: #FFFFFF;\n"
" border: 4px solid #FFFFFF;\n"
" border-radius: 50;\n"
" }\n"
"")
self.Manual.setObjectName("Manual")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.Add.clicked.connect(self.addItem)
self.learn.clicked.connect(self.learnItem)
self.Manual.clicked.connect(self.addManual)
self.ListButton.clicked.connect(self.showInventory)
        # Populate the inventory table once at startup; the same logic runs
        # periodically via reload() below.
        self.reload()
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.reload)
self.timer.setInterval(1000)
self.timer.start()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.inventor_title.setText(_translate(
"MainWindow", "<html><head/><body><p><span style=\" font-size:24pt; color:#ffffff;\">Inventory</span></p></body></html>"))
self.Time.setText(_translate(
"MainWindow", "<html><head/><body><p><span style=\" color:#ffffff;\">1:18 PM</span></p></body></html>"))
self.temperature.setText(_translate(
"MainWindow", "<html><head/><body><p><span style=\" color:#ffffff;\">37*F</span></p></body></html>"))
self.gas.setText(_translate(
"MainWindow", "<html><head/><body><p><span style=\" color:#ffffff;\">2.4psi</span></p></body></html>"))
self.label.setText(_translate(
"MainWindow", "<html><head/><body><p><span style=\" color:#ffffff;\">Leakage</span></p></body></html>"))
self.Add.setText(_translate("MainWindow", "Add"))
self.learn.setText(_translate("MainWindow", "Learn"))
self.Manual.setText(_translate("MainWindow", "Manual"))
def takeSnap(self):
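        # Grab five 32x32 frames via the project's camera helper (interval units
        # as defined by fridgeai.camera).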
camera.get_frames(shape=(32, 32), count=5, interval=5)
def addItem(self):
self.MainWindow = QtWidgets.QMainWindow()
self.ui = Ui_predict()
self.ui.setupUi(self.MainWindow)
self.MainWindow.show()
def reload(self):
connection = sqlite3.connect(os.path.join('data', 'item.db'))
query = "SELECT name,end_date FROM Inventory"
result = connection.execute(query)
self.tableWidget.setRowCount(0)
count = 0
for row_number, row_data in enumerate(result):
self.tableWidget.insertRow(row_number)
count=count+1
for colum_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number, colum_number, QtWidgets.QTableWidgetItem(str(data)))
self.tableWidget.setColumnWidth(colum_number, 1000)
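        # Highlight items whose end date is today (dates assumed stored as ISO
        # 'YYYY-MM-DD' strings, matching str(date.today())).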
for num in range(count):
if(str(self.tableWidget.item(num,1).text()) == str(date.today())):
self.tableWidget.item(num,0).setBackground(QtGui.QColor(246,77,77))
def learnItem(self):
self.MainWindow = QtWidgets.QMainWindow()
self.ui = Ui_Learn()
self.ui.setupUi(self.MainWindow)
self.MainWindow.show()
def addManual(self):
self.MainWindow = QtWidgets.QMainWindow()
self.ui = Ui_Manual()
self.ui.setupUi(self.MainWindow)
self.MainWindow.show()
def showInventory(self):
self.MainWindow = QtWidgets.QMainWindow()
self.ui = Ui_List()
self.ui.setupUi(self.MainWindow)
self.MainWindow.show()
|
[
"PyQt5.QtGui.QColor",
"PyQt5.QtWidgets.QPushButton",
"os.path.join",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtCore.QTimer",
"fridgeai.gui.testing.Ui_List",
"fridgeai.gui.manual.Ui_Manual",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QMainWindow",
"datetime.date.today",
"PyQt5.QtGui.QPalette",
"fridgeai.gui.predict.Ui_predict",
"PyQt5.QtGui.QPixmap",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"fridgeai.gui.learn.Ui_Learn",
"PyQt5.QtWidgets.QTableWidget",
"PyQt5.QtGui.QFont",
"fridgeai.camera.get_frames"
] |
[((468, 497), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (485, 497), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((581, 617), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (597, 617), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((972, 1008), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (988, 1008), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1313, 1349), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1329, 1349), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1440, 1456), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (1454, 1456), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2055, 2068), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2066, 2068), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2278, 2314), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2294, 2314), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2627, 2669), 'PyQt5.QtWidgets.QTableWidget', 'QtWidgets.QTableWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2649, 2669), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2755, 2768), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2766, 2768), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3400, 3436), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3416, 3436), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3515, 3528), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3526, 3528), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3658, 3694), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3674, 3694), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3780, 3793), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3791, 3793), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3936, 3972), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3952, 3972), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4051, 4064), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4062, 4064), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4183, 4224), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4204, 4224), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4303, 4316), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4314, 4316), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4708, 4749), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4729, 4749), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4834, 4847), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4845, 4847), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5351, 5387), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5367, 5387), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5699, 5735), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5715, 5735), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6018, 6059), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6039, 6059), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6136, 6149), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (6147, 6149), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6562, 6598), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6578, 6598), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6675, 6688), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (6686, 6688), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6815, 6856), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6836, 6856), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6937, 6950), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (6948, 6950), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7394, 7435), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (7415, 7435), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7517, 7530), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (7528, 7530), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8065, 8114), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (8102, 8114), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9185, 9200), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (9198, 9200), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10507, 10561), 'fridgeai.camera.get_frames', 'camera.get_frames', ([], {'shape': '(32, 32)', 'count': '(5)', 'interval': '(5)'}), '(shape=(32, 32), count=5, interval=5)\n', (10524, 10561), False, 'from fridgeai import camera\n'), ((10612, 10635), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (10633, 10635), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10654, 10666), 'fridgeai.gui.predict.Ui_predict', 'Ui_predict', ([], {}), '()\n', (10664, 10666), False, 'from fridgeai.gui.predict import Ui_predict\n'), ((11652, 11675), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (11673, 11675), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11694, 11704), 'fridgeai.gui.learn.Ui_Learn', 'Ui_Learn', ([], {}), '()\n', (11702, 11704), False, 'from fridgeai.gui.learn import Ui_Learn\n'), ((11829, 11852), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (11850, 11852), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11871, 11882), 'fridgeai.gui.manual.Ui_Manual', 'Ui_Manual', ([], {}), '()\n', (11880, 11882), False, 'from fridgeai.gui.manual import Ui_Manual\n'), ((12011, 12034), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (12032, 12034), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12053, 12062), 'fridgeai.gui.testing.Ui_List', 'Ui_List', ([], {}), '()\n', (12060, 12062), False, 'from fridgeai.gui.testing import Ui_List\n'), ((653, 685), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(-7)', '(-5)', '(1931)', '(1080)'], {}), '(-7, -5, 1931, 1080)\n', (665, 685), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1054, 1084), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(40)', '(401)', '(531)'], {}), '(60, 40, 401, 531)\n', (1066, 1084), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1174, 1211), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""98adaa-2048x1536.png"""'], {}), "('98adaa-2048x1536.png')\n", (1187, 1211), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1390, 1420), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(170)', '(50)', '(171)', '(91)'], {}), '(170, 50, 171, 91)\n', (1402, 1420), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1486, 1507), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1498, 1507), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1661, 1682), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1673, 1682), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1838, 1865), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(120)', '(120)', '(120)'], {}), '(120, 120, 120)\n', (1850, 1865), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2347, 2376), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(140)', '(341)', '(2)'], {}), '(90, 140, 341, 2)\n', (2359, 2376), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2707, 2738), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(160)', '(341)', '(411)'], {}), '(90, 160, 341, 411)\n', (2719, 2738), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3467, 3498), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(600)', '(20)', '(291)', '(151)'], {}), '(600, 20, 291, 151)\n', (3479, 3498), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3732, 3763), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(660)', '(190)', '(201)', '(91)'], {}), '(660, 190, 201, 91)\n', (3744, 3763), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4002, 4034), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(660)', '(260)', '(541)', '(191)'], {}), '(660, 260, 541, 191)\n', (4014, 4034), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4254, 4286), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(540)', '(460)', '(100)', '(100)'], {}), '(540, 460, 100, 100)\n', (4266, 4286), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4786, 4817), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(160)', '(341)', '(411)'], {}), '(90, 160, 341, 411)\n', (4798, 4817), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5430, 5461), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(550)', '(210)', '(101)', '(71)'], {}), '(550, 210, 101, 71)\n', (5442, 5461), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5775, 5805), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(550)', '(320)', '(71)', '(71)'], {}), '(550, 320, 71, 71)\n', (5787, 5805), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6091, 6119), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(41)', '(41)'], {}), '(10, 10, 41, 41)\n', (6103, 6119), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6630, 6658), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(20)', '(81)', '(21)'], {}), '(60, 20, 81, 21)\n', (6642, 6658), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6888, 6920), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(700)', '(460)', '(100)', '(100)'], {}), '(700, 460, 100, 100)\n', (6900, 6920), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7468, 7500), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(860)', '(460)', '(100)', '(100)'], {}), '(860, 460, 100, 100)\n', (7480, 7500), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8363, 8394), 'os.path.join', 'os.path.join', (['"""data"""', '"""item.db"""'], {}), "('data', 'item.db')\n", (8375, 8394), False, 'import os\n'), ((10799, 10830), 'os.path.join', 'os.path.join', (['"""data"""', '"""item.db"""'], {}), "('data', 'item.db')\n", (10811, 10830), False, 'import os\n'), ((5559, 5603), 'os.path.join', 'os.path.join', (['"""data"""', '"""temperature-2-64.png"""'], {}), "('data', 'temperature-2-64.png')\n", (5571, 5603), False, 'import os\n'), ((5897, 5936), 'os.path.join', 'os.path.join', (['"""data"""', '"""pressure-64.png"""'], {}), "('data', 'pressure-64.png')\n", (5909, 5936), False, 'import os\n'), ((9064, 9076), 'datetime.date.today', 'date.today', ([], {}), '()\n', (9074, 9076), False, 'from datetime import date\n'), ((9139, 9164), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(246)', '(77)', '(77)'], {}), '(246, 77, 77)\n', (9151, 9164), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11500, 11512), 'datetime.date.today', 'date.today', ([], {}), '()\n', (11510, 11512), False, 'from datetime import date\n'), ((11575, 11600), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(246)', '(77)', '(77)'], {}), '(246, 77, 77)\n', (11587, 11600), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
import pkg_resources
import shutil
import tempfile
import unittest
import jinja2
import os.path
import pwd
import grp
import mock
from charmhelpers.core import templating
TEMPLATES_DIR = pkg_resources.resource_filename(__name__, 'templates')
class TestTemplating(unittest.TestCase):
def setUp(self):
self.charm_dir = pkg_resources.resource_filename(__name__, '')
self._charm_dir_patch = mock.patch.object(templating.hookenv,
'charm_dir')
self._charm_dir_mock = self._charm_dir_patch.start()
self._charm_dir_mock.side_effect = lambda: self.charm_dir
def tearDown(self):
self._charm_dir_patch.stop()
@mock.patch.object(templating.host.os, 'fchown')
@mock.patch.object(templating.host, 'mkdir')
@mock.patch.object(templating.host, 'log')
def test_render(self, log, mkdir, fchown):
with tempfile.NamedTemporaryFile() as fn1, \
tempfile.NamedTemporaryFile() as fn2:
context = {
'nats': {
'port': '1234',
'host': 'example.com',
},
'router': {
'domain': 'api.foo.com'
},
'nginx_port': 80,
}
templating.render('fake_cc.yml', fn1.name,
context, templates_dir=TEMPLATES_DIR)
contents = open(fn1.name).read()
self.assertRegexpMatches(contents, 'port: 1234')
self.assertRegexpMatches(contents, 'host: example.com')
self.assertRegexpMatches(contents, 'domain: api.foo.com')
templating.render('test.conf', fn2.name, context,
templates_dir=TEMPLATES_DIR)
contents = open(fn2.name).read()
self.assertRegexpMatches(contents, 'listen 80')
self.assertEqual(fchown.call_count, 2)
# Not called, because the target directory exists. Calling
# it would make the target directory world readable and
# expose your secrets (!).
self.assertEqual(mkdir.call_count, 0)
@mock.patch.object(templating.host.os, 'fchown')
@mock.patch.object(templating.host, 'mkdir')
@mock.patch.object(templating.host, 'log')
def test_render_from_string(self, log, mkdir, fchown):
with tempfile.NamedTemporaryFile() as fn:
context = {
'foo': 'bar'
}
config_template = '{{ foo }}'
templating.render('somefile.txt', fn.name,
context, templates_dir=TEMPLATES_DIR,
config_template=config_template)
contents = open(fn.name).read()
self.assertRegexpMatches(contents, 'bar')
self.assertEqual(fchown.call_count, 1)
# Not called, because the target directory exists. Calling
# it would make the target directory world readable and
# expose your secrets (!).
self.assertEqual(mkdir.call_count, 0)
@mock.patch.object(templating.host.os, 'fchown')
@mock.patch.object(templating.host, 'mkdir')
@mock.patch.object(templating.host, 'log')
def test_render_loader(self, log, mkdir, fchown):
with tempfile.NamedTemporaryFile() as fn1:
context = {
'nats': {
'port': '1234',
'host': 'example.com',
},
'router': {
'domain': 'api.foo.com'
},
'nginx_port': 80,
}
template_loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(TEMPLATES_DIR)])
templating.render('fake_cc.yml', fn1.name,
context, template_loader=template_loader)
contents = open(fn1.name).read()
self.assertRegexpMatches(contents, 'port: 1234')
self.assertRegexpMatches(contents, 'host: example.com')
self.assertRegexpMatches(contents, 'domain: api.foo.com')
@mock.patch.object(templating.os.path, 'exists')
@mock.patch.object(templating.host.os, 'fchown')
@mock.patch.object(templating.host, 'mkdir')
@mock.patch.object(templating.host, 'log')
def test_render_no_dir(self, log, mkdir, fchown, exists):
exists.return_value = False
with tempfile.NamedTemporaryFile() as fn1, \
tempfile.NamedTemporaryFile() as fn2:
context = {
'nats': {
'port': '1234',
'host': 'example.com',
},
'router': {
'domain': 'api.foo.com'
},
'nginx_port': 80,
}
templating.render('fake_cc.yml', fn1.name,
context, templates_dir=TEMPLATES_DIR)
contents = open(fn1.name).read()
self.assertRegexpMatches(contents, 'port: 1234')
self.assertRegexpMatches(contents, 'host: example.com')
self.assertRegexpMatches(contents, 'domain: api.foo.com')
templating.render('test.conf', fn2.name, context,
templates_dir=TEMPLATES_DIR)
contents = open(fn2.name).read()
self.assertRegexpMatches(contents, 'listen 80')
self.assertEqual(fchown.call_count, 2)
# Target directory was created, world readable (!).
self.assertEqual(mkdir.call_count, 2)
@mock.patch.object(templating.host.os, 'fchown')
@mock.patch.object(templating.host, 'log')
def test_render_2(self, log, fchown):
tmpdir = tempfile.mkdtemp()
fn1 = os.path.join(tmpdir, 'test.conf')
try:
context = {'nginx_port': 80}
templating.render('test.conf', fn1, context,
owner=pwd.getpwuid(os.getuid()).pw_name,
group=grp.getgrgid(os.getgid()).gr_name,
templates_dir=TEMPLATES_DIR)
with open(fn1) as f:
contents = f.read()
self.assertRegexpMatches(contents, 'something')
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
@mock.patch.object(templating, 'hookenv')
@mock.patch('jinja2.Environment')
def test_load_error(self, Env, hookenv):
Env().get_template.side_effect = jinja2.exceptions.TemplateNotFound(
'fake_cc.yml')
self.assertRaises(
jinja2.exceptions.TemplateNotFound, templating.render,
'fake.src', 'fake.tgt', {}, templates_dir='tmpl')
hookenv.log.assert_called_once_with(
'Could not load template fake.src from tmpl.', level=hookenv.ERROR)
|
[
"mock.patch.object",
"tempfile.NamedTemporaryFile",
"charmhelpers.core.templating.render",
"mock.patch",
"pkg_resources.resource_filename",
"jinja2.FileSystemLoader",
"tempfile.mkdtemp",
"jinja2.exceptions.TemplateNotFound",
"shutil.rmtree"
] |
[((190, 244), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""templates"""'], {}), "(__name__, 'templates')\n", (221, 244), False, 'import pkg_resources\n'), ((708, 755), 'mock.patch.object', 'mock.patch.object', (['templating.host.os', '"""fchown"""'], {}), "(templating.host.os, 'fchown')\n", (725, 755), False, 'import mock\n'), ((761, 804), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""mkdir"""'], {}), "(templating.host, 'mkdir')\n", (778, 804), False, 'import mock\n'), ((810, 851), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""log"""'], {}), "(templating.host, 'log')\n", (827, 851), False, 'import mock\n'), ((2172, 2219), 'mock.patch.object', 'mock.patch.object', (['templating.host.os', '"""fchown"""'], {}), "(templating.host.os, 'fchown')\n", (2189, 2219), False, 'import mock\n'), ((2225, 2268), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""mkdir"""'], {}), "(templating.host, 'mkdir')\n", (2242, 2268), False, 'import mock\n'), ((2274, 2315), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""log"""'], {}), "(templating.host, 'log')\n", (2291, 2315), False, 'import mock\n'), ((3105, 3152), 'mock.patch.object', 'mock.patch.object', (['templating.host.os', '"""fchown"""'], {}), "(templating.host.os, 'fchown')\n", (3122, 3152), False, 'import mock\n'), ((3158, 3201), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""mkdir"""'], {}), "(templating.host, 'mkdir')\n", (3175, 3201), False, 'import mock\n'), ((3207, 3248), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""log"""'], {}), "(templating.host, 'log')\n", (3224, 3248), False, 'import mock\n'), ((4110, 4157), 'mock.patch.object', 'mock.patch.object', (['templating.os.path', '"""exists"""'], {}), "(templating.os.path, 'exists')\n", (4127, 4157), False, 'import mock\n'), ((4163, 4210), 'mock.patch.object', 'mock.patch.object', (['templating.host.os', '"""fchown"""'], {}), "(templating.host.os, 'fchown')\n", (4180, 4210), False, 'import mock\n'), ((4216, 4259), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""mkdir"""'], {}), "(templating.host, 'mkdir')\n", (4233, 4259), False, 'import mock\n'), ((4265, 4306), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""log"""'], {}), "(templating.host, 'log')\n", (4282, 4306), False, 'import mock\n'), ((5564, 5611), 'mock.patch.object', 'mock.patch.object', (['templating.host.os', '"""fchown"""'], {}), "(templating.host.os, 'fchown')\n", (5581, 5611), False, 'import mock\n'), ((5617, 5658), 'mock.patch.object', 'mock.patch.object', (['templating.host', '"""log"""'], {}), "(templating.host, 'log')\n", (5634, 5658), False, 'import mock\n'), ((6304, 6344), 'mock.patch.object', 'mock.patch.object', (['templating', '"""hookenv"""'], {}), "(templating, 'hookenv')\n", (6321, 6344), False, 'import mock\n'), ((6350, 6382), 'mock.patch', 'mock.patch', (['"""jinja2.Environment"""'], {}), "('jinja2.Environment')\n", (6360, 6382), False, 'import mock\n'), ((334, 379), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '""""""'], {}), "(__name__, '')\n", (365, 379), False, 'import pkg_resources\n'), ((412, 462), 'mock.patch.object', 'mock.patch.object', (['templating.hookenv', '"""charm_dir"""'], {}), "(templating.hookenv, 'charm_dir')\n", (429, 462), False, 'import mock\n'), ((5718, 5736), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5734, 5736), False, 'import tempfile\n'), ((6469, 6518), 'jinja2.exceptions.TemplateNotFound', 'jinja2.exceptions.TemplateNotFound', (['"""fake_cc.yml"""'], {}), "('fake_cc.yml')\n", (6503, 6518), False, 'import jinja2\n'), ((912, 941), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (939, 941), False, 'import tempfile\n'), ((968, 997), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (995, 997), False, 'import tempfile\n'), ((1305, 1390), 'charmhelpers.core.templating.render', 'templating.render', (['"""fake_cc.yml"""', 'fn1.name', 'context'], {'templates_dir': 'TEMPLATES_DIR'}), "('fake_cc.yml', fn1.name, context, templates_dir=TEMPLATES_DIR\n )\n", (1322, 1390), False, 'from charmhelpers.core import templating\n'), ((1673, 1751), 'charmhelpers.core.templating.render', 'templating.render', (['"""test.conf"""', 'fn2.name', 'context'], {'templates_dir': 'TEMPLATES_DIR'}), "('test.conf', fn2.name, context, templates_dir=TEMPLATES_DIR)\n", (1690, 1751), False, 'from charmhelpers.core import templating\n'), ((2388, 2417), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2415, 2417), False, 'import tempfile\n'), ((2547, 2665), 'charmhelpers.core.templating.render', 'templating.render', (['"""somefile.txt"""', 'fn.name', 'context'], {'templates_dir': 'TEMPLATES_DIR', 'config_template': 'config_template'}), "('somefile.txt', fn.name, context, templates_dir=\n TEMPLATES_DIR, config_template=config_template)\n", (2564, 2665), False, 'from charmhelpers.core import templating\n'), ((3316, 3345), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3343, 3345), False, 'import tempfile\n'), ((3745, 3834), 'charmhelpers.core.templating.render', 'templating.render', (['"""fake_cc.yml"""', 'fn1.name', 'context'], {'template_loader': 'template_loader'}), "('fake_cc.yml', fn1.name, context, template_loader=\n template_loader)\n", (3762, 3834), False, 'from charmhelpers.core import templating\n'), ((4418, 4447), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4445, 4447), False, 'import tempfile\n'), ((4474, 4503), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4501, 4503), False, 'import tempfile\n'), ((4811, 4896), 'charmhelpers.core.templating.render', 'templating.render', (['"""fake_cc.yml"""', 'fn1.name', 'context'], {'templates_dir': 'TEMPLATES_DIR'}), "('fake_cc.yml', fn1.name, context, templates_dir=TEMPLATES_DIR\n )\n", (4828, 4896), False, 'from charmhelpers.core import templating\n'), ((5179, 5257), 'charmhelpers.core.templating.render', 'templating.render', (['"""test.conf"""', 'fn2.name', 'context'], {'templates_dir': 'TEMPLATES_DIR'}), "('test.conf', fn2.name, context, templates_dir=TEMPLATES_DIR)\n", (5196, 5257), False, 'from charmhelpers.core import templating\n'), ((6256, 6297), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {'ignore_errors': '(True)'}), '(tmpdir, ignore_errors=True)\n', (6269, 6297), False, 'import shutil\n'), ((3692, 3730), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['TEMPLATES_DIR'], {}), '(TEMPLATES_DIR)\n', (3715, 3730), False, 'import jinja2\n')]
|
#!/usr/bin/env python
#------------------------------------------------------------
# Purpose: Program finds best-fit parameters of the model
#          a*sin(b*x+c) given data with errors in both variables
# x and y. It uses the effective variance method for
# kmpfit and the results are compared with SciPy's
# ODR routine.
# It can be used to demonstrate the sensitivity of
# the fit process to initial estimates by varying
# values for beta0
# Vog, 09 Dec, 2011
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from numpy.random import normal
from kapteyn import kmpfit
def model(p, x):
# Model: Y = a*sin(b*x+c)
a,b,c = p
return a * numpy.sin(b*x+c)
def residuals(p, data):
# Effective variance method
a, b, c = p
x, y, ex, ey = data
e2 = ey*ey + (a*b*numpy.cos(b*x+c))**2*ex*ex
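    # Effective variance: e2 = ey**2 + (df/dx)**2 * ex**2, where the model
    # slope is df/dx = a*b*cos(b*x+c); x-errors on steep parts of the sine
    # therefore contribute more to the total variance.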
w = numpy.sqrt(numpy.where(e2==0.0, 0.0, 1.0/(e2)))
d = w*(y-model(p,x))
return d
def residuals2(p, data):
# Merit function for data with errors Y only
a, b, c = p
x, y, ey = data
w = numpy.where(ey==0.0, 0.0, 1.0/(ey))
d = w*(y-model(p,x))
return d
# Generate noisy data points
N = 30
a0 = 2; b0 = 1; c0 = 1
x = numpy.linspace(-3, 7.0, N)
y = model((a0,b0,c0),x) + normal(0.0, 0.3, N)
errx = normal(0.1, 0.2, N)
erry = normal(0.1, 0.3, N)
# It is important to start with realistic initial estimates
beta0 = [1.8,0.9,0.9]
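# beta0 is deliberately close to the true values (2, 1, 1): the model is
# periodic, so a distant start such as [5, 5, 5] can converge to a different
# local minimum.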
print("\nODR:")
print("==========")
from scipy.odr import Data, Model, ODR, RealData, odr_stop
linear = Model(model)
mydata = RealData(x, y, sx=errx, sy=erry)
myodr = ODR(mydata, linear, beta0=beta0, maxit=5000)
myoutput = myodr.run()
print("Fitted parameters: ", myoutput.beta)
print("Covariance errors: ", numpy.sqrt(myoutput.cov_beta.diagonal()))
print("Standard errors: ", myoutput.sd_beta)
print("Minimum chi^2: ", myoutput.sum_square)
print("Minimum (reduced)chi^2: ", myoutput.res_var)
beta = myoutput.beta
# Prepare fit routine
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errx, erry))
fitobj.fit(params0=beta0)
print("\n\n======== Results kmpfit with effective variance =========")
print("Fitted parameters: ", fitobj.params)
print("Covariance errors: ", fitobj.xerror)
print("Standard errors: ", fitobj.stderr)
print("Chi^2 min: ", fitobj.chi2_min)
print("Reduced Chi^2: ", fitobj.rchi2_min)
print("Status Message:", fitobj.message)
# Compare to a fit with weights for y only
fitobj2 = kmpfit.Fitter(residuals=residuals2, data=(x, y, erry))
fitobj2.fit(params0=beta0)
print("\n\n======== Results kmpfit errors in Y only =========")
print("Fitted parameters: ", fitobj2.params)
print("Covariance errors: ", fitobj2.xerror)
print("Standard errors: ", fitobj2.stderr)
print("Chi^2 min: ", fitobj2.chi2_min)
print("Reduced Chi^2: ", fitobj2.rchi2_min)
print("Status Message:", fitobj2.message)
# Some plotting
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure(1)
frame = fig.add_subplot(1,1,1, aspect=1, adjustable='datalim')
frame.errorbar(x, y, xerr=errx, yerr=erry, fmt='bo')
# Plot first fit
frame.plot(x, model(beta,x), '-y', lw=4, label="SciPy's ODR", alpha=0.6)
frame.plot(x, model(fitobj.params,x), 'c', ls='--', lw=2, label="kmpfit (errors in X & Y)")
frame.plot(x, model(fitobj2.params,x), 'm', ls='--', lw=2, label="kmpfit (errors in Y only)")
frame.plot(x, model((a0,b0,c0),x), 'r', label="Model with true parameters")
frame.set_xlabel("X")
frame.set_ylabel("Y")
frame.set_title(r"ODR and kmpfit with weighted fit. Model: $y=a\,\sin(bx+c)$")
frame.grid(True)
leg = frame.legend(loc=2)
show()
|
[
"kapteyn.kmpfit.Fitter",
"scipy.odr.ODR",
"matplotlib.pyplot.show",
"scipy.odr.Model",
"scipy.odr.RealData",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.sin",
"matplotlib.pyplot.rc",
"numpy.random.normal",
"numpy.linspace",
"numpy.cos"
] |
[((1276, 1302), 'numpy.linspace', 'numpy.linspace', (['(-3)', '(7.0)', 'N'], {}), '(-3, 7.0, N)\n', (1290, 1302), False, 'import numpy\n'), ((1356, 1375), 'numpy.random.normal', 'normal', (['(0.1)', '(0.2)', 'N'], {}), '(0.1, 0.2, N)\n', (1362, 1375), False, 'from numpy.random import normal\n'), ((1384, 1403), 'numpy.random.normal', 'normal', (['(0.1)', '(0.3)', 'N'], {}), '(0.1, 0.3, N)\n', (1390, 1403), False, 'from numpy.random import normal\n'), ((1593, 1605), 'scipy.odr.Model', 'Model', (['model'], {}), '(model)\n', (1598, 1605), False, 'from scipy.odr import Data, Model, ODR, RealData, odr_stop\n'), ((1615, 1647), 'scipy.odr.RealData', 'RealData', (['x', 'y'], {'sx': 'errx', 'sy': 'erry'}), '(x, y, sx=errx, sy=erry)\n', (1623, 1647), False, 'from scipy.odr import Data, Model, ODR, RealData, odr_stop\n'), ((1656, 1700), 'scipy.odr.ODR', 'ODR', (['mydata', 'linear'], {'beta0': 'beta0', 'maxit': '(5000)'}), '(mydata, linear, beta0=beta0, maxit=5000)\n', (1659, 1700), False, 'from scipy.odr import Data, Model, ODR, RealData, odr_stop\n'), ((2062, 2121), 'kapteyn.kmpfit.Fitter', 'kmpfit.Fitter', ([], {'residuals': 'residuals', 'data': '(x, y, errx, erry)'}), '(residuals=residuals, data=(x, y, errx, erry))\n', (2075, 2121), False, 'from kapteyn import kmpfit\n'), ((2565, 2619), 'kapteyn.kmpfit.Fitter', 'kmpfit.Fitter', ([], {'residuals': 'residuals2', 'data': '(x, y, erry)'}), '(residuals=residuals2, data=(x, y, erry))\n', (2578, 2619), False, 'from kapteyn import kmpfit\n'), ((3026, 3044), 'matplotlib.pyplot.rc', 'rc', (['"""font"""'], {'size': '(9)'}), "('font', size=9)\n", (3028, 3044), False, 'from matplotlib.pyplot import figure, show, rc\n'), ((3045, 3069), 'matplotlib.pyplot.rc', 'rc', (['"""legend"""'], {'fontsize': '(8)'}), "('legend', fontsize=8)\n", (3047, 3069), False, 'from matplotlib.pyplot import figure, show, rc\n'), ((3076, 3085), 'matplotlib.pyplot.figure', 'figure', (['(1)'], {}), '(1)\n', (3082, 3085), False, 'from matplotlib.pyplot import figure, show, rc\n'), ((3719, 3725), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (3723, 3725), False, 'from matplotlib.pyplot import figure, show, rc\n'), ((1139, 1176), 'numpy.where', 'numpy.where', (['(ey == 0.0)', '(0.0)', '(1.0 / ey)'], {}), '(ey == 0.0, 0.0, 1.0 / ey)\n', (1150, 1176), False, 'import numpy\n'), ((1329, 1348), 'numpy.random.normal', 'normal', (['(0.0)', '(0.3)', 'N'], {}), '(0.0, 0.3, N)\n', (1335, 1348), False, 'from numpy.random import normal\n'), ((774, 794), 'numpy.sin', 'numpy.sin', (['(b * x + c)'], {}), '(b * x + c)\n', (783, 794), False, 'import numpy\n'), ((951, 988), 'numpy.where', 'numpy.where', (['(e2 == 0.0)', '(0.0)', '(1.0 / e2)'], {}), '(e2 == 0.0, 0.0, 1.0 / e2)\n', (962, 988), False, 'import numpy\n'), ((906, 926), 'numpy.cos', 'numpy.cos', (['(b * x + c)'], {}), '(b * x + c)\n', (915, 926), False, 'import numpy\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
from kerastools.initializers import RandomMaclaurin
class CompactKOrderPooling(Layer):
""" Keras layer to compute K-th order moments representation. In the non-trainable case, the Random Maclaurin
initialization is used while in trainable mode we simply initialize the weights with Glorot uniform initializer.
:param output_dim: Dimension of the high-order representation.
:param ho_trainable: if the weights for high-order approximation are trainable.
"""
def __init__(self,
output_dim,
ho_trainable=False,
**kwargs):
super(CompactKOrderPooling, self).__init__(**kwargs)
self.ho_trainable = ho_trainable
self.output_dim = output_dim
self.k_order_weights = []
self.order = 0
if ho_trainable:
self.init_func = "glorot_uniform"
else:
self.init_func = RandomMaclaurin()
def build(self, input_shape):
for k_shape in input_shape:
self.order += 1
self.k_order_weights.append(self.add_weight(name='W' + str(self.order),
shape=(1, 1, int(k_shape[-1]), self.output_dim),
initializer=self.init_func,
trainable=self.ho_trainable,
constraint=None))
super(CompactKOrderPooling, self).build(input_shape)
def call(self, inputs, **kwargs):
        if not isinstance(inputs, list) or len(inputs) != self.order:
            raise Exception('CompactKOrderPooling must be called '
                            'on a list of ' + str(self.order) + ' tensors. Got: ' + str(inputs))
T = 1.
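        # Running Hadamard (elementwise) product of the K projected maps: each 1x1
        # convolution below applies one projection, and the product approximates
        # the K-th order interaction of the input features.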
for k, inp in enumerate(inputs):
T *= tf.nn.conv2d(input=inp,
filter=self.k_order_weights[k],
strides=[1, 1, 1, 1],
padding="SAME",
dilations=[1, 1, 1, 1]) # shape = bs x W x H x dim_intermediate
return T
def compute_output_shape(self, input_shape):
return input_shape[0][0], input_shape[0][1], input_shape[0][2], self.output_dim
def get_config(self):
base_config = super(CompactKOrderPooling, self).get_config()
config = {"output_dim": self.output_dim,
"ho_trainable": self.ho_trainable}
return dict(list(base_config.items()) + list(config.items()))
class PartialKOrderBlock(Layer):
""" Keras layer to compute approximate bilinear product with either trainable weights or Random Maclaurin init.
Arguments:
output_dim: Dimension of the representation.
only_project_second: Do not add learnable weights for the second entry (cascaded implementation)
ho_trainable: make high-order weights trainable or not.
Returns:
A Keras layer.
"""
def __init__(self,
output_dim,
only_project_second=True,
ho_trainable=True,
**kwargs):
self.ho_trainable = ho_trainable
self.output_dim = output_dim
self.only_project_second = only_project_second
if ho_trainable:
self.init_func = "glorot_uniform"
else:
self.init_func = RandomMaclaurin()
super(PartialKOrderBlock, self).__init__(**kwargs)
def build(self, input_shape):
self.second_block_dim = int(input_shape[1][-1])
self.proj = self.add_weight(name='w',
shape=(1, 1, self.second_block_dim, self.output_dim),
initializer=self.init_func,
trainable=self.ho_trainable,
constraint=None)
if not self.only_project_second:
self.first_block_dim = int(input_shape[0][-1])
self.first_proj = self.add_weight(name='w_first',
shape=(1, 1, self.first_block_dim, self.output_dim),
initializer=self.init_func,
trainable=self.ho_trainable,
constraint=None)
super(PartialKOrderBlock, self).build(input_shape)
def call(self, inputs, **kwargs):
        if not isinstance(inputs, list) or len(inputs) != 2:
            raise Exception('PartialKOrderBlock must be called '
                            'on a list of 2 tensors. Got: {}'.format(inputs))
first_block, second_block = inputs
second_block = tf.nn.conv2d(input=second_block,
filter=self.proj,
strides=[1, 1, 1, 1],
padding="VALID",
dilations=[1, 1, 1, 1]) # shape = bs x W x H x dim_intermediate
if not self.only_project_second:
first_block = tf.nn.conv2d(input=first_block,
filter=self.first_proj,
strides=[1, 1, 1, 1],
padding="VALID",
dilations=[1, 1, 1, 1]) # shape = bs x W x H x dim_intermediate
return first_block * second_block
def compute_output_shape(self, input_shape):
return input_shape[0][0], input_shape[0][1], input_shape[0][2], self.output_dim
def get_config(self):
base_config = super(PartialKOrderBlock, self).get_config()
config = {'output_dim': self.output_dim,
'only_project_second': self.only_project_second,
'ho_trainable': self.ho_trainable}
return dict(list(base_config.items()) + list(config.items()))
# alias
CKOP = CompactKOrderPooling
PKOB = PartialKOrderBlock
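# Minimal usage sketch (hypothetical tensor shapes, not part of the library):
#   x1 = tf.keras.Input(shape=(7, 7, 256))
#   x2 = tf.keras.Input(shape=(7, 7, 512))
#   y = CKOP(output_dim=64)([x1, x2])  # compact 2nd-order pooling -> (bs, 7, 7, 64)
#   z = PKOB(output_dim=64)([y, x2])   # cascaded partial block on top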
|
[
"tensorflow.nn.conv2d",
"kerastools.initializers.RandomMaclaurin"
] |
[((4864, 4981), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'second_block', 'filter': 'self.proj', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'dilations': '[1, 1, 1, 1]'}), "(input=second_block, filter=self.proj, strides=[1, 1, 1, 1],\n padding='VALID', dilations=[1, 1, 1, 1])\n", (4876, 4981), True, 'import tensorflow as tf\n'), ((1040, 1057), 'kerastools.initializers.RandomMaclaurin', 'RandomMaclaurin', ([], {}), '()\n', (1055, 1057), False, 'from kerastools.initializers import RandomMaclaurin\n'), ((1997, 2119), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'inp', 'filter': 'self.k_order_weights[k]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'dilations': '[1, 1, 1, 1]'}), "(input=inp, filter=self.k_order_weights[k], strides=[1, 1, 1, 1\n ], padding='SAME', dilations=[1, 1, 1, 1])\n", (2009, 2119), True, 'import tensorflow as tf\n'), ((3533, 3550), 'kerastools.initializers.RandomMaclaurin', 'RandomMaclaurin', ([], {}), '()\n', (3548, 3550), False, 'from kerastools.initializers import RandomMaclaurin\n'), ((5231, 5354), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'first_block', 'filter': 'self.first_proj', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'dilations': '[1, 1, 1, 1]'}), "(input=first_block, filter=self.first_proj, strides=[1, 1, 1, 1\n ], padding='VALID', dilations=[1, 1, 1, 1])\n", (5243, 5354), True, 'import tensorflow as tf\n')]
|
from argparse import ArgumentParser
from typing import Optional, Sequence, Text
from luh3417.serialized_replace import walk
from luh3417.utils import make_doer, run_main, setup_logging
doing = make_doer("luh3417.replace")
def parse_args(argv: Optional[Sequence[Text]] = None):
parser = ArgumentParser(description="Seeks and replaces serialized values")
parser.add_argument("-i", "--input", required=True, help="Input file name")
parser.add_argument("-o", "--output", required=True, help="Output file name")
parser.add_argument("-b", "--before", nargs="+", help="String(s) to look for")
parser.add_argument("-a", "--after", nargs="+", help="String(s) to replace by")
parser.add_argument(
"-c", "--charset", default="utf-8", help="What charset to use to read the file"
)
args = parser.parse_args(argv)
if len(args.before) != len(args.after):
parser.error("Not the same number of --before and --after")
return args
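# Example invocation (hypothetical file names):
#   python -m luh3417.replace -i dump.sql -o fixed.sql -b http://old.test -a https://new.test
# Each --before string is paired positionally with the --after string that replaces it.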
def main(argv: Optional[Sequence[Text]] = None):
args = parse_args(argv)
setup_logging()
rep = [
*zip(
(x.encode(args.charset) for x in args.before),
(x.encode(args.charset) for x in args.after),
)
]
with open(args.input, "rb") as i, open(args.output, "wb") as o:
for line in i:
o.write(walk(line, rep))
def __main__():
return run_main(main, doing)
if __name__ == "__main__":
__main__()
|
[
"argparse.ArgumentParser",
"luh3417.utils.setup_logging",
"luh3417.utils.run_main",
"luh3417.utils.make_doer",
"luh3417.serialized_replace.walk"
] |
[((195, 223), 'luh3417.utils.make_doer', 'make_doer', (['"""luh3417.replace"""'], {}), "('luh3417.replace')\n", (204, 223), False, 'from luh3417.utils import make_doer, run_main, setup_logging\n'), ((294, 360), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Seeks and replaces serialized values"""'}), "(description='Seeks and replaces serialized values')\n", (308, 360), False, 'from argparse import ArgumentParser\n'), ((1075, 1090), 'luh3417.utils.setup_logging', 'setup_logging', ([], {}), '()\n', (1088, 1090), False, 'from luh3417.utils import make_doer, run_main, setup_logging\n'), ((1409, 1430), 'luh3417.utils.run_main', 'run_main', (['main', 'doing'], {}), '(main, doing)\n', (1417, 1430), False, 'from luh3417.utils import make_doer, run_main, setup_logging\n'), ((1363, 1378), 'luh3417.serialized_replace.walk', 'walk', (['line', 'rep'], {}), '(line, rep)\n', (1367, 1378), False, 'from luh3417.serialized_replace import walk\n')]
|
from common.preprocessor import Preprocessor
class EnvWrapper:
    """Wrapper class for SC2Env.

    Used to fit the data coming from SC2Env to the agent's model and vice versa.
    """
def __init__(self, env, model_config):
self.env = env
self.model_config = model_config
self.preprocessor = Preprocessor(model_config)
def reset(self):
timesteps = self.env.reset()
return self.preprocess_timesteps(timesteps)
def step(self, action):
processed_actions = self.preprocessor.preprocess_action(action)
timesteps = self.env.step(processed_actions)
return self.preprocess_timesteps(timesteps)
def preprocess_timesteps(self, timesteps):
obs_raw = [timestep.observation for timestep in timesteps]
available_actions_raw = [ob.available_actions for ob in obs_raw]
rewards = [timestep.reward for timestep in timesteps]
score_cumulative = [timestep.observation['score_cumulative'] for timestep in timesteps]
dones = [timestep.last() for timestep in timesteps]
# Available actions get one hot encoded
available_actions = [self.preprocessor.preprocess_available_actions(available_actions_raw)]
# raw observations are made to better fit the configuration of the agents model
processed_obs = self.preprocessor.preprocess_observations(obs_raw)
return {'observation': processed_obs,
'rewards': rewards,
'score_cumulative': score_cumulative,
'dones': dones,
'available_actions': available_actions}
def observation_spec(self):
return self.env.observation_spec
def action_spec(self):
return self.env.action_spec
def close(self):
self.env.close()
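# Minimal usage sketch (hypothetical pysc2 setup; `model_config` and the action
# format come from the surrounding project):
#   from pysc2.env import sc2_env
#   env = EnvWrapper(sc2_env.SC2Env(map_name="MoveToBeacon"), model_config)
#   state = env.reset()
#   state = env.step(action)
#   env.close()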
|
[
"common.preprocessor.Preprocessor"
] |
[((324, 350), 'common.preprocessor.Preprocessor', 'Preprocessor', (['model_config'], {}), '(model_config)\n', (336, 350), False, 'from common.preprocessor import Preprocessor\n')]
|
# Generated by Django 3.1 on 2020-08-24 21:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shortener', '0008_auto_20200824_1342'),
]
operations = [
migrations.AlterField(
model_name='shortenedurl',
name='key',
field=models.CharField(db_index=True, default='', max_length=8, unique=True),
),
migrations.AlterField(
model_name='visit',
name='created_at',
field=models.DateField(auto_now_add=True, db_index=True),
),
migrations.CreateModel(
name='Analytic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('all_visits', models.JSONField(default=dict)),
                ('unique_visits', models.JSONField(default=dict)),
('short_url', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='analytic', to='shortener.shortenedurl')),
],
),
]
|
[
"django.db.models.OneToOneField",
"django.db.models.CharField",
"django.db.models.JSONField",
"django.db.models.AutoField",
"django.db.models.DateField"
] |
[((372, 442), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'default': '""""""', 'max_length': '(8)', 'unique': '(True)'}), "(db_index=True, default='', max_length=8, unique=True)\n", (388, 442), False, 'from django.db import migrations, models\n'), ((567, 617), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)', 'db_index': '(True)'}), '(auto_now_add=True, db_index=True)\n', (583, 617), False, 'from django.db import migrations, models\n'), ((735, 828), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (751, 828), False, 'from django.db import migrations, models\n'), ((858, 886), 'django.db.models.JSONField', 'models.JSONField', ([], {'default': '{}'}), '(default={})\n', (874, 886), False, 'from django.db import migrations, models\n'), ((923, 951), 'django.db.models.JSONField', 'models.JSONField', ([], {'default': '{}'}), '(default={})\n', (939, 951), False, 'from django.db import migrations, models\n'), ((984, 1107), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""analytic"""', 'to': '"""shortener.shortenedurl"""'}), "(on_delete=django.db.models.deletion.CASCADE,\n related_name='analytic', to='shortener.shortenedurl')\n", (1004, 1107), False, 'from django.db import migrations, models\n')]
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class Evaluation(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Evaluation - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'name': 'str',
'conversation': 'Conversation',
'evaluation_form': 'EvaluationForm',
'evaluator': 'User',
'agent': 'User',
'calibration': 'Calibration',
'status': 'str',
'answers': 'EvaluationScoringSet',
'agent_has_read': 'bool',
'release_date': 'datetime',
'assigned_date': 'datetime',
'changed_date': 'datetime',
'queue': 'Queue',
'media_type': 'list[str]',
'rescore': 'bool',
'conversation_date': 'datetime',
'conversation_end_date': 'datetime',
'never_release': 'bool',
'resource_id': 'str',
'resource_type': 'str',
'redacted': 'bool',
'is_scoring_index': 'bool',
'authorized_actions': 'list[str]',
'self_uri': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'conversation': 'conversation',
'evaluation_form': 'evaluationForm',
'evaluator': 'evaluator',
'agent': 'agent',
'calibration': 'calibration',
'status': 'status',
'answers': 'answers',
'agent_has_read': 'agentHasRead',
'release_date': 'releaseDate',
'assigned_date': 'assignedDate',
'changed_date': 'changedDate',
'queue': 'queue',
'media_type': 'mediaType',
'rescore': 'rescore',
'conversation_date': 'conversationDate',
'conversation_end_date': 'conversationEndDate',
'never_release': 'neverRelease',
'resource_id': 'resourceId',
'resource_type': 'resourceType',
'redacted': 'redacted',
'is_scoring_index': 'isScoringIndex',
'authorized_actions': 'authorizedActions',
'self_uri': 'selfUri'
}
self._id = None
self._name = None
self._conversation = None
self._evaluation_form = None
self._evaluator = None
self._agent = None
self._calibration = None
self._status = None
self._answers = None
self._agent_has_read = None
self._release_date = None
self._assigned_date = None
self._changed_date = None
self._queue = None
self._media_type = None
self._rescore = None
self._conversation_date = None
self._conversation_end_date = None
self._never_release = None
self._resource_id = None
self._resource_type = None
self._redacted = None
self._is_scoring_index = None
self._authorized_actions = None
self._self_uri = None
@property
def id(self):
"""
Gets the id of this Evaluation.
The globally unique identifier for the object.
:return: The id of this Evaluation.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Evaluation.
The globally unique identifier for the object.
:param id: The id of this Evaluation.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Evaluation.
:return: The name of this Evaluation.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Evaluation.
:param name: The name of this Evaluation.
:type: str
"""
self._name = name
@property
def conversation(self):
"""
Gets the conversation of this Evaluation.
:return: The conversation of this Evaluation.
:rtype: Conversation
"""
return self._conversation
@conversation.setter
def conversation(self, conversation):
"""
Sets the conversation of this Evaluation.
:param conversation: The conversation of this Evaluation.
:type: Conversation
"""
self._conversation = conversation
@property
def evaluation_form(self):
"""
Gets the evaluation_form of this Evaluation.
Evaluation form used for evaluation.
:return: The evaluation_form of this Evaluation.
:rtype: EvaluationForm
"""
return self._evaluation_form
@evaluation_form.setter
def evaluation_form(self, evaluation_form):
"""
Sets the evaluation_form of this Evaluation.
Evaluation form used for evaluation.
:param evaluation_form: The evaluation_form of this Evaluation.
:type: EvaluationForm
"""
self._evaluation_form = evaluation_form
@property
def evaluator(self):
"""
Gets the evaluator of this Evaluation.
:return: The evaluator of this Evaluation.
:rtype: User
"""
return self._evaluator
@evaluator.setter
def evaluator(self, evaluator):
"""
Sets the evaluator of this Evaluation.
:param evaluator: The evaluator of this Evaluation.
:type: User
"""
self._evaluator = evaluator
@property
def agent(self):
"""
Gets the agent of this Evaluation.
:return: The agent of this Evaluation.
:rtype: User
"""
return self._agent
@agent.setter
def agent(self, agent):
"""
Sets the agent of this Evaluation.
:param agent: The agent of this Evaluation.
:type: User
"""
self._agent = agent
@property
def calibration(self):
"""
Gets the calibration of this Evaluation.
:return: The calibration of this Evaluation.
:rtype: Calibration
"""
return self._calibration
@calibration.setter
def calibration(self, calibration):
"""
Sets the calibration of this Evaluation.
:param calibration: The calibration of this Evaluation.
:type: Calibration
"""
self._calibration = calibration
@property
def status(self):
"""
Gets the status of this Evaluation.
:return: The status of this Evaluation.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Evaluation.
:param status: The status of this Evaluation.
:type: str
"""
allowed_values = ["PENDING", "INPROGRESS", "FINISHED"]
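        # Enum values introduced after this SDK was generated fall through to a
        # sentinel instead of raising, so older clients keep deserializing.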
if status.lower() not in map(str.lower, allowed_values):
# print("Invalid value for status -> " + status)
self._status = "outdated_sdk_version"
else:
self._status = status
@property
def answers(self):
"""
Gets the answers of this Evaluation.
:return: The answers of this Evaluation.
:rtype: EvaluationScoringSet
"""
return self._answers
@answers.setter
def answers(self, answers):
"""
Sets the answers of this Evaluation.
:param answers: The answers of this Evaluation.
:type: EvaluationScoringSet
"""
self._answers = answers
@property
def agent_has_read(self):
"""
Gets the agent_has_read of this Evaluation.
:return: The agent_has_read of this Evaluation.
:rtype: bool
"""
return self._agent_has_read
@agent_has_read.setter
def agent_has_read(self, agent_has_read):
"""
Sets the agent_has_read of this Evaluation.
:param agent_has_read: The agent_has_read of this Evaluation.
:type: bool
"""
self._agent_has_read = agent_has_read
@property
def release_date(self):
"""
Gets the release_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The release_date of this Evaluation.
:rtype: datetime
"""
return self._release_date
@release_date.setter
def release_date(self, release_date):
"""
Sets the release_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param release_date: The release_date of this Evaluation.
:type: datetime
"""
self._release_date = release_date
@property
def assigned_date(self):
"""
Gets the assigned_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The assigned_date of this Evaluation.
:rtype: datetime
"""
return self._assigned_date
@assigned_date.setter
def assigned_date(self, assigned_date):
"""
Sets the assigned_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param assigned_date: The assigned_date of this Evaluation.
:type: datetime
"""
self._assigned_date = assigned_date
@property
def changed_date(self):
"""
Gets the changed_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The changed_date of this Evaluation.
:rtype: datetime
"""
return self._changed_date
@changed_date.setter
def changed_date(self, changed_date):
"""
Sets the changed_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param changed_date: The changed_date of this Evaluation.
:type: datetime
"""
self._changed_date = changed_date
@property
def queue(self):
"""
Gets the queue of this Evaluation.
:return: The queue of this Evaluation.
:rtype: Queue
"""
return self._queue
@queue.setter
def queue(self, queue):
"""
Sets the queue of this Evaluation.
:param queue: The queue of this Evaluation.
:type: Queue
"""
self._queue = queue
@property
def media_type(self):
"""
Gets the media_type of this Evaluation.
List of different communication types used in conversation.
:return: The media_type of this Evaluation.
:rtype: list[str]
"""
return self._media_type
@media_type.setter
def media_type(self, media_type):
"""
Sets the media_type of this Evaluation.
List of different communication types used in conversation.
:param media_type: The media_type of this Evaluation.
:type: list[str]
"""
self._media_type = media_type
@property
def rescore(self):
"""
Gets the rescore of this Evaluation.
Is only true when evaluation is re-scored.
:return: The rescore of this Evaluation.
:rtype: bool
"""
return self._rescore
@rescore.setter
def rescore(self, rescore):
"""
Sets the rescore of this Evaluation.
Is only true when evaluation is re-scored.
:param rescore: The rescore of this Evaluation.
:type: bool
"""
self._rescore = rescore
@property
def conversation_date(self):
"""
Gets the conversation_date of this Evaluation.
Date of conversation. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The conversation_date of this Evaluation.
:rtype: datetime
"""
return self._conversation_date
@conversation_date.setter
def conversation_date(self, conversation_date):
"""
Sets the conversation_date of this Evaluation.
Date of conversation. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param conversation_date: The conversation_date of this Evaluation.
:type: datetime
"""
self._conversation_date = conversation_date
@property
def conversation_end_date(self):
"""
Gets the conversation_end_date of this Evaluation.
End date of conversation if it had completed before evaluation creation. Null if created before the conversation ended. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The conversation_end_date of this Evaluation.
:rtype: datetime
"""
return self._conversation_end_date
@conversation_end_date.setter
def conversation_end_date(self, conversation_end_date):
"""
Sets the conversation_end_date of this Evaluation.
End date of conversation if it had completed before evaluation creation. Null if created before the conversation ended. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param conversation_end_date: The conversation_end_date of this Evaluation.
:type: datetime
"""
self._conversation_end_date = conversation_end_date
@property
def never_release(self):
"""
Gets the never_release of this Evaluation.
Signifies if the evaluation is never to be released. This cannot be set true if release date is also set.
:return: The never_release of this Evaluation.
:rtype: bool
"""
return self._never_release
@never_release.setter
def never_release(self, never_release):
"""
Sets the never_release of this Evaluation.
Signifies if the evaluation is never to be released. This cannot be set true if release date is also set.
:param never_release: The never_release of this Evaluation.
:type: bool
"""
self._never_release = never_release
@property
def resource_id(self):
"""
Gets the resource_id of this Evaluation.
Only used for email evaluations. Will be null for all other evaluations.
:return: The resource_id of this Evaluation.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""
Sets the resource_id of this Evaluation.
Only used for email evaluations. Will be null for all other evaluations.
:param resource_id: The resource_id of this Evaluation.
:type: str
"""
self._resource_id = resource_id
@property
def resource_type(self):
"""
Gets the resource_type of this Evaluation.
The type of resource. Only used for email evaluations. Will be null for evaluations on all other resources.
:return: The resource_type of this Evaluation.
:rtype: str
"""
return self._resource_type
@resource_type.setter
def resource_type(self, resource_type):
"""
Sets the resource_type of this Evaluation.
The type of resource. Only used for email evaluations. Will be null for evaluations on all other resources.
:param resource_type: The resource_type of this Evaluation.
:type: str
"""
allowed_values = ["EMAIL"]
if resource_type.lower() not in map(str.lower, allowed_values):
# print("Invalid value for resource_type -> " + resource_type)
self._resource_type = "outdated_sdk_version"
else:
self._resource_type = resource_type
@property
def redacted(self):
"""
Gets the redacted of this Evaluation.
Is only true when the user making the request does not have sufficient permissions to see evaluation
:return: The redacted of this Evaluation.
:rtype: bool
"""
return self._redacted
@redacted.setter
def redacted(self, redacted):
"""
Sets the redacted of this Evaluation.
Is only true when the user making the request does not have sufficient permissions to see evaluation
:param redacted: The redacted of this Evaluation.
:type: bool
"""
self._redacted = redacted
@property
def is_scoring_index(self):
"""
Gets the is_scoring_index of this Evaluation.
:return: The is_scoring_index of this Evaluation.
:rtype: bool
"""
return self._is_scoring_index
@is_scoring_index.setter
def is_scoring_index(self, is_scoring_index):
"""
Sets the is_scoring_index of this Evaluation.
:param is_scoring_index: The is_scoring_index of this Evaluation.
:type: bool
"""
self._is_scoring_index = is_scoring_index
@property
def authorized_actions(self):
"""
Gets the authorized_actions of this Evaluation.
List of user authorized actions on evaluation. Possible values: edit, editScore, editAgentSignoff, delete, viewAudit
:return: The authorized_actions of this Evaluation.
:rtype: list[str]
"""
return self._authorized_actions
@authorized_actions.setter
def authorized_actions(self, authorized_actions):
"""
Sets the authorized_actions of this Evaluation.
List of user authorized actions on evaluation. Possible values: edit, editScore, editAgentSignoff, delete, viewAudit
:param authorized_actions: The authorized_actions of this Evaluation.
:type: list[str]
"""
self._authorized_actions = authorized_actions
@property
def self_uri(self):
"""
Gets the self_uri of this Evaluation.
The URI for this object
:return: The self_uri of this Evaluation.
:rtype: str
"""
return self._self_uri
@self_uri.setter
def self_uri(self, self_uri):
"""
Sets the self_uri of this Evaluation.
The URI for this object
:param self_uri: The self_uri of this Evaluation.
:type: str
"""
self._self_uri = self_uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
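# A minimal usage sketch (assumption: Evaluation() is constructible with no
# arguments, as swagger-codegen models typically are). It exercises the enum
# guard in the `status` setter and the recursive `to_dict` serializer above:
if __name__ == '__main__':
    ev = Evaluation()
    ev.status = 'FINISHED'       # accepted: case-insensitive match of allowed_values
    ev.status = 'CANCELLED'      # rejected: coerced to 'outdated_sdk_version'
    print(ev.status)
    print(sorted(ev.to_dict()))  # every key from swagger_types, mostly None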
|
[
"six.iteritems"
] |
[((20100, 20129), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (20109, 20129), False, 'from six import iteritems\n')]
|
#-*- coding:utf-8 -*-
"""
Main class that provides SPC analysis. It detects SPC rule violations.
It can draw charts using matplotlib.
:arguments:
data
user data as flat array/list
"""
from utils import *
import numpy as np
import pandas as pd
RULE_1_BEYOND_3SIGMA = '1 point beyond zone A (3 sigma)'
RULE_2_OF_3_BEYOND_2SIGMA_ONE_SIDE = '2 out of 3 consecutive points beyond zone B, same side'
RULE_4_OF_5_BEYOND_1SIGMA = '4 out of 5 consecutive points beyond zone C, same side of the center line'
RULE_6_TRENDING = '6 consecutive points steadily increasing or decreasing'
RULE_8_ON_TWO_SIDE_NONE_C = '8 consecutive points on both sides of the center line, none in zone C'
RULE_9_ON_ONE_SIDE = '9 consecutive points on the same side of the center line'
RULE_14_up_down = '14 consecutive points alternating up and down'
RULE_15_below_1sigma = '15 consecutive points within zone C on either side of the center line'
RULES_ALL = [RULE_1_BEYOND_3SIGMA,
RULE_2_OF_3_BEYOND_2SIGMA_ONE_SIDE,
RULE_4_OF_5_BEYOND_1SIGMA,
RULE_6_TRENDING,
RULE_8_ON_TWO_SIDE_NONE_C,
RULE_9_ON_ONE_SIDE,
RULE_14_up_down,
RULE_15_below_1sigma]
RULES_FUNCS = {
RULE_1_BEYOND_3SIGMA: (test_1_beyond_3sigma, 1),
RULE_2_OF_3_BEYOND_2SIGMA_ONE_SIDE: (test_2_OF_3_BEYOND_2SIGMA_ONE_SIDE, 3),
RULE_4_OF_5_BEYOND_1SIGMA: (test_4_OF_5_BEYOND_1SIGMA_ONE_SIDE, 5),
RULE_6_TRENDING: (test_6_thrund, 6),
RULE_8_ON_TWO_SIDE_NONE_C: (test_8_BEYOND_1SIGMA, 8),
RULE_9_ON_ONE_SIDE: (test_violating_runs, 9),
RULE_14_up_down: (test_14_up_down, 14),
RULE_15_below_1sigma: (test_15_below_sigma, 15)}
class SPC_rule(object):
"""
    Main class that provides WECR (Western Electric rules) analysis. It detects rule violations.
It can draw charts using matplotlib.
:arguments:
data
user data as flat array/list
"""
def __init__(self, data, center=None, sigma=None, rule_keys=None):
'''
:param data: list/dataframe/np.ndarray
:param center: mean
:param sigma: sigma
        :param rule_keys: list, keys of the rules to check, e.g. [1, 2]
            1: RULE_1_BEYOND_3SIGMA = '1 point beyond zone A (3 sigma)'
            2: RULE_2_OF_3_BEYOND_2SIGMA_ONE_SIDE = '2 out of 3 consecutive points beyond zone B, same side'
            3: RULE_4_OF_5_BEYOND_1SIGMA = '4 out of 5 consecutive points beyond zone C, same side of the center line'
            4: RULE_6_TRENDING = '6 consecutive points steadily increasing or decreasing'
            5: RULE_8_ON_TWO_SIDE_NONE_C = '8 consecutive points on both sides of the center line, none in zone C'
            6: RULE_9_ON_ONE_SIDE = '9 consecutive points on the same side of the center line'
            7: RULE_14_up_down = '14 consecutive points alternating up and down'
            8: RULE_15_below_1sigma = '15 consecutive points within zone C on either side of the center line'
'''
        # Normalize the input to a flat list of scalars so that the
        # point-by-point slicing in _find_violating_points works as intended.
        if isinstance(data, pd.DataFrame):
            data = list(data.values.reshape(-1))
        elif isinstance(data, np.ndarray):
            data = list(data.reshape(-1))
        elif not isinstance(data, list):
            raise TypeError('please input data as list, pd.DataFrame or np.ndarray')
self.orig_data = data
if not center:
center = np.mean(data)
self.center = center
if not sigma:
sigma = np.std(data, ddof=1)
self.sigma = sigma
if not rule_keys:
rule_new = RULES_ALL
else:
rule_new = []
for key in rule_keys:
rule_new.append(RULES_ALL[key-1])
self.rules = rule_new
self.length = len(data)
self.violating_points = self._find_violating_points()
    def __repr__(self):
        # __repr__ should be side-effect free: embed the violations in the
        # string instead of printing them.
        return "<SPC_rule: %s>" % self.get_violating_points()
def _find_violating_points(self):
points_all = {}
for r in self.rules:
func, points_num = RULES_FUNCS[r]
list1 = []
for i in range(len(self.orig_data)):
if i < points_num-1:
continue
if func(self.orig_data[i - points_num+1:i+1], self.center, self.sigma):
list1.extend(range(i - points_num+1, i+1))
points_all.setdefault(r, []).extend(list1)
return points_all
def get_violating_points(self):
"""Return points that violates rules"""
points_all = self.violating_points
points_dict = {}
for key, values in points_all.items():
# if values != []:
points_dict[key] = sorted(set(values))
return points_dict
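# A minimal usage sketch (assumes the rule test functions imported from
# `utils` above are importable; the data values are illustrative):
if __name__ == '__main__':
    data = [10.2, 9.8, 10.1, 10.5, 9.9, 10.0, 13.5, 10.3, 9.7, 10.1, 10.0, 9.9]
    spc = SPC_rule(data, rule_keys=[1, 6])   # check only rule keys 1 and 6 (see docstring)
    print(spc.get_violating_points())      # {rule name: [violating indexes]}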
|
[
"numpy.std",
"numpy.mean"
] |
[((2792, 2805), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2799, 2805), True, 'import numpy as np\n'), ((2882, 2902), 'numpy.std', 'np.std', (['data'], {'ddof': '(1)'}), '(data, ddof=1)\n', (2888, 2902), True, 'import numpy as np\n')]
|
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.client import QueryRuntimeException
FIRST_PART_NAME = "all_1_1_0"
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance("node",
main_configs=["configs/storage.xml"],
tmpfs=["/disk:size=100M"],
with_minio=True)
cluster.start()
yield cluster
finally:
cluster.shutdown()
@pytest.mark.parametrize("policy", ["encrypted_policy", "encrypted_policy_key192b", "local_policy", "s3_policy"])
def test_encrypted_disk(cluster, policy):
node = cluster.instances["node"]
node.query(
"""
CREATE TABLE encrypted_test (
id Int64,
data String
) ENGINE=MergeTree()
ORDER BY id
SETTINGS storage_policy='{}'
""".format(policy)
)
node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')")
select_query = "SELECT * FROM encrypted_test ORDER BY id FORMAT Values"
assert node.query(select_query) == "(0,'data'),(1,'data')"
node.query("INSERT INTO encrypted_test VALUES (2,'data'),(3,'data')")
node.query("OPTIMIZE TABLE encrypted_test FINAL")
assert node.query(select_query) == "(0,'data'),(1,'data'),(2,'data'),(3,'data')"
node.query("DROP TABLE IF EXISTS encrypted_test NO DELAY")
@pytest.mark.parametrize("policy, destination_disks", [("local_policy", ["disk_local_encrypted", "disk_local_encrypted2", "disk_local_encrypted_key192b", "disk_local"]), ("s3_policy", ["disk_s3_encrypted", "disk_s3"])])
def test_part_move(cluster, policy, destination_disks):
node = cluster.instances["node"]
node.query(
"""
CREATE TABLE encrypted_test (
id Int64,
data String
) ENGINE=MergeTree()
ORDER BY id
SETTINGS storage_policy='{}'
""".format(policy)
)
node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')")
select_query = "SELECT * FROM encrypted_test ORDER BY id FORMAT Values"
assert node.query(select_query) == "(0,'data'),(1,'data')"
for destination_disk in destination_disks:
node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, destination_disk))
assert node.query(select_query) == "(0,'data'),(1,'data')"
with pytest.raises(QueryRuntimeException) as exc:
node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, destination_disk))
assert("Part '{}' is already on disk '{}'".format(FIRST_PART_NAME, destination_disk) in str(exc.value))
assert node.query(select_query) == "(0,'data'),(1,'data')"
node.query("DROP TABLE IF EXISTS encrypted_test NO DELAY")
@pytest.mark.parametrize("policy,encrypted_disk", [("local_policy", "disk_local_encrypted"), ("s3_policy", "disk_s3_encrypted")])
def test_optimize_table(cluster, policy, encrypted_disk):
node = cluster.instances["node"]
node.query(
"""
CREATE TABLE encrypted_test (
id Int64,
data String
) ENGINE=MergeTree()
ORDER BY id
SETTINGS storage_policy='{}'
""".format(policy)
)
node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')")
select_query = "SELECT * FROM encrypted_test ORDER BY id FORMAT Values"
assert node.query(select_query) == "(0,'data'),(1,'data')"
node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, encrypted_disk))
assert node.query(select_query) == "(0,'data'),(1,'data')"
node.query("INSERT INTO encrypted_test VALUES (2,'data'),(3,'data')")
node.query("OPTIMIZE TABLE encrypted_test FINAL")
with pytest.raises(QueryRuntimeException) as exc:
node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, encrypted_disk))
assert("Part {} is not exists or not active".format(FIRST_PART_NAME) in str(exc.value))
assert node.query(select_query) == "(0,'data'),(1,'data'),(2,'data'),(3,'data')"
node.query("DROP TABLE IF EXISTS encrypted_test NO DELAY")
|
[
"pytest.mark.parametrize",
"pytest.raises",
"helpers.cluster.ClickHouseCluster",
"pytest.fixture"
] |
[((143, 173), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (157, 173), False, 'import pytest\n'), ((567, 683), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""policy"""', "['encrypted_policy', 'encrypted_policy_key192b', 'local_policy', 's3_policy']"], {}), "('policy', ['encrypted_policy',\n 'encrypted_policy_key192b', 'local_policy', 's3_policy'])\n", (590, 683), False, 'import pytest\n'), ((1485, 1717), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""policy, destination_disks"""', "[('local_policy', ['disk_local_encrypted', 'disk_local_encrypted2',\n 'disk_local_encrypted_key192b', 'disk_local']), ('s3_policy', [\n 'disk_s3_encrypted', 'disk_s3'])]"], {}), "('policy, destination_disks', [('local_policy', [\n 'disk_local_encrypted', 'disk_local_encrypted2',\n 'disk_local_encrypted_key192b', 'disk_local']), ('s3_policy', [\n 'disk_s3_encrypted', 'disk_s3'])])\n", (1508, 1717), False, 'import pytest\n'), ((2899, 3031), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""policy,encrypted_disk"""', "[('local_policy', 'disk_local_encrypted'), ('s3_policy', 'disk_s3_encrypted')]"], {}), "('policy,encrypted_disk', [('local_policy',\n 'disk_local_encrypted'), ('s3_policy', 'disk_s3_encrypted')])\n", (2922, 3031), False, 'import pytest\n'), ((216, 243), 'helpers.cluster.ClickHouseCluster', 'ClickHouseCluster', (['__file__'], {}), '(__file__)\n', (233, 243), False, 'from helpers.cluster import ClickHouseCluster\n'), ((3884, 3920), 'pytest.raises', 'pytest.raises', (['QueryRuntimeException'], {}), '(QueryRuntimeException)\n', (3897, 3920), False, 'import pytest\n'), ((2489, 2525), 'pytest.raises', 'pytest.raises', (['QueryRuntimeException'], {}), '(QueryRuntimeException)\n', (2502, 2525), False, 'import pytest\n')]
|
# Generated by Django 3.1.2 on 2021-04-05 18:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0006_auto_20210405_1803'),
]
operations = [
migrations.AddField(
model_name='deliverytime',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='deliverytime',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='deliverytime',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='file',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='file',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='file',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='filterstep',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='filterstep',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='filterstep',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='groupspropertiesrelation',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='groupspropertiesrelation',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='groupspropertiesrelation',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='image',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='image',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='image',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='product',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='product',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='product',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='productaccessories',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='productaccessories',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='productaccessories',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='productattachment',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='productattachment',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='productattachment',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='productpropertyvalue',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='productpropertyvalue',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='productpropertyvalue',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='productspropertiesrelation',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='productspropertiesrelation',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='productspropertiesrelation',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='property',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='property',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='property',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='propertygroup',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='propertygroup',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='propertygroup',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='propertyoption',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='propertyoption',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='propertyoption',
name='trusted',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='staticblock',
name='synthesized',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='staticblock',
name='taints',
field=models.BigIntegerField(default=0),
),
migrations.AddField(
model_name='staticblock',
name='trusted',
field=models.BooleanField(default=True),
),
]
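# Each AddField above adds a column populated with the given default when the
# migration is applied. To preview the SQL Django will emit, the sqlmigrate
# command can be used (the migration name placeholder must be filled in):
#   python manage.py sqlmigrate catalog <this_migration>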
|
[
"django.db.models.BigIntegerField",
"django.db.models.BooleanField"
] |
[((345, 379), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (364, 379), False, 'from django.db import migrations, models\n'), ((505, 538), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (527, 538), False, 'from django.db import migrations, models\n'), ((665, 698), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (684, 698), False, 'from django.db import migrations, models\n'), ((821, 855), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (840, 855), False, 'from django.db import migrations, models\n'), ((973, 1006), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (995, 1006), False, 'from django.db import migrations, models\n'), ((1125, 1158), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1144, 1158), False, 'from django.db import migrations, models\n'), ((1287, 1321), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1306, 1321), False, 'from django.db import migrations, models\n'), ((1445, 1478), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1467, 1478), False, 'from django.db import migrations, models\n'), ((1603, 1636), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1622, 1636), False, 'from django.db import migrations, models\n'), ((1779, 1813), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1798, 1813), False, 'from django.db import migrations, models\n'), ((1951, 1984), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1973, 1984), False, 'from django.db import migrations, models\n'), ((2123, 2156), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2142, 2156), False, 'from django.db import migrations, models\n'), ((2280, 2314), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2299, 2314), False, 'from django.db import migrations, models\n'), ((2433, 2466), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2455, 2466), False, 'from django.db import migrations, models\n'), ((2586, 2619), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2605, 2619), False, 'from django.db import migrations, models\n'), ((2745, 2779), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2764, 2779), False, 'from django.db import migrations, models\n'), ((2900, 2933), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2922, 2933), False, 'from django.db import migrations, models\n'), ((3055, 3088), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3074, 3088), False, 'from django.db import migrations, models\n'), ((3225, 3259), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3244, 3259), False, 'from django.db import 
migrations, models\n'), ((3391, 3424), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3413, 3424), False, 'from django.db import migrations, models\n'), ((3557, 3590), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3576, 3590), False, 'from django.db import migrations, models\n'), ((3726, 3760), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3745, 3760), False, 'from django.db import migrations, models\n'), ((3891, 3924), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (3913, 3924), False, 'from django.db import migrations, models\n'), ((4056, 4089), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (4075, 4089), False, 'from django.db import migrations, models\n'), ((4228, 4262), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4247, 4262), False, 'from django.db import migrations, models\n'), ((4396, 4429), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4418, 4429), False, 'from django.db import migrations, models\n'), ((4564, 4597), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (4583, 4597), False, 'from django.db import migrations, models\n'), ((4742, 4776), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4761, 4776), False, 'from django.db import migrations, models\n'), ((4916, 4949), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4938, 4949), False, 'from django.db import migrations, models\n'), ((5090, 5123), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (5109, 5123), False, 'from django.db import migrations, models\n'), ((5250, 5284), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5269, 5284), False, 'from django.db import migrations, models\n'), ((5406, 5439), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5428, 5439), False, 'from django.db import migrations, models\n'), ((5562, 5595), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (5581, 5595), False, 'from django.db import migrations, models\n'), ((5727, 5761), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5746, 5761), False, 'from django.db import migrations, models\n'), ((5888, 5921), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (5910, 5921), False, 'from django.db import migrations, models\n'), ((6049, 6082), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (6068, 6082), False, 'from django.db import migrations, models\n'), ((6215, 6249), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (6234, 6249), False, 'from django.db import migrations, models\n'), ((6377, 6410), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6399, 6410), 
False, 'from django.db import migrations, models\n'), ((6539, 6572), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (6558, 6572), False, 'from django.db import migrations, models\n'), ((6702, 6736), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (6721, 6736), False, 'from django.db import migrations, models\n'), ((6861, 6894), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (6883, 6894), False, 'from django.db import migrations, models\n'), ((7020, 7053), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (7039, 7053), False, 'from django.db import migrations, models\n')]
|
import csv
import os
import datetime
import re
from django.core.files import File
from django.core.management import BaseCommand
from django.db import transaction
from django.utils.encoding import force_text
from django.utils.functional import keep_lazy_text
from report.models import Report, WatchlistedReport, EvidenceFile
from users.models import User
class Command(BaseCommand):
help = "load report evidence"
def handle(self, *args, **kwargs):
import_report_evidence()
def import_report_evidence():
dir_path = os.path.dirname(os.path.realpath(__file__))
with transaction.atomic():
report_evidence_folder = dir_path + '/../data/media/'
# report_evidence_folder = dir_path + '/../../../wikirumours/media/'
directories = os.listdir(report_evidence_folder)
for folder in directories:
report_folder_path = os.path.join(report_evidence_folder, folder)
if os.path.isfile(report_folder_path):
continue
report_public_id = folder
report = Report.objects.filter(public_id=report_public_id).first()
if not report:
continue
process_report_folder(report, report_folder_path)
def process_report_folder(report, report_folder_path):
files = os.listdir(report_folder_path)
for file in files:
existing_file_path = os.path.join(report_folder_path, file)
if os.path.isfile(existing_file_path):
with open(existing_file_path, 'rb') as f:
evidence_file = EvidenceFile(
report=report,
uploader=None,
file=File(f, name=file)
)
evidence_file.save()
return
@keep_lazy_text
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
"""
s = force_text(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
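# Illustration (hypothetical input): leading/trailing spaces are stripped,
# inner spaces become underscores, and any character outside [-\w.] is dropped:
#   get_valid_filename('  my report (final).pdf ')  ->  'my_report_final.pdf'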
|
[
"os.listdir",
"django.core.files.File",
"report.models.Report.objects.filter",
"os.path.realpath",
"os.path.isfile",
"typing.re.sub",
"django.utils.encoding.force_text",
"os.path.join",
"django.db.transaction.atomic"
] |
[((1313, 1343), 'os.listdir', 'os.listdir', (['report_folder_path'], {}), '(report_folder_path)\n', (1323, 1343), False, 'import os\n'), ((2169, 2198), 'typing.re.sub', 're.sub', (['"""(?u)[^-\\\\w.]"""', '""""""', 's'], {}), "('(?u)[^-\\\\w.]', '', s)\n", (2175, 2198), False, 'from typing import re\n'), ((569, 595), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (585, 595), False, 'import os\n'), ((606, 626), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (624, 626), False, 'from django.db import transaction\n'), ((789, 823), 'os.listdir', 'os.listdir', (['report_evidence_folder'], {}), '(report_evidence_folder)\n', (799, 823), False, 'import os\n'), ((1396, 1434), 'os.path.join', 'os.path.join', (['report_folder_path', 'file'], {}), '(report_folder_path, file)\n', (1408, 1434), False, 'import os\n'), ((1446, 1480), 'os.path.isfile', 'os.path.isfile', (['existing_file_path'], {}), '(existing_file_path)\n', (1460, 1480), False, 'import os\n'), ((892, 936), 'os.path.join', 'os.path.join', (['report_evidence_folder', 'folder'], {}), '(report_evidence_folder, folder)\n', (904, 936), False, 'import os\n'), ((952, 986), 'os.path.isfile', 'os.path.isfile', (['report_folder_path'], {}), '(report_folder_path)\n', (966, 986), False, 'import os\n'), ((1072, 1121), 'report.models.Report.objects.filter', 'Report.objects.filter', ([], {'public_id': 'report_public_id'}), '(public_id=report_public_id)\n', (1093, 1121), False, 'from report.models import Report, WatchlistedReport, EvidenceFile\n'), ((2118, 2131), 'django.utils.encoding.force_text', 'force_text', (['s'], {}), '(s)\n', (2128, 2131), False, 'from django.utils.encoding import force_text\n'), ((1677, 1695), 'django.core.files.File', 'File', (['f'], {'name': 'file'}), '(f, name=file)\n', (1681, 1695), False, 'from django.core.files import File\n')]
|
# Create a program that makes the computer play Jokenpô (rock-paper-scissors) with you.
import random
from time import sleep
print("LET'S GO, <NAME>!")
print('''Pick one:
[1] ROCK
[2] PAPER
[3] SCISSORS''')
op = input('Which option do you choose? ')
lista = ['1', '2', '3']
pc = random.choice(lista)
sleep(1)
print('\033[36mJO!')
sleep(1)
print('\033[36mKEM!')
sleep(1)
print('\033[36mPO!!\033[m')
if op == pc:
    print('\033[34mTIE!\033[m, PC ALSO CHOSE {}'.format(pc))
elif op == '1' and pc == '2':
    print('\033[33mPC WON!\033[m, PC CHOSE {}'.format(pc))
elif op == '2' and pc == '1':
    print('\033[32mPLAYER WON!\033[m, PC CHOSE {}'.format(pc))
elif op == '2' and pc == '3':
    print('\033[33mPC WON!\033[m, PC CHOSE {}'.format(pc))
elif op == '3' and pc == '2':
    print('\033[32mPLAYER WON!\033[m, PC CHOSE {}'.format(pc))
elif op == '3' and pc == '1':
    print('\033[33mPC WON!\033[m, PC CHOSE {}'.format(pc))
elif op == '1' and pc == '3':
    print('\033[32mPLAYER WON!\033[m, PC CHOSE {}'.format(pc))
|
[
"random.choice",
"time.sleep"
] |
[((242, 262), 'random.choice', 'random.choice', (['lista'], {}), '(lista)\n', (255, 262), False, 'import random\n'), ((263, 271), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (268, 271), False, 'from time import sleep\n'), ((293, 301), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (298, 301), False, 'from time import sleep\n'), ((324, 332), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (329, 332), False, 'from time import sleep\n')]
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions by Cisco Configuration Agent."""
from neutron_lib import exceptions
from networking_cisco._i18n import _
class DriverException(exceptions.NeutronException):
"""Exception created by the Driver class."""
class DriverExpectedKeyNotSetException(DriverException):
"""An attribute expected to be set by plugin is missing"""
message = (_("Value for expected key: %(key)s is missing."
"Driver cannot proceed"))
class InitializationException(DriverException):
"""Exception when initialization of Routing Driver object."""
message = (_("Critical device parameter missing. Failed initializing "
"routing driver object."))
class ConnectionException(DriverException):
"""Connection exception when connecting to IOS XE hosting device."""
message = (_("Failed connecting to Device. Reason: %(reason)s. "
"Connection params are User:%(user)s, Host:%(host)s, "
"Port:%(port)s, Device timeout:%(timeout)s."))
class CSR1kvConfigException(DriverException):
"""Configuration exception thrown when modifying the running config."""
message = (_("Error executing snippet:%(snippet)s. "
"Hosting device:%(dev_id)s Mgmt IP:%(ip)s "
"ErrorType:%(type)s ErrorTag:%(tag)s Config string:%("
"confstr)s."))
class CSR1kvMissingInterfaceException(DriverException):
"""Configuration exception thrown when modifying the running config."""
message = (_("Interface corresponding to port:%(id)s and mac-address:%("
"mac)s is missing in the CSR. Cannot proceed with interface"
"config."))
class CSR1kvUnknownValueException(DriverException):
"""CSR1kv Exception thrown when an unknown value is received."""
message = (_("Data in attribute: %(attribute)s does not correspond to "
"expected value. Value received is %(value)s. "))
class DriverNotExist(DriverException):
message = _("Driver %(driver)s does not exist.")
class DriverNotFound(DriverException):
message = _("Driver not found for %(resource)s id:%(id)s.")
class DriverNotSetForMissingParameter(DriverException):
message = _("Driver cannot be set for missing parameter:%(p)s.")
class HAParamsMissingException(DriverException):
"""MissingParams exception thrown when HA params are missing"""
message = (_("For router: %(r_id)s and port: %(p_id)s, HA_ENABLED is set, "
"but port ha info is missing. Port details: %(port)s"))
|
[
"networking_cisco._i18n._"
] |
[((995, 1064), 'networking_cisco._i18n._', '_', (['"""Value for expected key: %(key)s is missing.Driver cannot proceed"""'], {}), "('Value for expected key: %(key)s is missing.Driver cannot proceed')\n", (996, 1064), False, 'from networking_cisco._i18n import _\n'), ((1217, 1304), 'networking_cisco._i18n._', '_', (['"""Critical device parameter missing. Failed initializing routing driver object."""'], {}), "('Critical device parameter missing. Failed initializing routing driver object.'\n )\n", (1218, 1304), False, 'from networking_cisco._i18n import _\n'), ((1455, 1608), 'networking_cisco._i18n._', '_', (['"""Failed connecting to Device. Reason: %(reason)s. Connection params are User:%(user)s, Host:%(host)s, Port:%(port)s, Device timeout:%(timeout)s."""'], {}), "('Failed connecting to Device. Reason: %(reason)s. Connection params are User:%(user)s, Host:%(host)s, Port:%(port)s, Device timeout:%(timeout)s.'\n )\n", (1456, 1608), False, 'from networking_cisco._i18n import _\n'), ((1784, 1934), 'networking_cisco._i18n._', '_', (['"""Error executing snippet:%(snippet)s. Hosting device:%(dev_id)s Mgmt IP:%(ip)s ErrorType:%(type)s ErrorTag:%(tag)s Config string:%(confstr)s."""'], {}), "('Error executing snippet:%(snippet)s. Hosting device:%(dev_id)s Mgmt IP:%(ip)s ErrorType:%(type)s ErrorTag:%(tag)s Config string:%(confstr)s.'\n )\n", (1785, 1934), False, 'from networking_cisco._i18n import _\n'), ((2140, 2272), 'networking_cisco._i18n._', '_', (['"""Interface corresponding to port:%(id)s and mac-address:%(mac)s is missing in the CSR. Cannot proceed with interfaceconfig."""'], {}), "('Interface corresponding to port:%(id)s and mac-address:%(mac)s is missing in the CSR. Cannot proceed with interfaceconfig.'\n )\n", (2141, 2272), False, 'from networking_cisco._i18n import _\n'), ((2447, 2558), 'networking_cisco._i18n._', '_', (['"""Data in attribute: %(attribute)s does not correspond to expected value. Value received is %(value)s. """'], {}), "('Data in attribute: %(attribute)s does not correspond to expected value. Value received is %(value)s. '\n )\n", (2448, 2558), False, 'from networking_cisco._i18n import _\n'), ((2630, 2668), 'networking_cisco._i18n._', '_', (['"""Driver %(driver)s does not exist."""'], {}), "('Driver %(driver)s does not exist.')\n", (2631, 2668), False, 'from networking_cisco._i18n import _\n'), ((2724, 2773), 'networking_cisco._i18n._', '_', (['"""Driver not found for %(resource)s id:%(id)s."""'], {}), "('Driver not found for %(resource)s id:%(id)s.')\n", (2725, 2773), False, 'from networking_cisco._i18n import _\n'), ((2846, 2900), 'networking_cisco._i18n._', '_', (['"""Driver cannot be set for missing parameter:%(p)s."""'], {}), "('Driver cannot be set for missing parameter:%(p)s.')\n", (2847, 2900), False, 'from networking_cisco._i18n import _\n'), ((3035, 3156), 'networking_cisco._i18n._', '_', (['"""For router: %(r_id)s and port: %(p_id)s, HA_ENABLED is set, but port ha info is missing. Port details: %(port)s"""'], {}), "('For router: %(r_id)s and port: %(p_id)s, HA_ENABLED is set, but port ha info is missing. Port details: %(port)s'\n )\n", (3036, 3156), False, 'from networking_cisco._i18n import _\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .list_rule import ALL_RULE_TYPES, RECOMMENDATION_BAN
from .ban_list import BanList
from synapse.types import UserID
logger = logging.getLogger("synapse.contrib." + __name__)
class AntiSpam(object):
def __init__(self, config, api):
self.block_invites = config.get("block_invites", True)
self.block_messages = config.get("block_messages", False)
self.block_usernames = config.get("block_usernames", False)
self.list_room_ids = config.get("ban_lists", [])
self.rooms_to_lists = {} # type: Dict[str, BanList]
self.api = api
# Now we build the ban lists so we can match them
self.build_lists()
def build_lists(self):
for room_id in self.list_room_ids:
self.build_list(room_id)
def build_list(self, room_id):
logger.info("Rebuilding ban list for %s" % (room_id))
self.get_list_for_room(room_id).build()
def get_list_for_room(self, room_id):
if room_id not in self.rooms_to_lists:
self.rooms_to_lists[room_id] = BanList(api=self.api, room_id=room_id)
return self.rooms_to_lists[room_id]
def is_user_banned(self, user_id):
for room_id in self.rooms_to_lists:
ban_list = self.rooms_to_lists[room_id]
for rule in ban_list.user_rules:
if rule.matches(user_id):
return rule.action == RECOMMENDATION_BAN
return False
def is_room_banned(self, invite_room_id):
for room_id in self.rooms_to_lists:
ban_list = self.rooms_to_lists[room_id]
for rule in ban_list.room_rules:
if rule.matches(invite_room_id):
return rule.action == RECOMMENDATION_BAN
return False
def is_server_banned(self, server_name):
for room_id in self.rooms_to_lists:
ban_list = self.rooms_to_lists[room_id]
for rule in ban_list.server_rules:
if rule.matches(server_name):
return rule.action == RECOMMENDATION_BAN
return False
# --- spam checker interface below here ---
def check_event_for_spam(self, event):
room_id = event.get("room_id", "")
event_type = event.get("type", "")
state_key = event.get("state_key", None)
# Rebuild the rules if there's an event for our ban lists
if state_key is not None and event_type in ALL_RULE_TYPES and room_id in self.list_room_ids:
logger.info("Received ban list event - updating list")
self.get_list_for_room(room_id).build(with_event=event)
return False # Ban list updates aren't spam
if not self.block_messages:
return False # not spam (we aren't blocking messages)
sender = UserID.from_string(event.get("sender", ""))
if self.is_user_banned(sender.to_string()):
return True
if self.is_server_banned(sender.domain):
return True
return False # not spam (as far as we're concerned)
def user_may_invite(self, inviter_user_id, invitee_user_id, room_id):
if not self.block_invites:
return True # allowed (we aren't blocking invites)
sender = UserID.from_string(inviter_user_id)
if self.is_user_banned(sender.to_string()):
return False
if self.is_room_banned(room_id):
return False
if self.is_server_banned(sender.domain):
return False
return True # allowed (as far as we're concerned)
def check_username_for_spam(self, user_profile):
if not self.block_usernames:
            return False  # not spam (we aren't blocking based on usernames)
# Check whether the user ID or display name matches any of the banned
# patterns.
return self.is_user_banned(user_profile["user_id"]) or self.is_user_banned(user_profile["display_name"])
def user_may_create_room(self, user_id):
return True # allowed
def user_may_create_room_alias(self, user_id, room_alias):
return True # allowed
def user_may_publish_room(self, user_id, room_id):
return True # allowed
@staticmethod
def parse_config(config):
return config # no parsing needed
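# A minimal homeserver.yaml sketch wiring this checker up (the module path and
# the room ID are illustrative; the config keys match the `config.get` calls
# in __init__ above):
#
# spam_checker:
#   module: "antispam.AntiSpam"
#   config:
#     block_invites: true
#     block_messages: false
#     block_usernames: false
#     ban_lists:
#       - "!bansroom:example.org"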
|
[
"synapse.types.UserID.from_string",
"logging.getLogger"
] |
[((767, 815), 'logging.getLogger', 'logging.getLogger', (["('synapse.contrib.' + __name__)"], {}), "('synapse.contrib.' + __name__)\n", (784, 815), False, 'import logging\n'), ((3870, 3905), 'synapse.types.UserID.from_string', 'UserID.from_string', (['inviter_user_id'], {}), '(inviter_user_id)\n', (3888, 3905), False, 'from synapse.types import UserID\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/12/21 2:30 PM
# @Author : zhangzhen
# @Site :
# @File : __init__.py
# @Software: PyCharm
import tensorflow as tf
from numpy.random import RandomState as rdm
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
class MLP:
    def __init__(self, in_size=10, out_size=2, hiddens=[10], act_function=None):
self.x_dimension = in_size
self.y_dimension = out_size
self.build(in_size, out_size, hiddens=hiddens, act_function=act_function)
def build(self, in_size, out_size, hiddens=[], act_function=tf.nn.relu):
def add_layer(inputs: object, in_size: object, out_size: object, act_function: object = None) -> object:
W = tf.Variable(tf.random_normal([in_size, out_size]))
b = tf.Variable(tf.constant(0.1, shape=[out_size]))
Wx_plus_b = tf.matmul(inputs, W) + b
if act_function:
outputs = act_function(Wx_plus_b)
else:
outputs = Wx_plus_b
logging.info("tmp hidden layer out: {}".format(outputs))
return outputs
self.x = tf.placeholder(dtype=tf.float32, shape=(None, in_size), name='X-input')
self.y_ = tf.placeholder(dtype=tf.float32, shape=(None, out_size), name='y-input')
tmp_in_size = in_size
tmp_inputs = self.x
for hidden in hiddens:
tmp_outputs = add_layer(tmp_inputs, tmp_in_size, hidden, act_function=act_function)
tmp_in_size = hidden
tmp_inputs = tmp_outputs
self.y = add_layer(tmp_inputs, tmp_in_size, out_size, act_function=None)
logging.info("last out: {}".format(self.y))
self.cross_entropy = -tf.reduce_mean(self.y_ * tf.log(tf.clip_by_value(self.y, 1e-10, 1.0)))
self.step = tf.train.AdamOptimizer(0.001).minimize(self.cross_entropy)
logging.info("loss: {}".format(self.cross_entropy))
def train(self, steps=5000, batch_size=8):
X, Y = self.generate_data(size=128)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# logging.info()
for i in range(steps):
start = (i*batch_size) % self.dataset_size
end = min(start+batch_size, self.dataset_size)
sess.run(self.step, feed_dict={self.x: X[start: end], self.y_: Y[start: end]})
if i % 1000 == 0:
total_losses = sess.run(self.cross_entropy, feed_dict={self.x: X, self.y_: Y})
logging.info("After {} training steps, crosses entropy on all data is {}".format(i, total_losses))
def predict(self):
pass
def generate_data(self, size=128, rdm_seed=1):
r = rdm(rdm_seed)
self.dataset_size = size
X = r.rand(size, self.x_dimension)
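        # Label is 1 when the feature sum falls below half the input dimension,
        # i.e. below the expected sum of uniform [0, 1) features.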
Y = [[int(sum(xs) < self.x_dimension/2)] for xs in X]
return X, Y
if __name__ == '__main__':
mlp = MLP(in_size=2, out_size=1)
mlp.train()
|
[
"logging.basicConfig",
"tensorflow.clip_by_value",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.random.RandomState",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.random_normal",
"tensorflow.train.AdamOptimizer"
] |
[((244, 340), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.DEBUG)\n", (263, 340), False, 'import logging\n'), ((1214, 1285), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, in_size)', 'name': '"""X-input"""'}), "(dtype=tf.float32, shape=(None, in_size), name='X-input')\n", (1228, 1285), True, 'import tensorflow as tf\n'), ((1304, 1376), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, out_size)', 'name': '"""y-input"""'}), "(dtype=tf.float32, shape=(None, out_size), name='y-input')\n", (1318, 1376), True, 'import tensorflow as tf\n'), ((2825, 2838), 'numpy.random.RandomState', 'rdm', (['rdm_seed'], {}), '(rdm_seed)\n', (2828, 2838), True, 'from numpy.random import RandomState as rdm\n'), ((2113, 2125), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2123, 2125), True, 'import tensorflow as tf\n'), ((815, 852), 'tensorflow.random_normal', 'tf.random_normal', (['[in_size, out_size]'], {}), '([in_size, out_size])\n', (831, 852), True, 'import tensorflow as tf\n'), ((882, 916), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[out_size]'}), '(0.1, shape=[out_size])\n', (893, 916), True, 'import tensorflow as tf\n'), ((942, 962), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'W'], {}), '(inputs, W)\n', (951, 962), True, 'import tensorflow as tf\n'), ((1888, 1917), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (1910, 1917), True, 'import tensorflow as tf\n'), ((2156, 2189), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2187, 2189), True, 'import tensorflow as tf\n'), ((1829, 1865), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.y', '(1e-10)', '(1.0)'], {}), '(self.y, 1e-10, 1.0)\n', (1845, 1865), True, 'import tensorflow as tf\n')]
|
"""Tests for Geofabrik module."""
import os
from datetime import datetime
from tempfile import TemporaryDirectory
import pytest
import vcr
from geohealthaccess.geofabrik import Geofabrik, Page, Region
BASEURL = "http://download.geofabrik.de/"
@vcr.use_cassette("tests/cassettes/geofabrik-index.yaml")
def test_page_parsing_index():
url = BASEURL + "index.html"
page = Page(url)
assert page.name == "OpenStreetMap Data Extracts"
assert len(page.continents) == 8
@vcr.use_cassette("tests/cassettes/geofabrik-africa.yaml")
def test_page_parsing_continent():
url = BASEURL + "africa.html"
page = Page(url)
assert page.name == "Africa"
assert len(page.raw_details) == 37
assert len(page.subregions) == 55
assert len(page.special_subregions) == 1
@vcr.use_cassette("tests/cassettes/geofabrik-kenya.yaml")
def test_page_parsing_country():
url = BASEURL + "africa/kenya.html"
page = Page(url)
assert page.name == "Kenya"
assert len(page.raw_details) == 73
@vcr.use_cassette("tests/cassettes/geofabrik-comores.yaml")
def test_region():
region = Region("/africa/comores")
assert region.id == "africa/comores"
assert region.level == 1
assert region.name == "Comores"
assert region.extent.is_valid
assert region.url == "http://download.geofabrik.de/africa/comores.html"
@vcr.use_cassette("tests/cassettes/geofabrik-comores.yaml")
def test_region_files():
region = Region("/africa/comores")
assert len(region.files) == 65
assert "/africa/comores-latest.osm.pbf" in region.files
@vcr.use_cassette("tests/cassettes/geofabrik-comores.yaml")
def test_region_datasets():
region = Region("africa/comores")
assert len(region.datasets) == 12
assert isinstance(region.datasets[0]["date"], datetime)
assert isinstance(region.datasets[0]["file"], str)
assert isinstance(region.datasets[0]["url"], str)
assert region.datasets[0]["url"].startswith("http://")
assert region.datasets[0]["file"].endswith(".osm.pbf")
@vcr.use_cassette("tests/cassettes/geofabrik-comores.yaml")
def test_region_latest():
region = Region("africa/comores")
assert region.latest.endswith(".osm.pbf")
@vcr.use_cassette("tests/cassettes/geofabrik-france.yaml")
def test_region_subregions():
region = Region("europe/france")
assert len(region.subregions) == 27
assert "/europe/france/alsace" in region.subregions
def test_geofabrik_sindex():
geofab = Geofabrik()
assert len(geofab.sindex) == 363
row = geofab.sindex.loc["africa"]
assert row.name == "africa"
assert row.geometry.is_valid
def test_geofabrik_search(senegal):
geofab = Geofabrik()
region_id, match = geofab.search(senegal)
assert region_id == "africa/senegal-and-gambia"
assert match == pytest.approx(0.62, rel=0.01)
@vcr.use_cassette("tests/cassettes/geofabrik-saotomeprincipe-download.yaml")
def test_geofabrik_download():
geofabrik = Geofabrik()
with TemporaryDirectory(prefix="geohealthaccess_") as tmpdir:
osmpbf = geofabrik.download("africa/sao-tome-and-principe", tmpdir)
mtime = os.path.getmtime(osmpbf)
assert os.path.isfile(osmpbf)
# should not download again (overwrite=False)
geofabrik.download("africa/sao-tome-and-principe", tmpdir, overwrite=False)
assert os.path.getmtime(osmpbf) == mtime
# should download again (overwrite=True)
geofabrik.download("africa/sao-tome-and-principe", tmpdir, overwrite=True)
assert os.path.getmtime(osmpbf) != mtime
|
[
"tempfile.TemporaryDirectory",
"vcr.use_cassette",
"geohealthaccess.geofabrik.Page",
"geohealthaccess.geofabrik.Region",
"os.path.isfile",
"os.path.getmtime",
"geohealthaccess.geofabrik.Geofabrik",
"pytest.approx"
] |
[((250, 306), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-index.yaml"""'], {}), "('tests/cassettes/geofabrik-index.yaml')\n", (266, 306), False, 'import vcr\n'), ((486, 543), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-africa.yaml"""'], {}), "('tests/cassettes/geofabrik-africa.yaml')\n", (502, 543), False, 'import vcr\n'), ((792, 848), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-kenya.yaml"""'], {}), "('tests/cassettes/geofabrik-kenya.yaml')\n", (808, 848), False, 'import vcr\n'), ((1017, 1075), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-comores.yaml"""'], {}), "('tests/cassettes/geofabrik-comores.yaml')\n", (1033, 1075), False, 'import vcr\n'), ((1353, 1411), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-comores.yaml"""'], {}), "('tests/cassettes/geofabrik-comores.yaml')\n", (1369, 1411), False, 'import vcr\n'), ((1574, 1632), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-comores.yaml"""'], {}), "('tests/cassettes/geofabrik-comores.yaml')\n", (1590, 1632), False, 'import vcr\n'), ((2027, 2085), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-comores.yaml"""'], {}), "('tests/cassettes/geofabrik-comores.yaml')\n", (2043, 2085), False, 'import vcr\n'), ((2199, 2256), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-france.yaml"""'], {}), "('tests/cassettes/geofabrik-france.yaml')\n", (2215, 2256), False, 'import vcr\n'), ((2830, 2905), 'vcr.use_cassette', 'vcr.use_cassette', (['"""tests/cassettes/geofabrik-saotomeprincipe-download.yaml"""'], {}), "('tests/cassettes/geofabrik-saotomeprincipe-download.yaml')\n", (2846, 2905), False, 'import vcr\n'), ((382, 391), 'geohealthaccess.geofabrik.Page', 'Page', (['url'], {}), '(url)\n', (386, 391), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((624, 633), 'geohealthaccess.geofabrik.Page', 'Page', (['url'], {}), '(url)\n', (628, 633), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((933, 942), 'geohealthaccess.geofabrik.Page', 'Page', (['url'], {}), '(url)\n', (937, 942), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((1108, 1133), 'geohealthaccess.geofabrik.Region', 'Region', (['"""/africa/comores"""'], {}), "('/africa/comores')\n", (1114, 1133), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((1450, 1475), 'geohealthaccess.geofabrik.Region', 'Region', (['"""/africa/comores"""'], {}), "('/africa/comores')\n", (1456, 1475), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((1674, 1698), 'geohealthaccess.geofabrik.Region', 'Region', (['"""africa/comores"""'], {}), "('africa/comores')\n", (1680, 1698), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((2125, 2149), 'geohealthaccess.geofabrik.Region', 'Region', (['"""africa/comores"""'], {}), "('africa/comores')\n", (2131, 2149), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((2300, 2323), 'geohealthaccess.geofabrik.Region', 'Region', (['"""europe/france"""'], {}), "('europe/france')\n", (2306, 2323), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((2464, 2475), 'geohealthaccess.geofabrik.Geofabrik', 'Geofabrik', ([], {}), '()\n', (2473, 2475), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((2667, 2678), 'geohealthaccess.geofabrik.Geofabrik', 'Geofabrik', ([], {}), '()\n', (2676, 2678), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((2953, 2964), 'geohealthaccess.geofabrik.Geofabrik', 'Geofabrik', ([], {}), '()\n', (2962, 2964), False, 'from geohealthaccess.geofabrik import Geofabrik, Page, Region\n'), ((2797, 2826), 'pytest.approx', 'pytest.approx', (['(0.62)'], {'rel': '(0.01)'}), '(0.62, rel=0.01)\n', (2810, 2826), False, 'import pytest\n'), ((2974, 3019), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {'prefix': '"""geohealthaccess_"""'}), "(prefix='geohealthaccess_')\n", (2992, 3019), False, 'from tempfile import TemporaryDirectory\n'), ((3123, 3147), 'os.path.getmtime', 'os.path.getmtime', (['osmpbf'], {}), '(osmpbf)\n', (3139, 3147), False, 'import os\n'), ((3163, 3185), 'os.path.isfile', 'os.path.isfile', (['osmpbf'], {}), '(osmpbf)\n', (3177, 3185), False, 'import os\n'), ((3339, 3363), 'os.path.getmtime', 'os.path.getmtime', (['osmpbf'], {}), '(osmpbf)\n', (3355, 3363), False, 'import os\n'), ((3520, 3544), 'os.path.getmtime', 'os.path.getmtime', (['osmpbf'], {}), '(osmpbf)\n', (3536, 3544), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""pyvib sample data files"""
import socket
import os.path
import warnings
from shutil import move
from tempfile import TemporaryDirectory
from subprocess import check_call
from .config import get_and_create_sample_dir
__all__ = ['download_sample_data', 'get_sample_file']
# https://api.github.com/repos/pawsen/pyvib_data/contents/pyvib/data/nlbeam
# https://stackoverflow.com/a/18194523/1121523
#_github_downloader = 'https://minhaskamal.github.io/DownGit/#/home?url='
_base_urls = (
'https://api.github.com/repos/pawsen/pyvib_data/contents/pyvib/data/',
#'https://github.com/pawsen/pyvib_data/tree/master/pyvib/data/',
)
# files or folders to download
sample_files = {
"NLBEAM": "nlbeam",
"2DOF": "2dof",
"BOUCWEN": "boucwen",
"SILVERBOX": "silverbox",
}
def download_sample_data(show_progress=True):
"""
Download all sample data at once. This will overwrite any existing files.
Parameters
----------
show_progress: `bool`
Show a progress bar during download
Returns
-------
None
"""
for filename in sample_files.values():
get_sample_file(filename, url_list=_base_urls, overwrite=True)
def get_sample_file(filename, url_list=_base_urls, overwrite=False):
"""
    Download a sample file and move it into the sample data directory.
    Returns the local path immediately if the file already exists.
    Parameters
    ----------
    filename: `str`
        Name of the file
    url_list: `str` or `list`
        urls where to look for the file
    overwrite: `bool`
        If True, download and overwrite an existing file.
Returns
-------
result: `str`
The local path of the file. None if it failed.
"""
# Creating the directory for sample files to be downloaded
sampledata_dir = get_and_create_sample_dir()
src = os.path.join(sampledata_dir, filename)
if not overwrite and os.path.isfile(src):
return src
else:
# check each provided url to find the file
for base_url in url_list:
try:
url = base_url + filename
                with TemporaryDirectory() as d:
                    check_call(['github-download.sh', url], cwd=d)
                    # move downloaded files to the data directory
                    move(d, src)
                    # recreate d so TemporaryDirectory cleanup does not fail
                    os.mkdir(d)
return src
except (socket.error, socket.timeout) as e:
warnings.warn("Download failed with error {}. \n"
"Retrying with different mirror.".format(e))
# if reach here then file has not been downloaded.
warnings.warn("File {} not found.".format(filename))
return None
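# Minimal usage sketch (assumes network access and the external
# github-download.sh helper on PATH):
#     local_path = get_sample_file('2dof')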
|
[
"shutil.move",
"tempfile.TemporaryDirectory",
"subprocess.check_call"
] |
[((2414, 2434), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (2432, 2434), False, 'from tempfile import TemporaryDirectory\n'), ((2466, 2512), 'subprocess.check_call', 'check_call', (["['github-download.sh', url]"], {'cwd': 'd'}), "(['github-download.sh', url], cwd=d)\n", (2476, 2512), False, 'from subprocess import check_call\n'), ((2588, 2600), 'shutil.move', 'move', (['d', 'src'], {}), '(d, src)\n', (2592, 2600), False, 'from shutil import move\n')]
|
"""Search for files purely in discord."""
from .async_search_client import AsyncSearchClient
import discord
from typing import List, Dict
from fuzzywuzzy import fuzz
from utils import attachment_to_search_dict
import datetime
class PastFileSearch(AsyncSearchClient):
"""Search for files in discord with just discord."""
def __init__(self, thresh: int = 85):
"""
Create a DiscordSearch object.
It's annoying to need bot_user but we do this to enable searching on files from other bots.
Args:
bot_user: The name of the bot user.
"""
self.banned_file_ids = set()
self.thresh = thresh
self.user = None
def initialize(self, bot_user: str, *args, **kwargs) -> bool:
"""
Initialize past file search.
Args:
bot_user: The bot username.
"""
self.user = bot_user
return True
def match(self, message: discord.Message, filename: str, **kwargs) -> List[discord.Attachment]:
"""
Match the message against possible arguments.
Args:
            message: The message to test
            filename: The filename query to fuzzy-match against attachments
            kwargs: kwargs of args to match
Returns:
A list of discord.Attachments that match the query.
"""
if not message.attachments or message.author == self.user:
return []
if kwargs.get("content"):
if fuzz.partial_ratio(kwargs['content'].lower(), message.content.lower()) < self.thresh:
return []
if kwargs.get("after"):
if message.created_at < kwargs["after"]:
return []
if kwargs.get("before"):
if message.created_at > kwargs["before"]:
return []
if kwargs.get("author"):
if message.author != kwargs["author"]:
return []
if kwargs.get("channel"):
if message.channel != kwargs["channel"]:
return []
# print(message.author, self.user, message.attachments[0].filename)
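        # fuzzy-match attachment filenames against the query, keeping scores above the threshold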
res = filter(lambda atch: fuzz.partial_ratio(atch.filename.lower(),
filename.lower()) > self.thresh, message.attachments)
if kwargs.get("mimetype"):
return [attachment for attachment in res if attachment.content_type == kwargs["mimetype"]]
if kwargs.get("banned_ids"):
return [attachment for attachment in res if attachment.id not in kwargs["banned_ids"]]
return list(res)
async def search(self, filename: str, onii_chan, ctx_channel, *args, **kwargs) -> List[Dict]:
"""
Iterate through previous messages in a discord channel for files.
Args:
filename: The query
onii_chan: The channel to search in
kawrgs: Search paramaters
Returns:
A list of dicts of files.
"""
        if self.user is None or not isinstance(filename, str):
            return []
files = []
onii_chan = ctx_channel
if kwargs.get('channel'):
onii_chan = kwargs['channel']
if kwargs.get('banned_ids'):
kwargs['banned_ids'].update(self.banned_file_ids)
else:
kwargs['banned_ids'] = self.banned_file_ids
matched_messages = onii_chan.history(limit=int(1e9), before=kwargs.get('before'), after=kwargs.get('after'))
async for message in matched_messages:
matched = self.match(message, filename, **kwargs)
files.extend([{**attachment_to_search_dict(message, atch), 'url': atch.url,
'jump_url': message.jump_url} for atch in matched])
return files
async def create_doc(self, *args, **kwargs):
"""We don't maintain search indices in this class, so this is not needed."""
return
async def clear(self, *args, **kwargs):
"""We don't maintain search indices in this class, so this is not needed."""
return
async def remove_doc(self, file_ids: list, *args, **kwargs):
"""Update banned ids with the file ids."""
        self.banned_file_ids.update(file_ids)  # add ids individually so membership checks work
return
|
[
"utils.attachment_to_search_dict"
] |
[((3527, 3567), 'utils.attachment_to_search_dict', 'attachment_to_search_dict', (['message', 'atch'], {}), '(message, atch)\n', (3552, 3567), False, 'from utils import attachment_to_search_dict\n')]
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging.config  # logging.config is a submodule and must be imported explicitly
import pytest
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.models import DAG, TaskInstance
from airflow.operators.empty import EmptyOperator
from airflow.utils.log.logging_mixin import set_context
from airflow.utils.state import DagRunState
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = datetime(2019, 1, 1)
TASK_HANDLER = 'task'
TASK_HANDLER_CLASS = 'airflow.utils.log.task_handler_with_custom_formatter.TaskHandlerWithCustomFormatter'
PREV_TASK_HANDLER = DEFAULT_LOGGING_CONFIG['handlers']['task']
DAG_ID = "task_handler_with_custom_formatter_dag"
TASK_ID = "task_handler_with_custom_formatter_task"
@pytest.fixture(scope="module", autouse=True)
def custom_task_log_handler_config():
DEFAULT_LOGGING_CONFIG['handlers']['task'] = {
'class': TASK_HANDLER_CLASS,
'formatter': 'airflow',
'stream': 'sys.stdout',
}
logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
logging.root.disabled = False
yield
DEFAULT_LOGGING_CONFIG['handlers']['task'] = PREV_TASK_HANDLER
logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
@pytest.fixture()
def task_instance():
dag = DAG(DAG_ID, start_date=DEFAULT_DATE)
task = EmptyOperator(task_id=TASK_ID, dag=dag)
dagrun = dag.create_dagrun(DagRunState.RUNNING, execution_date=DEFAULT_DATE, run_type=DagRunType.MANUAL)
ti = TaskInstance(task=task, run_id=dagrun.run_id)
ti.log.disabled = False
yield ti
clear_db_runs()
def assert_prefix(task_instance: TaskInstance, prefix: str) -> None:
handler = next((h for h in task_instance.log.handlers if h.name == TASK_HANDLER), None)
assert handler is not None, "custom task log handler not set up correctly"
assert handler.formatter is not None, "custom task log formatter not set up correctly"
expected_format = f"{prefix}:{handler.formatter._fmt}"
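    # set_context() applies the rendered prefix template to the handler's formatter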
set_context(task_instance.log, task_instance)
assert expected_format == handler.formatter._fmt
def test_custom_formatter_default_format(task_instance):
"""The default format provides no prefix."""
assert_prefix(task_instance, "")
@conf_vars({("logging", "task_log_prefix_template"): "{{ti.dag_id }}-{{ ti.task_id }}"})
def test_custom_formatter_custom_format_not_affected_by_config(task_instance):
assert_prefix(task_instance, f"{DAG_ID}-{TASK_ID}")
|
[
"tests.test_utils.config.conf_vars",
"airflow.utils.timezone.datetime",
"airflow.utils.log.logging_mixin.set_context",
"pytest.fixture",
"tests.test_utils.db.clear_db_runs",
"airflow.operators.empty.EmptyOperator",
"airflow.models.DAG",
"logging.config.dictConfig",
"airflow.models.TaskInstance"
] |
[((1291, 1311), 'airflow.utils.timezone.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (1299, 1311), False, 'from airflow.utils.timezone import datetime\n'), ((1610, 1654), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (1624, 1654), False, 'import pytest\n'), ((2073, 2089), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2087, 2089), False, 'import pytest\n'), ((3077, 3168), 'tests.test_utils.config.conf_vars', 'conf_vars', (["{('logging', 'task_log_prefix_template'): '{{ti.dag_id }}-{{ ti.task_id }}'}"], {}), "({('logging', 'task_log_prefix_template'):\n '{{ti.dag_id }}-{{ ti.task_id }}'})\n", (3086, 3168), False, 'from tests.test_utils.config import conf_vars\n'), ((1855, 1904), 'logging.config.dictConfig', 'logging.config.dictConfig', (['DEFAULT_LOGGING_CONFIG'], {}), '(DEFAULT_LOGGING_CONFIG)\n', (1880, 1904), False, 'import logging\n'), ((2020, 2069), 'logging.config.dictConfig', 'logging.config.dictConfig', (['DEFAULT_LOGGING_CONFIG'], {}), '(DEFAULT_LOGGING_CONFIG)\n', (2045, 2069), False, 'import logging\n'), ((2121, 2157), 'airflow.models.DAG', 'DAG', (['DAG_ID'], {'start_date': 'DEFAULT_DATE'}), '(DAG_ID, start_date=DEFAULT_DATE)\n', (2124, 2157), False, 'from airflow.models import DAG, TaskInstance\n'), ((2169, 2208), 'airflow.operators.empty.EmptyOperator', 'EmptyOperator', ([], {'task_id': 'TASK_ID', 'dag': 'dag'}), '(task_id=TASK_ID, dag=dag)\n', (2182, 2208), False, 'from airflow.operators.empty import EmptyOperator\n'), ((2327, 2372), 'airflow.models.TaskInstance', 'TaskInstance', ([], {'task': 'task', 'run_id': 'dagrun.run_id'}), '(task=task, run_id=dagrun.run_id)\n', (2339, 2372), False, 'from airflow.models import DAG, TaskInstance\n'), ((2418, 2433), 'tests.test_utils.db.clear_db_runs', 'clear_db_runs', ([], {}), '()\n', (2431, 2433), False, 'from tests.test_utils.db import clear_db_runs\n'), ((2830, 2875), 'airflow.utils.log.logging_mixin.set_context', 'set_context', (['task_instance.log', 'task_instance'], {}), '(task_instance.log, task_instance)\n', (2841, 2875), False, 'from airflow.utils.log.logging_mixin import set_context\n')]
|
''' mbinary
#########################################################################
# File : radixSort.py
# Author: mbinary
# Mail: <EMAIL>
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-07-06 15:52
# Description:
#########################################################################
'''
from random import randint
from quickSort import quickSort
from time import time
def radixSort(lst, radix=10):
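    # LSD radix sort: bucket values by each digit (least-significant first),
    # flatten the buckets, and repeat until the largest value is consumed.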
ls = [[] for i in range(radix)]
mx = max(lst)
weight = 1
while mx >= weight:
for i in lst:
ls[(i // weight) % radix].append(i)
weight *= radix
lst = sum(ls, [])
ls = [[] for i in range(radix)]
return lst
def countSort(lst, mn, mx):
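    # counting sort over the closed range [mn, mx]: tally each value,
    # then rebuild the list in order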
mark = [0]*(mx-mn+1)
for i in lst:
mark[i-mn] += 1
ret = []
for n, i in enumerate(mark):
ret += [n+mn]*i
return ret
def timer(funcs, span, num=1000000):
lst = [randint(0, span) for i in range(num)]
print('range({}), {} items'.format(span, num))
for func in funcs:
data = lst.copy()
t = time()
func(data)
t = time()-t
print('{}: {}s'.format(func.__name__, t))
if __name__ == '__main__':
timer([quickSort, radixSort, sorted], 1000000000000, 1000)
timer([quickSort, radixSort, sorted], 10000, 100000)
lst = [randint(0, 100) for i in range(1000)]
print(countSort(lst, 0, 100) == sorted(lst))
|
[
"random.randint",
"time.time"
] |
[((948, 964), 'random.randint', 'randint', (['(0)', 'span'], {}), '(0, span)\n', (955, 964), False, 'from random import randint\n'), ((1098, 1104), 'time.time', 'time', ([], {}), '()\n', (1102, 1104), False, 'from time import time\n'), ((1355, 1370), 'random.randint', 'randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1362, 1370), False, 'from random import randint\n'), ((1136, 1142), 'time.time', 'time', ([], {}), '()\n', (1140, 1142), False, 'from time import time\n')]
|
# import libraries
import pandas as pd
import os
import sys
from operator import itemgetter
from collections import defaultdict
class MovieRecommendation:
user_threshold: int
min_support: int
min_confidence: float
def __init__(self):
# users in train set
self.user_threshold = 200
# minimum support for movies
self.min_support = 50
# Defining a minimum for confidence level
self.min_confidence = 0.9
self.itemsets = {}
# Dataset placeholders
self.ratings_full = {}
self.ratings = {} # Minimise version of the whole dataset
self.movies = {}
self.users_movies = {}
self.significant_rules = {}
def load_data(self):
# Setting path
data_folder = os.path.join(os.path.curdir, "input")
rating_filename = os.path.join(data_folder, "ratings.dat")
movie_name_filename = os.path.join(data_folder, "movies.dat")
# Defining dateparser for reading date column
date_parser = lambda x: pd.to_datetime(x, unit='s')
# Reading the reviews file and defining the columns name
self.ratings_full = pd.read_csv(rating_filename,
delimiter="::",
header=None,
names=['UserID', 'MovieID', 'Rating', 'Datetime'],
parse_dates=['Datetime'],
date_parser=date_parser,
engine='python')
# Reading movies dat file and setting column names
self.movies = pd.read_csv(movie_name_filename,
delimiter="::",
header=None,
encoding="mac-roman",
engine='python')
self.movies.columns = ['MovieID', 'Title', 'Genres']
def data_prep(self, rating_threshold=3, user_threshold=200):
# Adding Favorable feature if user rated over 3
self.ratings_full['Favorable'] = self.ratings_full['Rating'] > rating_threshold
# Make a sample dataset to make our Apriori algorithm faster
self.ratings = self.ratings_full[self.ratings_full.UserID <= user_threshold]
# Filtering the dataset for only favorable movies
favorable_ratings = self.ratings[self.ratings['Favorable']]
# List of movies which each user considered as favorable
        self.users_movies = dict((user_id, frozenset(movies))  # frozenset: fast membership tests
for user_id, movies in \
favorable_ratings.groupby("UserID")["MovieID"])
def create_initial_itemset(self):
# Frequency of each movie given a favorable review
movie_freq = self.ratings[['MovieID', 'Favorable']].groupby('MovieID').sum()
"""
The structure of itemsets will be as a dictionary with following format:
Structure:
{length_of_itemset: {(set_of_movies_list_in_current_itemset): frequency_of_itemset},}
Key: int
length of itemset
Value: dict
Key: frozenset
a frozenset of list of involving movies in current itemset
Value: int
how many times current combination of movies occurred in user ratings
Example:
for a itemset comprises of 3 movies
{(movie_1,movie_15,movie_495) : 59}
"""
# itemset_length=1 are a list of all movies which have rating more than min_support
self.itemsets[1] = dict((frozenset((movie_id,)), row["Favorable"])
for movie_id, row in movie_freq.iterrows()
if row["Favorable"] > self.min_support)
print("[length:itemsets]: ({}:{})".format(1, len(self.itemsets[1])))
sys.stdout.flush()
def create_freq_itemsets(self, superset_max_size=15):
print("Itemsets creation is in progress, be patient...\n")
sys.stdout.flush()
# Creating the first itemsets
self.create_initial_itemset()
# Creating further itemsets with size bigger than 2
for superset_length in range(2, superset_max_size + 1):
# Finding candidate itemsets in various lengths up to super_max_size based on preceding itemset
candidate_superset_freq = defaultdict(int)
for user_id, user_movies in self.users_movies.items():
for itemset in self.itemsets[superset_length - 1]:
# Check if itemset is a subset of user favorite movies
if itemset.issubset(user_movies):
# Construct superset with union of current itemset and each of another movies
# which user liked separately
for other_reviewed_movie in user_movies - itemset: # exclude current movies in itemset first
current_superset = itemset | frozenset((other_reviewed_movie,)) # union each remaining itemset
# increase the frequency of recent superset which just occurred
candidate_superset_freq[current_superset] += 1
# Checking for frequency of any recent built itemset (candidates) again minimum threshold
superset = dict([(candidate_superset, candidate_superset_frequency)
for candidate_superset, candidate_superset_frequency in candidate_superset_freq.items()
if candidate_superset_frequency >= self.min_support])
print("[length:itemsets]: ({}:{})".format(superset_length, len(superset)))
sys.stdout.flush()
if len(superset):
self.itemsets[superset_length] = superset
elif len(superset) == 0:
print("No further exploring.")
sys.stdout.flush()
break
# Itemsets in length 1 are not useful for recommending system so we can drop it
del self.itemsets[1]
print('\nItemsets total count: {0}'.format(sum(len(itemsets) for itemsets in self.itemsets.values())))
def extract_association_rules(self):
"""
In order to identifying association rules we have to iterate over all itemsets and within each itemset
pick each member and consider it as conclusion and all others as premises at a time.
"""
candidate_rules = []
for itemset_length, itemset_dict in self.itemsets.items():
for itemset in itemset_dict.keys():
# selecting each item in itemset and consider it as conclusion
for conclusion in itemset:
# making premise set by excluding conclusion
premise = itemset - set((conclusion,))
candidate_rules.append((premise, conclusion))
# Next, we compute the confidence of each of these rules.
valid_rule = defaultdict(int)
invalid_rule = defaultdict(int)
for user, fav_movies in self.users_movies.items():
for candidate_rule in candidate_rules:
premise, conclusion = candidate_rule
# If user liked all premise movies
if premise.issubset(fav_movies):
# If user liked conclusion movie too
if conclusion in fav_movies:
# Then rule should be considered as valid
valid_rule[candidate_rule] += 1
else:
invalid_rule[candidate_rule] += 1
# Calculating confidence level for candidate_rules
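        # confidence = valid / (valid + invalid), i.e. the share of premise-liking
        # users who also liked the conclusion movie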
rules_confidence = {
candidate_rule: valid_rule[candidate_rule] / float(valid_rule[candidate_rule] +
invalid_rule[candidate_rule])
for candidate_rule in candidate_rules}
# Filter out the rules with poor confidence
self.significant_rules = {rule: confidence for rule, confidence in rules_confidence.items()
if confidence > self.min_confidence}
print("Among {} candidate rules only which {} of them are significant.".format(len(candidate_rules),
len(self.significant_rules)))
def get_movie_name(self, movie_id):
return self.movies.loc[self.movies["MovieID"] == movie_id, 'Title'].values[0]
def report_associations(self, rule_count=10):
# Sorting significant rules dictionary based on significant level
sorted_confidence = sorted(self.significant_rules.items(), key=itemgetter(1), reverse=True)
for index in range(rule_count):
(premise, conclusion) = sorted_confidence[index][0]
premise_names = "\n ".join(self.get_movie_name(movie_id=mov_id) for mov_id in premise)
conclusion_name = self.get_movie_name(movie_id=conclusion)
print("Rule rank #{0} (confidence {1:.3f}):".format(index + 1,
self.significant_rules[(premise, conclusion)]))
print("If a person recommends:\n {0} \nThey will also recommend: \n {1}".format(premise_names,
conclusion_name))
print("\n")
def evaluate_model(self, rule_count=10):
# Make a test dataset to evaluate model
test_df = self.ratings_full[self.ratings_full.UserID > self.user_threshold]
test_fav = test_df[test_df["Favorable"]]
test_users_movies = dict((test_user_id, frozenset(movies))
for test_user_id, movies in
test_fav.groupby("UserID")["MovieID"])
candidate_rules = []
for itemset_length, itemset_dict in self.itemsets.items():
for itemset in itemset_dict.keys():
for conclusion in itemset:
premise = itemset - set((conclusion,))
candidate_rules.append((premise, conclusion))
        # Same evaluation as in extract_association_rules
valid_rule = defaultdict(int)
invalid_rule = defaultdict(int)
for user, fav_movies in test_users_movies.items():
for candidate_rule in candidate_rules:
premise, conclusion = candidate_rule
# If user liked all premise movies
if premise.issubset(fav_movies):
# If user liked conclusion movie too
if conclusion in fav_movies:
# Then rule should be considered as valid
valid_rule[candidate_rule] += 1
else:
invalid_rule[candidate_rule] += 1
test_confidence = {candidate_rule: valid_rule[candidate_rule] / float(
valid_rule[candidate_rule] + invalid_rule[candidate_rule])
for candidate_rule in candidate_rules}
sorted_confidence = sorted(self.significant_rules.items(), key=itemgetter(1), reverse=True)
for index in range(rule_count):
(premise, conclusion) = sorted_confidence[index][0]
premise_names = "\n ".join(self.get_movie_name(movie_id=mov_id) for mov_id in premise)
conclusion_name = self.get_movie_name(movie_id=conclusion)
print("Rule rank #{0} \n({1:.3f} confidence )\n({2:.3f} test confidence):".format(index + 1,
self.significant_rules[
(premise, conclusion)],
test_confidence.get(
(premise, conclusion),
-1)))
print("If a person recommends:\n {0} \nThey will also recommend: \n {1}".format(premise_names,
conclusion_name))
print("\n")
if __name__ == '__main__':
engine = MovieRecommendation()
# Load datasets
engine.load_data()
# Minimizing the dataset size
engine.data_prep(user_threshold=200)
# Constructing itemsets
engine.create_freq_itemsets(superset_max_size=15)
# Making association rules
engine.extract_association_rules()
# Printing reports of extracted rules
engine.report_associations()
# Evaluate model
engine.evaluate_model()
|
[
"pandas.read_csv",
"collections.defaultdict",
"pandas.to_datetime",
"sys.stdout.flush",
"operator.itemgetter",
"os.path.join"
] |
[((787, 824), 'os.path.join', 'os.path.join', (['os.path.curdir', '"""input"""'], {}), "(os.path.curdir, 'input')\n", (799, 824), False, 'import os\n'), ((851, 891), 'os.path.join', 'os.path.join', (['data_folder', '"""ratings.dat"""'], {}), "(data_folder, 'ratings.dat')\n", (863, 891), False, 'import os\n'), ((922, 961), 'os.path.join', 'os.path.join', (['data_folder', '"""movies.dat"""'], {}), "(data_folder, 'movies.dat')\n", (934, 961), False, 'import os\n'), ((1171, 1356), 'pandas.read_csv', 'pd.read_csv', (['rating_filename'], {'delimiter': '"""::"""', 'header': 'None', 'names': "['UserID', 'MovieID', 'Rating', 'Datetime']", 'parse_dates': "['Datetime']", 'date_parser': 'date_parser', 'engine': '"""python"""'}), "(rating_filename, delimiter='::', header=None, names=['UserID',\n 'MovieID', 'Rating', 'Datetime'], parse_dates=['Datetime'], date_parser\n =date_parser, engine='python')\n", (1182, 1356), True, 'import pandas as pd\n'), ((1670, 1775), 'pandas.read_csv', 'pd.read_csv', (['movie_name_filename'], {'delimiter': '"""::"""', 'header': 'None', 'encoding': '"""mac-roman"""', 'engine': '"""python"""'}), "(movie_name_filename, delimiter='::', header=None, encoding=\n 'mac-roman', engine='python')\n", (1681, 1775), True, 'import pandas as pd\n'), ((3996, 4014), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4012, 4014), False, 'import sys\n'), ((4149, 4167), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4165, 4167), False, 'import sys\n'), ((7135, 7151), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (7146, 7151), False, 'from collections import defaultdict\n'), ((7175, 7191), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (7186, 7191), False, 'from collections import defaultdict\n'), ((10444, 10460), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (10455, 10460), False, 'from collections import defaultdict\n'), ((10484, 10500), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (10495, 10500), False, 'from collections import defaultdict\n'), ((1049, 1076), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'unit': '"""s"""'}), "(x, unit='s')\n", (1063, 1076), True, 'import pandas as pd\n'), ((4518, 4534), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4529, 4534), False, 'from collections import defaultdict\n'), ((5842, 5860), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5858, 5860), False, 'import sys\n'), ((8867, 8880), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (8877, 8880), False, 'from operator import itemgetter\n'), ((11366, 11379), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (11376, 11379), False, 'from operator import itemgetter\n'), ((6050, 6068), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6066, 6068), False, 'import sys\n')]
|
"""Generate python files from protobufs."""
import glob
import re
from grpc_tools import protoc
protoc.main([
'grpc_tools.protoc',
'--proto_path=protobuf/',
'--python_out=.',
'--grpc_python_out=.'
] + list(glob.iglob('./protobuf/*.proto')))
# Make pb2 imports in generated scripts relative
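# e.g. "import foo_pb2 as foo__pb2" becomes "from . import foo_pb2 as foo__pb2"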
for script in glob.iglob('./*_pb2*.py'):
with open(script, 'r+') as file:
code = file.read()
file.seek(0)
file.write(re.sub(r'\n(import .+_pb2.*)', '\nfrom . \\1', code))
file.truncate()
|
[
"glob.iglob",
"re.sub"
] |
[((324, 349), 'glob.iglob', 'glob.iglob', (['"""./*_pb2*.py"""'], {}), "('./*_pb2*.py')\n", (334, 349), False, 'import glob\n'), ((225, 257), 'glob.iglob', 'glob.iglob', (['"""./protobuf/*.proto"""'], {}), "('./protobuf/*.proto')\n", (235, 257), False, 'import glob\n'), ((455, 507), 're.sub', 're.sub', (['"""\\\\n(import .+_pb2.*)"""', '"""\nfrom . \\\\1"""', 'code'], {}), "('\\\\n(import .+_pb2.*)', '\\nfrom . \\\\1', code)\n", (461, 507), False, 'import re\n')]
|
from django.http import HttpResponse # type: ignore
from pylti1p3.oidc_login import OIDCLogin
from pylti1p3.request import Request
from .cookie import DjangoCookieService
from .redirect import DjangoRedirect
from .request import DjangoRequest
from .session import DjangoSessionService
class DjangoOIDCLogin(OIDCLogin):
def __init__(self, request, tool_config, session_service=None, cookie_service=None, launch_data_storage=None):
django_request = request if isinstance(request, Request) else DjangoRequest(request)
cookie_service = cookie_service if cookie_service else DjangoCookieService(django_request)
session_service = session_service if session_service else DjangoSessionService(request)
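        # fall back to Django-backed cookie and session services when none are supplied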
super(DjangoOIDCLogin, self).__init__(django_request, tool_config, session_service, cookie_service,
launch_data_storage)
def get_redirect(self, url):
return DjangoRedirect(url, self._cookie_service)
def get_response(self, html):
return HttpResponse(html)
|
[
"django.http.HttpResponse"
] |
[((1043, 1061), 'django.http.HttpResponse', 'HttpResponse', (['html'], {}), '(html)\n', (1055, 1061), False, 'from django.http import HttpResponse\n')]
|
import yaml
import argparse
from operator import itemgetter
""" Find the biggest files
"""
def Main():
parser = argparse.ArgumentParser()
parser.add_argument("dataset",
metavar='p0',
nargs='?',
const=1,
help='datasetname',
type=str,
default='Mibench-f-complete.yaml')
parser.add_argument("max_hot_function",
metavar='p1',
nargs='?',
const=2,
help='The first n hot functions',
type=int,
default=300)
args = parser.parse_args()
statistics_path = '/home/andrefz/research/m-project/core-massalin/tools/inst-count-pass/'
with open(statistics_path + args.dataset) as f:
insts = yaml.safe_load(f)
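        # assumption: insts maps function name -> instruction count, as emitted by the inst-count pass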
    res = dict(sorted(insts.items(), key=itemgetter(1), reverse=True)[:args.max_hot_function])
for key, value in res.items():
#key = key[:-5]
#key = key + 'yaml'
key = key+'.ll'
print(key)
#print(key + ': '+str(value))
if __name__ == '__main__':
Main()
|
[
"operator.itemgetter",
"yaml.safe_load",
"argparse.ArgumentParser"
] |
[((214, 239), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (237, 239), False, 'import argparse\n'), ((993, 1010), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1007, 1010), False, 'import yaml\n'), ((1056, 1069), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (1066, 1069), False, 'from operator import itemgetter\n')]
|
import minecraft_data
# Java edition minecraft-data
mcd = minecraft_data("1.13")
print(mcd.version)
print(mcd.find_item_or_block(1))
print(mcd.find_item_or_block('stone'))
print(mcd.recipes['5'][0])
print(mcd.windows['minecraft:brewing_stand'])
print(mcd.effects_name['Haste'])
# Pocket Edition minecraft-data
mcd_pe = minecraft_data("1.0", "pe")
print(mcd_pe.version)
print(mcd_pe.find_item_or_block('stone'))
|
[
"minecraft_data"
] |
[((58, 80), 'minecraft_data', 'minecraft_data', (['"""1.13"""'], {}), "('1.13')\n", (72, 80), False, 'import minecraft_data\n'), ((325, 352), 'minecraft_data', 'minecraft_data', (['"""1.0"""', '"""pe"""'], {}), "('1.0', 'pe')\n", (339, 352), False, 'import minecraft_data\n')]
|
from ...models import Headline
import requests
from bs4 import BeautifulSoup
from datetime import datetime, time, timedelta
from dateparser import parse
def getslate(per_site):
url = 'https://slate.com/news-and-politics'
html = requests.get(url).text
soup = BeautifulSoup(html, 'lxml')
articles = soup.find('div', class_='topic-stories-list').find_all('a')
i = 0
for art in articles:
if i < per_site:
headline = Headline()
headline.leaning = 'left'
headline.url = art['href']
try:
headline.title = art.find('span').text
            except AttributeError:
                # nothing was saved yet, so just skip entries without a title
                continue
headline.img = art.find('img')['data-src']
pub_date = parse(art.find('span', class_="topic-story__date").text, languages=['en'])
if pub_date < datetime(year=datetime.now().year, month=datetime.now().month, day=datetime.now().day):
headline.mins_ago = 1441
else:
pub_time = art.find('div', class_='topic-story__byline').text.strip()[-8:].strip()
pub_time = parse(pub_time).time()
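                # subtracting pub_time (as a duration since midnight) from now() leaves
                # the elapsed same-day time in the result's hour/minute fields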
delta = datetime.now() - timedelta(hours=pub_time.hour, minutes=pub_time.minute, seconds=pub_time.second)
headline.mins_ago = delta.hour*60 + delta.minute + 60 #for some reason, always short 1 hour
headline.save()
i += 1
else:
break
|
[
"dateparser.parse",
"datetime.timedelta",
"requests.get",
"bs4.BeautifulSoup",
"datetime.datetime.now"
] |
[((271, 298), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (284, 298), False, 'from bs4 import BeautifulSoup\n'), ((237, 254), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (249, 254), False, 'import requests\n'), ((1230, 1244), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1242, 1244), False, 'from datetime import datetime, time, timedelta\n'), ((1247, 1332), 'datetime.timedelta', 'timedelta', ([], {'hours': 'pub_time.hour', 'minutes': 'pub_time.minute', 'seconds': 'pub_time.second'}), '(hours=pub_time.hour, minutes=pub_time.minute, seconds=pub_time.second\n )\n', (1256, 1332), False, 'from datetime import datetime, time, timedelta\n'), ((1166, 1181), 'dateparser.parse', 'parse', (['pub_time'], {}), '(pub_time)\n', (1171, 1181), False, 'from dateparser import parse\n'), ((907, 921), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (919, 921), False, 'from datetime import datetime, time, timedelta\n'), ((934, 948), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (946, 948), False, 'from datetime import datetime, time, timedelta\n'), ((960, 974), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (972, 974), False, 'from datetime import datetime, time, timedelta\n')]
|
###############################################################################
# #
# SYMBOLS, TABLES, SEMANTIC ANALYSIS #
# #
###############################################################################
from astvisitor import NodeVisitor
from base import _SHOULD_LOG_SCOPE, _SHOULD_LOG_STACK, ErrorCode, SemanticError
from lex import TokenType
class Symbol:
def __init__(self, name, type=None):
self.name = name
self.type = type
class VarSymbol(Symbol):
def __init__(self, name, type):
super().__init__(name, type)
def __str__(self):
return "<{class_name}(name='{name}', type='{type}')>".format(
class_name=self.__class__.__name__, name=self.name, type=self.type,
)
__repr__ = __str__
class BuiltinTypeSymbol(Symbol):
def __init__(self, name):
super().__init__(name)
def __str__(self):
return self.name
def __repr__(self):
return "<{class_name}(name='{name}')>".format(
class_name=self.__class__.__name__, name=self.name,
)
class ProcedureSymbol(Symbol):
def __init__(self, name, params=None):
super().__init__(name)
# a list of formal parameters
self.params = params if params is not None else []
def __str__(self):
return "<{class_name}(name={name}, parameters={params})>".format(
class_name=self.__class__.__name__, name=self.name, params=self.params,
)
__repr__ = __str__
class ScopedSymbolTable:
hasReturnStatement = False
def __init__(self, scope_name, scopeType, scope_level, enclosing_scope=None):
self._symbols = {}
self.scope_name = scope_name
self.scopeType = scopeType
self.scope_level = scope_level
self.enclosing_scope = enclosing_scope
self._init_builtins()
def _init_builtins(self):
self.insert(BuiltinTypeSymbol("INTEGER"))
self.insert(BuiltinTypeSymbol("REAL"))
def __str__(self):
h1 = "SCOPE (SCOPED SYMBOL TABLE)"
lines = ["\n", h1, "=" * len(h1)]
for header_name, header_value in (
("Scope name", self.scope_name),
("Scope level", self.scope_level),
(
"Enclosing scope",
self.enclosing_scope.scope_name if self.enclosing_scope else None,
),
):
lines.append("%-15s: %s" % (header_name, header_value))
h2 = "Scope (Scoped symbol table) contents"
lines.extend([h2, "-" * len(h2)])
lines.extend(("%7s: %r" % (key, value)) for key, value in self._symbols.items())
lines.append("\n")
s = "\n".join(lines)
return s
__repr__ = __str__
def log(self, msg):
if _SHOULD_LOG_SCOPE:
print(msg)
def insert(self, symbol):
self.log(f"Insert: {symbol.name}")
self._symbols[symbol.name] = symbol
def lookup(self, name, current_scope_only=False):
self.log(f"Lookup: {name}. (Scope name: {self.scope_name})")
# 'symbol' is either an instance of the Symbol class or None
symbol = self._symbols.get(name)
if symbol is not None:
return symbol
if current_scope_only:
return None
# recursively go up the chain and lookup the name
if self.enclosing_scope is not None:
return self.enclosing_scope.lookup(name)
class SemanticAnalyzer(NodeVisitor):
def __init__(self, scope):
self.current_scope = ScopedSymbolTable("initial", TokenType.PROGRAM, 1)
        # rebinding the imported _SHOULD_LOG_SCOPE here would only create a
        # local variable, so keep the flag on the instance instead
        self.should_log_scope = scope
    def log(self, msg):
        if _SHOULD_LOG_SCOPE or self.should_log_scope:
            print(msg)
def error(self, error_code, token):
raise SemanticError(
error_code=error_code,
token=token,
message=f"{error_code.value} -> {token}",
)
def visit_Block(self, node):
for declaration in node.declarations:
self.visit(declaration)
self.visit(node.compound_statement)
def visit_Program(self, node):
self.log("ENTER scope: global")
global_scope = ScopedSymbolTable(
scope_name="global",
scopeType=TokenType.PROGRAM,
scope_level=1,
enclosing_scope=self.current_scope, # None
)
self.current_scope = global_scope
# visit subtree
self.visit(node.block)
self.log(global_scope)
self.current_scope = self.current_scope.enclosing_scope
self.log("LEAVE scope: global")
def visit_Compound(self, node):
for child in node.children:
self.visit(child)
def visit_NoOp(self, node):
pass
def visit_Type(self, node):
pass
def visit_BinOp(self, node):
self.visit(node.left)
self.visit(node.right)
def visit_ProcedureDecl(self, node):
proc_name = node.procName
proc_symbol = ProcedureSymbol(proc_name)
self.current_scope.insert(proc_symbol)
self.log(f"ENTER scope: {proc_name}")
# Scope for parameters and local variables
procedure_scope = ScopedSymbolTable(
scope_name=proc_name,
scopeType=TokenType.PROCEDURE,
scope_level=self.current_scope.scope_level + 1,
enclosing_scope=self.current_scope,
)
self.current_scope = procedure_scope
# Insert parameters into the procedure scope
for param in node.params:
param_type = self.current_scope.lookup(param.type_node.value)
param_name = param.var_node.value
var_symbol = VarSymbol(param_name, param_type)
self.current_scope.insert(var_symbol)
proc_symbol.params.append(var_symbol)
self.visit(node.blockNode)
self.log(procedure_scope)
self.current_scope = self.current_scope.enclosing_scope
self.log(f"LEAVE scope: {proc_name}")
def visit_FunctionDecl(self, node):
funcName = node.funcName
funcSymbol = ProcedureSymbol(funcName)
self.current_scope.insert(funcSymbol)
self.log("Enter Scope:{}".format(funcName))
procedureScope = ScopedSymbolTable(
funcName,
TokenType.FUNCTION,
self.current_scope.scope_level + 1,
self.current_scope,
)
self.current_scope = procedureScope
for param in node.params:
paramType = self.current_scope.lookup(param.type_node.value)
paramName = param.var_node.value
varSymbol = VarSymbol(paramName, paramType)
self.current_scope.insert(varSymbol)
funcSymbol.params.append(varSymbol)
# print(paramName)
self.visit_Type(node.returnType)
self.visit(node.blockNode)
self.log("{}".format(procedureScope))
if procedureScope.hasReturnStatement == False:
self.error(ErrorCode.MISSING_RETURN, node.token)
self.current_scope = self.current_scope.enclosing_scope
self.log("Leave scope : {}".format(funcName))
def visit_VarDecl(self, node):
type_name = node.type_node.value
type_symbol = self.current_scope.lookup(type_name)
# We have all the information we need to create a variable symbol.
# Create the symbol and insert it into the symbol table.
var_name = node.var_node.value
var_symbol = VarSymbol(var_name, type_symbol)
# Signal an error if the table already has a symbol
# with the same name
if self.current_scope.lookup(var_name, current_scope_only=True):
self.error(
error_code=ErrorCode.DUPLICATE_ID, token=node.var_node.token,
)
self.current_scope.insert(var_symbol)
def visit_Assign(self, node):
# right-hand side
# self.visit(node.right)
# # left-hand side
# self.visit(node.left)
varName = node.left.value
currentScope = self.current_scope
        # print(varName, currentScope.scopeType, currentScope.scope_name)
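        # Pascal-style return: assigning to the function's own name inside a
        # FUNCTION scope counts as its return statement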
if (
currentScope.scopeType == TokenType.FUNCTION
and varName == currentScope.scope_name
):
currentScope.hasReturnStatement = True
else:
            var_symbol = self.current_scope.lookup(varName)  # do not shadow the VarSymbol class
            if var_symbol is None:
                self.error(error_code=ErrorCode.ID_NOT_FOUND, token=node.token)
self.visit(node.right)
def visit_Var(self, node):
var_name = node.value
var_symbol = self.current_scope.lookup(var_name)
if var_symbol is None:
self.error(error_code=ErrorCode.ID_NOT_FOUND, token=node.token)
def visit_Num(self, node):
pass
def visit_String(self, node):
pass
def visit_UnaryOp(self, node):
self.visit(node.right)
def visit_ProcedureCall(self, node):
for param_node in node.actual_params:
self.visit(param_node)
def visit_Call(self, node):
for param_node in node.actualParams:
self.visit(param_node)
def visit_Readint(self, node):
return
def visit_Readfloat(self, node):
return
def visit_Readstring(self, node):
return
def visit_WritelnCall(self, node):
for param_node in node.actual_params:
self.visit(param_node)
def visit_Condition(self, node):
self.visit(node.condition)
self.visit(node.then)
if node.myElse != None:
self.visit(node.myElse)
def visit_Then(self, node):
self.visit(node.child)
def visit_MyElse(self, node):
self.visit(node.child)
def visit_While(self, node):
self.visit(node.condition)
def visit_MyDo(self, node):
self.visit(node.child)
def visit_MyBoolean(self, node):
return node.value
|
[
"base.SemanticError"
] |
[((3927, 4023), 'base.SemanticError', 'SemanticError', ([], {'error_code': 'error_code', 'token': 'token', 'message': 'f"""{error_code.value} -> {token}"""'}), "(error_code=error_code, token=token, message=\n f'{error_code.value} -> {token}')\n", (3940, 4023), False, 'from base import _SHOULD_LOG_SCOPE, _SHOULD_LOG_STACK, ErrorCode, SemanticError\n')]
|
from mrq.job import Job
import datetime
from mrq.queue import Queue
import time
import pytest
@pytest.mark.parametrize(["p_queue", "p_pushback", "p_timed", "p_flags"], [
["test_timed_set", False, True, "--greenlets 10"],
["pushback_timed_set", True, True, "--greenlets 10"],
["test_sorted_set", False, False, "--greenlets 1"]
])
def test_raw_sorted(worker, p_queue, p_pushback, p_timed, p_flags):
worker.start(flags="%s --config tests/fixtures/config-raw1.py" %
p_flags, queues=p_queue)
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
current_time = int(time.time())
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
# Schedule one in the past, one in the future
worker.send_raw_tasks(p_queue, {
"aaa": current_time - 10,
"bbb": current_time + 5,
"ccc": current_time + 10
}, block=False)
# Re-schedule
worker.send_raw_tasks(p_queue, {
"ccc": current_time + 6
}, block=False)
time.sleep(3)
if not p_timed:
assert Queue(p_queue).size() == 0
assert test_collection.count() == 3
assert list(test_collection.find(projection={"params": 1, "_id": 0}).limit(1)) == [
{"params": {"sorted_set": "aaa"}}
]
return
if p_pushback:
assert Queue(p_queue).size() == 3
assert set(Queue(p_queue).list_raw_jobs()) == set([b"bbb", b"ccc", b"aaa"])
else:
assert Queue(p_queue).size() == 2
assert set(Queue(p_queue).list_raw_jobs()) == set([b"bbb", b"ccc"])
# The second one should not yet even exist in mrq_jobs
assert jobs_collection.count() == 1
assert list(jobs_collection.find())[0]["status"] == "success"
assert list(test_collection.find(projection={"params": 1, "_id": 0})) == [
{"params": {"timed_set": "aaa"}}
]
# Then wait for the second job to be done
time.sleep(5)
if p_pushback:
assert Queue(p_queue).size() == 3
else:
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 3
assert list(jobs_collection.find())[1]["status"] == "success"
assert list(jobs_collection.find())[2]["status"] == "success"
assert list(jobs_collection.find())[2]["worker"]
assert test_collection.count() == 3
@pytest.mark.parametrize("has_subqueue", [False, True])
@pytest.mark.parametrize(["p_queue", "p_set"], [
["test_raw", False],
["test_set", True]
])
def test_raw_set(worker, has_subqueue, p_queue, p_set):
flags = "--greenlets 10 --config tests/fixtures/config-raw1.py"
if has_subqueue:
flags = "%s --subqueues_refresh_interval=0.1" % flags
# worker should dequeue all subqueues
p_queue = "%s/" % p_queue
worker.start(flags=flags, queues=p_queue)
if has_subqueue:
# queue tasks in p_queue/subqueue
p_queue = "%ssubqueue" % p_queue
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, ["aaa", "bbb", "ccc", "bbb"], block=True)
assert Queue(p_queue).size() == 0
if p_set:
assert jobs_collection.count() == 3
assert jobs_collection.count({"status": "success"}) == 3
assert test_collection.count() == 3
else:
assert jobs_collection.count() == 4
assert jobs_collection.count({"status": "success"}) == 4
assert test_collection.count() == 4
def test_raw_started(worker):
worker.start(
flags="--greenlets 2 --config tests/fixtures/config-raw1.py", queues="teststarted_raw teststartedx")
worker.send_raw_tasks("teststarted_raw", ["f1", "f2", "f3"], block=False)
time.sleep(2)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.find({"status": "started", "queue": "teststartedx"}).count() == 2
assert jobs_collection.count() == 2
worker.mongodb_jobs.tests_flags.insert({"flag": "f1"})
time.sleep(1)
assert jobs_collection.find({"status": "success", "queue": "teststartedx"}).count() == 1
assert jobs_collection.find({"status": "started", "queue": "teststartedx"}).count() == 2
assert jobs_collection.count() == 3
worker.mongodb_jobs.tests_flags.insert({"flag": "f2"})
worker.mongodb_jobs.tests_flags.insert({"flag": "f3"})
time.sleep(1)
worker.stop(block=True, deps=False)
assert jobs_collection.find({"status": "success", "queue": "teststartedx"}).count() == 3
assert jobs_collection.count() == 3
worker.stop_deps()
@pytest.mark.parametrize(["p_queue"], [
["test_raw"],
["test_set"],
["test_timed_set"]
])
def test_raw_remove(worker, p_queue):
worker.start_deps()
worker.send_raw_tasks(
p_queue, ["aa", "bb", "cc"], block=False, start=False)
assert Queue(p_queue).size() == 3
Queue(p_queue).remove_raw_jobs(["aa", "cc"])
assert Queue(p_queue).size() == 1
worker.stop_deps()
def test_raw_exception(worker):
p_queue = "testexception_raw"
worker.start(
flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues=p_queue)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, ["msg1"], block=True)
failjob = list(jobs_collection.find())[0]
assert Queue("default").size() == 0
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 1
assert failjob["status"] == "failed"
worker.stop(deps=False)
worker.start(
deps=False, flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues="default")
worker.send_task(
"mrq.basetasks.utils.JobAction",
{
"id": failjob["_id"],
"action": "requeue"
},
block=True
)
assert Queue("default").size() == 0
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 2
assert list(jobs_collection.find({"_id": failjob["_id"]}))[
0]["status"] == "queued"
assert list(jobs_collection.find({"_id": {"$ne": failjob["_id"]}}))[
0]["status"] == "success"
worker.stop(deps=False)
worker.start(
deps=False, flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues="default testx")
worker.wait_for_idle()
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 2
assert Queue("testx").size() == 0
assert list(jobs_collection.find({"_id": failjob["_id"]}))[
0]["status"] == "failed"
def test_raw_retry(worker):
p_queue = "testretry_raw"
worker.start(
flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues=p_queue)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, [0], block=True)
failjob = list(jobs_collection.find())[0]
assert Queue("default").size() == 0
assert Queue("testx").size() == 1
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 1
assert failjob["status"] == "queued"
assert failjob["queue"] == "testx"
@pytest.mark.parametrize(["p_queue", "p_greenlets"], [x1 + x2 for x1 in [
["test_raw default test"],
# ["default test_raw test"],
# ["default test_raw test_set"],
# ["test_set test_raw default"],
# ["test test2 test_set test_raw default"]
] for x2 in [
# [1],
[2],
# [10]
]])
def test_raw_mixed(worker, p_queue, p_greenlets):
worker.start_deps()
worker.send_raw_tasks(
"test_raw", ["aaa", "bbb", "ccc"], start=False, block=False)
worker.send_task("tests.tasks.general.MongoInsert", {
"not_raw": "ddd"
}, start=False, block=False)
assert Queue("test_raw").size() == 3
assert Queue("default").size() == 1
worker.start(flags="--greenlets %s --config tests/fixtures/config-raw1.py" %
p_greenlets, queues=p_queue, deps=False)
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
time.sleep(3)
assert Queue("test_raw").size() == 0
assert Queue("default").size() == 0
assert test_collection.count() == 4
assert jobs_collection.count() == 4
assert jobs_collection.find({"status": "success"}).count() == 4
assert list(jobs_collection.find({"status": "success"}))[0]["worker"]
def test_raw_no_storage(worker):
""" Test tasks that don't store unless they go to error status like 'failed' """
worker.start(
flags="--config tests/fixtures/config-raw1.py",
queues="default testnostorage_raw"
)
jobs_collection = worker.mongodb_jobs.mrq_jobs
test_collection = worker.mongodb_logs.tests_inserts
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.MongoInsert 3"
], block=False)
time.sleep(2)
# No started inserted.
assert jobs_collection.count() == 0
time.sleep(2)
# No success either, but we did insert
assert test_collection.count() == 1
assert jobs_collection.count() == 0
test_collection.remove({})
# However failed tasks get stored.
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.RaiseException 0"
], block=False)
time.sleep(2)
# Failed was inserted.
assert jobs_collection.count({"status": "failed", "path": "tests.tasks.general.RaiseException"}) == 1
# If we requeue and don't raise, should be OK and inserted this time, even in success
# no_storage depends on a raw queue, not a task path.
_id = jobs_collection.find_one()["_id"]
jobs_collection.update({"_id": _id}, {"$set": {"path": "tests.tasks.general.MongoInsert"}})
job = Job(_id).fetch(full_data=True)
job.requeue(queue="default")
time.sleep(1)
assert test_collection.count() == 1
assert jobs_collection.count() == 1
assert jobs_collection.count({"status": "success"}) == 1
jobs_collection.remove({})
# Test with retry: should be inserted
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.Retry 0"
], block=False)
assert jobs_collection.count({"status": "started"}) == 0
time.sleep(2)
assert jobs_collection.count({"status": "retry"}) == 1
|
[
"mrq.job.Job",
"time.time",
"time.sleep",
"pytest.mark.parametrize",
"mrq.queue.Queue"
] |
[((97, 341), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['p_queue', 'p_pushback', 'p_timed', 'p_flags']", "[['test_timed_set', False, True, '--greenlets 10'], ['pushback_timed_set', \n True, True, '--greenlets 10'], ['test_sorted_set', False, False,\n '--greenlets 1']]"], {}), "(['p_queue', 'p_pushback', 'p_timed', 'p_flags'], [[\n 'test_timed_set', False, True, '--greenlets 10'], ['pushback_timed_set',\n True, True, '--greenlets 10'], ['test_sorted_set', False, False,\n '--greenlets 1']])\n", (120, 341), False, 'import pytest\n'), ((2371, 2425), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""has_subqueue"""', '[False, True]'], {}), "('has_subqueue', [False, True])\n", (2394, 2425), False, 'import pytest\n'), ((2427, 2520), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['p_queue', 'p_set']", "[['test_raw', False], [\n 'test_set', True]]"], {}), "(['p_queue', 'p_set'], [['test_raw', False], [\n 'test_set', True]])\n", (2450, 2520), False, 'import pytest\n'), ((4680, 4771), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['p_queue']", "[['test_raw'], ['test_set'], [\n 'test_timed_set']]"], {}), "(['p_queue'], [['test_raw'], ['test_set'], [\n 'test_timed_set']])\n", (4703, 4771), False, 'import pytest\n'), ((7339, 7462), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['p_queue', 'p_greenlets']", "[(x1 + x2) for x1 in [[\n 'test_raw default test']] for x2 in [[2]]]"], {}), "(['p_queue', 'p_greenlets'], [(x1 + x2) for x1 in [[\n 'test_raw default test']] for x2 in [[2]]])\n", (7362, 7462), False, 'import pytest\n'), ((1069, 1082), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1079, 1082), False, 'import time\n'), ((1972, 1985), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1982, 1985), False, 'import time\n'), ((3837, 3850), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3847, 3850), False, 'import time\n'), ((4100, 4113), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4110, 4113), False, 'import time\n'), ((4464, 4477), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4474, 4477), False, 'import time\n'), ((8270, 8283), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (8280, 8283), False, 'import time\n'), ((9061, 9074), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9071, 9074), False, 'import time\n'), ((9148, 9161), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9158, 9161), False, 'import time\n'), ((9479, 9492), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9489, 9492), False, 'import time\n'), ((9995, 10008), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10005, 10008), False, 'import time\n'), ((10399, 10412), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (10409, 10412), False, 'import time\n'), ((655, 666), 'time.time', 'time.time', ([], {}), '()\n', (664, 666), False, 'import time\n'), ((4980, 4994), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (4985, 4994), False, 'from mrq.queue import Queue\n'), ((9926, 9934), 'mrq.job.Job', 'Job', (['_id'], {}), '(_id)\n', (9929, 9934), False, 'from mrq.job import Job\n'), ((721, 735), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (726, 735), False, 'from mrq.queue import Queue\n'), ((3116, 3130), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (3121, 3130), False, 'from mrq.queue import Queue\n'), ((3233, 3247), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (3238, 3247), False, 'from mrq.queue import Queue\n'), ((4948, 4962), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (4953, 4962), False, 'from mrq.queue import Queue\n'), ((5037, 5051), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (5042, 5051), False, 'from mrq.queue import Queue\n'), ((5366, 5380), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (5371, 5380), False, 'from mrq.queue import Queue\n'), ((5509, 5525), 'mrq.queue.Queue', 'Queue', (['"""default"""'], {}), "('default')\n", (5514, 5525), False, 'from mrq.queue import Queue\n'), ((5549, 5563), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (5554, 5563), False, 'from mrq.queue import Queue\n'), ((5994, 6010), 'mrq.queue.Queue', 'Queue', (['"""default"""'], {}), "('default')\n", (5999, 6010), False, 'from mrq.queue import Queue\n'), ((6034, 6048), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (6039, 6048), False, 'from mrq.queue import Queue\n'), ((6499, 6513), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (6504, 6513), False, 'from mrq.queue import Queue\n'), ((6577, 6591), 'mrq.queue.Queue', 'Queue', (['"""testx"""'], {}), "('testx')\n", (6582, 6591), False, 'from mrq.queue import Queue\n'), ((6971, 6985), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (6976, 6985), False, 'from mrq.queue import Queue\n'), ((7110, 7126), 'mrq.queue.Queue', 'Queue', (['"""default"""'], {}), "('default')\n", (7115, 7126), False, 'from mrq.queue import Queue\n'), ((7150, 7164), 'mrq.queue.Queue', 'Queue', (['"""testx"""'], {}), "('testx')\n", (7155, 7164), False, 'from mrq.queue import Queue\n'), ((7189, 7203), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (7194, 7203), False, 'from mrq.queue import Queue\n'), ((7947, 7964), 'mrq.queue.Queue', 'Queue', (['"""test_raw"""'], {}), "('test_raw')\n", (7952, 7964), False, 'from mrq.queue import Queue\n'), ((7988, 8004), 'mrq.queue.Queue', 'Queue', (['"""default"""'], {}), "('default')\n", (7993, 8004), False, 'from mrq.queue import Queue\n'), ((8296, 8313), 'mrq.queue.Queue', 'Queue', (['"""test_raw"""'], {}), "('test_raw')\n", (8301, 8313), False, 'from mrq.queue import Queue\n'), ((8337, 8353), 'mrq.queue.Queue', 'Queue', (['"""default"""'], {}), "('default')\n", (8342, 8353), False, 'from mrq.queue import Queue\n'), ((1120, 1134), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (1125, 1134), False, 'from mrq.queue import Queue\n'), ((1389, 1403), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (1394, 1403), False, 'from mrq.queue import Queue\n'), ((1525, 1539), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (1530, 1539), False, 'from mrq.queue import Queue\n'), ((2021, 2035), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (2026, 2035), False, 'from mrq.queue import Queue\n'), ((2073, 2087), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (2078, 2087), False, 'from mrq.queue import Queue\n'), ((1435, 1449), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (1440, 1449), False, 'from mrq.queue import Queue\n'), ((1571, 1585), 'mrq.queue.Queue', 'Queue', (['p_queue'], {}), '(p_queue)\n', (1576, 1585), False, 'from mrq.queue import Queue\n')]
|
import math
from typing import Callable
class Method:
@staticmethod
def calculate(f: Callable, a: float, b: float):
pass
@staticmethod
def name():
pass
class left_rectangle(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return f(a) * (b - a)
@staticmethod
def name():
return "Формула левого прямоугольника"
class right_rectangle(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return f(b) * (b - a)
@staticmethod
def name():
return "Формула правого прямоугольника"
class middle_rectangle(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return f((a + b) / 2) * (b - a)
@staticmethod
def name():
return "Формула среднего прямоугольника"
class trapezoid(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return (f(a) + f(b)) / 2 * (b - a)
@staticmethod
def name():
return "Формула трапеции"
class simpson(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return (b - a) / 6 * (f(a) + 4 * f((a + b) / 2) + f(b))
@staticmethod
def name():
return "Формула Симпсона"
class three_eights(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
h = (b - a) / 3
return (b - a) * (f(a) + 3 * f(a + h) + 3 * f(a + 2 * h) + f(b)) / 8
@staticmethod
def name():
return "Формула 3 / 8"
def run():
polynomials = [
(lambda x: 5, lambda x: 5 * x, "y = 5"),
(lambda x: 3.7 * x - 2.39, lambda x: 3.7 * (x ** 2) / 2 - 2.39 * x, "y = 3.7 * x - 2.39"),
(lambda x: x ** 2 - 4 * x + 1.18, lambda x: (x ** 3) / 3 - 2 * x ** 2 + 1.18 * x, "y = x ** 2 - 4 * x + 1.18"),
(
lambda x: -17 * x ** 3 - 118 * x ** 2 + 10 * x + 27,
lambda x: -17 * (x ** 4) / 4 - 118 * (x ** 3) / 3 + 10 * (x ** 2) / 2 + 27 * x,
"y = -17 * x ** 3 - 118 * x ** 2 + 10 * x + 27"
)
]
functions = [
(lambda x: math.exp(-x) - (x ** 2) / 2, lambda x: -math.exp(-x) - (x ** 3) / 6, "y = math.exp(-x) - (x ** 2) / 2")
]
print("Задание 4. Приближённое вычисление интеграла по квадратурным формулам.")
print("Задача: Вычислите определённый интеграл от заданной функции f(x), используя квадратурные формулы.")
print("Введите нижний предел интегрирования (a):")
a = float(input())
print("Введите верхний предел интегрирования (b):")
b = float(input())
print("----------------------------------------------")
for f, integral_f, formula in polynomials + functions:
print(f"Рассматриваем функцию: {formula}")
for method in [right_rectangle, left_rectangle, middle_rectangle, trapezoid, simpson, three_eights]:
print(method.name())
res = method.calculate(f, a, b)
print(f"Результат вычисления: {res}")
print(f"Точное значение интеграла: {(integral_f(b) - integral_f(a))}")
print(f"Абсолютная фактическая погрешность: {abs((integral_f(b) - integral_f(a)) - res)}")
print("----------------------------------------------")
if __name__ == "__main__":
run()
|
[
"math.exp"
] |
[((2131, 2143), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (2139, 2143), False, 'import math\n'), ((2171, 2183), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (2179, 2183), False, 'import math\n')]
|
# -*- coding: utf-8 -*-
import decimal
from django.db import connection
class StatsManager:
def __init__(self):
self.cursor = connection.cursor()
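    # Helper: executes self.sql with the given parameters and returns a list
    # of (key, value) rows, converting Decimal values to plain floats.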
def _result(self, args):
result = []
self.cursor.execute(self.sql, args)
for k, v in self.cursor.fetchall():
if isinstance(v, decimal.Decimal):
v = float(v)
result.append((k, v,))
return result
    def cases_per_tech(self, location, queues, labels, start, end):
        # NOTE: unfinished stub in the original; assumes a User model with a
        # location field is importable in this scope.
        users = User.objects.filter(location=location)
def statuses_per_location(self, timescale, location, status, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, se.triggered_at))*1000 as p,
COUNT(*) AS v
FROM servo_order so, servo_event se
WHERE (se.triggered_at, se.triggered_at) OVERLAPS (%s, %s)
AND se.action = 'set_status'
AND se.object_id = so.id
AND so.location_id = %s
AND se.description = %s
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, start, end, location, status])
def statuses_per_user(self, timescale, user, status, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, se.triggered_at))*1000 as p,
COUNT(*) AS v
FROM servo_order so, servo_event se
WHERE (se.triggered_at, se.triggered_at) OVERLAPS (%s, %s)
AND se.action = 'set_status'
AND se.object_id = so.id
AND so.user_id = %s
AND se.description = %s
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, start, end, user, status])
def sales_invoices(self, timescale, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, so.created_at))*1000 as p,
SUM(total_gross) AS v
FROM servo_invoice si, servo_order so
WHERE (si.created_at, si.created_at) OVERLAPS (%s, %s)
AND si.order_id = so.id
AND so.queue_id = %s
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, start, end, queue])
def sales_purchases(self, timescale, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, po.created_at))*1000 as p,
SUM(total) AS v
FROM servo_purchaseorder po, servo_order so
WHERE (po.created_at, po.created_at) OVERLAPS (%s, %s)
AND po.sales_order_id = so.id
AND so.queue_id = %s
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, start, end, queue])
def sales_parts_per_labtier(self, start, end):
self.sql = """SELECT labour_tier, count(*)
FROM servo_product p, servo_servicepart sp, servo_serviceorderitem soi
WHERE soi.product_id = p.id
AND sp.order_item_id = soi.id
AND (soi.created_at, soi.created_at) OVERLAPS (%s, %s)
AND char_length(labour_tier) = 4
GROUP BY labour_tier
ORDER BY labour_tier"""
return self._result([start, end])
def order_runrate(self, timescale, location, user, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, started_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE user_id = %s
AND location_id = %s
AND (started_at, started_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, user, location, start, end])
def turnaround_per_location(self, timescale, location, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
EXTRACT(HOUR FROM AVG(closed_at - created_at)) as v
FROM servo_order
WHERE closed_at IS NOT NULL
AND location_id = %s
AND queue_id IS NOT NULL
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, start, end])
def runrate_per_location(self, timescale, location, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND closed_at IS NOT NULL
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, start, end])
def distribution_per_location(self, start, end):
result = []
self.sql = """SELECT l.title, COUNT(*)
FROM servo_order o LEFT OUTER JOIN servo_location l on (o.location_id = l.id)
WHERE (o.created_at, o.created_at) OVERLAPS (%s, %s)
GROUP BY l.title"""
self.cursor.execute(self.sql, [start, end])
for k, v in self.cursor.fetchall():
result.append({'label': k, 'data': v})
return result
def orders_created_by(self, timescale, location, user, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND created_by_id = %s
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, user, start, end])
def orders_created_at(self, timescale, location, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, start, end])
def orders_closed_at(self, timescale, location, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND (closed_at, closed_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, start, end])
def orders_closed_in(self, timescale, location, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND queue_id = %s
AND (closed_at, closed_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, queue, start, end])
def order_count(self, timescale, location, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND queue_id = %s
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, queue, start, end])
def order_turnaround(self, timescale, location, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
EXTRACT(HOUR FROM AVG(closed_at - created_at)) as v
FROM servo_order
WHERE closed_at IS NOT NULL
AND location_id = %s
AND queue_id = %s
AND queue_id IS NOT NULL
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, queue, start, end])
|
[
"django.db.connection.cursor"
] |
[((141, 160), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (158, 160), False, 'from django.db import connection\n')]
|
# -*- coding: utf-8 -*-
"""
@author: wangyouqish
"""
import sys
sys.path.append("..")
import time,datetime
import pytz
import requests
import feedparser
import threading
import database.dbConn as dbConn
import log.logCenter as logCenter
def getFeedFromLink(url,name):
head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'}
try:
page = requests.get(url, headers=head,timeout=(10,10))
page.encoding = 'utf-8'
page_content = page.text
if(page.status_code==404 or page.status_code==403):
logger.error(name+" 404/403 failed "+url)
return None
except:
logger.error(name+" download failed "+url)
return None
rss = feedparser.parse(page_content)
return rss
def getRsshubFeed(router,name,recommendedServerID):
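    # Try the recommended RSSHub server first; if that fails, fall back to
    # every configured server until one returns a usable feed.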
downloadedFlag=0
conn=dbConn.getConn()
cSer = conn.cursor()
cursorSer = cSer.execute("SELECT ID,Adress,FuncNum,FirstCheck,LastCheck from rsshubServers where ID={}".format(recommendedServerID))
rss=None
if(recommendedServerID!=0):
for row in cursorSer:
server=row[1]
url=server+router
rss = getFeedFromLink(url,name)
if(rss!=None):
downloadedFlag=1
cursorSer = cSer.execute("SELECT ID,Adress,FuncNum,FirstCheck,LastCheck from rsshubServers")#用所有服务器遍历
for row in cursorSer:
if(downloadedFlag==0):
server=row[1]
url=server+router
rss = getFeedFromLink(url,name)
if(rss!=None):
downloadedFlag=1
conn.close()
return rss
def saveOneNormalRss(url,name,unread):
conn=dbConn.getConn()
old=0
new=0
rss = getFeedFromLink(url,name)
if(rss==None):
return
for post in rss.entries:
title=post.title
summary=post.summary
link=post.link
try:
            itemTimestamp=int(time.mktime(post.published_parsed))#case 1: the item itself has a publish time, parsed by feedparser
except:
try:
                gmtTime=rss.feed.updated#case 2: GMT time taken from the feed's generation timestamp
local_tz = pytz.timezone('Asia/Shanghai')
utcDT=datetime.datetime.strptime(gmtTime,"%a, %d %b %Y %H:%M:%S GMT")
itemTimestamp=int(time.mktime(utcDT.replace(tzinfo=pytz.utc).astimezone(local_tz).timetuple()))
except:
try:
bjTime=post.published
bjT=time.strptime(bjTime,"%a,%d %b %Y %H:%M:%S +0800")
                    itemTimestamp=int(time.mktime(bjT))#only to accommodate cili001's time format
except:
itemTimestamp=int(time.time())
try:
conn.execute("INSERT INTO rssData (rssName,title,summary,timestamp,link) VALUES ('{}','{}','{}',{},'{}')".format(name,title,summary,itemTimestamp,link))
new=new+1
except:
            old=old+1 #insert failed, meaning the item is already stored (old news)
try:
unread=unread+new
sqlupdate="UPDATE normalrsslinks set lastget ={} ,unread={} where name='{}'".format(str(int(time.time())),unread,name)#更新rss任务列表最后提交时间
conn.execute(sqlupdate)
conn.commit()
conn.close()
except:
logger.warning("sqlerror "+name)
logger.info(name+" add new:"+str(new)+" old:"+str(old))
def saveOneRsshub(router,name,recommendedServerID,unread):
conn=dbConn.getConn()
old=0
new=0
rss = getRsshubFeed(router,name,recommendedServerID)
if(rss==None):
logger.warning(name+" all download failed "+router)#全部下载失败,可能是网络不好或者router无效、故障
return
for post in rss.entries:
title=post.title
summary=post.summary
link=post.link
try:
itemTimestamp=int(time.mktime(post.published_parsed))
except:
try:
gmtTime=rss.feed.updated
local_tz = pytz.timezone('Asia/Shanghai')
utcDT=datetime.datetime.strptime(gmtTime,"%a, %d %b %Y %H:%M:%S GMT")
itemTimestamp=int(time.mktime(utcDT.replace(tzinfo=pytz.utc).astimezone(local_tz).timetuple()))
except:
itemTimestamp=int(time.time())
try:
conn.execute("INSERT INTO rssData (rssName,title,summary,timestamp,link) VALUES ('{}','{}','{}',{},'{}')".format(name,title,summary,itemTimestamp,link))
new=new+1
except:
old=old+1
try:
unread=unread+new
sqlupdate="UPDATE rsshubtasks set lastget ={} ,unread={} where name='{}'".format(str(int(time.time())),unread,name)
conn.execute(sqlupdate)
conn.commit()
conn.close()
except:
logger.warning("sqlerror "+name)
logger.info("rsshubtask "+name+" add new:"+str(new)+" old:"+str(old))
def updateAllTasksTitle():
conn=dbConn.getConn()
cTask = conn.cursor()
cursor = cTask.execute("SELECT link , name , round , lastget ,title from normalrsslinks where active=1")
for row in cursor:
rss=getFeedFromLink(row[0],row[1])
conn.execute("UPDATE normalrsslinks set title='{}' where name='{}'".format(rss.feed.title,row[1]))
cursor = cTask.execute("SELECT router , name , round , lastget , recommendedServerID, title from rsshubtasks where active=1")
for row in cursor:
rss=getRsshubFeed(row[0],row[1],row[4])
conn.execute("UPDATE rsshubtasks set title='{}' where name='{}'".format(rss.feed.title,row[1]))
conn.commit()
conn.close()
def getAllRssData():
    conn=dbConn.getConn()
    taskDoCount=0
    taskWaitCount=0
cTask = conn.cursor()
cursor = cTask.execute("SELECT link , name , round , lastget ,unread from normalrsslinks where active=1")
thread_list = []
for row in cursor:
if((int(time.time())-int(row[3]))>row[2]):
t= threading.Thread(target=saveOneNormalRss,args=(row[0],row[1],row[4]))
t.start()
logger.debug(row[1]+" "+row[0]+" start")
taskDoCount=taskDoCount+1
thread_list.append(t)
else:
logger.debug(row[1]+" less than round")
taskWaitCount=taskWaitCount+1
    #above: the regular RSS tasks
cursor = cTask.execute("SELECT router , name , round , lastget , recommendedServerID ,unread from rsshubtasks where active=1")
for row in cursor:
if((int(time.time())-int(row[3]))>row[2]):
t= threading.Thread(target=saveOneRsshub,args=(row[0],row[1],row[4],row[5]))
t.start()
logger.debug("rsshub "+row[1]+" "+row[0]+" start")
taskDoCount=taskDoCount+1
thread_list.append(t)
else:
logger.debug("rsshub "+row[1]+" less than round")
taskWaitCount=taskWaitCount+1
for t in thread_list:
t.join()
logger.info(str(taskDoCount)+" tasks done "+str(taskWaitCount)+" tasks wait")
conn.commit()
conn.close()
    #above: the RSSHub router tasks
#main starts below; initialization needed when imported stays outside the if-main guard
logger=logCenter.getLogger("rssSpider")
if __name__ == "__main__":
getAllRssData()
|
[
"sys.path.append",
"feedparser.parse",
"threading.Thread",
"time.strptime",
"time.time",
"time.mktime",
"datetime.datetime.strptime",
"pytz.timezone",
"requests.get",
"log.logCenter.getLogger",
"database.dbConn.getConn"
] |
[((64, 85), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (79, 85), False, 'import sys\n'), ((7058, 7090), 'log.logCenter.getLogger', 'logCenter.getLogger', (['"""rssSpider"""'], {}), "('rssSpider')\n", (7077, 7090), True, 'import log.logCenter as logCenter\n'), ((783, 813), 'feedparser.parse', 'feedparser.parse', (['page_content'], {}), '(page_content)\n', (799, 813), False, 'import feedparser\n'), ((912, 928), 'database.dbConn.getConn', 'dbConn.getConn', ([], {}), '()\n', (926, 928), True, 'import database.dbConn as dbConn\n'), ((1729, 1745), 'database.dbConn.getConn', 'dbConn.getConn', ([], {}), '()\n', (1743, 1745), True, 'import database.dbConn as dbConn\n'), ((3415, 3431), 'database.dbConn.getConn', 'dbConn.getConn', ([], {}), '()\n', (3429, 3431), True, 'import database.dbConn as dbConn\n'), ((4867, 4883), 'database.dbConn.getConn', 'dbConn.getConn', ([], {}), '()\n', (4881, 4883), True, 'import database.dbConn as dbConn\n'), ((5567, 5583), 'database.dbConn.getConn', 'dbConn.getConn', ([], {}), '()\n', (5581, 5583), True, 'import database.dbConn as dbConn\n'), ((5631, 5647), 'database.dbConn.getConn', 'dbConn.getConn', ([], {}), '()\n', (5645, 5647), True, 'import database.dbConn as dbConn\n'), ((439, 488), 'requests.get', 'requests.get', (['url'], {'headers': 'head', 'timeout': '(10, 10)'}), '(url, headers=head, timeout=(10, 10))\n', (451, 488), False, 'import requests\n'), ((5894, 5966), 'threading.Thread', 'threading.Thread', ([], {'target': 'saveOneNormalRss', 'args': '(row[0], row[1], row[4])'}), '(target=saveOneNormalRss, args=(row[0], row[1], row[4]))\n', (5910, 5966), False, 'import threading\n'), ((6468, 6545), 'threading.Thread', 'threading.Thread', ([], {'target': 'saveOneRsshub', 'args': '(row[0], row[1], row[4], row[5])'}), '(target=saveOneRsshub, args=(row[0], row[1], row[4], row[5]))\n', (6484, 6545), False, 'import threading\n'), ((1985, 2019), 'time.mktime', 'time.mktime', (['post.published_parsed'], {}), '(post.published_parsed)\n', (1996, 2019), False, 'import time, datetime\n'), ((3788, 3822), 'time.mktime', 'time.mktime', (['post.published_parsed'], {}), '(post.published_parsed)\n', (3799, 3822), False, 'import time, datetime\n'), ((2182, 2212), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Shanghai"""'], {}), "('Asia/Shanghai')\n", (2195, 2212), False, 'import pytz\n'), ((2235, 2299), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['gmtTime', '"""%a, %d %b %Y %H:%M:%S GMT"""'], {}), "(gmtTime, '%a, %d %b %Y %H:%M:%S GMT')\n", (2261, 2299), False, 'import time, datetime\n'), ((3110, 3121), 'time.time', 'time.time', ([], {}), '()\n', (3119, 3121), False, 'import time, datetime\n'), ((3925, 3955), 'pytz.timezone', 'pytz.timezone', (['"""Asia/Shanghai"""'], {}), "('Asia/Shanghai')\n", (3938, 3955), False, 'import pytz\n'), ((3978, 4042), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['gmtTime', '"""%a, %d %b %Y %H:%M:%S GMT"""'], {}), "(gmtTime, '%a, %d %b %Y %H:%M:%S GMT')\n", (4004, 4042), False, 'import time, datetime\n'), ((4598, 4609), 'time.time', 'time.time', ([], {}), '()\n', (4607, 4609), False, 'import time, datetime\n'), ((5844, 5855), 'time.time', 'time.time', ([], {}), '()\n', (5853, 5855), False, 'import time, datetime\n'), ((6418, 6429), 'time.time', 'time.time', ([], {}), '()\n', (6427, 6429), False, 'import time, datetime\n'), ((2520, 2571), 'time.strptime', 'time.strptime', (['bjTime', '"""%a,%d %b %Y %H:%M:%S +0800"""'], {}), "(bjTime, '%a,%d %b %Y %H:%M:%S +0800')\n", (2533, 2571), False, 
'import time, datetime\n'), ((4210, 4221), 'time.time', 'time.time', ([], {}), '()\n', (4219, 4221), False, 'import time, datetime\n'), ((2609, 2625), 'time.mktime', 'time.mktime', (['bjT'], {}), '(bjT)\n', (2620, 2625), False, 'import time, datetime\n'), ((2707, 2718), 'time.time', 'time.time', ([], {}), '()\n', (2716, 2718), False, 'import time, datetime\n')]
|
import re
import time
import socket
import struct
import logging
import traceback
from functools import wraps
try:
    from Queue import Queue, Empty  # Python 2
except ImportError:
    from queue import Queue, Empty  # Python 3
try:
    string_types = basestring  # Python 2
except NameError:
    string_types = str  # Python 3
from collections import defaultdict
from threading import RLock, Thread, Semaphore
__all__ = ["Connection", "start_threads"]
__version_info__ = (1, 0, 2, "final", 0)
__version__ = "{0}.{1}.{2}".format(*__version_info__)
logger = logging.getLogger("collectd")
SEND_INTERVAL = 10 # seconds
MAX_PACKET_SIZE = 1024 # bytes
PLUGIN_TYPE = "gauge"
TYPE_HOST = 0x0000
TYPE_TIME = 0x0001
TYPE_PLUGIN = 0x0002
TYPE_PLUGIN_INSTANCE = 0x0003
TYPE_TYPE = 0x0004
TYPE_TYPE_INSTANCE = 0x0005
TYPE_VALUES = 0x0006
TYPE_INTERVAL = 0x0007
LONG_INT_CODES = [TYPE_TIME, TYPE_INTERVAL]
STRING_CODES = [TYPE_HOST, TYPE_PLUGIN, TYPE_PLUGIN_INSTANCE, TYPE_TYPE, TYPE_TYPE_INSTANCE]
VALUE_COUNTER = 0
VALUE_GAUGE = 1
VALUE_DERIVE = 2
VALUE_ABSOLUTE = 3
VALUE_CODES = {
VALUE_COUNTER: "!Q",
VALUE_GAUGE: "<d",
VALUE_DERIVE: "!q",
VALUE_ABSOLUTE: "!Q"
}
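# Each packet part is encoded as: type code (2 bytes) + part length (2 bytes)
# + payload. Numeric parts are 12 bytes total; string parts are null-terminated.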
def pack_numeric(type_code, number):
return struct.pack("!HHq", type_code, 12, number)
def pack_string(type_code, string):
return struct.pack("!HH", type_code, 5 + len(string)) + string + "\0"
def pack_value(name, value):
return "".join([
pack(TYPE_TYPE_INSTANCE, name),
struct.pack("!HHH", TYPE_VALUES, 15, 1),
struct.pack("<Bd", VALUE_GAUGE, value)
])
def pack(id, value):
    if isinstance(id, string_types):
return pack_value(id, value)
elif id in LONG_INT_CODES:
return pack_numeric(id, value)
elif id in STRING_CODES:
return pack_string(id, value)
else:
raise AssertionError("invalid type code " + str(id))
def message_start(when=None, host=socket.gethostname(), plugin_inst="", plugin_name="any"):
return "".join([
pack(TYPE_HOST, host),
pack(TYPE_TIME, when or time.time()),
pack(TYPE_PLUGIN, plugin_name),
pack(TYPE_PLUGIN_INSTANCE, plugin_inst),
pack(TYPE_TYPE, PLUGIN_TYPE),
pack(TYPE_INTERVAL, SEND_INTERVAL)
])
def messages(counts, when=None, host=socket.gethostname(), plugin_inst="", plugin_name="any"):
packets = []
start = message_start(when, host, plugin_inst, plugin_name)
parts = [pack(name, count) for name,count in counts.items()]
parts = [p for p in parts if len(start) + len(p) <= MAX_PACKET_SIZE]
if parts:
curr, curr_len = [start], len(start)
for part in parts:
if curr_len + len(part) > MAX_PACKET_SIZE:
packets.append("".join(curr))
curr, curr_len = [start], len(start)
curr.append(part)
curr_len += len(part)
packets.append("".join(curr))
return packets
def sanitize(s):
return re.sub(r"[^a-zA-Z0-9]+", "_", s).strip("_")
def swallow_errors(func):
@wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
try:
logger.error("unexpected error", exc_info = True)
except:
pass
return wrapped
def synchronized(method):
@wraps(method)
def wrapped(self, *args, **kwargs):
with self._lock:
return method(self, *args, **kwargs)
return wrapped
class Counter(object):
def __init__(self, category):
self.category = category
self._lock = RLock()
self.counts = defaultdict(lambda: defaultdict(float))
@swallow_errors
@synchronized
def record(self, *args, **kwargs):
for specific in list(args) + [""]:
            assert isinstance(specific, string_types)
for stat, value in kwargs.items():
assert isinstance(value, (int, float))
self.counts[str(specific)][str(stat)] += value
@swallow_errors
@synchronized
def set_exact(self, **kwargs):
for stat, value in kwargs.items():
assert isinstance(value, (int, float))
self.counts[""][str(stat)] = value
@synchronized
def snapshot(self):
totals = {}
for specific,counts in self.counts.items():
for stat in counts:
name_parts = map(sanitize, [self.category, specific, stat])
name = "-".join(name_parts).replace("--", "-")
totals[name] = counts[stat]
counts[stat] = 0.0
return totals
class Connection(object):
_lock = RLock() # class-level lock, only used for __new__
instances = {}
@synchronized
def __new__(cls, hostname = socket.gethostname(),
collectd_host = "localhost", collectd_port = 25826,
plugin_inst = "", plugin_name = "any"):
id = (hostname, collectd_host, collectd_port, plugin_inst, plugin_name)
if id in cls.instances:
return cls.instances[id]
else:
inst = object.__new__(cls)
cls.instances[id] = inst
return inst
def __init__(self, hostname = socket.gethostname(),
collectd_host = "localhost", collectd_port = 25826,
plugin_inst = "", plugin_name = "any"):
if "_counters" not in self.__dict__:
self._lock = RLock()
self._counters = {}
self._plugin_inst = plugin_inst
self._plugin_name = plugin_name
self._hostname = hostname
self._collectd_addr = (collectd_host, collectd_port)
@synchronized
def __getattr__(self, name):
if name.startswith("_"):
raise AttributeError("{0} object has no attribute {1!r}".format(self.__class__.__name__, name))
if name not in self._counters:
self._counters[name] = Counter(name)
return self._counters[name]
@synchronized
def _snapshot(self):
return [c.snapshot() for c in self._counters.values() if c.counts]
snaps = Queue()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def take_snapshots():
for conn in Connection.instances.values():
snapshots = conn._snapshot()
if snapshots:
stats = {}
for snapshot in snapshots:
stats.update(snapshot)
snaps.put([int(time.time()), stats, conn])
def send_stats(raise_on_empty = False):
try:
when, stats, conn = snaps.get(timeout = 0.1)
for message in messages(stats, when, conn._hostname, conn._plugin_inst, conn._plugin_name):
sock.sendto(message, conn._collectd_addr)
except Empty:
if raise_on_empty:
raise
def daemonize(func, sleep_for = 0):
@wraps(func)
def wrapped():
while True:
try:
func()
except:
try:
logger.error("unexpected error", exc_info = True)
except:
traceback.print_exc()
time.sleep(sleep_for)
t = Thread(target = wrapped)
t.daemon = True
t.start()
single_start = Semaphore()
def start_threads():
assert single_start.acquire(blocking = False)
daemonize(take_snapshots, sleep_for = SEND_INTERVAL)
daemonize(send_stats)
|
[
"threading.Thread",
"traceback.print_exc",
"threading.RLock",
"socket.socket",
"struct.pack",
"time.sleep",
"collections.defaultdict",
"socket.gethostname",
"time.time",
"functools.wraps",
"re.sub",
"threading.Semaphore",
"queue.Queue",
"logging.getLogger"
] |
[((462, 491), 'logging.getLogger', 'logging.getLogger', (['"""collectd"""'], {}), "('collectd')\n", (479, 491), False, 'import logging\n'), ((6127, 6134), 'queue.Queue', 'Queue', ([], {}), '()\n', (6132, 6134), False, 'from queue import Queue, Empty\n'), ((6142, 6190), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (6155, 6190), False, 'import socket\n'), ((7228, 7239), 'threading.Semaphore', 'Semaphore', ([], {}), '()\n', (7237, 7239), False, 'from threading import RLock, Thread, Semaphore\n'), ((1208, 1250), 'struct.pack', 'struct.pack', (['"""!HHq"""', 'type_code', '(12)', 'number'], {}), "('!HHq', type_code, 12, number)\n", (1219, 1250), False, 'import struct\n'), ((1893, 1913), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1911, 1913), False, 'import socket\n'), ((2264, 2284), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (2282, 2284), False, 'import socket\n'), ((3009, 3020), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (3014, 3020), False, 'from functools import wraps\n'), ((3300, 3313), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (3305, 3313), False, 'from functools import wraps\n'), ((4620, 4627), 'threading.RLock', 'RLock', ([], {}), '()\n', (4625, 4627), False, 'from threading import RLock, Thread, Semaphore\n'), ((6838, 6849), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (6843, 6849), False, 'from functools import wraps\n'), ((7153, 7175), 'threading.Thread', 'Thread', ([], {'target': 'wrapped'}), '(target=wrapped)\n', (7159, 7175), False, 'from threading import RLock, Thread, Semaphore\n'), ((3559, 3566), 'threading.RLock', 'RLock', ([], {}), '()\n', (3564, 3566), False, 'from threading import RLock, Thread, Semaphore\n'), ((4744, 4764), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4762, 4764), False, 'import socket\n'), ((5202, 5222), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (5220, 5222), False, 'import socket\n'), ((1461, 1500), 'struct.pack', 'struct.pack', (['"""!HHH"""', 'TYPE_VALUES', '(15)', '(1)'], {}), "('!HHH', TYPE_VALUES, 15, 1)\n", (1472, 1500), False, 'import struct\n'), ((1510, 1548), 'struct.pack', 'struct.pack', (['"""<Bd"""', 'VALUE_GAUGE', 'value'], {}), "('<Bd', VALUE_GAUGE, value)\n", (1521, 1548), False, 'import struct\n'), ((2933, 2964), 're.sub', 're.sub', (['"""[^a-zA-Z0-9]+"""', '"""_"""', 's'], {}), "('[^a-zA-Z0-9]+', '_', s)\n", (2939, 2964), False, 'import re\n'), ((5432, 5439), 'threading.RLock', 'RLock', ([], {}), '()\n', (5437, 5439), False, 'from threading import RLock, Thread, Semaphore\n'), ((7118, 7139), 'time.sleep', 'time.sleep', (['sleep_for'], {}), '(sleep_for)\n', (7128, 7139), False, 'import time\n'), ((3609, 3627), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (3620, 3627), False, 'from collections import defaultdict\n'), ((2035, 2046), 'time.time', 'time.time', ([], {}), '()\n', (2044, 2046), False, 'import time\n'), ((6448, 6459), 'time.time', 'time.time', ([], {}), '()\n', (6457, 6459), False, 'import time\n'), ((7084, 7105), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7103, 7105), False, 'import traceback\n')]
|
import os
import cv2
import math
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import timm
class VIT_Attention(nn.Module):
def __init__(self,
arch_name,
pretrained=False,
img_size=256,
multi_drop=False,
multi_drop_rate=0.5,
att_layer=False,
att_pattern="A"
):
super().__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.att_layer = att_layer
self.multi_drop = multi_drop
self.model = timm.create_model(
arch_name, pretrained=pretrained
)
n_features = self.model.head.in_features
self.model.head = nn.Identity()
self.head = nn.Linear(n_features, 5)
self.head_drops = nn.ModuleList()
for i in range(5):
self.head_drops.append(nn.Dropout(multi_drop_rate))
if att_layer:
if att_pattern == "A":
self.att_layer = nn.Sequential(
nn.Linear(n_features, 256),
nn.Tanh(),
nn.Linear(256, 1),
)
elif att_pattern == "B":
self.att_layer = nn.Linear(n_features, 1)
else:
raise ValueError("invalid att pattern")
def forward(self, x):
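        # With the attention head enabled, embed each image quadrant separately
        # and combine the four embeddings using learned attention weights.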
if self.att_layer:
l = x.shape[2] // 2
h1 = self.model(x[:, :, :l, :l])
h2 = self.model(x[:, :, :l, l:])
h3 = self.model(x[:, :, l:, :l])
h4 = self.model(x[:, :, l:, l:])
w = F.softmax(torch.cat([
self.att_layer(h1),
self.att_layer(h2),
self.att_layer(h3),
self.att_layer(h4),
], dim=1), dim=1)
h = h1 * w[:, 0].unsqueeze(-1) + \
h2 * w[:, 1].unsqueeze(-1) + \
h3 * w[:, 2].unsqueeze(-1) + \
h4 * w[:, 3].unsqueeze(-1)
else:
h = self.model(x)
if self.multi_drop:
for i, dropout in enumerate(self.head_drops):
if i == 0:
output = self.head(dropout(h))
else:
output += self.head(dropout(h))
output /= len(self.head_drops)
else:
output = self.head(h)
return output
|
[
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.nn.Tanh",
"timm.create_model",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.nn.Identity"
] |
[((644, 695), 'timm.create_model', 'timm.create_model', (['arch_name'], {'pretrained': 'pretrained'}), '(arch_name, pretrained=pretrained)\n', (661, 695), False, 'import timm\n'), ((793, 806), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (804, 806), True, 'import torch.nn as nn\n'), ((828, 852), 'torch.nn.Linear', 'nn.Linear', (['n_features', '(5)'], {}), '(n_features, 5)\n', (837, 852), True, 'import torch.nn as nn\n'), ((879, 894), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (892, 894), True, 'import torch.nn as nn\n'), ((512, 537), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (535, 537), False, 'import torch\n'), ((957, 984), 'torch.nn.Dropout', 'nn.Dropout', (['multi_drop_rate'], {}), '(multi_drop_rate)\n', (967, 984), True, 'import torch.nn as nn\n'), ((1112, 1138), 'torch.nn.Linear', 'nn.Linear', (['n_features', '(256)'], {}), '(n_features, 256)\n', (1121, 1138), True, 'import torch.nn as nn\n'), ((1160, 1169), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1167, 1169), True, 'import torch.nn as nn\n'), ((1191, 1208), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (1200, 1208), True, 'import torch.nn as nn\n'), ((1298, 1322), 'torch.nn.Linear', 'nn.Linear', (['n_features', '(1)'], {}), '(n_features, 1)\n', (1307, 1322), True, 'import torch.nn as nn\n')]
|
from torch.functional import Tensor
import torchvision.models as models
import torch.nn as nn
class Encoder_VGG16(nn.Module):
def __init__(self):
super(Encoder_VGG16, self).__init__()
pretrained_model = models.vgg16(pretrained=True)
self.conv_base = pretrained_model.features
# Freeze All layers as they will be used for inference
for param in self.conv_base.parameters():
param.requires_grad = False
        # Flatten layer that flattens dimensions 2 and 3 (H and W of the feature maps, respectively)
self.flat = nn.Flatten(2,3)
def forward(self, x):
# For an image size of (224x224) --> x dims (batch_size, 3, 244 , 244)
features = self.conv_base(x)
# For an image size of (224x224) --> features dims (batch_size, feat_maps=512, H=7 , W=7)
features = self.flat(features)
# For an image size of (224x224) --> features dims (batch_size, 512, 7x7=49)
return features
class Encoder_ResNet50(nn.Module):
def __init__(self):
super(Encoder_ResNet50, self).__init__()
pretrained_model = models.resnet50(pretrained=True)
modules = list(pretrained_model.children())[:-2]
self.conv_base = nn.Sequential(*modules)
# Freeze All layers as they will be used for inference
for param in self.conv_base.parameters():
param.requires_grad = False
        # Flatten layer that flattens dimensions 2 and 3 (H and W of the feature maps, respectively)
self.flat = nn.Flatten(2,3)
def forward(self, x):
# For an image size of (224x224) --> x dims (batch_size, 3, H=224 , W=224)
features = self.conv_base(x)
# For an image size of (224x224) --> features dims (batch_size, feat_maps=2048, H=7 , W=7)
features = self.flat(features)
# For an image size of (224x224) --> features dims (batch_size, feat_maps=2048, 7x7=49)
return features
class Encoder_DenseNet(nn.Module):
def __init__(self):
super(Encoder_DenseNet, self).__init__()
pretrained_model = models.densenet161(pretrained=True)
self.conv_base = pretrained_model.features
# Freeze All layers as they will be used for inference
for param in self.conv_base.parameters():
param.requires_grad = False
        # Flatten layer that flattens dimensions 2 and 3 (H and W of the feature maps, respectively)
self.flat = nn.Flatten(2,3)
# We apply here a ReLU
self.relu = nn.ReLU()
def forward(self, x):
        # For an image size of (224x224) --> features dims (batch_size, feat_maps=2208, H=7 , W=7)
        features = self.conv_base(x)
        # For an image size of (224x224) --> features dims (batch_size, 2208, 7x7=49)
        features = self.flat(features)
        # ReLU keeps the shape unchanged: (batch_size, 2208, 49)
return self.relu(features)
|
[
"torch.nn.ReLU",
"torch.nn.Sequential",
"torchvision.models.densenet161",
"torchvision.models.resnet50",
"torchvision.models.vgg16",
"torch.nn.Flatten"
] |
[((225, 254), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (237, 254), True, 'import torchvision.models as models\n'), ((585, 601), 'torch.nn.Flatten', 'nn.Flatten', (['(2)', '(3)'], {}), '(2, 3)\n', (595, 601), True, 'import torch.nn as nn\n'), ((1146, 1178), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1161, 1178), True, 'import torchvision.models as models\n'), ((1262, 1285), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (1275, 1285), True, 'import torch.nn as nn\n'), ((1563, 1579), 'torch.nn.Flatten', 'nn.Flatten', (['(2)', '(3)'], {}), '(2, 3)\n', (1573, 1579), True, 'import torch.nn as nn\n'), ((2121, 2156), 'torchvision.models.densenet161', 'models.densenet161', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2139, 2156), True, 'import torchvision.models as models\n'), ((2493, 2509), 'torch.nn.Flatten', 'nn.Flatten', (['(2)', '(3)'], {}), '(2, 3)\n', (2503, 2509), True, 'import torch.nn as nn\n'), ((2562, 2571), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2569, 2571), True, 'import torch.nn as nn\n')]
|
# Deletes selected objects recursively from the object hierarchy
import bpy
obj = bpy.context.object
stack = [obj]
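# Depth-first walk: select every descendant so the final delete call
# removes the whole hierarchy at once.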
while len(stack) > 0:
tmp = stack.pop()
if hasattr(tmp, "children"):
for child in tmp.children:
child.select = True
stack.append(child)
bpy.ops.object.delete()
|
[
"bpy.ops.object.delete"
] |
[((265, 288), 'bpy.ops.object.delete', 'bpy.ops.object.delete', ([], {}), '()\n', (286, 288), False, 'import bpy\n')]
|
"""Implementations of edge walk aggregators."""
import abc
import torch
from torch import nn
class BaseAggregator(abc.ABC, nn.Module):
"""Base class for edge walk aggregators."""
def __init__(self):
"""Inits BaseAggregator."""
super().__init__()
self._device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu'
)
@abc.abstractmethod
def aggregate(
self,
edge_features: torch.Tensor,
nodes_features: torch.Tensor,
) -> torch.Tensor:
"""Aggregates single edge walk into feature vector."""
pass
|
[
"torch.cuda.is_available"
] |
[((333, 358), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (356, 358), False, 'import torch\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2014 pyReScene
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Docs for a quicker understanding:
# http://wiki.multimedia.cx/index.php?title=QuickTime_container
# http://code.google.com/p/mp4parser/
import os
import struct
from rescene.utility import is_rar
from rescene.rarstream import RarStream
BE_LONG = struct.Struct('>L') # unsigned long: 4 bytes
BE_LONGLONG = struct.Struct('>Q') # unsigned long long: 8 bytes
class MovReadMode(object):
MP4, Sample, SRS = list(range(3))
# MP4 == Sample, but doesn't throw InvalidDataException
class InvalidDataException(ValueError):
pass
class Atom(object):
def __init__(self, size, object_guid):
"""size: full size of the atom (including 2 first header fields)
object_guid: the type of the atom (moov, mdat,...)"""
self.size = size
self.type = object_guid
self.raw_header = b""
self.start_pos = -1
def __repr__(self, *args, **kwargs):
return "<Atom type=%r size=%d start_pos=%d>" % (self.type,
self.size, self.start_pos)
class MovReader(object):
"""Implements a simple Reader class that reads through MP4
or MP4-SRS files one atom/box at a time.
atom: QuickTime File Format
box: ISO/IEC 14496-12:2008"""
def __init__(self, read_mode, path=None, stream=None,
archived_file_name=""):
assert path or stream
if path:
if is_rar(path):
self._mov_stream = RarStream(path, archived_file_name)
else:
self._mov_stream = open(path, 'rb')
elif stream:
self._mov_stream = stream
self._mov_stream.seek(0, 2)
self._file_length = self._mov_stream.tell()
self._mov_stream.seek(0)
self.mode = read_mode
self.read_done = True
self.current_atom = None
self.atom_type = None
def read(self):
# "Read() is invalid at this time", "MoveToChild(), ReadContents(), or
# SkipContents() must be called before Read() can be called again")
assert self.read_done or (self.mode == MovReadMode.SRS and
self.atom_type == b"mdat")
atom_start_position = self._mov_stream.tell()
self.current_atom = None
self.read_done = False
# no room for size (4B) and type (4B) of the atom
if atom_start_position + 8 > self._file_length:
return False
self._atom_header = self._mov_stream.read(8)
# 4 bytes for atom length, 4 bytes for atom type
(atom_length,) = BE_LONG.unpack_from(self._atom_header)
self.atom_type = self._atom_header[4:]
# special sizes
hsize = 8
if atom_length == 1:
# 8-byte size field after the atom type
bsize = self._mov_stream.read(8)
(atom_length,) = BE_LONGLONG.unpack(bsize)
self._atom_header += bsize
hsize += 8
elif atom_length == 0:
# print("Box without size found.")
# FoV/COMPULSiON samples have an atom that consists of just 8
# null bytes. This is the case if it is followed by an mdat
# try to make it work with those samples too
# https://code.google.com/p/mp4parser/ can not open these files!
if self.atom_type == b"\x00\x00\x00\x00":
atom_length = 8
else:
# the atom extends to the end of the file
atom_length = self._file_length - atom_start_position
# sanity check on atom length
# Skip check on mdat so we can still report expected size.
# This is only applied on samples,
# since a partial movie might still be useful.
end_offset = atom_start_position + atom_length
if (self.mode == MovReadMode.Sample and self.atom_type != b"mdat" and
end_offset > self._file_length):
raise InvalidDataException("Invalid box length at 0x%08X" %
atom_start_position)
self.current_atom = Atom(atom_length, self.atom_type)
self.current_atom.raw_header = self._atom_header
self.current_atom.start_pos = atom_start_position
self._mov_stream.seek(atom_start_position, os.SEEK_SET)
# Apple Computer reserves
# all four-character codes consisting entirely of lowercase letters.
return True
def read_contents(self):
# if read_done is set, we've already read or skipped it.
# back up and read again?
if self.read_done:
self._mov_stream.seek(self.current_atom.start_pos, os.SEEK_SET)
self.read_done = True
buff = b""
if (self.mode != MovReadMode.SRS and self.atom_type == b"mdat"):
raise NotImplementedError("Programming error: implement this "
"for mdat atoms using the chunk method. These mdat atoms "
"can become enormous and cause a MemoryError.")
        # always read when it's not an SRS file
# else skip it when encountering removed data
if (self.mode != MovReadMode.SRS or self.atom_type != b"mdat"):
# skip header bytes
hl = len(self.current_atom.raw_header)
self._mov_stream.seek(hl, os.SEEK_CUR)
buff = self._mov_stream.read(self.current_atom.size - hl)
return buff
def read_contents_chunks(self, chunk_size=65536):
"""Lazy function (generator) to read a lot of data piece by piece."""
if self.atom_type != b"mdat" or self.mode == MovReadMode.SRS:
raise NotImplementedError("Only use this for 'mdat' atoms.")
self.read_done = True
# skip header bytes
hl = len(self.current_atom.raw_header)
self._mov_stream.seek(self.current_atom.start_pos + hl, os.SEEK_SET)
end_offset = self.current_atom.start_pos + self.current_atom.size
todo = self.current_atom.size - hl # to prevent ending up in a loop
while todo != 0 and self._mov_stream.tell() + todo == end_offset:
amount = end_offset - self._mov_stream.tell()
if amount > chunk_size:
amount = chunk_size
todo -= amount
yield self._mov_stream.read(amount)
def skip_contents(self):
if not self.read_done:
self.read_done = True
            # always skip when it's not an SRS file
# else skip it when encountering removed data
if (self.mode != MovReadMode.SRS
or self.atom_type != b"mdat"):
self._mov_stream.seek(self.current_atom.start_pos +
self.current_atom.size,
os.SEEK_SET)
def move_to_child(self):
self.read_done = True
# skip the header bytes
hl = len(self.current_atom.raw_header)
self._mov_stream.seek(hl, os.SEEK_CUR)
def close(self):
try: # close the file/stream
self._mov_stream.close()
except:
pass
def __del__(self):
try: # close the file/stream
self._mov_stream.close()
except:
pass
|
[
"struct.Struct",
"rescene.rarstream.RarStream",
"rescene.utility.is_rar"
] |
[((1397, 1416), 'struct.Struct', 'struct.Struct', (['""">L"""'], {}), "('>L')\n", (1410, 1416), False, 'import struct\n'), ((1457, 1476), 'struct.Struct', 'struct.Struct', (['""">Q"""'], {}), "('>Q')\n", (1470, 1476), False, 'import struct\n'), ((2423, 2435), 'rescene.utility.is_rar', 'is_rar', (['path'], {}), '(path)\n', (2429, 2435), False, 'from rescene.utility import is_rar\n'), ((2460, 2495), 'rescene.rarstream.RarStream', 'RarStream', (['path', 'archived_file_name'], {}), '(path, archived_file_name)\n', (2469, 2495), False, 'from rescene.rarstream import RarStream\n')]
|
from math import sqrt
from PEPit import PEP
from PEPit.functions import ConvexLipschitzFunction
def wc_subgradient_method(M, n, gamma, verbose=1):
"""
Consider the minimization problem
.. math:: f_\\star \\triangleq \\min_x f(x),
where :math:`f` is convex and :math:`M`-Lipschitz. This problem is a (possibly non-smooth) minimization problem.
This code computes a worst-case guarantee for the **subgradient** method. That is, it computes
the smallest possible :math:`\\tau(n, M, \\gamma)` such that the guarantee
.. math:: \\min_{0 \leqslant t \leqslant n} f(x_t) - f_\\star \\leqslant \\tau(n, M, \\gamma) \|x_0 - x_\\star\|
is valid, where :math:`x_t` is the output of the **subgradient** method after :math:`t\\leqslant n` steps,
and where :math:`x_\\star` is the minimizer of :math:`f`.
In short, for given values of :math:`M`, the step-size :math:`\\gamma` and the number of iterations :math:`n`,
:math:`\\tau(n, M, \\gamma)` is computed as the worst-case value of
:math:`\\min_{0 \leqslant t \leqslant n} f(x_t) - f_\\star` when :math:`\\|x_0-x_\\star\\| \\leqslant 1`.
**Algorithm**:
For :math:`t\\in \\{0, \\dots, n-1 \\}`
.. math::
:nowrap:
\\begin{eqnarray}
g_{t} & \\in & \\partial f(x_t) \\\\
x_{t+1} & = & x_t - \\gamma g_t
\\end{eqnarray}
**Theoretical guarantee**: The **tight** bound is obtained in [1, Section 3.2.3] and [2, Eq (2)]
.. math:: \\min_{0 \\leqslant t \\leqslant n} f(x_t)- f(x_\\star) \\leqslant \\frac{M}{\\sqrt{n+1}}\|x_0-x_\\star\|,
and tightness follows from the lower complexity bound for this class of problems, e.g., [3, Appendix A].
**References**: Classical references on this topic include [1, 2].
`[1] <NAME> (2003). Introductory lectures on convex optimization: A basic course.
Springer Science & Business Media.
<https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.855&rep=rep1&type=pdf>`_
`[2] <NAME>, <NAME>, <NAME> (2003). Subgradient Methods (lecture notes).
<https://web.stanford.edu/class/ee392o/subgrad_method.pdf>`_
`[3] <NAME>, <NAME> (2016). An optimal variant of Kelley's cutting-plane method.
Mathematical Programming, 160(1), 321-351.
<https://arxiv.org/pdf/1409.2636.pdf>`_
Args:
M (float): the Lipschitz parameter.
n (int): the number of iterations.
gamma (float): step-size.
verbose (int): Level of information details to print.
                        - -1: No verbose at all.
- 0: This example's output.
- 1: This example's output + PEPit information.
- 2: This example's output + PEPit information + CVXPY details.
Returns:
pepit_tau (float): worst-case value
theoretical_tau (float): theoretical value
Example:
>>> M = 2
>>> n = 6
>>> gamma = 1 / (M * sqrt(n + 1))
>>> pepit_tau, theoretical_tau = wc_subgradient_method(M=M, n=n, gamma=gamma, verbose=1)
(PEPit) Setting up the problem: size of the main PSD matrix: 9x9
(PEPit) Setting up the problem: performance measure is minimum of 7 element(s)
(PEPit) Setting up the problem: initial conditions (1 constraint(s) added)
(PEPit) Setting up the problem: interpolation conditions for 1 function(s)
function 1 : 64 constraint(s) added
(PEPit) Compiling SDP
(PEPit) Calling SDP solver
(PEPit) Solver status: optimal (solver: SCS); optimal value: 0.7559825331741553
*** Example file: worst-case performance of subgradient method ***
PEPit guarantee: min_(0 \leq t \leq n) f(x_i) - f_* <= 0.755983 ||x_0 - x_*||
Theoretical guarantee: min_(0 \leq t \leq n) f(x_i) - f_* <= 0.755929 ||x_0 - x_*||
"""
# Instantiate PEP
problem = PEP()
# Declare a convex lipschitz function
func = problem.declare_function(ConvexLipschitzFunction, M=M)
# Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
xs = func.stationary_point()
fs = func(xs)
# Then define the starting point x0 of the algorithm
x0 = problem.set_initial_point()
# Set the initial constraint that is the distance between x0 and xs
problem.set_initial_condition((x0 - xs)**2 <= 1)
# Run n steps of the subgradient method
x = x0
gx, fx = func.oracle(x)
for _ in range(n):
problem.set_performance_metric(fx - fs)
x = x - gamma * gx
gx, fx = func.oracle(x)
# Set the performance metric to the function value accuracy
problem.set_performance_metric(fx - fs)
# Solve the PEP
pepit_verbose = max(verbose, 0)
pepit_tau = problem.solve(verbose=pepit_verbose)
# Compute theoretical guarantee (for comparison)
theoretical_tau = M / sqrt(n + 1)
# Print conclusion if required
if verbose != -1:
print('*** Example file: worst-case performance of subgradient method ***')
print('\tPEPit guarantee:\t min_(0 \leq t \leq n) f(x_i) - f_* <= {:.6} ||x_0 - x_*||'.format(pepit_tau))
print('\tTheoretical guarantee:\t min_(0 \leq t \leq n) f(x_i) - f_* <= {:.6} ||x_0 - x_*||'.format(
theoretical_tau))
# Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
return pepit_tau, theoretical_tau
if __name__ == "__main__":
M = 2
n = 6
gamma = 1 / (M * sqrt(n + 1))
pepit_tau, theoretical_tau = wc_subgradient_method(M=M, n=n, gamma=gamma, verbose=1)
|
[
"math.sqrt",
"PEPit.PEP"
] |
[((3966, 3971), 'PEPit.PEP', 'PEP', ([], {}), '()\n', (3969, 3971), False, 'from PEPit import PEP\n'), ((4968, 4979), 'math.sqrt', 'sqrt', (['(n + 1)'], {}), '(n + 1)\n', (4972, 4979), False, 'from math import sqrt\n'), ((5585, 5596), 'math.sqrt', 'sqrt', (['(n + 1)'], {}), '(n + 1)\n', (5589, 5596), False, 'from math import sqrt\n')]
|
import logging
import azure.functions as func
import azure.durable_functions as df
def generator_function(context):
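    # Fan-out: schedule 30 activity calls in parallel, then fan-in by
    # awaiting all of them with task_all.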
tasks = []
for i in range(30):
current_task = context.df.callActivity("DurableActivity", str(i))
tasks.append(current_task)
results = yield context.df.task_all(tasks)
logging.warn(f"!!! fanout results {results}")
return results
def main(context: str):
logging.warn("Durable Orchestration Trigger: " + context)
orchestrate = df.Orchestrator.create(generator_function)
logging.warn("!!!type(orchestrate) " + str(type(orchestrate)))
result = orchestrate(context)
logging.warn("!!!serialized json : " + result)
logging.warn("!!!type(result) " + str(type(result)))
return result
|
[
"logging.warn",
"azure.durable_functions.Orchestrator.create"
] |
[((319, 364), 'logging.warn', 'logging.warn', (['f"""!!! fanout results {results}"""'], {}), "(f'!!! fanout results {results}')\n", (331, 364), False, 'import logging\n'), ((414, 471), 'logging.warn', 'logging.warn', (["('Durable Orchestration Trigger: ' + context)"], {}), "('Durable Orchestration Trigger: ' + context)\n", (426, 471), False, 'import logging\n'), ((490, 532), 'azure.durable_functions.Orchestrator.create', 'df.Orchestrator.create', (['generator_function'], {}), '(generator_function)\n', (512, 532), True, 'import azure.durable_functions as df\n'), ((638, 684), 'logging.warn', 'logging.warn', (["('!!!serialized json : ' + result)"], {}), "('!!!serialized json : ' + result)\n", (650, 684), False, 'import logging\n')]
|
from uqcsbot import bot, Command
from uqcsbot.utils.command_utils import loading_status
from typing import Tuple
import requests
from bs4 import BeautifulSoup as Soup
def get_pf_parking_data() -> Tuple[int, str]:
"""
    Returns the HTTP status code and the parking page HTML from the UQ P&F website
"""
page = requests.get("https://pg.pf.uq.edu.au/")
return (page.status_code, page.text)
@bot.on_command("parking")
@loading_status
def handle_parking(command: Command) -> None:
"""
`!parking [all]` - Displays how many car parks are available at UQ St. Lucia
    By default, only displays casual parking availability
"""
if command.has_arg() and command.arg.lower() == "all":
permit = True
else:
permit = False
# read parking data
code, data = get_pf_parking_data()
if code != 200:
bot.post_message(command.channel_id, "Could Not Retrieve Parking Data")
return
response = ["*Available Parks at UQ St. Lucia*"]
names = {"P1": "P1 - Warehouse (14P Daily)", "P2": "P2 - Space Bank (14P Daily)",
"P3": "P3 - Multi-Level West (Staff)", "P4": "P4 - Multi-Level East (Staff)",
"P6": "P6 - Hartley Teakle (14P Hourly)", "P7": "P7 - DustBowl (14P Daily)",
"P7 UC": "P7 - Keith Street (14P Daily Capped)",
"P8 L1": "P8 - Athletics Basement (14P Daily)",
"P8 L2": "P8 - Athletics Roof (14P Daily)", "P9": "P9 - Boatshed (14P Daily)",
"P10": "P10 - UQ Centre & Playing Fields (14P Daily/14P Daily Capped)",
"P11 L1": "P11 - Conifer Knoll Lower (Staff)",
"P11 L2": "P11 - Conifer Knoll Upper (Staff)",
"P11 L3": "P11 - Conifer Knoll Roof (14P Daily Restricted)"}
def category(fill):
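        # Map the P&F fill-level descriptions onto rough availability labels.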
if fill.upper() == "FULL":
return "No"
if fill.upper() == "NEARLY FULL":
return "Few"
return fill
# find parks
table = Soup(data, "html.parser").find("table", attrs={"id": "parkingAvailability"})
rows = table.find_all("tr")[1:]
# split and join for single space whitespace
areas = [[" ".join(i.get_text().split()) for i in j.find_all("td")] for j in rows]
for area in areas:
if area[2]:
response.append(f"{category(area[2])} Carparks Available in {names[area[0]]}")
elif permit and area[1]:
response.append(f"{category(area[1])} Carparks Available in {names[area[0]]}")
bot.post_message(command.channel_id, "\n".join(response))
|
[
"bs4.BeautifulSoup",
"uqcsbot.bot.on_command",
"uqcsbot.bot.post_message",
"requests.get"
] |
[((388, 413), 'uqcsbot.bot.on_command', 'bot.on_command', (['"""parking"""'], {}), "('parking')\n", (402, 413), False, 'from uqcsbot import bot, Command\n'), ((303, 343), 'requests.get', 'requests.get', (['"""https://pg.pf.uq.edu.au/"""'], {}), "('https://pg.pf.uq.edu.au/')\n", (315, 343), False, 'import requests\n'), ((838, 909), 'uqcsbot.bot.post_message', 'bot.post_message', (['command.channel_id', '"""Could Not Retrieve Parking Data"""'], {}), "(command.channel_id, 'Could Not Retrieve Parking Data')\n", (854, 909), False, 'from uqcsbot import bot, Command\n'), ((1941, 1966), 'bs4.BeautifulSoup', 'Soup', (['data', '"""html.parser"""'], {}), "(data, 'html.parser')\n", (1945, 1966), True, 'from bs4 import BeautifulSoup as Soup\n')]
|
import csv
import datetime
import os
from django.contrib.gis.geos import Point
from django.core.management import BaseCommand, CommandError
from django.db import transaction
from geopy import Nominatim
from countries.models import Country
from report.models import Report, Sighting, ReportedViaChoice
from users.models import User
class Command(BaseCommand):
help = "compute first sighting"
def handle(self, *args, **kwargs):
compute_first_sighting()
def compute_first_sighting():
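    # For each report, clear the first-sighting flag on all of its sightings,
    # then mark only the earliest one (by heard_on) as the first sighting.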
with transaction.atomic():
reports = Report.objects.all()
# print("Total reports : " + str(len(reports)))
for index, report in enumerate(reports):
# print("Report " + str(index + 1) + "/" + str(len(reports)))
sighting = Sighting.objects.filter(report=report).order_by("heard_on").first()
if sighting:
Sighting.objects.filter(report=report).update(is_first_sighting=False)
sighting.is_first_sighting = True
sighting.save()
|
[
"report.models.Report.objects.all",
"django.db.transaction.atomic",
"report.models.Sighting.objects.filter"
] |
[((513, 533), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (531, 533), False, 'from django.db import transaction\n'), ((553, 573), 'report.models.Report.objects.all', 'Report.objects.all', ([], {}), '()\n', (571, 573), False, 'from report.models import Report, Sighting, ReportedViaChoice\n'), ((885, 923), 'report.models.Sighting.objects.filter', 'Sighting.objects.filter', ([], {'report': 'report'}), '(report=report)\n', (908, 923), False, 'from report.models import Report, Sighting, ReportedViaChoice\n'), ((776, 814), 'report.models.Sighting.objects.filter', 'Sighting.objects.filter', ([], {'report': 'report'}), '(report=report)\n', (799, 814), False, 'from report.models import Report, Sighting, ReportedViaChoice\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 23:54:49 2018
@author: tyler
"""
import numpy as np
#%%
def postprocess_cut(supernodes_original,supernodes_f,supernode_nonempty_Q,not_loop_Q):
'''
    returns : partition of the original vertices of $G$ and the size of the corresponding cut
'''
sn = supernodes_f[supernode_nonempty_Q]
size_E = sum(not_loop_Q)
if len(sn[0])<len(sn[1]):
sn = sn[0]
else:
sn = sn[1]
for v in list(sn):
        if v >= len(supernodes_original):  # use the argument, not the module-level global
sn.remove(v)
for sn_o in supernodes_original:
if v in sn_o:
sn.add(min(sn_o))
break
return list(sn),size_E
#%%
def karger(E,G,supernodes,supernode_nonempty_Q,not_loop_Q):
# start = time.clock()
size_E = np.shape(E)[0]
size_V = sum(supernode_nonempty_Q)
f = 0
s = 0
sn0 = {}
for j in range(size_V-2):
if j%500==0:
print('===================')
print('iteration:', j)
'''
print('find endpoints: ',f)
print('find loops : ', s)
f = 0
s = 0
print('sn0 size: ', len(sn0))
sn_count = sum(map(len,supernodes[np.where(supernode_nonempty_Q)[0]]))
print('numer of vert :', sn_count)
print('numer of sns :', sum(supernode_nonempty_Q))
'''
# pick random edge
#probably can't be faster than this unless we can compile
cs = np.cumsum(not_loop_Q)
# rand_idx = np.where(cs > np.random.randint(cs[-1]))[0][0]
        rand_idx = np.searchsorted(cs, np.random.randint(cs[-1]), side='right')  # side='right': first index where cs exceeds the draw, so masked-out (zero-increment) indices are never chosen
e0,e1 = E[rand_idx]
#find edge endopoint vertices
# start = time.clock()
supernode_nonempty_idx = np.where(supernode_nonempty_Q)[0]
for i0 in supernode_nonempty_idx:
if e0 in supernodes[i0]:
break
for i1 in supernode_nonempty_idx[::-1]:
if e1 in supernodes[i1]:
break
# f += (time.clock() - start)
# merge vertex equivalence classes
sn0 = supernodes[i0]
sn1 = supernodes[i1]
# find loops
# search for edges with one end in sn0 and one in sn1
# start = time.clock()
for i in sn0:
Gi = G[i]
for j in sn1:
Gij = Gi[j]
if Gij != -1:
if not_loop_Q[Gij]:
not_loop_Q[Gij] = False
# s += time.clock() - start
# put sn1 into sn0 and sn1 into sn0 and delete sn1
supernodes[i0] = supernodes[i0] | supernodes[i1]
supernode_nonempty_Q[i1] = False
return supernodes,supernode_nonempty_Q,not_loop_Q
#%% load data
d = np.load('b0_pre.npz')
E=d['E']
G=d['G']
supernodes_=d['supernodes_']
supernode_nonempty_Q_=d['supernode_nonempty_Q_']
del d
#%%
supernodes,supernode_nonempty_Q = np.copy(supernodes_),np.copy(supernode_nonempty_Q_)
not_loop_Q = np.ones(len(E),dtype='bool')
supernodes,supernode_nonempty_Q,not_loop_Q = karger(E,G,supernodes,supernode_nonempty_Q,not_loop_Q)
#%%
postprocess_cut(supernodes_,supernodes,supernode_nonempty_Q,not_loop_Q)
#%%
len(E[np.any(E==8,axis=1)])
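#%% illustrative, standalone version of the masked-sampling trick used in karger():
# cumsum + searchsorted(..., side='right') draws an index uniformly from the
# True positions of a boolean mask (toy values, not part of the original script)
mask = np.array([True, False, True, True])
cs = np.cumsum(mask)                                          # [1, 1, 2, 3]
idx = np.searchsorted(cs, np.random.randint(cs[-1]), side='right')
assert mask[idx]                                              # a masked-out index is never drawn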
|
[
"numpy.load",
"numpy.copy",
"numpy.shape",
"numpy.cumsum",
"numpy.any",
"numpy.random.randint",
"numpy.where"
] |
[((2949, 2970), 'numpy.load', 'np.load', (['"""b0_pre.npz"""'], {}), "('b0_pre.npz')\n", (2956, 2970), True, 'import numpy as np\n'), ((3117, 3137), 'numpy.copy', 'np.copy', (['supernodes_'], {}), '(supernodes_)\n', (3124, 3137), True, 'import numpy as np\n'), ((3138, 3168), 'numpy.copy', 'np.copy', (['supernode_nonempty_Q_'], {}), '(supernode_nonempty_Q_)\n', (3145, 3168), True, 'import numpy as np\n'), ((844, 855), 'numpy.shape', 'np.shape', (['E'], {}), '(E)\n', (852, 855), True, 'import numpy as np\n'), ((1584, 1605), 'numpy.cumsum', 'np.cumsum', (['not_loop_Q'], {}), '(not_loop_Q)\n', (1593, 1605), True, 'import numpy as np\n'), ((3402, 3424), 'numpy.any', 'np.any', (['(E == 8)'], {'axis': '(1)'}), '(E == 8, axis=1)\n', (3408, 3424), True, 'import numpy as np\n'), ((1713, 1738), 'numpy.random.randint', 'np.random.randint', (['cs[-1]'], {}), '(cs[-1])\n', (1730, 1738), True, 'import numpy as np\n'), ((1901, 1931), 'numpy.where', 'np.where', (['supernode_nonempty_Q'], {}), '(supernode_nonempty_Q)\n', (1909, 1931), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from scipy.io import loadmat
import cv2
import time
import deepLabv3.deeplab as deeplab
from deepLabv3.pascal import VOCSegmentation
from deepLabv3.cityscapes import Cityscapes
from deepLabv3.utils import AverageMeter, inter_and_union, load_model
from deepLabv3.detector import Detector
from deepLabv3.argLoader import ArgLoader
def main():
assert torch.cuda.is_available()
argloader = ArgLoader()
args = argloader.args
torch.backends.cudnn.benchmark = True
if args.dataset == 'pascal':
dataset = VOCSegmentation(
args.voc_path,
train=args.train, crop_size=args.crop_size)
elif args.dataset == 'cityscapes':
dataset = Cityscapes(
args.cityscape_path,
train=args.train, crop_size=args.crop_size)
else:
raise ValueError('Unknown dataset: {}'.format(args.dataset))
model, model_fname = load_model(args, dataset.CLASSES)
detector = Detector(model)
if args.train:
detector.train(dataset, model_fname, args)
else:
torch.cuda.set_device(args.gpu)
model = model.cuda()
model.eval()
checkpoint = torch.load(model_fname % args.epochs)
state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()
if 'tracked' not in k}
model.load_state_dict(state_dict)
cmap = loadmat('data/pascal_seg_colormap.mat')['colormap']
cmap = (cmap * 255).astype(np.uint8).flatten().tolist()
inter_meter = AverageMeter()
union_meter = AverageMeter()
for i in range(len(dataset)):
prev_time = time.time()
inputs, target, fname = dataset[i]
pred = detector.inference(inputs)
mask = target.numpy().astype(np.uint8)
inter, union = inter_and_union(pred, mask, len(dataset.CLASSES))
inter_meter.update(inter)
union_meter.update(union)
print("time elapsed", time.time() - prev_time)
iou = inter_meter.sum / (union_meter.sum + 1e-10)
for i, val in enumerate(iou):
print('IoU {0}: {1:.2f}'.format(dataset.CLASSES[i], val * 100))
print('Mean IoU: {0:.2f}'.format(iou.mean() * 100))
if __name__ == "__main__":
main()
|
[
"deepLabv3.cityscapes.Cityscapes",
"deepLabv3.utils.AverageMeter",
"scipy.io.loadmat",
"torch.load",
"deepLabv3.argLoader.ArgLoader",
"deepLabv3.pascal.VOCSegmentation",
"time.time",
"torch.cuda.is_available",
"torch.cuda.set_device",
"deepLabv3.utils.load_model",
"deepLabv3.detector.Detector"
] |
[((388, 413), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (411, 413), False, 'import torch\n'), ((430, 441), 'deepLabv3.argLoader.ArgLoader', 'ArgLoader', ([], {}), '()\n', (439, 441), False, 'from deepLabv3.argLoader import ArgLoader\n'), ((948, 981), 'deepLabv3.utils.load_model', 'load_model', (['args', 'dataset.CLASSES'], {}), '(args, dataset.CLASSES)\n', (958, 981), False, 'from deepLabv3.utils import AverageMeter, inter_and_union, load_model\n'), ((997, 1012), 'deepLabv3.detector.Detector', 'Detector', (['model'], {}), '(model)\n', (1005, 1012), False, 'from deepLabv3.detector import Detector\n'), ((562, 636), 'deepLabv3.pascal.VOCSegmentation', 'VOCSegmentation', (['args.voc_path'], {'train': 'args.train', 'crop_size': 'args.crop_size'}), '(args.voc_path, train=args.train, crop_size=args.crop_size)\n', (577, 636), False, 'from deepLabv3.pascal import VOCSegmentation\n'), ((1102, 1133), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (1123, 1133), False, 'import torch\n'), ((1205, 1242), 'torch.load', 'torch.load', (['(model_fname % args.epochs)'], {}), '(model_fname % args.epochs)\n', (1215, 1242), False, 'import torch\n'), ((1561, 1575), 'deepLabv3.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1573, 1575), False, 'from deepLabv3.utils import AverageMeter, inter_and_union, load_model\n'), ((1598, 1612), 'deepLabv3.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1610, 1612), False, 'from deepLabv3.utils import AverageMeter, inter_and_union, load_model\n'), ((735, 810), 'deepLabv3.cityscapes.Cityscapes', 'Cityscapes', (['args.cityscape_path'], {'train': 'args.train', 'crop_size': 'args.crop_size'}), '(args.cityscape_path, train=args.train, crop_size=args.crop_size)\n', (745, 810), False, 'from deepLabv3.cityscapes import Cityscapes\n'), ((1422, 1461), 'scipy.io.loadmat', 'loadmat', (['"""data/pascal_seg_colormap.mat"""'], {}), "('data/pascal_seg_colormap.mat')\n", (1429, 1461), False, 'from scipy.io import loadmat\n'), ((1676, 1687), 'time.time', 'time.time', ([], {}), '()\n', (1685, 1687), False, 'import time\n'), ((2024, 2035), 'time.time', 'time.time', ([], {}), '()\n', (2033, 2035), False, 'import time\n')]
|
import time
from autocfr.worker import Worker, VecWorker, GroupVecWorker
import numpy as np
import ray
class DiverContainer(Worker):
@ray.remote
def run(task):
a = task["a"]
b = task["b"]
result = {
"worker_index": task["worker_index"],
"group_index": task["group_index"],
"a": a,
"b": b,
}
try:
time.sleep(a)
out = a / b
except Exception as e:
result["status"] = "fail"
result["error"] = e
result["info"] = str(e)
else:
result["status"] = "succ"
result["out"] = out
return result
class Diver(Worker):
def run(self, task):
try:
result = self.get_result_dict(task)
a = task["a"]
b = task["b"]
time.sleep(int(a))
            out = a / b
except Exception as e:
result["state"] = "fail"
result["error"] = e
result["info"] = str(e)
else:
result["state"] = "succ"
result["out"] = out
return result
def test_run():
diver = Diver(1)
result = diver.run(dict(a=1, b=0))
assert result["state"] == "fail"
def atest_parallel_run():
ray.init()
vec_worker = VecWorker(3, Diver)
for i in range(10):
a = np.random.randint(low=0, high=100)
b = np.random.randint(low=0, high=100)
vec_worker.add_task(dict(a=a, b=b))
for i in range(20):
time.sleep(0.01)
result = vec_worker.get_result()
print(vec_worker.get_info())
ray.shutdown()
def atest_parallel_run_sync():
ray.init()
vec_worker = VecWorker(2, Diver)
tasks = []
for i in range(10):
a = np.random.randint(low=0, high=100)
b = np.random.randint(low=0, high=100)
tasks.append(dict(a=a, b=b))
results = vec_worker.execute_tasks(tasks)
for task, result in zip(tasks, results):
print(task["a"], task["b"], task["a"] / task["b"], result["out"])
ray.shutdown()
def atest_group_run():
ray.init()
group_vec_worker = GroupVecWorker(10, DiverContainer)
group_vec_worker.add_tasks([dict(a=3, b=4), dict(a=3, b=7), dict(a=1, b=1)])
group_vec_worker.add_tasks([dict(a=3, b=4), dict(a=5, b=0)])
group_vec_worker.add_tasks([dict(a=1, b=0), dict(a=3, b=4), dict(a=3, b=0)])
group_vec_worker.add_tasks([dict(a=1, b=4), dict(a=3, b=0), dict(a=2, b=4), ])
for i in range(20):
time.sleep(1)
print(group_vec_worker.info())
while True:
result = group_vec_worker.get_result()
if result is not None:
print(result)
else:
break
ray.shutdown()
|
[
"ray.init",
"time.sleep",
"numpy.random.randint",
"ray.shutdown",
"autocfr.worker.VecWorker",
"autocfr.worker.GroupVecWorker"
] |
[((1315, 1325), 'ray.init', 'ray.init', ([], {}), '()\n', (1323, 1325), False, 'import ray\n'), ((1343, 1362), 'autocfr.worker.VecWorker', 'VecWorker', (['(3)', 'Diver'], {}), '(3, Diver)\n', (1352, 1362), False, 'from autocfr.worker import Worker, VecWorker, GroupVecWorker\n'), ((1656, 1670), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (1668, 1670), False, 'import ray\n'), ((1708, 1718), 'ray.init', 'ray.init', ([], {}), '()\n', (1716, 1718), False, 'import ray\n'), ((1736, 1755), 'autocfr.worker.VecWorker', 'VecWorker', (['(2)', 'Diver'], {}), '(2, Diver)\n', (1745, 1755), False, 'from autocfr.worker import Worker, VecWorker, GroupVecWorker\n'), ((2095, 2109), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (2107, 2109), False, 'import ray\n'), ((2139, 2149), 'ray.init', 'ray.init', ([], {}), '()\n', (2147, 2149), False, 'import ray\n'), ((2173, 2207), 'autocfr.worker.GroupVecWorker', 'GroupVecWorker', (['(10)', 'DiverContainer'], {}), '(10, DiverContainer)\n', (2187, 2207), False, 'from autocfr.worker import Worker, VecWorker, GroupVecWorker\n'), ((2784, 2798), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (2796, 2798), False, 'import ray\n'), ((1399, 1433), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(100)'}), '(low=0, high=100)\n', (1416, 1433), True, 'import numpy as np\n'), ((1446, 1480), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(100)'}), '(low=0, high=100)\n', (1463, 1480), True, 'import numpy as np\n'), ((1557, 1573), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1567, 1573), False, 'import time\n'), ((1807, 1841), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(100)'}), '(low=0, high=100)\n', (1824, 1841), True, 'import numpy as np\n'), ((1854, 1888), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(100)'}), '(low=0, high=100)\n', (1871, 1888), True, 'import numpy as np\n'), ((2551, 2564), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2561, 2564), False, 'import time\n'), ((406, 419), 'time.sleep', 'time.sleep', (['a'], {}), '(a)\n', (416, 419), False, 'import time\n')]
|
# Copyright 2021 - 2022, <NAME> <<EMAIL>>, Dr. <NAME> <<EMAIL>>
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# This is a small debug utility that prints the PID KMLogger is
# running under, to help with profiling
import os
from base.util import block_text
def print_pid():
block_text("PID")
print(os.getpid())
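# Usage sketch: call print_pid() once at startup so an external profiler can
# attach by PID (e.g. `py-spy top --pid <printed pid>`; the profiler choice is
# an example, not something this module mandates):
#   print_pid()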
|
[
"base.util.block_text",
"os.getpid"
] |
[((376, 393), 'base.util.block_text', 'block_text', (['"""PID"""'], {}), "('PID')\n", (386, 393), False, 'from base.util import block_text\n'), ((404, 415), 'os.getpid', 'os.getpid', ([], {}), '()\n', (413, 415), False, 'import os\n')]
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
from typing import Set, Tuple
from botocore.exceptions import ClientError
from intelliflow.core.platform.definitions.aws.common import _is_trust_policy_AWS_principle_deleted
logger = logging.getLogger(__name__)
_allow_block_sid = "DONOT_DELETE_allow_use_of_the_key"
_admin_sid = "DONOT_DELETE_admin_access"
# https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html
KMS_MIN_DELETION_WAITING_PERIOD_IN_DAYS = 7
KMS_MAX_DELETION_WAITING_PERIOD_IN_DAYS = 30
def create_cmk(kms_client, policy: str, desc: str = "RheocerOS CMK") -> Tuple[str, str]:
"""Create a KMS (Symmetric) Customer Master Key
The created CMK is a Customer-managed key stored in AWS KMS.
Please note that a brand new key is 'enabled' by default.
https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
:param desc: key description
:return Tuple(KeyId, KeyArn) where:
KeyId: AWS globally-unique string ID
KeyArn: Amazon Resource Name of the CMK
"""
try:
response = kms_client.create_key(Policy=policy, Description=desc)
except ClientError:
logger.info("Couldn't create new KMS CMK (desc=%s).", desc)
raise
else:
# Return the key ID and ARN
return response["KeyMetadata"]["KeyId"], response["KeyMetadata"]["Arn"]
def schedule_cmk_deletion(kms_client, id_or_arn: str, pending_window_in_days=KMS_MIN_DELETION_WAITING_PERIOD_IN_DAYS) -> "datetime":
"""Refer
https://boto3.amazonaws.com/v1/documentation/api/1.9.42/reference/services/kms.html#KMS.Client.schedule_key_deletion
:returns <datetime> 'DeletionDate' from KMS response; by when the key will be irrevocably deleted.
"""
if pending_window_in_days < KMS_MIN_DELETION_WAITING_PERIOD_IN_DAYS or pending_window_in_days > KMS_MAX_DELETION_WAITING_PERIOD_IN_DAYS:
raise ValueError(
f"Please provide a 'pending_window_in_days' value between 7 and 30 (inclusive) for the " f"deletion of KMS CMK {id_or_arn}."
)
try:
response = kms_client.schedule_key_deletion(KeyId=id_or_arn, PendingWindowInDays=pending_window_in_days)
except ClientError as err:
if err.response["Error"]["Code"] not in ["NotFoundException", "NotFound", "ResourceNotFoundException"]:
# see it was already in PendingDeletion state
key_metadata = kms_client.describe_key(KeyId=id_or_arn)
if "KeyMetadata" in key_metadata and key_metadata["KeyMetadata"]["KeyState"] == "PendingDeletion":
return key_metadata["KeyMetadata"]["DeletionDate"]
raise
else:
return response["DeletionDate"]
def get_cmk(kms_client, id_or_arn_or_alias: str) -> Tuple[str, str]:
"""refer
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.describe_key
:return Tuple(KeyId, KeyArn) where:
KeyId: AWS globally-unique string ID
KeyArn: Amazon Resource Name of the CMK
"""
try:
response = kms_client.describe_key(KeyId=id_or_arn_or_alias)
except ClientError as err:
if err.response["Error"]["Code"] not in ["NotFoundException"]:
logger.info("Couldn't get KMS CMK (alias=%s).", id_or_arn_or_alias)
raise
return None, None
if "KeyMetadata" in response:
return response["KeyMetadata"]["KeyId"], response["KeyMetadata"]["Arn"]
return None, None
def create_alias(kms_client, alias_name: str, target_key_id: str) -> None:
try:
response = kms_client.create_alias(AliasName=alias_name, TargetKeyId=target_key_id)
except ClientError:
logger.info("Couldn't create new alias '%s' for KMS CMK.", alias_name)
raise
def update_alias(kms_client, alias_name: str, target_key_id: str) -> None:
try:
response = kms_client.update_alias(AliasName=alias_name, TargetKeyId=target_key_id)
except ClientError:
raise
def delete_alias(kms_client, alias_name: str):
try:
kms_client.delete_alias(AliasName=alias_name)
except ClientError:
logger.info("Couldn't delete KMS alias '%s'.", alias_name)
raise
def create_default_policy(account_id: str, users_to_be_added: Set[str], admins: Set[str], trust_same_account=False) -> str:
default_policy = {"Version": "2012-10-17", "Id": "IntelliFlow-CMK-policy", "Statement": []}
if admins or trust_same_account:
admin_list = list(admins)
if trust_same_account:
admin_list.append(f"arn:aws:iam::{account_id}:root")
# see https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam
default_policy["Statement"].append(
{"Sid": _admin_sid, "Effect": "Allow", "Principal": {"AWS": admin_list}, "Action": "kms:*", "Resource": "*"}
)
elif not users_to_be_added:
raise ValueError(f"Cannot risk KMS CMK lockout due to no AWS entity as a trustee in the policy.")
if users_to_be_added:
current_statements = default_policy["Statement"]
new_aws_entity_list = list(users_to_be_added)
current_statements.append(
{
"Sid": _allow_block_sid,
"Effect": "Allow",
"Principal": {"AWS": new_aws_entity_list},
"Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
"Resource": "*",
}
)
return json.dumps(default_policy)
def put_cmk_policy(
kms_client,
key_id: str,
account_id: str,
users_to_be_added: Set[str] = set(),
users_to_be_removed: Set[str] = set(),
trust_same_account: bool = None,
) -> None:
default_policy = {"Version": "2012-10-17", "Id": f"IntelliFlow-CMK-{key_id}-policy", "Statement": []}
same_account_root = f"arn:aws:iam::{account_id}:root"
# https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html
# the only valid policy name is 'default'
policy_name = "default"
change_detected = False
current_policy_doc = None
try:
response = kms_client.get_key_policy(KeyId=key_id, PolicyName=policy_name)
current_policy_doc = json.loads(response["Policy"])
except ClientError as policy_error:
if policy_error.response["Error"]["Code"] not in ["NotFoundException"]:
raise
current_policy_doc = default_policy
change_detected = True
current_statements = current_policy_doc["Statement"]
new_aws_entity_set = set(users_to_be_added)
allow_block_found = False
admin_block_found = False
removed_statement_indexes = []
for i, statement in enumerate(current_statements):
sid = statement.get("Sid", None)
if sid == _allow_block_sid:
allow_block_found = True
current_aws_entity_list = statement["Principal"]["AWS"]
current_aws_entity_list = [current_aws_entity_list] if isinstance(current_aws_entity_list, str) else current_aws_entity_list
# first check deleted entities
for current_entity in current_aws_entity_list:
if _is_trust_policy_AWS_principle_deleted(current_entity):
users_to_be_removed.add(current_entity)
if users_to_be_added or users_to_be_removed:
if bool(new_aws_entity_set - set(current_aws_entity_list)) or set(current_aws_entity_list).intersection(
users_to_be_removed
):
change_detected = True
for current_aws_entity in current_aws_entity_list:
if current_aws_entity not in users_to_be_removed:
new_aws_entity_set.add(current_aws_entity)
if new_aws_entity_set:
statement["Principal"]["AWS"] = list(new_aws_entity_set)
else:
removed_statement_indexes.append(i)
elif sid == _admin_sid:
admin_block_found = True
current_admins_set = statement["Principal"]["AWS"]
current_admins_set = {current_admins_set} if isinstance(current_admins_set, str) else set(current_admins_set)
new_admins_set = set(current_admins_set)
# - clean-up if a zombie entity is still here (due to manual modifications, deletions, etc)
# - clean-up deleted users from admin list as well (if in there)
for current_entity in current_admins_set:
if _is_trust_policy_AWS_principle_deleted(current_entity) or current_entity in users_to_be_removed:
change_detected = True
new_admins_set.remove(current_entity)
if trust_same_account is not None:
if trust_same_account is True and same_account_root not in new_admins_set:
change_detected = True
new_admins_set.add(same_account_root)
elif trust_same_account is False and same_account_root in new_admins_set:
change_detected = True
new_admins_set.remove(same_account_root)
if new_admins_set != current_admins_set:
statement["Principal"]["AWS"] = list(new_admins_set)
# delete in reverse order so that iteration won't mess up
for i in sorted(removed_statement_indexes, reverse=True):
del current_statements[i]
if not allow_block_found and new_aws_entity_set:
change_detected = True
current_statements.append(
{
"Sid": _allow_block_sid,
"Effect": "Allow",
"Principal": {"AWS": list(new_aws_entity_set)},
"Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
"Resource": "*",
}
)
if not admin_block_found and trust_same_account is True:
change_detected = True
# this normally should not happen (unless the policy had some other admin level statement)
# we still want to have our own well-defined block.
default_policy["Statement"].append(
{"Sid": _admin_sid, "Effect": "Allow", "Principal": {"AWS": same_account_root}, "Action": "kms:*", "Resource": "*"}
)
if change_detected:
try:
response = kms_client.put_key_policy(
KeyId=key_id, PolicyName=policy_name, Policy=json.dumps(current_policy_doc), BypassPolicyLockoutSafetyCheck=False
)
except ClientError:
logger.info("Couldn't update policy for KMS key '%s'.", key_id)
raise
def enable_key_rotation(kms_client, key_id: str):
try:
response = kms_client.enable_key_rotation(KeyId=key_id)
except ClientError:
logger.info("Couldn't enable key rotation for KMS key '%s'.", key_id)
raise
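# Illustrative wiring of the helpers above (assumes a real boto3 KMS client;
# the account id and role ARN below are made up):
#
# import boto3
# kms = boto3.client("kms")
# policy = create_default_policy(
#     account_id="123456789012",
#     users_to_be_added={"arn:aws:iam::123456789012:role/app-role"},
#     admins=set(),
#     trust_same_account=True,
# )
# key_id, key_arn = create_cmk(kms, policy, desc="example CMK")
# create_alias(kms, alias_name="alias/example-key", target_key_id=key_id)
# enable_key_rotation(kms, key_id)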
|
[
"intelliflow.core.platform.definitions.aws.common._is_trust_policy_AWS_principle_deleted",
"json.loads",
"logging.getLogger",
"json.dumps"
] |
[((321, 348), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (338, 348), False, 'import logging\n'), ((5632, 5658), 'json.dumps', 'json.dumps', (['default_policy'], {}), '(default_policy)\n', (5642, 5658), False, 'import json\n'), ((6375, 6405), 'json.loads', 'json.loads', (["response['Policy']"], {}), "(response['Policy'])\n", (6385, 6405), False, 'import json\n'), ((7315, 7369), 'intelliflow.core.platform.definitions.aws.common._is_trust_policy_AWS_principle_deleted', '_is_trust_policy_AWS_principle_deleted', (['current_entity'], {}), '(current_entity)\n', (7353, 7369), False, 'from intelliflow.core.platform.definitions.aws.common import _is_trust_policy_AWS_principle_deleted\n'), ((10615, 10645), 'json.dumps', 'json.dumps', (['current_policy_doc'], {}), '(current_policy_doc)\n', (10625, 10645), False, 'import json\n'), ((8670, 8724), 'intelliflow.core.platform.definitions.aws.common._is_trust_policy_AWS_principle_deleted', '_is_trust_policy_AWS_principle_deleted', (['current_entity'], {}), '(current_entity)\n', (8708, 8724), False, 'from intelliflow.core.platform.definitions.aws.common import _is_trust_policy_AWS_principle_deleted\n')]
|
from flask import jsonify, request
from flask_restful import Resource, reqparse
from processing import agent_checkin
from processing.user_role import authorized_groups
from logger import log
agent_checkin_parser = reqparse.RequestParser()
agent_checkin_parser.add_argument('TransportId')
agent_checkin_parser.add_argument('SourceIP')
agent_checkin_parser.add_argument('Message')
class AgentCheckinEndpoint(Resource):
@authorized_groups(['StandardRead', 'Transport'])
def get(self, agent_name):
log("AgentCheckinEndpoint:GET", "AgentID: {0}".format(agent_name))
task_obj = agent_checkin.process_agent_checkin(agent_name=agent_name,
transport_id=request.args.get('TransportId'),
source_ip=request.args.get('SourceIp'))
return jsonify(task_obj)
@authorized_groups(['StandardWrite', 'Transport'])
def post(self, agent_name):
args = agent_checkin_parser.parse_args()
log("AgentCheckinEndpoint:POST", "AgentID: %s | Args: {0}".format(agent_name, args))
task_obj = agent_checkin.process_agent_checkin(agent_name=agent_name,
transport_id=args.get('TransportId'),
                                                    source_ip=args.get('SourceIP'),
message=args.get('Message'))
return jsonify(task_obj)
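# Illustrative client calls (hedged: the URL rule binding this Resource is
# registered elsewhere, so the "/agent/<agent_name>" path is an assumption):
#   GET  /agent/beacon-01?TransportId=t1&SourceIp=10.0.0.5   -> queued task JSON
#   POST /agent/beacon-01  with form fields TransportId, SourceIP, Message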
|
[
"processing.user_role.authorized_groups",
"flask.jsonify",
"flask_restful.reqparse.RequestParser",
"flask.request.args.get"
] |
[((216, 240), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (238, 240), False, 'from flask_restful import Resource, reqparse\n'), ((425, 473), 'processing.user_role.authorized_groups', 'authorized_groups', (["['StandardRead', 'Transport']"], {}), "(['StandardRead', 'Transport'])\n", (442, 473), False, 'from processing.user_role import authorized_groups\n'), ((894, 943), 'processing.user_role.authorized_groups', 'authorized_groups', (["['StandardWrite', 'Transport']"], {}), "(['StandardWrite', 'Transport'])\n", (911, 943), False, 'from processing.user_role import authorized_groups\n'), ((870, 887), 'flask.jsonify', 'jsonify', (['task_obj'], {}), '(task_obj)\n', (877, 887), False, 'from flask import jsonify, request\n'), ((1476, 1493), 'flask.jsonify', 'jsonify', (['task_obj'], {}), '(task_obj)\n', (1483, 1493), False, 'from flask import jsonify, request\n'), ((727, 758), 'flask.request.args.get', 'request.args.get', (['"""TransportId"""'], {}), "('TransportId')\n", (743, 758), False, 'from flask import jsonify, request\n'), ((825, 853), 'flask.request.args.get', 'request.args.get', (['"""SourceIp"""'], {}), "('SourceIp')\n", (841, 853), False, 'from flask import jsonify, request\n')]
|
import json
import os
from urllib.parse import parse_qsl
import asyncio
from requests_oauthlib import OAuth1Session
from flask import Flask, jsonify, request, redirect, url_for
from flask import render_template
from citrus_drop import CitrusDrop
app = Flask(__name__)
user_drop = {
    'screen_name': 'Not fetched',
'last_update': '-',
'profile_image_url': './static/not_found.png',
    'followers_count': 'Not fetched',
    'friends_count': 'Not fetched',
'result': []
}
@app.route('/update', methods=['GET'])
def update():
global loop
title = "CitrusDrop"
page = "main"
#loop = asyncio.get_event_loop()
print("update呼ばれた")
hoge = loop.run_until_complete(update_dict())
print(hoge)
return render_template('main.html', title=title, page=page, message=user_drop, disabled="true")
async def update_dict():
title = "CitrusDrop"
page = "main"
    print('got this far')
task = loop.create_task(asyncio.sleep(10))
await task
return render_template('main.html', title=title, page=page, message=user_drop, disabled="false")
@app.route('/')
def main():
print("呼ばれた")
disabled = "false"
print(request.args.get('disabled'))
if request.args.get('disabled'):
disabled = "true"
else:
pass
title = "CitrusDrop"
page = "main"
return render_template('main.html', title=title, page=page, message=user_drop, disabled=disabled)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(app.run())
#app.run()
|
[
"asyncio.get_event_loop",
"flask.request.args.get",
"asyncio.sleep",
"flask.Flask",
"flask.render_template"
] |
[((255, 270), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (260, 270), False, 'from flask import Flask, jsonify, request, redirect, url_for\n'), ((745, 837), 'flask.render_template', 'render_template', (['"""main.html"""'], {'title': 'title', 'page': 'page', 'message': 'user_drop', 'disabled': '"""true"""'}), "('main.html', title=title, page=page, message=user_drop,\n disabled='true')\n", (760, 837), False, 'from flask import render_template\n'), ((1006, 1099), 'flask.render_template', 'render_template', (['"""main.html"""'], {'title': 'title', 'page': 'page', 'message': 'user_drop', 'disabled': '"""false"""'}), "('main.html', title=title, page=page, message=user_drop,\n disabled='false')\n", (1021, 1099), False, 'from flask import render_template\n'), ((1214, 1242), 'flask.request.args.get', 'request.args.get', (['"""disabled"""'], {}), "('disabled')\n", (1230, 1242), False, 'from flask import Flask, jsonify, request, redirect, url_for\n'), ((1347, 1441), 'flask.render_template', 'render_template', (['"""main.html"""'], {'title': 'title', 'page': 'page', 'message': 'user_drop', 'disabled': 'disabled'}), "('main.html', title=title, page=page, message=user_drop,\n disabled=disabled)\n", (1362, 1441), False, 'from flask import render_template\n'), ((1478, 1502), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1500, 1502), False, 'import asyncio\n'), ((960, 977), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (973, 977), False, 'import asyncio\n'), ((1177, 1205), 'flask.request.args.get', 'request.args.get', (['"""disabled"""'], {}), "('disabled')\n", (1193, 1205), False, 'from flask import Flask, jsonify, request, redirect, url_for\n')]
|
import sympy
_id = lambda x: x
class Kinematics(object):
"""Robot symbolic Jacobians.
kinobj.J: list of link frame Jacobians - complete (6 x N):
[linear_velocity
angular_velocity] = J * joint_velocities
kinobj.Jc: list of link center-of-mass Jacobians - complete
kinobj.Jp: list of link frame Jacobians - linear velocity part only
kinobj.Jo: list of link frame Jacobians - angular velocity part only
kinobj.Jcp: list of link center-of-mass Jacobians - linear part
kinobj.Jco: list of link center-of-mass Jacobians - angular part
"""
def __init__(self, robotdef, geom, ifunc=None):
if not ifunc:
ifunc = _id
self.rbtdef = robotdef
self.geom = geom
self.dof = self.rbtdef.dof
def sym_skew(v):
return sympy.Matrix([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
if self.rbtdef._dh_convention == 'standard':
# extend z and p so that z[-1] and p[-1] return values from base
# frame
z_ext = geom.z + [sympy.Matrix([0, 0, 1])]
p_ext = geom.p + [sympy.zeros(3, 1)]
self.Jp = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jp[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jp[l][0:3, j] = ifunc(z_ext[j - 1])
else:
self.Jp[l][0:3, j] = ifunc(z_ext[j - 1].cross(
(p_ext[l] - p_ext[j - 1])).reshape(3, 1))
self.Jo = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jo[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jo[l][0:3, j] = sympy.zeros(3, 1)
else:
self.Jo[l][0:3, j] = ifunc(z_ext[j - 1])
elif self.rbtdef._dh_convention == 'modified':
self.Jp = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jp[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jp[l][0:3, j] = ifunc(geom.z[j])
else:
self.Jp[l][0:3, j] = ifunc(geom.z[j].cross(
(geom.p[l] - geom.p[j])).reshape(3, 1))
self.Jo = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jo[l] = sympy.zeros(3, self.rbtdef.dof)
for j in range(l + 1):
if self.rbtdef._links_sigma[j]:
self.Jo[l][0:3, j] = sympy.zeros(3, 1)
else:
self.Jo[l][0:3, j] = ifunc(geom.z[j])
self.J = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.J[l] = self.Jp[l].col_join(self.Jo[l])
self.Jcp = list(range(self.rbtdef.dof))
self.Jco = self.Jo
for l in range(self.rbtdef.dof):
self.Jcp[l] = ifunc(self.Jp[l] - sym_skew(
geom.R[l] * sympy.Matrix(self.rbtdef.l[l])) * self.Jo[l])
self.Jc = list(range(self.rbtdef.dof))
for l in range(self.rbtdef.dof):
self.Jc[l] = self.Jcp[l].col_join(self.Jco[l])
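if __name__ == '__main__':
    # Standalone sketch of the Jacobian convention documented in the class
    # docstring (illustrative values, independent of the robot-definition
    # objects this class expects): for a revolute joint j under the standard DH
    # convention, the linear column is z_{j-1} x (p_l - p_{j-1}) and the
    # angular column is z_{j-1}.
    z0 = sympy.Matrix([0, 0, 1])   # joint-1 axis expressed in the base frame
    p_l = sympy.Matrix([1, 0, 0])  # origin of link frame l
    jp_col = z0.cross(p_l - sympy.zeros(3, 1)).reshape(3, 1)  # linear part
    jo_col = z0                                               # angular part
    print(jp_col.T, jo_col.T)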
|
[
"sympy.zeros",
"sympy.Matrix"
] |
[((871, 939), 'sympy.Matrix', 'sympy.Matrix', (['[[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]'], {}), '([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n', (883, 939), False, 'import sympy\n'), ((1400, 1431), 'sympy.zeros', 'sympy.zeros', (['(3)', 'self.rbtdef.dof'], {}), '(3, self.rbtdef.dof)\n', (1411, 1431), False, 'import sympy\n'), ((1881, 1912), 'sympy.zeros', 'sympy.zeros', (['(3)', 'self.rbtdef.dof'], {}), '(3, self.rbtdef.dof)\n', (1892, 1912), False, 'import sympy\n'), ((1200, 1223), 'sympy.Matrix', 'sympy.Matrix', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1212, 1223), False, 'import sympy\n'), ((1255, 1272), 'sympy.zeros', 'sympy.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (1266, 1272), False, 'import sympy\n'), ((2340, 2371), 'sympy.zeros', 'sympy.zeros', (['(3)', 'self.rbtdef.dof'], {}), '(3, self.rbtdef.dof)\n', (2351, 2371), False, 'import sympy\n'), ((2813, 2844), 'sympy.zeros', 'sympy.zeros', (['(3)', 'self.rbtdef.dof'], {}), '(3, self.rbtdef.dof)\n', (2824, 2844), False, 'import sympy\n'), ((2049, 2066), 'sympy.zeros', 'sympy.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (2060, 2066), False, 'import sympy\n'), ((2981, 2998), 'sympy.zeros', 'sympy.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (2992, 2998), False, 'import sympy\n'), ((3431, 3461), 'sympy.Matrix', 'sympy.Matrix', (['self.rbtdef.l[l]'], {}), '(self.rbtdef.l[l])\n', (3443, 3461), False, 'import sympy\n')]
|
# Copyright 2020 Astronomer Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from setuptools import find_namespace_packages, setup
def fpath(*parts):
return os.path.join(os.path.dirname(__file__), *parts)
def read(*parts):
return open(fpath(*parts)).read()
def desc():
return read('README.rst')
# https://packaging.python.org/guides/single-sourcing-package-version/
def find_version(*paths):
version_file = read(*paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def or_fallback(fn, *args, fallback, **kwargs):
try:
return fn(*args, **kwargs)
except Exception:
return fallback
VERSION = or_fallback(find_version, 'encryptedsecrets', '__init__.py', fallback='0.0.0-dev1')
setup(
name='encrypted-secrets',
version=VERSION,
url='https://github.com/astronomer/encrypted-python-secrets',
license='Apache2',
author='astronomerio',
author_email='<EMAIL>',
description='Store secrets in an encrypted YAML file, inspired by hiera-eyaml',
long_description=or_fallback(desc, fallback=''),
    long_description_content_type="text/x-rst",
packages=find_namespace_packages(include=('encryptedsecrets', 'encryptedsecrets.*')),
package_data={
'': ['LICENSE'],
},
include_package_data=True,
zip_safe=True,
platforms='any',
entry_points={
'console_scripts': ['encrypted-secrets = encryptedsecrets.__main__:cli']
},
install_requires=[
'encrypteddict',
'pyyaml',
'click'
],
setup_requires=[
'pytest-runner~=4.0',
],
tests_require=[
'encrypted-secrets[test]',
],
extras_require={
'test': [
'pytest',
'pytest-mock',
'pytest-flake8',
],
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
],
python_requires='>=3.6',
)
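# Example of what find_version() extracts (illustrative): given an
# encryptedsecrets/__init__.py containing the line
#   __version__ = "1.2.3"
# find_version('encryptedsecrets', '__init__.py') returns "1.2.3", and
# or_fallback() keeps metadata loading from failing when that file is absent.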
|
[
"setuptools.find_namespace_packages",
"os.path.dirname",
"re.search"
] |
[((986, 1059), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 'version_file', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', version_file, re.M)\n', (995, 1059), False, 'import re\n'), ((698, 723), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (713, 723), False, 'import os\n'), ((1807, 1882), 'setuptools.find_namespace_packages', 'find_namespace_packages', ([], {'include': "('encryptedsecrets', 'encryptedsecrets.*')"}), "(include=('encryptedsecrets', 'encryptedsecrets.*'))\n", (1830, 1882), False, 'from setuptools import find_namespace_packages, setup\n')]
|
from .errors import TorrentHashNotFound, TorrentNotValid, HttpException
import aiohttp
import asyncio
import json
class AConnector:
def __init__(self, *, base, session = None, loop = None):
self.base = base
self.loop = loop or asyncio.get_event_loop()
self.session = session
async def request(self, method, path : str, *, payload = None):
url = self.base + path
retries = 5
while retries:
async with self.session.request(method, url, data=payload) as r:
data = await r.text(encoding='utf-8')
if r.headers.get('Content-Type', None) == 'application/json':
data = json.loads(data)
if r.status == 200:
"""Everything is fine?"""
return data
elif r.status == 403:
"""Login has probably been invalidated. retry."""
await self.login(self.credentials['username'], self.credentials['password'])
elif r.status == 400:
retries -= 1
print(f'Bad Http request, retrying {retries}')
await asyncio.sleep(1)
elif r.status == 404:
raise TorrentHashNotFound(r, data)
elif r.status == 415:
raise TorrentNotValid(r, data)
else:
return r.status, data
raise HttpException(r, data)
async def login(self, username : str, password : str):
"""
Attempt to log into the web api using a username and password.
Parameters
----------
username: str
The username to log into the web api
password: str
The password to log into the web api
"""
if not self.session:
self.session = aiohttp.ClientSession(cookie_jar=aiohttp.CookieJar(unsafe=True))
payload = {
'username' : username,
'password' : password
}
self.credentials = payload
return await self.request('POST', '/auth/login', payload=self.credentials)
async def logout(self):
"""Attempt to log out of the webapi"""
return await self.request('POST', '/auth/logout')
await self.session.close()
import requests
class RConnector:
def __init__(self, *, base, session = None):
self.base = base
self.session = session or requests.Session()
def request(self, method, path : str, *, payload = None):
url = self.base + path
retries = 5
        while retries:
            r = self.session.request(method, url, data=payload)
            if r.status_code == 200:
                data = r.text
                if r.headers.get('Content-Type', None) == 'application/json':
                    return json.loads(data)
                return data
            retries -= 1  # bound the retries instead of looping forever on non-200
        raise HttpException(r, r.text)
def login(self, username : str, password : str):
if not self.session:
self.session = requests.Session()
payload = {
'username' : username,
'password' : password
}
self.credentials = payload
return self.request('POST', '/auth/login', payload=self.credentials)
def logout(self):
self.session.close()
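# Minimal usage sketch for the synchronous connector (hypothetical base URL,
# credentials, and endpoint path):
#   conn = RConnector(base="http://localhost:8080/api")
#   conn.login("admin", "password")
#   version = conn.request("GET", "/app/version")
#   conn.logout()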
|
[
"aiohttp.CookieJar",
"asyncio.get_event_loop",
"json.loads",
"asyncio.sleep",
"requests.Session"
] |
[((249, 273), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (271, 273), False, 'import asyncio\n'), ((2481, 2499), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2497, 2499), False, 'import requests\n'), ((3016, 3034), 'requests.Session', 'requests.Session', ([], {}), '()\n', (3032, 3034), False, 'import requests\n'), ((2866, 2882), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2876, 2882), False, 'import json\n'), ((685, 701), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (695, 701), False, 'import json\n'), ((1911, 1941), 'aiohttp.CookieJar', 'aiohttp.CookieJar', ([], {'unsafe': '(True)'}), '(unsafe=True)\n', (1928, 1941), False, 'import aiohttp\n'), ((1185, 1201), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1198, 1201), False, 'import asyncio\n')]
|
import os
import pytest
from httpx import AsyncClient
from odm2_postgres_api.aquamonitor.aquamonitor_client import (
get_method_by_id,
get_project_stations,
get_taxonomy,
get_taxonomy_domain_id,
get_taxonomy_codes,
)
from odm2_postgres_api.aquamonitor.aquamonitor_mapping import METHODS_NIVABASE_MAP
"""
Tests that make real calls to the Aquamonitor API. Because this targets the
production API, only read operations are performed.
"""
@pytest.fixture(scope="function")
async def aquamonitor_client():
username = os.environ["AQUAMONITOR_USER"]
password = os.environ["<PASSWORD>"]
url = "https://test-aquamonitor.niva.no/AquaServices/api"
async with AsyncClient(base_url=url, auth=(username, password)) as client:
yield client
@pytest.mark.asyncio
@pytest.mark.aquamonitor_api_test
async def test_aquamonitor_client_get_method(aquamonitor_client):
method_name = "Kiselalger Relative abundance"
method_id = METHODS_NIVABASE_MAP[method_name]
method = await get_method_by_id(aquamonitor_client, method_id=method_id)
assert method
assert method.Id == method_id
assert method.Laboratory == "NIVA"
assert method.Matrix is None
assert method.MethodRef is None
assert method.Name == method_name
assert method.Unit is None
@pytest.mark.asyncio
@pytest.mark.aquamonitor_api_test
async def test_aquamonitor_client_get_stations(aquamonitor_client):
station_code = "HEDEGL06"
station = await get_project_stations(
aquamonitor_client,
project_name="Overvåkning av Glomma, Vorma og Øyeren",
station_code=station_code,
)
assert station.Name == "<NAME>"
assert station.Id == 57692
assert station.Code == station_code
assert station.Type["Text"] == "Elv"
@pytest.mark.asyncio
@pytest.mark.aquamonitor_api_test
async def test_aquamonitor_get_taxonomy(aquamonitor_client):
taxon_domain = await get_taxonomy_domain_id(aquamonitor_client, "Begroingsalger")
taxa = await get_taxonomy_codes(aquamonitor_client, domain_id=taxon_domain)
bambusina_spp = [t for t in taxa if t.Code == "BAMBUSIZ"][0]
assert bambusina_spp.Id
taxon = await get_taxonomy(aquamonitor_client, domain_name="Begroingsalger", code=bambusina_spp.Code)
# comparing taxon fetched from get_taxonomy and get_taxonomy_codes. Should be equal
assert taxon.Id == bambusina_spp.Id
assert taxon.Code == bambusina_spp.Code
assert taxon.Name == bambusina_spp.Name
# TODO: would like to assert that the two objects are equal, but they do indeed differ on domain. This fails:
# assert taxon.Domain == bambusina_spp.Domain
# assert taxon.Taxonomy == bambusina_spp.Taxonomy
|
[
"odm2_postgres_api.aquamonitor.aquamonitor_client.get_project_stations",
"pytest.fixture",
"httpx.AsyncClient",
"odm2_postgres_api.aquamonitor.aquamonitor_client.get_taxonomy_codes",
"odm2_postgres_api.aquamonitor.aquamonitor_client.get_method_by_id",
"odm2_postgres_api.aquamonitor.aquamonitor_client.get_taxonomy_domain_id",
"odm2_postgres_api.aquamonitor.aquamonitor_client.get_taxonomy"
] |
[((459, 491), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (473, 491), False, 'import pytest\n'), ((687, 739), 'httpx.AsyncClient', 'AsyncClient', ([], {'base_url': 'url', 'auth': '(username, password)'}), '(base_url=url, auth=(username, password))\n', (698, 739), False, 'from httpx import AsyncClient\n'), ((1014, 1071), 'odm2_postgres_api.aquamonitor.aquamonitor_client.get_method_by_id', 'get_method_by_id', (['aquamonitor_client'], {'method_id': 'method_id'}), '(aquamonitor_client, method_id=method_id)\n', (1030, 1071), False, 'from odm2_postgres_api.aquamonitor.aquamonitor_client import get_method_by_id, get_project_stations, get_taxonomy, get_taxonomy_domain_id, get_taxonomy_codes\n'), ((1476, 1603), 'odm2_postgres_api.aquamonitor.aquamonitor_client.get_project_stations', 'get_project_stations', (['aquamonitor_client'], {'project_name': '"""Overvåkning av Glomma, Vorma og Øyeren"""', 'station_code': 'station_code'}), "(aquamonitor_client, project_name=\n 'Overvåkning av Glomma, Vorma og Øyeren', station_code=station_code)\n", (1496, 1603), False, 'from odm2_postgres_api.aquamonitor.aquamonitor_client import get_method_by_id, get_project_stations, get_taxonomy, get_taxonomy_domain_id, get_taxonomy_codes\n'), ((1922, 1982), 'odm2_postgres_api.aquamonitor.aquamonitor_client.get_taxonomy_domain_id', 'get_taxonomy_domain_id', (['aquamonitor_client', '"""Begroingsalger"""'], {}), "(aquamonitor_client, 'Begroingsalger')\n", (1944, 1982), False, 'from odm2_postgres_api.aquamonitor.aquamonitor_client import get_method_by_id, get_project_stations, get_taxonomy, get_taxonomy_domain_id, get_taxonomy_codes\n'), ((2000, 2062), 'odm2_postgres_api.aquamonitor.aquamonitor_client.get_taxonomy_codes', 'get_taxonomy_codes', (['aquamonitor_client'], {'domain_id': 'taxon_domain'}), '(aquamonitor_client, domain_id=taxon_domain)\n', (2018, 2062), False, 'from odm2_postgres_api.aquamonitor.aquamonitor_client import get_method_by_id, get_project_stations, get_taxonomy, get_taxonomy_domain_id, get_taxonomy_codes\n'), ((2175, 2267), 'odm2_postgres_api.aquamonitor.aquamonitor_client.get_taxonomy', 'get_taxonomy', (['aquamonitor_client'], {'domain_name': '"""Begroingsalger"""', 'code': 'bambusina_spp.Code'}), "(aquamonitor_client, domain_name='Begroingsalger', code=\n bambusina_spp.Code)\n", (2187, 2267), False, 'from odm2_postgres_api.aquamonitor.aquamonitor_client import get_method_by_id, get_project_stations, get_taxonomy, get_taxonomy_domain_id, get_taxonomy_codes\n')]
|
from django.templatetags.static import static
from django.utils.functional import lazy
static_lazy = lazy(static, str)
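# Usage sketch: static_lazy defers URL resolution until the value is coerced to
# str, which matters for values built at import time (e.g. asset paths declared
# on a form's Media class before staticfiles is fully configured):
#   logo_url = static_lazy("img/logo.png")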
|
[
"django.utils.functional.lazy"
] |
[((102, 119), 'django.utils.functional.lazy', 'lazy', (['static', 'str'], {}), '(static, str)\n', (106, 119), False, 'from django.utils.functional import lazy\n')]
|
import pennylane as qml
import numpy as np
if __name__ != '__main__':
from . encoder.encoding_circuits import EncodingCircuitsPennylane
from . pqc.parametric_circuits import ParametricCircuitsPennylane
from . measurement.measurement_circuits import MeasurementCircuitsPennylane
class PennylaneQNNCircuit:
def __init__(self, enc = 1, pqc = 1, meas = 1, layers = 1, qubit = 1):
'''
initialize variables
'''
self.enc = enc
self.pqc = pqc
self.meas = meas
self.qubit = qubit
self.layers = layers
self.pqc_builder = ParametricCircuitsPennylane(pqc = self.pqc, qubit = self.qubit, layers = self.layers)
self.enc_builder = EncodingCircuitsPennylane(enc = self.enc, qubit = self.qubit)
self.meas_builder = MeasurementCircuitsPennylane(meas = self.meas, qubit = self.qubit)
def construct_qnn_circuit(self, inputs, weights0, weights1 = 0):
assert len(inputs) <= self.enc_builder.max_inputs_length()
pqc_weights_shape = self.pqc_builder.weigths_shape()
if isinstance(pqc_weights_shape[0], tuple):
assert weights0.shape == pqc_weights_shape[0]
assert weights1.shape == pqc_weights_shape[1]
else:
assert weights0.shape == pqc_weights_shape
self.enc_builder.get_encoder(inputs)
self.pqc_builder.get_pqc(weights0, weights1 = weights1)
return self.meas_builder.get_meas()
if __name__ == '__main__':
from encoder.encoding_circuits import EncodingCircuitsPennylane
from pqc.parametric_circuits import ParametricCircuitsPennylane
from measurement.measurement_circuits import MeasurementCircuitsPennylane
qnn = PennylaneQNNCircuit(enc = 5, qubit = 5, layers = 2, pqc = 19, meas = 3)
input_length = qnn.enc_builder.max_inputs_length()
weight_shape = qnn.pqc_builder.weigths_shape()
inputs = np.random.random(input_length)
dev = qml.device("default.qubit", wires = 10) #target pennylane device
qnode = qml.QNode(qnn.construct_qnn_circuit, dev) #circuit
if isinstance(weight_shape[0], tuple):
weights0 = np.random.random(weight_shape[0])
weights1 = np.random.random(weight_shape[1])
qnode(inputs, weights0, weights1)
else:
weights = np.random.random(weight_shape)
qnode(inputs, weights)
print(qnode.draw())
|
[
"measurement.measurement_circuits.MeasurementCircuitsPennylane",
"pqc.parametric_circuits.ParametricCircuitsPennylane",
"pennylane.device",
"numpy.random.random",
"pennylane.QNode",
"encoder.encoding_circuits.EncodingCircuitsPennylane"
] |
[((1949, 1979), 'numpy.random.random', 'np.random.random', (['input_length'], {}), '(input_length)\n', (1965, 1979), True, 'import numpy as np\n'), ((1991, 2028), 'pennylane.device', 'qml.device', (['"""default.qubit"""'], {'wires': '(10)'}), "('default.qubit', wires=10)\n", (2001, 2028), True, 'import pennylane as qml\n'), ((2069, 2110), 'pennylane.QNode', 'qml.QNode', (['qnn.construct_qnn_circuit', 'dev'], {}), '(qnn.construct_qnn_circuit, dev)\n', (2078, 2110), True, 'import pennylane as qml\n'), ((619, 698), 'pqc.parametric_circuits.ParametricCircuitsPennylane', 'ParametricCircuitsPennylane', ([], {'pqc': 'self.pqc', 'qubit': 'self.qubit', 'layers': 'self.layers'}), '(pqc=self.pqc, qubit=self.qubit, layers=self.layers)\n', (646, 698), False, 'from pqc.parametric_circuits import ParametricCircuitsPennylane\n'), ((733, 790), 'encoder.encoding_circuits.EncodingCircuitsPennylane', 'EncodingCircuitsPennylane', ([], {'enc': 'self.enc', 'qubit': 'self.qubit'}), '(enc=self.enc, qubit=self.qubit)\n', (758, 790), False, 'from encoder.encoding_circuits import EncodingCircuitsPennylane\n'), ((824, 886), 'measurement.measurement_circuits.MeasurementCircuitsPennylane', 'MeasurementCircuitsPennylane', ([], {'meas': 'self.meas', 'qubit': 'self.qubit'}), '(meas=self.meas, qubit=self.qubit)\n', (852, 886), False, 'from measurement.measurement_circuits import MeasurementCircuitsPennylane\n'), ((2184, 2217), 'numpy.random.random', 'np.random.random', (['weight_shape[0]'], {}), '(weight_shape[0])\n', (2200, 2217), True, 'import numpy as np\n'), ((2238, 2271), 'numpy.random.random', 'np.random.random', (['weight_shape[1]'], {}), '(weight_shape[1])\n', (2254, 2271), True, 'import numpy as np\n'), ((2345, 2375), 'numpy.random.random', 'np.random.random', (['weight_shape'], {}), '(weight_shape)\n', (2361, 2375), True, 'import numpy as np\n')]
|
import asyncio
from typing import List
from . import utils
from .abc import IRaftServer
from .errors import *
from .rpc import protocol as prot
from .rpc import rpc
from .state_machine import RaftStateMachine, State, Command
ELECTION_TIMEOUT = 0.5
FLEXIBLE_PAXOS_QUORUM = 2 / 6
RPC_TIMEOUT = 1
class ClusterMember:
def __init__(self, ip: str, port: int):
self.ip = ip
self.port = port
self.id = utils.get_id(self, ip, port)
class Server(ClusterMember):
def __init__(self, ip: str, port: int, cluster):
super().__init__(ip, port)
self._cluster: List[rpc.RemoteRaftServer] = cluster or []
self._leader = None
self._leader_hbeat = asyncio.Event()
self._leader_volatile_state_data = None
self._listener_task = None
async def _start_listening(self):
self._listener_task = await asyncio.start_server(
self._handle_request, self.ip, self.port
)
async def _handle_request(self, reader, writer):
message = await prot.read_decode_msg(reader)
if isinstance(self, RaftServer):
await rpc.handle_request(self, (reader, writer), message)
else:
raise TypeError("Invalid Server instance")
writer.close()
class RaftServer(IRaftServer, Server, RaftStateMachine):
def __init__(
self, ip: str, port: int, cluster, state: State = State.FOLLOWER, log=None
):
Server.__init__(self, ip, port, cluster)
RaftStateMachine.__init__(self, state, log)
self._voted_for = None # candidateId that received vote in currentterm
self._next_indexes = (
{}
) # for each server, index of the next log entryto send to that server
self._match_indexes = (
{}
) # for each server, index of highest log entryknown to be replicated on server
self._commands_queue = (
asyncio.Queue()
) # Queue where commands waiting for commit process to start are stored
self._cluster_locks = (
{}
) # Lock for mantaining order when several AppendEntries RPC calls are sent for same server
self._election_task = None
self._timeout_task = None
self._entries_task = None
self._hbeat_task = None
self._append_tasks = (
{}
) # TODO: every cluster member has a list of tasks running
# TODO: init tasks
async def update_state(self, key, value):
command = Command(key, value)
if self._leader is self:
await self._queue_command(command)
else:
while True:
try:
await self._leader.command_request(command)
break
except TermConsistencyError as error:
self._current_term = error.term
pass # TODO: do something else?
except LeaderConsistencyError as error:
self.leader = list(
filter(lambda s: s.id == error.leader_id, self.cluster)
)[0]
async def join_cluster(self, random_server: ClusterMember):
if random_server:
remote_server = rpc.RemoteRaftServer(random_server.ip, random_server.port)
self.cluster, leader_id = await remote_server.get_cluster_configuration()
self.leader = list(filter(lambda s: s.id == leader_id, self.cluster))[0]
if self not in self.cluster:
self.cluster.append(self)
# TODO: init configuration change
else:
pass # TODO: means it already was in the cluster, but it had crushed
else:
pass # TODO: first cluster member
async def leave_cluster(self):
self.cluster.remove(self)
# TODO: init configuration change
async def remove_cluster_member(self, id):
self.cluster = list(filter(lambda s: s.id != id, self.cluster))
# TODO: init configuration change
async def _run_timeout_task(self):
while True:
try:
await asyncio.wait_for(
self._leader_hbeat.wait(), timeout=ELECTION_TIMEOUT
) # TODO: random timeout
except asyncio.TimeoutError:
                self._change_state(State.CANDIDATE)  # _change_state is synchronous
finally:
self._leader_hbeat.clear()
    async def _run_entries_task_(self):
        while True:  # keep consuming queued commands for as long as this node leads
            command = await self._commands_queue.get()
            self._append_command(command)
            rpc_calls = []
            for server in filter(lambda s: s is not self, self.cluster):
                append_task = asyncio.create_task(
                    self._append_entry_task(server, len(self._log) - 1)
                )
                rpc_calls.append(append_task)
                self._append_tasks[server.id].append(append_task)
            committed_amount = 1  # Starts on '1' because of itself
            for rpc_call in asyncio.as_completed(rpc_calls):
                await rpc_call  # as_completed yields awaitables; await them directly
                committed_amount += 1
                if committed_amount >= int(len(self._cluster) * FLEXIBLE_PAXOS_QUORUM):
                    self._commit_command(command)
                    break
async def _run_hbeat_task(self):
while True:
await self._send_hbeat()
await asyncio.sleep(
ELECTION_TIMEOUT * 0.9
) # Just in case there is high latency
async def _run_election_task(self):
self._current_term += 1
self._leader_hbeat.set()
last_log_index = self._last_applied
last_log_term = (
0 if not len(self._log) > last_log_index else self._log[last_log_index].term
)
voting_rpcs = list(
map(
lambda s: asyncio.create_task(
s.request_vote(
self._current_term, self.id, last_log_index, last_log_term
)
),
filter(lambda s: s is not self, self._cluster),
)
)
granted_votes = 1 # 1 -> its own vote
votes = 1
election_win = False
for next_vote in asyncio.as_completed(voting_rpcs, timeout=RPC_TIMEOUT):
try:
                vote = await next_vote  # awaiting yields the RPC result directly
granted_votes += int(vote)
except asyncio.TimeoutError:
pass
votes += 1
if granted_votes >= int(
len(self._cluster) * (1 - FLEXIBLE_PAXOS_QUORUM) + 1
): # Equal because itself is not considered
election_win = True
if election_win:
self._change_state(State.LEADER)
async def _queue_command(self, command: Command):
await self._commands_queue.put(command)
async def _send_hbeat(self):
for server in filter(lambda s: s != self, self.cluster):
task = asyncio.create_task(
server.append_entries(
self._current_term,
self.id,
self._last_applied,
self._log[self._last_applied].term,
None,
self._commit_index,
)
)
self._append_tasks[server.id].append(task)
def _change_state(self, new_state: State):
if new_state is State.FOLLOWER:
self._cancel_leader_tasks()
self._cancel_candidate_tasks()
self._timeout_task = asyncio.create_task(self._run_timeout_task())
self._state = State.FOLLOWER
elif new_state is State.LEADER:
if self._timeout_task and not self._timeout_task.cancelled():
self._timeout_task.cancel()
self._hbeat_task = asyncio.create_task(self._run_hbeat_task())
self._entries_task = asyncio.create_task(self._run_entries_task_())
self._state = State.LEADER
elif new_state is State.CANDIDATE:
self._cancel_leader_tasks()
self._cancel_candidate_tasks()
self._election_task = asyncio.create_task(self._run_election_task())
self._state = State.CANDIDATE
def _cancel_leader_tasks(self):
if self._hbeat_task and not self._hbeat_task.cancelled():
self._hbeat_task.cancel()
if self._entries_task and not self._entries_task.cancelled():
self._entries_task.cancel()
self._cancel_append_tasks()
self._cluster_locks.clear()
def _cancel_append_tasks(self):
for server_tasks in self._append_tasks.values():
for task in server_tasks:
if not task.cancelled():
task.cancel()
server_tasks.clear()
def _cancel_candidate_tasks(self):
if self._election_task and not self._election_task.cancelled():
self._election_task.cancel()
def _im_leader(self):
return self._state is State.LEADER
async def _append_entry_task(
self, server: rpc.RemoteRaftServer, entries_index: int
):
async with self._cluster_locks[server.id]:
while True:
try:
await server.append_entries(
self._current_term,
self.id,
max(entries_index - 1, 0),
self._log[max(entries_index - 1, 0)].term,
self._log[entries_index:],
self._commit_index,
)
break
except TermConsistencyError as error:
self._current_term = error.term
self._change_state(State.FOLLOWER)
break
except EntriesConsistencyError:
entries_index = max(entries_index - 1, 0)
except:
pass # Network error, so retry until it answers
|
[
"asyncio.start_server",
"asyncio.sleep",
"asyncio.Event",
"asyncio.as_completed",
"asyncio.Queue"
] |
[((698, 713), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (711, 713), False, 'import asyncio\n'), ((1923, 1938), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (1936, 1938), False, 'import asyncio\n'), ((4978, 5009), 'asyncio.as_completed', 'asyncio.as_completed', (['rpc_calls'], {}), '(rpc_calls)\n', (4998, 5009), False, 'import asyncio\n'), ((6186, 6240), 'asyncio.as_completed', 'asyncio.as_completed', (['voting_rpcs'], {'timeout': 'RPC_TIMEOUT'}), '(voting_rpcs, timeout=RPC_TIMEOUT)\n', (6206, 6240), False, 'import asyncio\n'), ((872, 934), 'asyncio.start_server', 'asyncio.start_server', (['self._handle_request', 'self.ip', 'self.port'], {}), '(self._handle_request, self.ip, self.port)\n', (892, 934), False, 'import asyncio\n'), ((5346, 5383), 'asyncio.sleep', 'asyncio.sleep', (['(ELECTION_TIMEOUT * 0.9)'], {}), '(ELECTION_TIMEOUT * 0.9)\n', (5359, 5383), False, 'import asyncio\n')]
|
"""
Train a speaker model on R2R
"""
import logging
from typing import List, Tuple, Dict
import copy
import os
import random
import shutil
import sys
from datetime import datetime
from tqdm import tqdm
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Subset, Dataset
from torch.optim.lr_scheduler import MultiplicativeLR
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from apex.parallel import DistributedDataParallel as DDP
from transformers import AutoTokenizer, BertTokenizer
from vilbert.optimization import AdamW, WarmupLinearSchedule
from vilbert.vilbert import BertConfig
from airbert import Airbert
from utils.cli import get_parser
from utils.dataset import PanoFeaturesReader
from utils.dataset.speak_dataset import SpeakDataset
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
Batch = Dict[str, torch.Tensor]
def main():
# ----- #
# setup #
# ----- #
# command line parsing
parser = get_parser(training=True, speaker=True)
args = parser.parse_args()
# FIXME how to do it properly in bash?
args.perturbations = [p for pert in args.perturbations for p in pert.split(" ")]
# validate command line arguments
if not (args.masked_vision or args.masked_language) and args.no_ranking:
parser.error(
"No training objective selected, add --masked_vision, "
"--masked_language, or remove --no_ranking"
)
# set seed
if args.seed:
seed = args.seed
if args.local_rank != -1:
seed += args.local_rank
torch.manual_seed(seed)
np.random.seed(seed) # type: ignore
random.seed(seed)
# get device settings
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Initializes the distributed backend which will take care of synchronizing
# nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
dist.init_process_group(backend="nccl")
n_gpu = 1
# check if this is the default gpu
default_gpu = True
if args.local_rank != -1 and dist.get_rank() != 0:
default_gpu = False
if default_gpu:
logger.info(f"Playing with {n_gpu} GPUs")
# create output directory
save_folder = os.path.join(args.output_dir, f"run-{args.save_name}")
if default_gpu and not os.path.exists(save_folder):
os.makedirs(save_folder)
# ------------ #
# data loaders #
# ------------ #
tokenizer = AutoTokenizer.from_pretrained(args.bert_tokenizer)
if not isinstance(tokenizer, BertTokenizer):
raise ValueError("fix mypy")
features_reader = PanoFeaturesReader(args.img_feature)
vln_path = f"data/task/{args.prefix}R2R_train.json"
if default_gpu:
logger.info("using provided training trajectories")
logger.info(f"VLN path: {vln_path}")
if default_gpu:
logger.info("Loading train dataset")
train_dataset: Dataset = SpeakDataset(
vln_path=vln_path,
skeleton_path="np_train.json" if args.np else "",
tokenizer=tokenizer,
features_reader=features_reader,
max_instruction_length=args.max_instruction_length,
max_path_length=args.max_path_length,
max_num_boxes=args.max_num_boxes,
default_gpu=default_gpu,
)
if default_gpu:
logger.info("Loading val datasets")
val_seen_dataset = SpeakDataset(
vln_path=f"data/task/{args.prefix}R2R_val_seen.json",
skeleton_path="np_val_seen.json" if args.np else "",
tokenizer=tokenizer,
features_reader=features_reader,
max_instruction_length=args.max_instruction_length,
max_path_length=args.max_path_length,
max_num_boxes=args.max_num_boxes,
default_gpu=default_gpu,
)
val_unseen_dataset = SpeakDataset(
vln_path=f"data/task/{args.prefix}R2R_val_unseen.json",
skeleton_path="np_val_unseen.json" if args.np else "",
tokenizer=tokenizer,
features_reader=features_reader,
max_instruction_length=args.max_instruction_length,
max_path_length=args.max_path_length,
max_num_boxes=args.max_num_boxes,
default_gpu=default_gpu,
)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
val_seen_sampler = SequentialSampler(val_seen_dataset)
val_unseen_sampler = SequentialSampler(val_unseen_dataset)
else:
train_sampler = DistributedSampler(train_dataset)
val_seen_sampler = DistributedSampler(val_seen_dataset)
val_unseen_sampler = DistributedSampler(val_unseen_dataset)
# adjust the batch size for distributed training
batch_size = args.batch_size // args.gradient_accumulation_steps
if args.local_rank != -1:
batch_size = batch_size // dist.get_world_size()
if default_gpu:
logger.info(f"batch_size: {batch_size}")
if default_gpu:
logger.info(f"Creating dataloader")
# create data loaders
train_data_loader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=batch_size,
num_workers=args.num_workers,
pin_memory=True,
)
val_seen_data_loader = DataLoader(
val_seen_dataset,
sampler=val_seen_sampler,
shuffle=False,
batch_size=batch_size,
num_workers=args.num_workers,
pin_memory=True,
)
val_unseen_data_loader = DataLoader(
val_unseen_dataset,
sampler=val_unseen_sampler,
shuffle=False,
batch_size=batch_size,
num_workers=args.num_workers,
pin_memory=True,
)
# ----- #
# model #
# ----- #
if default_gpu:
logger.info(f"Loading model")
config = BertConfig.from_json_file(args.config_file)
config.cat_highlight = args.cat_highlight # type: ignore
config.convert_mask = True # type: ignore
if len(args.from_pretrained) == 0: # hack for catching --from_pretrained ""
model = Airbert(config)
else:
model = Airbert.from_pretrained(
args.from_pretrained, config, default_gpu=default_gpu
)
if default_gpu:
logger.info(
f"number of parameters: {sum(p.numel() for p in model.parameters())}"
)
# move/distribute model to device
model.to(device)
if args.local_rank != -1:
model = DDP(model, delay_allreduce=True)
if default_gpu:
logger.info("using distributed data parallel")
# elif n_gpu > 1:
# model = torch.nn.DataParallel(model) # type: ignore
# if default_gpu:
# logger.info("using data parallel")
# ------------ #
# optimization #
# ------------ #
# set parameter specific weight decay
no_decay = ["bias", "LayerNorm.weight", "LayerNorm.bias"]
optimizer_grouped_parameters = [
{"params": [], "weight_decay": 0.0},
{"params": [], "weight_decay": args.weight_decay},
]
for name, param in model.named_parameters():
if any(nd in name for nd in no_decay):
optimizer_grouped_parameters[0]["params"].append(param)
else:
optimizer_grouped_parameters[1]["params"].append(param)
# optimizer
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# calculate learning rate schedule
t_total = (
len(train_data_loader) // args.gradient_accumulation_steps
) * args.num_epochs
warmup_steps = args.warmup_proportion * t_total
adjusted_t_total = warmup_steps + args.cooldown_factor * (t_total - warmup_steps)
scheduler = (
WarmupLinearSchedule(
optimizer,
warmup_steps=warmup_steps,
t_total=adjusted_t_total,
last_epoch=-1,
)
if not args.no_scheduler
else MultiplicativeLR(optimizer, lr_lambda=lambda epoch: 1.0) # type: ignore
)
# --------------- #
# before training #
# --------------- #
# save the parameters
if default_gpu:
with open(os.path.join(save_folder, "config.txt"), "w") as fid:
print(f"{datetime.now()}", file=fid)
print("\n", file=fid)
print(vars(args), file=fid)
print("\n", file=fid)
print(config, file=fid)
# loggers
if default_gpu:
writer = SummaryWriter(
log_dir=os.path.join(save_folder, "logging"), flush_secs=30
)
else:
writer = None
# -------- #
# training #
# -------- #
# run training
if default_gpu:
logger.info("starting training...")
best_seen_success_rate, best_unseen_success_rate = 0, 0
for epoch in range(args.num_epochs):
if default_gpu and args.debug:
logger.info(f"epoch {epoch}")
if args.local_rank > -1:
train_data_loader.sampler.set_epoch(epoch) # type: ignore
# train for one epoch
train_epoch(
epoch,
model,
optimizer,
scheduler,
train_data_loader,
writer,
default_gpu,
args,
)
if default_gpu and args.debug:
logger.info(f"saving the model")
# save the model every epoch
model_path = os.path.join(save_folder, f"pytorch_model_{epoch + 1}.bin")
if default_gpu:
model_state = (
model.module.state_dict() # type: ignore
if hasattr(model, "module")
else model.state_dict()
)
torch.save(
{
"model_state_dict": model_state,
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"epoch": epoch,
},
model_path
)
if default_gpu and args.debug:
logger.info(f"running validation")
# run validation
global_step = (epoch + 1) * len(train_data_loader)
# run validation on the "val seen" split
with torch.no_grad():
seen_success_rate = val_epoch(
epoch,
model,
"val_seen",
val_seen_data_loader,
writer,
default_gpu,
args,
global_step,
)
if default_gpu:
logger.info(
f"[val_seen] epoch: {epoch + 1} success_rate: {seen_success_rate.item():.3f}"
)
# save the model that performs the best on val seen
if seen_success_rate > best_seen_success_rate:
best_seen_success_rate = seen_success_rate
if default_gpu:
best_seen_path = os.path.join(
save_folder, "pytorch_model_best_seen.bin"
)
shutil.copyfile(model_path, best_seen_path) # type: ignore
# run validation on the "val unseen" split
with torch.no_grad():
unseen_success_rate = val_epoch(
epoch,
model,
"val_unseen",
val_unseen_data_loader,
writer,
default_gpu,
args,
global_step,
)
if default_gpu:
logger.info(
f"[val_unseen] epoch: {epoch + 1} success_rate: {unseen_success_rate.item():.3f}"
)
# save the model that performs the best on val unseen
if unseen_success_rate > best_unseen_success_rate:
best_unseen_success_rate = unseen_success_rate
if default_gpu:
best_unseen_path = os.path.join(
save_folder, "pytorch_model_best_unseen.bin"
)
shutil.copyfile(model_path, best_unseen_path)
# -------------- #
# after training #
# -------------- #
if default_gpu:
writer.close()
def rollout(batch: Batch, model: nn.Module, window: int):
    """
    Split the batch over sequences of `window` tokens; this reduces
    the memory footprint of each forward/backward pass.
    """
# get the model input and output
instruction_length = batch["target_tokens"].shape[1]
batch_size = get_batch_size(batch)
device = get_device(batch)
inputs = get_model_input(batch)
# import ipdb
# ipdb.set_trace()
# B, N
target = get_target(batch) # inputs["instr_tokens"][:, 0]
# B, N, N
pred_mask = get_mask_predictions(batch)
# B, N
pad_or_sep = (batch["target_tokens"] == 102) | (batch["target_tokens"] == 0)
pad_or_sep = pad_or_sep.squeeze(1)
map_loss = torch.tensor(0.).to(device)
map_correct = torch.tensor(0.).to(device)
map_batch_size = torch.tensor(0.).to(device)
for start in range(0, instruction_length, window):
        small_inputs = {
            key: tensor[:, start : start + window].flatten(0, 1)
            for key, tensor in inputs.items()
        }
small_target = target[:, start+1:start+window+1].flatten()
output = model(**small_inputs)
# N * W * B
small_mask = pred_mask[:, start : start + window].flatten()
# N * W * B x V
predictions = output[2].view(-1, output[2].shape[-1])
# W * B x V
predictions = predictions[small_mask]
# W x B
instr = predictions.argmax(1).view(batch_size, -1)
# calculate the final loss on non-padding tokens
loss = F.cross_entropy(predictions, small_target, ignore_index=0)
# backward pass
if model.training:
loss.backward()
# calculate accuracy
# remove pad tokens and sep tokens
        small_pad = pad_or_sep[0, start + 1 : start + window + 1].flatten()
correct = torch.sum(instr.flatten()[small_pad] == small_target[small_pad]).detach().float()
# calculate accumulated stats
map_batch_size += batch_size
map_loss += loss.detach().float()
map_correct += correct.detach().float()
map_loss = torch.true_divide(map_loss.sum(), map_batch_size) # type: ignore
map_correct = torch.true_divide(map_correct.sum(), map_batch_size) # type: ignore
return map_batch_size.float(), map_loss.float(), map_correct.float()
def train_epoch(
epoch, model, optimizer, scheduler, data_loader, writer, default_gpu, args
) -> None:
device = next(model.parameters()).device
model.train()
batch: Batch
    for step, batch in enumerate(tqdm(data_loader, disable=False)):
# load batch on gpu
batch = {
k: t.cuda(device=device, non_blocking=True) if hasattr(t, "cuda") else t
for k, t in batch.items()
}
batch_size, loss, correct = rollout(batch, model, args.window)
if args.gradient_accumulation_steps > 1:
loss /= args.gradient_accumulation_steps
correct /= args.gradient_accumulation_steps
# write stats to tensorboard
if default_gpu:
global_step = step + epoch * len(data_loader)
writer.add_scalar("loss/train", loss.float(), global_step=global_step)
writer.add_scalar(
"accuracy/train",
correct.float(),
global_step=global_step,
)
writer.add_scalar(
"learning_rate/train", scheduler.get_lr()[0], global_step=global_step
)
if args.local_rank != -1:
world_size = float(dist.get_world_size())
loss /= world_size
dist.all_reduce(loss, op=dist.ReduceOp.SUM)
dist.all_reduce(correct, op=dist.ReduceOp.SUM)
dist.all_reduce(batch_size, op=dist.ReduceOp.SUM)
if default_gpu and args.debug:
logger.info(
f"[train] step: {step + 1} "
f"loss: {loss:0.2f} "
f"accuracy: {correct / batch_size:0.2f} "
f"lr: {scheduler.get_lr()[0]:0.1e}"
)
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
model.zero_grad()
def val_epoch(epoch: int, model, tag, data_loader, writer, default_gpu, args, global_step):
device = next(model.parameters()).device
# validation
model.eval()
stats = torch.zeros(3, device=device).float()
for step, batch in enumerate(data_loader):
# load batch on gpu
batch = {
k: t.cuda(device=device, non_blocking=True) if hasattr(t, "cuda") else t
for k, t in batch.items()
}
# get the model output
batch_size, loss, correct = rollout(batch, model, args.window)
# accumulate
stats[0] += loss
stats[1] += correct
stats[2] += batch_size
if default_gpu and args.debug:
logger.info(
f"[{tag}] step: {step + 1} "
f"running loss: {stats[0] / stats[2]:0.2f} "
f"running success rate: {stats[1] / stats[2]:0.2f}"
)
if args.local_rank != -1:
dist.all_reduce(stats, op=dist.ReduceOp.SUM)
# write stats to tensorboard
if default_gpu:
writer.add_scalar(
f"loss/vce_{tag}", stats[0] / stats[2], global_step=global_step
)
writer.add_scalar(
f"accuracy/sr_{tag}", stats[1] / stats[2], global_step=global_step
)
return stats[1] / stats[2]
# ------------- #
# batch parsing #
# ------------- #
# batch format:
# 1:image_features, 2:image_locations, 3:image_mask,
# 5:image_targets_mask, 6:instr_tokens, 7:instr_mask, 8:instr_targets, 9:instr_highlights, 10:segment_ids,
# 11:co_attention_mask, 12:item_id
def get_instr_length(batch: Batch):
return batch["instr_tokens"].shape[1]
def get_instr_mask(batch: Batch) -> torch.Tensor:
return batch["instr_mask"].squeeze(1)
def get_model_input(batch: Batch) -> Dict[str, torch.Tensor]:
batch_size = get_batch_size(batch)
num_tokens = get_instr_length(batch)
# duplicate for each word token
image_features = batch["image_features"].unsqueeze(1).repeat(1, num_tokens - 1, 1, 1)
image_locations = batch["image_boxes"].unsqueeze(1).repeat(1, num_tokens - 1, 1, 1)
image_mask = batch["image_masks"].unsqueeze(1).repeat(1, num_tokens - 1, 1)
instr_tokens = batch["instr_tokens"].unsqueeze(1).repeat(1, num_tokens - 1, 1)
segment_ids = batch["segment_ids"].unsqueeze(1).repeat(1, num_tokens - 1, 1)
instr_mask = batch["instr_mask"].unsqueeze(1).repeat(1, num_tokens - 1, 1)
# create triangular masks
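    # Lower-triangular (causal) mask: each step may attend to itself and to
    # earlier tokens only, replicated once per decoding position.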
    tri = (
        torch.ones((num_tokens - 1, num_tokens))
        .tril(0)
        .bool()
        .repeat(batch_size, 1, 1)
        .transpose(0, 1)
        .reshape(-1, num_tokens)
        .to(instr_mask.device)
    )
instr_mask = torch.logical_and(instr_mask, tri) # type: ignore
# transform batch shape
co_attention_mask = batch["co_attention_mask"].view(
-1, batch["co_attention_mask"].size(2), batch["co_attention_mask"].size(3)
)
return {
"instr_tokens": instr_tokens,
"image_features": image_features,
"image_locations": image_locations,
"token_type_ids": segment_ids,
"attention_mask": instr_mask,
"image_attention_mask": image_mask,
"co_attention_mask": co_attention_mask,
}
def get_batch_size(batch: Batch):
return batch["instr_tokens"].shape[0]
def get_target(batch: Batch) -> torch.Tensor:
return batch["target_tokens"]
def get_device(batch: Batch):
return batch["instr_tokens"].device
def get_mask_predictions(batch: Batch) -> torch.Tensor:
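    # Build a boolean mask that selects, for each decoding step, the output
    # position whose prediction corresponds to the next target token.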
target_length = batch["target_tokens"].shape[1]
instruction_length = get_instr_length(batch) - target_length
batch_size = get_batch_size(batch)
device = get_device(batch)
diag = torch.diag(torch.tensor([1] * instruction_length), diagonal=target_length).bool().to(device)
diag = diag[:-target_length]
diag[-1] = 0
diag = diag.repeat(batch_size, 1, 1)
return diag
if __name__ == "__main__":
main()
|
[
"numpy.random.seed",
"torch.utils.data.RandomSampler",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.ones",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"os.path.exists",
"vilbert.optimization.WarmupLinearSchedule",
"torch.utils.data.distributed.DistributedSampler",
"random.seed",
"torch.utils.data.SequentialSampler",
"vilbert.vilbert.BertConfig.from_json_file",
"torch.cuda.set_device",
"torch.zeros",
"shutil.copyfile",
"utils.dataset.PanoFeaturesReader",
"datetime.datetime.now",
"airbert.Airbert.from_pretrained",
"tqdm.tqdm",
"vilbert.optimization.AdamW",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"transformers.AutoTokenizer.from_pretrained",
"torch.cuda.is_available",
"apex.parallel.DistributedDataParallel",
"airbert.Airbert",
"utils.cli.get_parser",
"torch.distributed.all_reduce",
"torch.distributed.init_process_group",
"utils.dataset.speak_dataset.SpeakDataset",
"logging.basicConfig",
"os.makedirs",
"torch.tensor",
"logging.getLogger",
"torch.logical_and"
] |
[((892, 1054), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO, stream=sys.stdout)\n", (911, 1054), False, 'import logging\n'), ((1073, 1100), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1090, 1100), False, 'import logging\n'), ((1230, 1269), 'utils.cli.get_parser', 'get_parser', ([], {'training': '(True)', 'speaker': '(True)'}), '(training=True, speaker=True)\n', (1240, 1269), False, 'from utils.cli import get_parser\n'), ((2657, 2711), 'os.path.join', 'os.path.join', (['args.output_dir', 'f"""run-{args.save_name}"""'], {}), "(args.output_dir, f'run-{args.save_name}')\n", (2669, 2711), False, 'import os\n'), ((2882, 2932), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.bert_tokenizer'], {}), '(args.bert_tokenizer)\n', (2911, 2932), False, 'from transformers import AutoTokenizer, BertTokenizer\n'), ((3041, 3077), 'utils.dataset.PanoFeaturesReader', 'PanoFeaturesReader', (['args.img_feature'], {}), '(args.img_feature)\n', (3059, 3077), False, 'from utils.dataset import PanoFeaturesReader\n'), ((3356, 3658), 'utils.dataset.speak_dataset.SpeakDataset', 'SpeakDataset', ([], {'vln_path': 'vln_path', 'skeleton_path': "('np_train.json' if args.np else '')", 'tokenizer': 'tokenizer', 'features_reader': 'features_reader', 'max_instruction_length': 'args.max_instruction_length', 'max_path_length': 'args.max_path_length', 'max_num_boxes': 'args.max_num_boxes', 'default_gpu': 'default_gpu'}), "(vln_path=vln_path, skeleton_path='np_train.json' if args.np else\n '', tokenizer=tokenizer, features_reader=features_reader,\n max_instruction_length=args.max_instruction_length, max_path_length=\n args.max_path_length, max_num_boxes=args.max_num_boxes, default_gpu=\n default_gpu)\n", (3368, 3658), False, 'from utils.dataset.speak_dataset import SpeakDataset\n'), ((3801, 4141), 'utils.dataset.speak_dataset.SpeakDataset', 'SpeakDataset', ([], {'vln_path': 'f"""data/task/{args.prefix}R2R_val_seen.json"""', 'skeleton_path': "('np_val_seen.json' if args.np else '')", 'tokenizer': 'tokenizer', 'features_reader': 'features_reader', 'max_instruction_length': 'args.max_instruction_length', 'max_path_length': 'args.max_path_length', 'max_num_boxes': 'args.max_num_boxes', 'default_gpu': 'default_gpu'}), "(vln_path=f'data/task/{args.prefix}R2R_val_seen.json',\n skeleton_path='np_val_seen.json' if args.np else '', tokenizer=\n tokenizer, features_reader=features_reader, max_instruction_length=args\n .max_instruction_length, max_path_length=args.max_path_length,\n max_num_boxes=args.max_num_boxes, default_gpu=default_gpu)\n", (3813, 4141), False, 'from utils.dataset.speak_dataset import SpeakDataset\n'), ((4221, 4565), 'utils.dataset.speak_dataset.SpeakDataset', 'SpeakDataset', ([], {'vln_path': 'f"""data/task/{args.prefix}R2R_val_unseen.json"""', 'skeleton_path': "('np_val_unseen.json' if args.np else '')", 'tokenizer': 'tokenizer', 'features_reader': 'features_reader', 'max_instruction_length': 'args.max_instruction_length', 'max_path_length': 'args.max_path_length', 'max_num_boxes': 'args.max_num_boxes', 'default_gpu': 'default_gpu'}), "(vln_path=f'data/task/{args.prefix}R2R_val_unseen.json',\n skeleton_path='np_val_unseen.json' if args.np else '', tokenizer=\n 
tokenizer, features_reader=features_reader, max_instruction_length=args\n .max_instruction_length, max_path_length=args.max_path_length,\n max_num_boxes=args.max_num_boxes, default_gpu=default_gpu)\n", (4233, 4565), False, 'from utils.dataset.speak_dataset import SpeakDataset\n'), ((5428, 5550), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'batch_size', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(train_dataset, sampler=train_sampler, batch_size=batch_size,\n num_workers=args.num_workers, pin_memory=True)\n', (5438, 5550), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Subset, Dataset\n'), ((5621, 5764), 'torch.utils.data.DataLoader', 'DataLoader', (['val_seen_dataset'], {'sampler': 'val_seen_sampler', 'shuffle': '(False)', 'batch_size': 'batch_size', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(val_seen_dataset, sampler=val_seen_sampler, shuffle=False,\n batch_size=batch_size, num_workers=args.num_workers, pin_memory=True)\n', (5631, 5764), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Subset, Dataset\n'), ((5845, 5992), 'torch.utils.data.DataLoader', 'DataLoader', (['val_unseen_dataset'], {'sampler': 'val_unseen_sampler', 'shuffle': '(False)', 'batch_size': 'batch_size', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(val_unseen_dataset, sampler=val_unseen_sampler, shuffle=False,\n batch_size=batch_size, num_workers=args.num_workers, pin_memory=True)\n', (5855, 5992), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Subset, Dataset\n'), ((6159, 6202), 'vilbert.vilbert.BertConfig.from_json_file', 'BertConfig.from_json_file', (['args.config_file'], {}), '(args.config_file)\n', (6184, 6202), False, 'from vilbert.vilbert import BertConfig\n'), ((7661, 7719), 'vilbert.optimization.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate'}), '(optimizer_grouped_parameters, lr=args.learning_rate)\n', (7666, 7719), False, 'from vilbert.optimization import AdamW, WarmupLinearSchedule\n'), ((19405, 19439), 'torch.logical_and', 'torch.logical_and', (['instr_mask', 'tri'], {}), '(instr_mask, tri)\n', (19422, 19439), False, 'import torch\n'), ((1838, 1861), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1855, 1861), False, 'import torch\n'), ((1870, 1890), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1884, 1890), True, 'import numpy as np\n'), ((1914, 1931), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1925, 1931), False, 'import random\n'), ((2083, 2108), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2106, 2108), False, 'import torch\n'), ((2232, 2270), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (2253, 2270), False, 'import torch\n'), ((2288, 2325), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (2300, 2325), False, 'import torch\n'), ((2334, 2373), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (2357, 2373), True, 'import torch.distributed as dist\n'), ((2776, 2800), 'os.makedirs', 'os.makedirs', (['save_folder'], {}), '(save_folder)\n', (2787, 2800), False, 'import os\n'), ((4674, 4702), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', 
(4687, 4702), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Subset, Dataset\n'), ((4730, 4765), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['val_seen_dataset'], {}), '(val_seen_dataset)\n', (4747, 4765), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Subset, Dataset\n'), ((4795, 4832), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['val_unseen_dataset'], {}), '(val_unseen_dataset)\n', (4812, 4832), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Subset, Dataset\n'), ((4867, 4900), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (4885, 4900), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((4928, 4964), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['val_seen_dataset'], {}), '(val_seen_dataset)\n', (4946, 4964), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((4994, 5032), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['val_unseen_dataset'], {}), '(val_unseen_dataset)\n', (5012, 5032), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((6408, 6423), 'airbert.Airbert', 'Airbert', (['config'], {}), '(config)\n', (6415, 6423), False, 'from airbert import Airbert\n'), ((6450, 6528), 'airbert.Airbert.from_pretrained', 'Airbert.from_pretrained', (['args.from_pretrained', 'config'], {'default_gpu': 'default_gpu'}), '(args.from_pretrained, config, default_gpu=default_gpu)\n', (6473, 6528), False, 'from airbert import Airbert\n'), ((6791, 6823), 'apex.parallel.DistributedDataParallel', 'DDP', (['model'], {'delay_allreduce': '(True)'}), '(model, delay_allreduce=True)\n', (6794, 6823), True, 'from apex.parallel import DistributedDataParallel as DDP\n'), ((8032, 8136), 'vilbert.optimization.WarmupLinearSchedule', 'WarmupLinearSchedule', (['optimizer'], {'warmup_steps': 'warmup_steps', 't_total': 'adjusted_t_total', 'last_epoch': '(-1)'}), '(optimizer, warmup_steps=warmup_steps, t_total=\n adjusted_t_total, last_epoch=-1)\n', (8052, 8136), False, 'from vilbert.optimization import AdamW, WarmupLinearSchedule\n'), ((9687, 9746), 'os.path.join', 'os.path.join', (['save_folder', 'f"""pytorch_model_{epoch + 1}.bin"""'], {}), "(save_folder, f'pytorch_model_{epoch + 1}.bin')\n", (9699, 9746), False, 'import os\n'), ((13960, 14018), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['predictions', 'small_target'], {'ignore_index': '(0)'}), '(predictions, small_target, ignore_index=0)\n', (13975, 14018), True, 'import torch.nn.functional as F\n'), ((14970, 15002), 'tqdm.tqdm', 'tqdm', (['data_loader'], {'disable': '(False)'}), '(data_loader, disable=False)\n', (14974, 15002), False, 'from tqdm import tqdm\n'), ((17648, 17692), 'torch.distributed.all_reduce', 'dist.all_reduce', (['stats'], {'op': 'dist.ReduceOp.SUM'}), '(stats, op=dist.ReduceOp.SUM)\n', (17663, 17692), True, 'import torch.distributed as dist\n'), ((2488, 2503), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2501, 2503), True, 'import torch.distributed as dist\n'), ((2739, 2766), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (2753, 2766), False, 'import os\n'), ((5221, 5242), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (5240, 5242), True, 'import torch.distributed as dist\n'), ((10520, 10535), 'torch.no_grad', 
'torch.no_grad', ([], {}), '()\n', (10533, 10535), False, 'import torch\n'), ((11450, 11465), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11463, 11465), False, 'import torch\n'), ((13147, 13164), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (13159, 13164), False, 'import torch\n'), ((13193, 13210), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (13205, 13210), False, 'import torch\n'), ((13243, 13260), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (13255, 13260), False, 'import torch\n'), ((16105, 16148), 'torch.distributed.all_reduce', 'dist.all_reduce', (['loss'], {'op': 'dist.ReduceOp.SUM'}), '(loss, op=dist.ReduceOp.SUM)\n', (16120, 16148), True, 'import torch.distributed as dist\n'), ((16161, 16207), 'torch.distributed.all_reduce', 'dist.all_reduce', (['correct'], {'op': 'dist.ReduceOp.SUM'}), '(correct, op=dist.ReduceOp.SUM)\n', (16176, 16207), True, 'import torch.distributed as dist\n'), ((16220, 16269), 'torch.distributed.all_reduce', 'dist.all_reduce', (['batch_size'], {'op': 'dist.ReduceOp.SUM'}), '(batch_size, op=dist.ReduceOp.SUM)\n', (16235, 16269), True, 'import torch.distributed as dist\n'), ((16879, 16908), 'torch.zeros', 'torch.zeros', (['(3)'], {'device': 'device'}), '(3, device=device)\n', (16890, 16908), False, 'import torch\n'), ((2029, 2054), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2052, 2054), False, 'import torch\n'), ((8453, 8492), 'os.path.join', 'os.path.join', (['save_folder', '"""config.txt"""'], {}), "(save_folder, 'config.txt')\n", (8465, 8492), False, 'import os\n'), ((8787, 8823), 'os.path.join', 'os.path.join', (['save_folder', '"""logging"""'], {}), "(save_folder, 'logging')\n", (8799, 8823), False, 'import os\n'), ((11215, 11271), 'os.path.join', 'os.path.join', (['save_folder', '"""pytorch_model_best_seen.bin"""'], {}), "(save_folder, 'pytorch_model_best_seen.bin')\n", (11227, 11271), False, 'import os\n'), ((11326, 11369), 'shutil.copyfile', 'shutil.copyfile', (['model_path', 'best_seen_path'], {}), '(model_path, best_seen_path)\n', (11341, 11369), False, 'import shutil\n'), ((12167, 12225), 'os.path.join', 'os.path.join', (['save_folder', '"""pytorch_model_best_unseen.bin"""'], {}), "(save_folder, 'pytorch_model_best_unseen.bin')\n", (12179, 12225), False, 'import os\n'), ((12280, 12325), 'shutil.copyfile', 'shutil.copyfile', (['model_path', 'best_unseen_path'], {}), '(model_path, best_unseen_path)\n', (12295, 12325), False, 'import shutil\n'), ((16039, 16060), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (16058, 16060), True, 'import torch.distributed as dist\n'), ((8528, 8542), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8540, 8542), False, 'from datetime import datetime\n'), ((20470, 20508), 'torch.tensor', 'torch.tensor', (['([1] * instruction_length)'], {}), '([1] * instruction_length)\n', (20482, 20508), False, 'import torch\n'), ((19183, 19223), 'torch.ones', 'torch.ones', (['(num_tokens - 1, num_tokens)'], {}), '((num_tokens - 1, num_tokens))\n', (19193, 19223), False, 'import torch\n')]
|
"""
5_costsTester.py
Created by <NAME> at 18/02/2021, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
"""
4_portfolioTester.py
Created by <NAME> at 10/02/2021, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from EcoFin.assetAllocation.performance import Performance
from EcoFin.utils import utils
from EcoFin.assetAllocation.allocation import Allocation
# -------------------------[Set-up]-------------------------
ticker_list = [line.rstrip('\n') for line in open(r'../INDEXs/DJIA.txt')]
maturity_min = 15
base_path = r'../Export/BackTest_C'
start_date = 0
# Strategy set-up
direction = 'OPS_[OI]' # Direction driver
force = 'VIX_[CBOE]' # In None, don't use force driver
polarize = True # True or False: polarize direction component
# Portfolio set-up
buy_only = False # Set a buy only strategy that ignore negative signals
w_limit = None # Ranks best N ticker based on strategy
w_equally = False # Equally weighted mode
leverage = None # Strategy leverage (1 is no leverage, None is auto-compensation)
# Transaction costs
tc = 8 # unit in basis points
# ----------------------------------------------------------
base = ['SpotPrice']
data = {b: {} for b in base + [direction, force]}
if None in data.keys():
del data[None]
for tick in tqdm(ticker_list, desc='Importing data'):
try:
# Import data and clean-up
source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl')
source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')]
source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True)
for driver in data.keys():
data[driver][tick] = source[driver]
except:
pass
# Merge (concatenate) data and create dataframes
for driver in data.keys():
data[driver] = pd.concat(data[driver], axis=1)
    # ------------------[Normalize direction data]------------------
    if driver == direction:
        data[driver] = data[driver].sub(data[driver].mean(axis=1), axis=0)
    # ---------------------------------------------------------------
# Generate strategy signals
# -----------------------------------[STRATEGY SET-UP]-----------------------------------
if polarize: #
data[direction] = utils.polarizeTable(data[direction]) #
#
if force is None: #
force_v = 1 #
else: #
force_v = data[force] #
#
data['signals'] = data[direction] * force_v #
# -----------------------------------[STRATEGY SET-UP]-----------------------------------
# =====================================================================================
# FROM HERE NO 'signals data' MANIPULATION
# =====================================================================================
# [1] Compute ln-returns of benchmark
data['lnReturns'] = np.log(data['SpotPrice'].shift(-1) / data['SpotPrice'])
# [2] Compute strategy weights
allocation = Allocation(data['signals'], buyOnly=buy_only, limit=w_limit)
if w_equally:
data['weights'] = allocation.getEquallyWeights()
else:
data['weights'] = allocation.getSignalWeights()
# [3] Compute strategy ln-returns
if leverage is None:
leverage = data['SpotPrice'].shape[1]
data['strategy'] = data['lnReturns'] * data['weights'] * leverage
# Compute turnover and transaction costs
turnover = allocation.getTurnover(data['weights'])
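# Round-trip transaction costs: turnover is doubled (buy + sell legs), tc is
# converted from basis points, and log(1 + x) keeps costs in ln-return space.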
data['costs'] = np.log(turnover.byTime * 2 * (tc/1e4) + 1)
data['strategy_net'] = data['strategy'].mean(axis=1) - data['costs']
# =====================================================================================
# FROM HERE NO DATA MANIPULATION
# =====================================================================================
# Create plot framework
fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True)
fig.suptitle('Strategy tester', fontsize=16)
# Plot strategy return vs. benchmark (data)
axs[0].set_title('data returns')
axs[0].plot(data['lnReturns'].mean(axis=1).cumsum(), linestyle='dotted', label='Benchmark')
axs[0].plot(data['strategy'].mean(axis=1).cumsum(), label='Strategy Gross')
axs[0].plot(data['strategy_net'].cumsum(), label='Strategy Net')
axs[0].set(ylabel='Cumulated ln-returns ($X_t$)')
axs[0].legend()
# Plot transaction costs
ax2 = axs[0].twinx()
color = 'tab:gray'
ax2.set_ylabel('Transaction Costs', color=color)
ax2.fill_between(data['costs'].index, 0, data['costs'], linewidth=.5, alpha=.2, color=color)
ax2.plot(data['costs'], linewidth=.5, alpha=.6, color=color)
ax2.set_ylim([0, data['costs'].max() * 4])
ax2.tick_params(axis='y', labelcolor=color)
# Plot portfolio turnover
axs[1].set_title('Transaction costs')
axs[1].plot(turnover.byTime, color='gold', label=r'Turnover ($\gamma$)')
axs[1].axhline(turnover.mean, alpha=.6, linestyle='--', label=r'mean')
axs[1].legend()
plt.show()
|
[
"EcoFin.utils.utils.polarizeTable",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"numpy.log",
"EcoFin.assetAllocation.allocation.Allocation",
"pandas.to_datetime",
"matplotlib.pyplot.subplots",
"pandas.concat"
] |
[((1762, 1802), 'tqdm.tqdm', 'tqdm', (['ticker_list'], {'desc': '"""Importing data"""'}), "(ticker_list, desc='Importing data')\n", (1766, 1802), False, 'from tqdm import tqdm\n'), ((4040, 4100), 'EcoFin.assetAllocation.allocation.Allocation', 'Allocation', (["data['signals']"], {'buyOnly': 'buy_only', 'limit': 'w_limit'}), "(data['signals'], buyOnly=buy_only, limit=w_limit)\n", (4050, 4100), False, 'from EcoFin.assetAllocation.allocation import Allocation\n'), ((4499, 4547), 'numpy.log', 'np.log', (['(turnover.byTime * 2 * (tc / 10000.0) + 1)'], {}), '(turnover.byTime * 2 * (tc / 10000.0) + 1)\n', (4505, 4547), True, 'import numpy as np\n'), ((4879, 4924), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'figsize': '(15, 8)', 'sharex': '(True)'}), '(2, figsize=(15, 8), sharex=True)\n', (4891, 4924), True, 'import matplotlib.pyplot as plt\n'), ((5930, 5940), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5938, 5940), True, 'import matplotlib.pyplot as plt\n'), ((2369, 2400), 'pandas.concat', 'pd.concat', (['data[driver]'], {'axis': '(1)'}), '(data[driver], axis=1)\n', (2378, 2400), True, 'import pandas as pd\n'), ((2850, 2886), 'EcoFin.utils.utils.polarizeTable', 'utils.polarizeTable', (['data[direction]'], {}), '(data[direction])\n', (2869, 2886), False, 'from EcoFin.utils import utils\n'), ((2090, 2137), 'pandas.to_datetime', 'pd.to_datetime', (["source['Date']"], {'format': '"""%Y%m%d"""'}), "(source['Date'], format='%Y%m%d')\n", (2104, 2137), True, 'import pandas as pd\n')]
|
import textract
from itertools import tee
import base64
import bson
import uuid
import os
from flask import Flask, request, jsonify
import datetime
from pymongo import MongoClient
import string
import json
UPLOAD_FOLDER = '/tmp/'
ALLOWED_EXTENSIONS = {'pdf'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
client = MongoClient('mongodb://localhost:27017/')
users_collection = client.test.users
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.after_request
def cors_enabled(response):
response.headers['Access-Control-Allow-Origin'] = 'http://localhost:5000'
response.headers['Access-Control-Allow-Credentials'] = 'true'
return response
@app.route('/', methods=['GET', 'POST'])
def upload_file():
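    # Accept a base64-encoded PDF posted as form data, persist it to the upload
    # folder, and extract its transactions for the cookie-identified user.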
is_request_post = request.method == 'POST'
    user_cookie = request.cookies.get('currentUserId')
    is_user_cookie_string = type(user_cookie) is str
    # The cookie value is sliced as [3:-3] below, so require enough characters
    # (the exact minimum length here is an assumption).
    is_user_len_ok = is_user_cookie_string and len(user_cookie) > 6
    if is_request_post and is_user_cookie_string and is_user_len_ok:
        user_id = user_cookie[3:-3]
file_stream = request.form['file']
file_name = uuid.uuid4().hex + '.pdf'
file_path = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
with open(file_path, 'wb') as file:
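            # Skip the first 28 characters, which presumably hold the
            # 'data:application/pdf;base64,' data-URL prefix, before decoding.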
file.write(base64.b64decode(file_stream[28:]))
extract_transaction_from_pdf(file_path, user_id)
return jsonify({"message": "User id : %s transaction was updated" % user_id})
else:
# TODO remove GET
return 'Hello World!'
def pairwise(iterable):
(a, b) = tee(iterable)
next(b, None)
return zip(a, b)
def remove_rtl(text):
text = text.replace('\u202a', '')
text = text.replace('\u202b', '')
text = text.replace('\u202c', '')
text = text.replace('í', 'ם')
text = text.replace('ï', 'ן')
text = text.replace('ó', 'ף')
text = text.replace('ê', 'ך')
text = text.replace('õ', 'ץ')
text = text.replace('', 'נ')
return text
def extract_credit_card_number(lines):
return lines[0][2:]
def is_timestamp(date_text):
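    # A valid timestamp here is exactly 'DD/MM/YY': 8 characters split into
    # three 2-digit numeric fields.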
if len(date_text) == 8:
numbers = date_text.split('/')
if len(numbers) == 3:
number1 = numbers[0]
number2 = numbers[1]
number3 = numbers[2]
is_number1 = len(number1) == 2 and number1.isdigit()
is_number2 = len(number2) == 2 and number2.isdigit()
is_number3 = len(number3) == 2 and number3.isdigit()
if all([is_number1, is_number2, is_number3]):
return True
return False
def is_price(price):
allowed = string.digits + '.' + ',' + '-'
return all(c in allowed for c in price)
class Transaction:
date = None
to = None
iska = None
class Data:
credit_card_last_digits = None
name = None
address = None
transactions = None
def add_transaction_to_user(transactions, user_id):
current_transactions = users_collection.find_one({'_id': bson.ObjectId(user_id)})['transactions']
for transaction in current_transactions:
transaction.pop('_id', None)
transactions['transactions'] = [transaction for transaction in
transactions['transactions'] if transaction not in current_transactions]
for transaction in transactions['transactions']:
transaction['_id'] = bson.objectid.ObjectId()
users_collection.update_one(
{'_id': bson.ObjectId(user_id)},
{'$push': {'transactions': {'$each': transactions['transactions']}}}
)
def extract_transaction_from_isracard_pdf(lines, user_id):
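    # Parse the text lines of an Isracard statement: drop layout noise, slice
    # out the domestic-transactions block, then align the surviving dates,
    # business names, prices and categories positionally.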
data = Data()
remove_list = [
'ה.קבע',
'',
'לא הוצג',
'סכו םהחיוב',
'בש"ח',
'}',
'ש םבית עסק',
'תאריך',
'עסקה',
'כרטיס',
'בעסקה',
'ענף',
'ש.אלחוט'
]
categories_list = [
'ביטוח',
'שרות רפואי',
'נופש ותיור',
'בתי ספר',
'פנאי/ספורט',
'שירותי רכב',
'דלק',
'מכולת/סופר',
'רהיטים',
'מסעדות/קפה',
'מוצרי חשמל',
"קניה אינט'",
"תש' רשויות",
'פארמה',
'כלי בית',
'משתלות',
'הלבשה',
'מעדניות',
'תרבות',
'שונות',
'ספרי/םדיסק',
'אבזרי אפנה',
'טוטו/פיס',
'הנעלה',
'צעצועים',
'עיתו/ןדפוס',
'מחשבים'
]
categories_fixer = {
'ביטוח': 'ביטוח',
'שרות רפואי': 'שרות רפואי',
'נופש ותיור': 'נופש ותיור',
'בתי ספר': 'בתי ספר',
'פנאי/ספורט': 'פנאי וספורט',
'שירותי רכב': 'שירותי רכב',
'דלק': 'דלק',
'מכולת/סופר': 'מכולת וסופר',
'רהיטים': 'רהיטים',
'מסעדות/קפה': 'מסעדות וקפה',
'מוצרי חשמל': 'מוצרי חשמל',
"קניה אינט'": 'קניות באינטרנט',
"תש' רשויות": 'תשלומי רשויות',
'פארמה': 'פארמה',
'כלי בית': 'כלי בית',
'משתלות': 'משתלות',
'הלבשה': 'הלבשה',
'מעדניות': 'מעדניות',
'תרבות': 'תרבות',
'שונות': 'שונות',
'ספרי/םדיסק': 'ספרים ודיסקים',
'אבזרי אפנה': 'אביזרי אופנה',
'טוטו/פיס': 'טוטו ופיס',
'הנעלה': 'הנעלה',
'צעצועים': 'צעצועים',
'עיתו/ןדפוס': 'עיתון',
'מחשבים': 'מחשבים'
}
lines = list([value for value in lines if value not in remove_list])
start_index = lines.index('עסקות שחויבו /זוכו -בארץ')
end_index = lines.index('מסגרת הכרטיס ותנאי האשראי')
lines = lines[start_index + 1:end_index]
categories = list([value for value in lines if value in categories_list])
try:
remove_index_start = lines.index('פירוט נוסף')
remove_index_end = lines.index('עסקות שחויבו /זוכו -בארץ')
except ValueError:
remove_index_start = None
remove_index_end = None
if remove_index_start and remove_index_end:
lines = lines[:remove_index_start] + lines[remove_index_end + 1:]
dates = [date for date in lines if is_timestamp(date)]
temp = list()
prices = list()
for line in lines:
if is_price(line):
temp.append(line)
else:
if len(temp) and len(prices) < len(dates):
if len(temp) > len(dates) * 2:
temp = temp[:len(dates) * 2]
if len(temp) % 2 != 0:
temp = temp[:-1]
                if len(temp) // 2 + len(prices) > len(dates):
                    temp = temp[:-2]
                prices += temp[len(temp) // 2:]
temp = list()
lines = list([value for value in lines if value not in categories_list])
lines = list([value for value in lines if not is_price(value)])
lines = list([value for value in lines if not is_timestamp(value)])
lines = list([value for value in lines if value != 'עסקות שחויבו /זוכו -בארץ'])
lines = list([value for value in lines if value != 'סכו םעסקה'])
lines = list([value for value in lines if 'סה"כ חיוב לתאר' not in value])
businesses = lines[:len(dates) + 1]
transactions = {"transactions": []}
for date, business, price, category in zip(dates, businesses, prices, categories):
transactions['transactions'].append({
'date': date,
'business': business,
'price': price,
'category': category
})
for transaction in transactions['transactions']:
date = transaction['date']
date = date.split('/')
date[2] = '20' + date[2]
date = '/'.join(date)
transaction['price'] = transaction['price'].replace(',', '')
transaction['price'] = int(float(transaction['price']))
transaction['date'] = datetime.datetime.strptime(date, '%d/%m/%Y')
transaction['category'] = categories_fixer[transaction['category']]
add_transaction_to_user(transactions=transactions, user_id=user_id)
def extract_transaction_from_pdf(file_path, user_id):
# TODO add error message and handler for textract.exceptions.ShellError exception
text = textract.process(file_path, 'UTF-8')
decode_text = text.decode()
decode_text = remove_rtl(decode_text)
lines = decode_text.split('\n')
extract_transaction_from_isracard_pdf(lines, user_id)
if __name__ == '__main__':
app.run(port=3000, host='0.0.0.0')
|
[
"pymongo.MongoClient",
"uuid.uuid4",
"bson.objectid.ObjectId",
"flask.Flask",
"flask.request.cookies.get",
"base64.b64decode",
"flask.jsonify",
"datetime.datetime.strptime",
"bson.ObjectId",
"textract.process",
"itertools.tee",
"os.path.join"
] |
[((267, 282), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (272, 282), False, 'from flask import Flask, request, jsonify\n'), ((336, 377), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://localhost:27017/"""'], {}), "('mongodb://localhost:27017/')\n", (347, 377), False, 'from pymongo import MongoClient\n'), ((1649, 1662), 'itertools.tee', 'tee', (['iterable'], {}), '(iterable)\n', (1652, 1662), False, 'from itertools import tee\n'), ((8128, 8164), 'textract.process', 'textract.process', (['file_path', '"""UTF-8"""'], {}), "(file_path, 'UTF-8')\n", (8144, 8164), False, 'import textract\n'), ((1244, 1296), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'file_name'], {}), "(app.config['UPLOAD_FOLDER'], file_name)\n", (1256, 1296), False, 'import os\n'), ((1473, 1543), 'flask.jsonify', 'jsonify', (["{'message': 'User id : %s transaction was updated' % user_id}"], {}), "({'message': 'User id : %s transaction was updated' % user_id})\n", (1480, 1543), False, 'from flask import Flask, request, jsonify\n'), ((3435, 3459), 'bson.objectid.ObjectId', 'bson.objectid.ObjectId', ([], {}), '()\n', (3457, 3459), False, 'import bson\n'), ((7782, 7826), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%d/%m/%Y"""'], {}), "(date, '%d/%m/%Y')\n", (7808, 7826), False, 'import datetime\n'), ((889, 925), 'flask.request.cookies.get', 'request.cookies.get', (['"""currentUserId"""'], {}), "('currentUserId')\n", (908, 925), False, 'from flask import Flask, request, jsonify\n'), ((960, 996), 'flask.request.cookies.get', 'request.cookies.get', (['"""currentUserId"""'], {}), "('currentUserId')\n", (979, 996), False, 'from flask import Flask, request, jsonify\n'), ((1092, 1128), 'flask.request.cookies.get', 'request.cookies.get', (['"""currentUserId"""'], {}), "('currentUserId')\n", (1111, 1128), False, 'from flask import Flask, request, jsonify\n'), ((3509, 3531), 'bson.ObjectId', 'bson.ObjectId', (['user_id'], {}), '(user_id)\n', (3522, 3531), False, 'import bson\n'), ((1198, 1210), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1208, 1210), False, 'import uuid\n'), ((1364, 1398), 'base64.b64decode', 'base64.b64decode', (['file_stream[28:]'], {}), '(file_stream[28:])\n', (1380, 1398), False, 'import base64\n'), ((3053, 3075), 'bson.ObjectId', 'bson.ObjectId', (['user_id'], {}), '(user_id)\n', (3066, 3075), False, 'import bson\n')]
|
#!/usr/bin/env python3
import re
from pprint import pprint
import os
import yaml
import random
import string
from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt
secrets_file = '_secrets_file_'
yaml_pp_vars = dict(os.environ)
yaml_pp_vars[secrets_file] = '_secrets.yaml'
secrets = None
sshkey_re = re.compile(r'(.*)\$SSHKEY:([A-Za-z][A-Za-z0-9]*)(:[^\$]*|)\$')
# pwgen_re is assumed to mirror sshkey_re for the $PWGEN:store[:len[:ENC]]$
# templates exercised below.
pwgen_re = re.compile(r'(.*)\$PWGEN:([A-Za-z][A-Za-z0-9]*)(:[^\$]*|)\$')
for line in [
'One two',
'abcd $SSHKEY:linux1$',
'abcd $$PWGEN:linux1$ something',
'abcd $$PWGEN:linux1:32:MD5$',
'abcd $$PWGEN:linux1:32:SHA256$ something',
'abcd $$PWGEN:linux1:32:SHA512$',
'abcd $PWGEN:linux1:16',
'abcd $PWGEN:linux1 something',
'abcd $PWGEN:linux1:32:MD5',
'abcd $PWGEN:linux1:32:SHA256 something',
'abcd $PWGEN:linux1:32:SHA512',
'abcd $PWGEN:linux1:16$',
'abcd $PWGEN:linux1$ something',
'abcd $PWGEN:linux1:32:MD5$',
'abcd $PWGEN:linux1:32:SHA256$ something',
'abcd $PWGEN:linux1:32:SHA512$',
'end' ]:
in_line = line
mv = pwgen_re.match(line)
if mv:
if mv.group(1)[-1] == '$':
line = line[:len(mv.group(1))-1] + line[len(mv.group(1)):]
else:
store = mv.group(2)
pwlen = 12
encode = ''
for opt in mv.group(3).split(':'):
if not opt: continue
if opt == 'MD5' or opt == 'SHA256' or opt == 'SHA512':
encode = opt
elif int(opt) > 6:
pwlen = int(opt)
if secrets is None:
if os.path.isfile(yaml_pp_vars[secrets_file]):
with open(yaml_pp_vars[secrets_file],'r') as fp:
secrets = yaml.safe_load(fp)
else:
secrets = {}
if store in secrets:
passwd = secrets[store]
else:
charset = string.ascii_lowercase + string.ascii_uppercase + string.digits
passwd = ''.join(random.sample(charset, pwlen))
secrets[store] = passwd
with open(yaml_pp_vars[secrets_file],'w') as fp:
fp.write(yaml.dump(secrets))
if encode == 'MD5':
cpassw = md5_crypt.hash(passwd)
elif encode == 'SHA256':
cpassw = sha256_crypt.hash(passwd,rounds=5000)
elif encode == 'SHA512':
cpassw = sha512_crypt.hash(passwd,rounds=5000)
else:
cpassw = passwd
line = line[:len(mv.group(1))] + cpassw + line[len(mv.group(0)):]
print('INP: {}'.format(in_line))
print('OUT: {}'.format(line))
|
[
"random.sample",
"yaml.dump",
"passlib.hash.sha256_crypt.hash",
"os.path.isfile",
"yaml.safe_load",
"passlib.hash.md5_crypt.hash",
"passlib.hash.sha512_crypt.hash",
"re.compile"
] |
[((313, 377), 're.compile', 're.compile', (['"""(.*)\\\\$SSHKEY:([A-Za-z][A-Za-z0-9]*)(:[^\\\\$]*|)\\\\$"""'], {}), "('(.*)\\\\$SSHKEY:([A-Za-z][A-Za-z0-9]*)(:[^\\\\$]*|)\\\\$')\n", (323, 377), False, 'import re\n'), ((1479, 1521), 'os.path.isfile', 'os.path.isfile', (['yaml_pp_vars[secrets_file]'], {}), '(yaml_pp_vars[secrets_file])\n', (1493, 1521), False, 'import os\n'), ((2048, 2070), 'passlib.hash.md5_crypt.hash', 'md5_crypt.hash', (['passwd'], {}), '(passwd)\n', (2062, 2070), False, 'from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt\n'), ((1845, 1874), 'random.sample', 'random.sample', (['charset', 'pwlen'], {}), '(charset, pwlen)\n', (1858, 1874), False, 'import random\n'), ((2119, 2157), 'passlib.hash.sha256_crypt.hash', 'sha256_crypt.hash', (['passwd'], {'rounds': '(5000)'}), '(passwd, rounds=5000)\n', (2136, 2157), False, 'from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt\n'), ((1604, 1622), 'yaml.safe_load', 'yaml.safe_load', (['fp'], {}), '(fp)\n', (1618, 1622), False, 'import yaml\n'), ((1984, 2002), 'yaml.dump', 'yaml.dump', (['secrets'], {}), '(secrets)\n', (1993, 2002), False, 'import yaml\n'), ((2205, 2243), 'passlib.hash.sha512_crypt.hash', 'sha512_crypt.hash', (['passwd'], {'rounds': '(5000)'}), '(passwd, rounds=5000)\n', (2222, 2243), False, 'from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt\n')]
|
"""Setup file for cheshire3 package."""
from __future__ import with_statement
import sys
import os
import inspect
from warnings import warn
# Import Distribute / Setuptools
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from pkg_resources import DistributionNotFound
# Check Python version
py_version = getattr(sys, 'version_info', (0, 0, 0))
if py_version < (2, 6):
warn("Cheshire3 requires Python 2.6 or later; some code may be "
"incompatible with earlier versions.")
# Inspect to find current path
setuppath = inspect.getfile(inspect.currentframe())
setupdir = os.path.dirname(setuppath)
# Basic information
_name = 'cheshire3'
_description = ('Cheshire3 Search and Retrieval Engine and Information '
'Framework')
# Discover version number from file
with open(os.path.join(setupdir, 'VERSION.txt'), 'r') as vfh:
_version = vfh.read().strip()
_download_url = ('http://cheshire3.liv.ac.uk/download/{0}/src/{1}-{2}.tar.gz'
''.format(_version[:3], _name, _version))
# More detailed description from README
try:
fh = open(os.path.join(setupdir, 'README.rst'), 'r')
except IOError:
_long_description = ''
else:
_long_description = fh.read()
fh.close()
# Requirements
with open(os.path.join(setupdir, 'requirements.txt'), 'r') as fh:
_install_requires = fh.readlines()
_tests_require = []
# Determine python-dateutil version
if py_version < (3, 0):
dateutilstr = 'python-dateutil == 1.5'
if py_version < (2, 7):
_install_requires.append('argparse')
_tests_require.append('unittest2')
else:
dateutilstr = 'python-dateutil >= 2.0'
_install_requires.append(dateutilstr)
setup(
name=_name,
version=_version,
packages=[_name],
include_package_data=True,
package_data={'cheshire3': ['configs/*.xml', 'configs/extra/*.xml']},
exclude_package_data={'': ['README.*', '.gitignore']},
requires=['lxml(>=2.1)', 'bsddb', 'dateutil', 'argparse'],
tests_require=_tests_require,
install_requires=_install_requires,
setup_requires=['setuptools-git'],
dependency_links=[
"http://labix.org/python-dateutil",
"http://www.panix.com/~asl2/software/PyZ3950/",
"http://cheshire3.liv.ac.uk/download/latest/reqs/"
],
extras_require={
'graph': ['rdflib'],
'grid': ['PyRods'],
'datamining': ['svm'],
'lucene': ['lucene'],
'nlp': ['numpy', 'nltk >= 2.0.2'],
'sql': ['psycopg2 >= 2.5'],
'textmining': ['numpy', 'nltk >= 2.0.2'],
'web': ['pyoai', 'PyZ3950 >= 2.04', 'ZSI < 2.0']
},
test_suite="cheshire3.test.testAll.suite",
scripts=['scripts/DocumentConverter.py'],
entry_points={
'console_scripts': [
'cheshire3 = cheshire3.commands.console:main',
'cheshire3-init = cheshire3.commands.init:main',
'cheshire3-load = cheshire3.commands.load:main',
'cheshire3-register = cheshire3.commands.register:main',
'cheshire3-unregister = cheshire3.commands.unregister:main',
'cheshire3-search = cheshire3.commands.search:main',
'cheshire3-serve = cheshire3.commands.serve:main',
'icheshire3-load = cheshire3.grid.commands.load:main [grid]'
],
},
keywords="xml document search information retrieval engine data text",
description=_description,
long_description=_long_description,
author="<NAME>, et al.",
author_email="<EMAIL>",
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license="BSD",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP :: Indexing/Search",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
"Topic :: Internet :: Z39.50",
"Topic :: Text Processing :: Indexing",
"Topic :: Text Processing :: Linguistic",
"Topic :: Text Processing :: Markup"
],
url="http://cheshire3.liv.ac.uk /",
download_url=_download_url
)
|
[
"setuptools.setup",
"os.path.dirname",
"inspect.currentframe",
"warnings.warn",
"os.path.join",
"ez_setup.use_setuptools"
] |
[((212, 228), 'ez_setup.use_setuptools', 'use_setuptools', ([], {}), '()\n', (226, 228), False, 'from ez_setup import use_setuptools\n'), ((635, 661), 'os.path.dirname', 'os.path.dirname', (['setuppath'], {}), '(setuppath)\n', (650, 661), False, 'import os\n'), ((1730, 4007), 'setuptools.setup', 'setup', ([], {'name': '_name', 'version': '_version', 'packages': '[_name]', 'include_package_data': '(True)', 'package_data': "{'cheshire3': ['configs/*.xml', 'configs/extra/*.xml']}", 'exclude_package_data': "{'': ['README.*', '.gitignore']}", 'requires': "['lxml(>=2.1)', 'bsddb', 'dateutil', 'argparse']", 'tests_require': '_tests_require', 'install_requires': '_install_requires', 'setup_requires': "['setuptools-git']", 'dependency_links': "['http://labix.org/python-dateutil',\n 'http://www.panix.com/~asl2/software/PyZ3950/',\n 'http://cheshire3.liv.ac.uk/download/latest/reqs/']", 'extras_require': "{'graph': ['rdflib'], 'grid': ['PyRods'], 'datamining': ['svm'], 'lucene':\n ['lucene'], 'nlp': ['numpy', 'nltk >= 2.0.2'], 'sql': [\n 'psycopg2 >= 2.5'], 'textmining': ['numpy', 'nltk >= 2.0.2'], 'web': [\n 'pyoai', 'PyZ3950 >= 2.04', 'ZSI < 2.0']}", 'test_suite': '"""cheshire3.test.testAll.suite"""', 'scripts': "['scripts/DocumentConverter.py']", 'entry_points': "{'console_scripts': ['cheshire3 = cheshire3.commands.console:main',\n 'cheshire3-init = cheshire3.commands.init:main',\n 'cheshire3-load = cheshire3.commands.load:main',\n 'cheshire3-register = cheshire3.commands.register:main',\n 'cheshire3-unregister = cheshire3.commands.unregister:main',\n 'cheshire3-search = cheshire3.commands.search:main',\n 'cheshire3-serve = cheshire3.commands.serve:main',\n 'icheshire3-load = cheshire3.grid.commands.load:main [grid]']}", 'keywords': '"""xml document search information retrieval engine data text"""', 'description': '_description', 'long_description': '_long_description', 'author': '"""<NAME>, et al."""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""<NAME>"""', 'maintainer_email': '"""<EMAIL>"""', 'license': '"""BSD"""', 'classifiers': "['Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP :: Indexing/Search',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Topic :: Internet :: Z39.50', 'Topic :: Text Processing :: Indexing',\n 'Topic :: Text Processing :: Linguistic',\n 'Topic :: Text Processing :: Markup']", 'url': '"""http://cheshire3.liv.ac.uk /"""', 'download_url': '_download_url'}), "(name=_name, version=_version, packages=[_name], include_package_data=\n True, package_data={'cheshire3': ['configs/*.xml',\n 'configs/extra/*.xml']}, exclude_package_data={'': ['README.*',\n '.gitignore']}, requires=['lxml(>=2.1)', 'bsddb', 'dateutil',\n 'argparse'], tests_require=_tests_require, install_requires=\n _install_requires, setup_requires=['setuptools-git'], dependency_links=\n ['http://labix.org/python-dateutil',\n 'http://www.panix.com/~asl2/software/PyZ3950/',\n 'http://cheshire3.liv.ac.uk/download/latest/reqs/'], extras_require={\n 'graph': ['rdflib'], 'grid': ['PyRods'], 'datamining': ['svm'],\n 'lucene': ['lucene'], 'nlp': ['numpy', 'nltk >= 2.0.2'], 'sql': [\n 'psycopg2 >= 2.5'], 'textmining': ['numpy', 'nltk >= 2.0.2'], 'web': [\n 'pyoai', 'PyZ3950 >= 2.04', 'ZSI < 2.0']}, test_suite=\n 'cheshire3.test.testAll.suite', scripts=['scripts/DocumentConverter.py'\n ], 
entry_points={'console_scripts': [\n 'cheshire3 = cheshire3.commands.console:main',\n 'cheshire3-init = cheshire3.commands.init:main',\n 'cheshire3-load = cheshire3.commands.load:main',\n 'cheshire3-register = cheshire3.commands.register:main',\n 'cheshire3-unregister = cheshire3.commands.unregister:main',\n 'cheshire3-search = cheshire3.commands.search:main',\n 'cheshire3-serve = cheshire3.commands.serve:main',\n 'icheshire3-load = cheshire3.grid.commands.load:main [grid]']},\n keywords='xml document search information retrieval engine data text',\n description=_description, long_description=_long_description, author=\n '<NAME>, et al.', author_email='<EMAIL>', maintainer='<NAME>',\n maintainer_email='<EMAIL>', license='BSD', classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP :: Indexing/Search',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Topic :: Internet :: Z39.50', 'Topic :: Text Processing :: Indexing',\n 'Topic :: Text Processing :: Linguistic',\n 'Topic :: Text Processing :: Markup'], url=\n 'http://cheshire3.liv.ac.uk /', download_url=_download_url)\n", (1735, 4007), False, 'from setuptools import setup, find_packages\n'), ((427, 537), 'warnings.warn', 'warn', (['"""Cheshire3 requires Python 2.6 or later; some code may be incompatible with earlier versions."""'], {}), "(\n 'Cheshire3 requires Python 2.6 or later; some code may be incompatible with earlier versions.'\n )\n", (431, 537), False, 'from warnings import warn\n'), ((600, 622), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (620, 622), False, 'import inspect\n'), ((855, 892), 'os.path.join', 'os.path.join', (['setupdir', '"""VERSION.txt"""'], {}), "(setupdir, 'VERSION.txt')\n", (867, 892), False, 'import os\n'), ((1139, 1175), 'os.path.join', 'os.path.join', (['setupdir', '"""README.rst"""'], {}), "(setupdir, 'README.rst')\n", (1151, 1175), False, 'import os\n'), ((1306, 1348), 'os.path.join', 'os.path.join', (['setupdir', '"""requirements.txt"""'], {}), "(setupdir, 'requirements.txt')\n", (1318, 1348), False, 'import os\n')]
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import json
import logging
from restclients_core.exceptions import DataFailureException
from uw_iasystem.dao import IASystem_DAO
from uw_iasystem.exceptions import TermEvalNotCreated
from uw_iasystem.util.thread import ThreadWithResponse
logger = logging.getLogger(__name__)
def get_resource(url, domain):
threads = []
for dao in IASystem_DAO(domain):
t = ThreadWithResponse(target=__get_resource, args=(dao, url))
t.start()
threads.append((t, dao.service_name()))
for t, k in threads:
t.join()
if t.response is not None:
data = t.response
if data.get('collection') and\
data.get('collection').get('items'):
return t.response
if t.exception is not None:
logger.error("{}: {}".format(k, str(t.exception)))
raise t.exception
return None
def __get_resource(dao, url):
"""
Issue a GET request to IASystem with the given url
and return a response in Collection+json format.
    :returns: the response body parsed from Collection+JSON into a dict
"""
headers = {"Accept": "application/vnd.collection+json"}
response = dao.getURL(url, headers)
status = response.status
logger.debug("{} ==status==> {}".format(url, status))
if status != 200:
message = str(response.data)
if status == 404:
            # the URL does not exist on the specific domain
return None
if status == 400:
if "Term is out of range" in message:
raise TermEvalNotCreated(url, status, message)
raise DataFailureException(url, status, message)
return json.loads(response.data)
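# --- Usage sketch (illustration only, not part of the original module) ---
# A minimal, hedged example of calling get_resource; the URL and domain
# below are hypothetical.  get_resource fans the request out to every
# configured DAO thread and returns the first Collection+JSON payload that
# actually contains items, or None.
def _demo_get_resource():
    try:
        return get_resource("/api/v1/evaluation", "iasystem")
    except TermEvalNotCreated:
        logger.info("evaluation not created for this term")
        return None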
|
[
"json.loads",
"uw_iasystem.exceptions.TermEvalNotCreated",
"uw_iasystem.util.thread.ThreadWithResponse",
"uw_iasystem.dao.IASystem_DAO",
"restclients_core.exceptions.DataFailureException",
"logging.getLogger"
] |
[((337, 364), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (354, 364), False, 'import logging\n'), ((430, 450), 'uw_iasystem.dao.IASystem_DAO', 'IASystem_DAO', (['domain'], {}), '(domain)\n', (442, 450), False, 'from uw_iasystem.dao import IASystem_DAO\n'), ((1741, 1766), 'json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (1751, 1766), False, 'import json\n'), ((464, 522), 'uw_iasystem.util.thread.ThreadWithResponse', 'ThreadWithResponse', ([], {'target': '__get_resource', 'args': '(dao, url)'}), '(target=__get_resource, args=(dao, url))\n', (482, 522), False, 'from uw_iasystem.util.thread import ThreadWithResponse\n'), ((1686, 1728), 'restclients_core.exceptions.DataFailureException', 'DataFailureException', (['url', 'status', 'message'], {}), '(url, status, message)\n', (1706, 1728), False, 'from restclients_core.exceptions import DataFailureException\n'), ((1630, 1670), 'uw_iasystem.exceptions.TermEvalNotCreated', 'TermEvalNotCreated', (['url', 'status', 'message'], {}), '(url, status, message)\n', (1648, 1670), False, 'from uw_iasystem.exceptions import TermEvalNotCreated\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import chainer
from chainer import cuda, Function, Variable
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from src.lib.loss import softmax_dice_loss
class Model_L2(Chain):
def __init__(
self,
ndim=3,
n_class=2,
init_channel=2,
kernel_size=3,
pool_size=2,
ap_factor=2,
gpu=-1,
class_weight=np.array([1, 1]).astype(np.float32),
loss_func='F.softmax_cross_entropy'
):
self.gpu = gpu
self.pool_size = pool_size
if gpu >= 0:
self.class_weight = cuda.to_gpu(np.array(class_weight).astype(np.float32))
else:
self.class_weight = np.array(class_weight).astype(np.float32)
self.train = True
self.loss_func = loss_func
initializer = chainer.initializers.HeNormal()
super(Model_L2, self).__init__(
c0=L.ConvolutionND(ndim, 1, init_channel, kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c1=L.ConvolutionND(ndim, init_channel, int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c3=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc0=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc1=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2) + init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc3=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1) + init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc6=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), n_class, 1, 1, initialW=initializer, initial_bias=None),
bnc0=L.BatchNormalization(init_channel),
bnc1=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc2=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc3=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc1=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc2=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc4=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bndc5=L.BatchNormalization(int(init_channel * (ap_factor ** 1)))
)
def _calc(self, x):
e0 = F.relu(self.bnc0(self.c0(x)))
syn0 = F.relu(self.bnc1(self.c1(e0)))
del e0
e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)
e2 = F.relu(self.bnc2(self.c2(e1)))
syn1 = F.relu(self.bnc3(self.c3(e2)))
del e1, e2
e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)
e4 = F.relu(self.bnc4(self.c4(e3)))
e5 = F.relu(self.bnc5(self.c5(e4)))
del e3, e4
d0 = F.concat([self.dc0(e5), syn1])
del e5, syn1
d1 = F.relu(self.bndc1(self.dc1(d0)))
d2 = F.relu(self.bndc2(self.dc2(d1)))
del d0, d1
d3 = F.concat([self.dc3(d2), syn0])
del d2, syn0
d4 = F.relu(self.bndc4(self.dc4(d3)))
d5 = F.relu(self.bndc5(self.dc5(d4)))
del d3, d4
d6 = self.dc6(d5)
del d5
return d6
def __call__(self, x, t=None, seg=True):
h = self._calc(x)
if seg:
pred = F.softmax(h)
del h
return pred.data
else:
#loss = eval(self.loss_func)(h, t, class_weight=self.class_weight)
loss = eval(self.loss_func)(h, t)
pred = F.softmax(h)
del h
return loss, pred.data
class Model_L3(Chain):
def __init__(
self,
ndim=3,
n_class=2,
init_channel=2,
kernel_size=3,
pool_size=2,
ap_factor=2,
gpu=-1,
class_weight=np.array([1, 1]).astype(np.float32),
loss_func='F.softmax_cross_entropy'
):
self.gpu = gpu
self.pool_size = pool_size
if gpu >= 0:
self.class_weight = cuda.to_gpu(np.array(class_weight).astype(np.float32))
else:
self.class_weight = np.array(class_weight).astype(np.float32)
self.train = True
self.loss_func = loss_func
initializer = chainer.initializers.HeNormal()
super(Model_L3, self).__init__(
c0=L.ConvolutionND(ndim, 1, init_channel, kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c1=L.ConvolutionND(ndim, init_channel, int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c3=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c6=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c7=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc0=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 4)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc1=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3) + init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc3=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2) + init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc6=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc7=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1) + init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc8=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc9=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), n_class, 1, 1, initialW=initializer, initial_bias=None),
bnc0=L.BatchNormalization(init_channel),
bnc1=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc2=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc3=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bnc6=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bnc7=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bndc1=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc2=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc5=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc7=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bndc8=L.BatchNormalization(int(init_channel * (ap_factor ** 1)))
)
def _calc(self, x):
e0 = F.relu(self.bnc0(self.c0(x)))
syn0 = F.relu(self.bnc1(self.c1(e0)))
del e0
e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)
e2 = F.relu(self.bnc2(self.c2(e1)))
syn1 = F.relu(self.bnc3(self.c3(e2)))
del e1, e2
e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)
e4 = F.relu(self.bnc4(self.c4(e3)))
syn2 = F.relu(self.bnc5(self.c5(e4)))
del e3, e4
e5 = F.max_pooling_nd(syn2, self.pool_size, self.pool_size)
e6 = F.relu(self.bnc6(self.c6(e5)))
e7 = F.relu(self.bnc7(self.c7(e6)))
del e5, e6
d0 = F.concat([self.dc0(e7), syn2])
del e7, syn2
d1 = F.relu(self.bndc1(self.dc1(d0)))
d2 = F.relu(self.bndc2(self.dc2(d1)))
del d0, d1
d3 = F.concat([self.dc3(d2), syn1])
del d2, syn1
d4 = F.relu(self.bndc4(self.dc4(d3)))
d5 = F.relu(self.bndc5(self.dc5(d4)))
del d3, d4
d6 = F.concat([self.dc6(d5), syn0])
del d5, syn0
d7 = F.relu(self.bndc7(self.dc7(d6)))
d8 = F.relu(self.bndc8(self.dc8(d7)))
del d6, d7
d9 = self.dc9(d8)
del d8
return d9
def __call__(self, x, t=None, seg=True):
h = self._calc(x)
if seg:
pred = F.softmax(h)
del h
return pred.data
else:
#loss = eval(self.loss_func)(h, t, class_weight=self.class_weight)
loss = eval(self.loss_func)(h, t)
pred = F.softmax(h)
del h
return loss, pred.data
class Model_L4(Chain):
def __init__(
self,
ndim=3,
n_class=2,
init_channel=2,
kernel_size=3,
pool_size=2,
ap_factor=2,
gpu=-1,
class_weight=np.array([1, 1]).astype(np.float32),
loss_func='F.softmax_cross_entropy'
):
self.gpu = gpu
self.pool_size = pool_size
if gpu >= 0:
self.class_weight = cuda.to_gpu(np.array(class_weight).astype(np.float32))
else:
self.class_weight = np.array(class_weight).astype(np.float32)
self.train = True
self.loss_func = loss_func
initializer = chainer.initializers.HeNormal()
super(Model_L4, self).__init__(
c0=L.ConvolutionND(ndim, 1, init_channel, kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c1=L.ConvolutionND(ndim, init_channel, int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c3=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c6=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c7=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c8=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
c9=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 5)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc0=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 5)), int(init_channel * (ap_factor ** 5)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc1=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 4) + init_channel * (ap_factor ** 5)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc2=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 4)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc3=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 4)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc4=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3) + init_channel * (ap_factor ** 4)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc6=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 3)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc7=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2) + init_channel * (ap_factor ** 3)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc8=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc9=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 2)), self.pool_size, self.pool_size, 0, initialW=initializer, initial_bias=None),
dc10=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1) + init_channel * (ap_factor ** 2)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc11=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), int(init_channel * (ap_factor ** 1)), kernel_size, 1, int(kernel_size/2), initialW=initializer, initial_bias=None),
dc12=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), n_class, 1, 1, initialW=initializer, initial_bias=None),
bnc0=L.BatchNormalization(init_channel),
bnc1=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc2=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bnc3=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bnc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bnc6=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bnc7=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bnc8=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bnc9=L.BatchNormalization(int(init_channel * (ap_factor ** 5))),
bndc1=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bndc2=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
bndc4=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
bndc7=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc8=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
bndc10=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
bndc11=L.BatchNormalization(int(init_channel * (ap_factor ** 1)))
)
def _calc(self, x):
e0 = F.relu(self.bnc0(self.c0(x)))
syn0 = F.relu(self.bnc1(self.c1(e0)))
del e0
e1 = F.max_pooling_nd(syn0, self.pool_size, self.pool_size)
e2 = F.relu(self.bnc2(self.c2(e1)))
syn1 = F.relu(self.bnc3(self.c3(e2)))
del e1, e2
e3 = F.max_pooling_nd(syn1, self.pool_size, self.pool_size)
e4 = F.relu(self.bnc4(self.c4(e3)))
syn2 = F.relu(self.bnc5(self.c5(e4)))
del e3, e4
e5 = F.max_pooling_nd(syn2, self.pool_size, self.pool_size)
e6 = F.relu(self.bnc6(self.c6(e5)))
syn3 = F.relu(self.bnc7(self.c7(e6)))
del e5, e6
e7 = F.max_pooling_nd(syn3, self.pool_size, self.pool_size)
e8 = F.relu(self.bnc8(self.c8(e7)))
e9 = F.relu(self.bnc9(self.c9(e8)))
del e7, e8
d0 = F.concat([self.dc0(e9), syn3])
del e9, syn3
d1 = F.relu(self.bndc1(self.dc1(d0)))
d2 = F.relu(self.bndc2(self.dc2(d1)))
del d0, d1
d3 = F.concat([self.dc3(d2), syn2])
del d2, syn2
d4 = F.relu(self.bndc4(self.dc4(d3)))
d5 = F.relu(self.bndc5(self.dc5(d4)))
del d3, d4
d6 = F.concat([self.dc6(d5), syn1])
del d5, syn1
d7 = F.relu(self.bndc7(self.dc7(d6)))
d8 = F.relu(self.bndc8(self.dc8(d7)))
del d6, d7
d9 = F.concat([self.dc9(d8), syn0])
del d8, syn0
d10 = F.relu(self.bndc10(self.dc10(d9)))
d11 = F.relu(self.bndc11(self.dc11(d10)))
del d9, d10
d12 = self.dc12(d11)
del d11
return d12
def __call__(self, x, t=None, seg=True):
h = self._calc(x)
if seg:
pred = F.softmax(h)
del h
return pred.data
else:
#loss = eval(self.loss_func)(h, t, class_weight=self.class_weight)
loss = eval(self.loss_func)(h, t)
pred = F.softmax(h)
del h
return loss, pred.data
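# --- Usage sketch (illustration only, not part of the original module) ---
# A minimal CPU forward pass through Model_L2 on a dummy 3D patch; with the
# default pool_size=2 the two poolings require spatial edges divisible by 4,
# so 16 works.  Input layout is (batch, channel=1, z, y, x).
def _demo_model_l2():
    model = Model_L2(ndim=3, n_class=2, init_channel=2, gpu=-1)
    x = np.random.rand(1, 1, 16, 16, 16).astype(np.float32)
    pred = model(x, seg=True)    # softmax class probabilities
    return pred.shape            # -> (1, 2, 16, 16, 16)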
|
[
"chainer.initializers.HeNormal",
"chainer.functions.max_pooling_nd",
"numpy.array",
"chainer.functions.softmax",
"chainer.links.BatchNormalization"
] |
[((932, 963), 'chainer.initializers.HeNormal', 'chainer.initializers.HeNormal', ([], {}), '()\n', (961, 963), False, 'import chainer\n'), ((4333, 4387), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn0', 'self.pool_size', 'self.pool_size'], {}), '(syn0, self.pool_size, self.pool_size)\n', (4349, 4387), True, 'import chainer.functions as F\n'), ((4510, 4564), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn1', 'self.pool_size', 'self.pool_size'], {}), '(syn1, self.pool_size, self.pool_size)\n', (4526, 4564), True, 'import chainer.functions as F\n'), ((6161, 6192), 'chainer.initializers.HeNormal', 'chainer.initializers.HeNormal', ([], {}), '()\n', (6190, 6192), False, 'import chainer\n'), ((10868, 10922), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn0', 'self.pool_size', 'self.pool_size'], {}), '(syn0, self.pool_size, self.pool_size)\n', (10884, 10922), True, 'import chainer.functions as F\n'), ((11045, 11099), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn1', 'self.pool_size', 'self.pool_size'], {}), '(syn1, self.pool_size, self.pool_size)\n', (11061, 11099), True, 'import chainer.functions as F\n'), ((11222, 11276), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn2', 'self.pool_size', 'self.pool_size'], {}), '(syn2, self.pool_size, self.pool_size)\n', (11238, 11276), True, 'import chainer.functions as F\n'), ((13049, 13080), 'chainer.initializers.HeNormal', 'chainer.initializers.HeNormal', ([], {}), '()\n', (13078, 13080), False, 'import chainer\n'), ((19065, 19119), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn0', 'self.pool_size', 'self.pool_size'], {}), '(syn0, self.pool_size, self.pool_size)\n', (19081, 19119), True, 'import chainer.functions as F\n'), ((19242, 19296), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn1', 'self.pool_size', 'self.pool_size'], {}), '(syn1, self.pool_size, self.pool_size)\n', (19258, 19296), True, 'import chainer.functions as F\n'), ((19419, 19473), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn2', 'self.pool_size', 'self.pool_size'], {}), '(syn2, self.pool_size, self.pool_size)\n', (19435, 19473), True, 'import chainer.functions as F\n'), ((19596, 19650), 'chainer.functions.max_pooling_nd', 'F.max_pooling_nd', (['syn3', 'self.pool_size', 'self.pool_size'], {}), '(syn3, self.pool_size, self.pool_size)\n', (19612, 19650), True, 'import chainer.functions as F\n'), ((5190, 5202), 'chainer.functions.softmax', 'F.softmax', (['h'], {}), '(h)\n', (5199, 5202), True, 'import chainer.functions as F\n'), ((5408, 5420), 'chainer.functions.softmax', 'F.softmax', (['h'], {}), '(h)\n', (5417, 5420), True, 'import chainer.functions as F\n'), ((12078, 12090), 'chainer.functions.softmax', 'F.softmax', (['h'], {}), '(h)\n', (12087, 12090), True, 'import chainer.functions as F\n'), ((12296, 12308), 'chainer.functions.softmax', 'F.softmax', (['h'], {}), '(h)\n', (12305, 12308), True, 'import chainer.functions as F\n'), ((20642, 20654), 'chainer.functions.softmax', 'F.softmax', (['h'], {}), '(h)\n', (20651, 20654), True, 'import chainer.functions as F\n'), ((20860, 20872), 'chainer.functions.softmax', 'F.softmax', (['h'], {}), '(h)\n', (20869, 20872), True, 'import chainer.functions as F\n'), ((499, 515), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (507, 515), True, 'import numpy as np\n'), ((3446, 3480), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['init_channel'], {}), '(init_channel)\n', (3466, 3480), True, 'import 
chainer.links as L\n'), ((5728, 5744), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (5736, 5744), True, 'import numpy as np\n'), ((9670, 9704), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['init_channel'], {}), '(init_channel)\n', (9690, 9704), True, 'import chainer.links as L\n'), ((12616, 12632), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (12624, 12632), True, 'import numpy as np\n'), ((17555, 17589), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['init_channel'], {}), '(init_channel)\n', (17575, 17589), True, 'import chainer.links as L\n'), ((807, 829), 'numpy.array', 'np.array', (['class_weight'], {}), '(class_weight)\n', (815, 829), True, 'import numpy as np\n'), ((6036, 6058), 'numpy.array', 'np.array', (['class_weight'], {}), '(class_weight)\n', (6044, 6058), True, 'import numpy as np\n'), ((12924, 12946), 'numpy.array', 'np.array', (['class_weight'], {}), '(class_weight)\n', (12932, 12946), True, 'import numpy as np\n'), ((718, 740), 'numpy.array', 'np.array', (['class_weight'], {}), '(class_weight)\n', (726, 740), True, 'import numpy as np\n'), ((5947, 5969), 'numpy.array', 'np.array', (['class_weight'], {}), '(class_weight)\n', (5955, 5969), True, 'import numpy as np\n'), ((12835, 12857), 'numpy.array', 'np.array', (['class_weight'], {}), '(class_weight)\n', (12843, 12857), True, 'import numpy as np\n')]
|
# pylint: disable-msg=E1101
"""
Wrapper to lowess and stl routines.
LOWESS:
Initial Fortran code available at:
http://netlib.bell-labs.com/netlib/go/lowess.f.gz
initial author: <NAME>, 1979.
Single-to-double precision conversion of the Fortran code by Pierre
Gerard-Marchant, 2007/03.
STL:
Initial Fortran code available at:
http://netlib.bell-labs.com/netlib/a/stl.gz
Initial Authors: <NAME>, <NAME>, <NAME>, and
<NAME>, 1990.
Single-to-double precision conversion of the Fortran code by Pierre
Gerard-Marchant, 2007/03.
LOESS:
Initial C/Fortran package available at
http://netlib.bell-labs.com/netlib/a/dloess.gz
Initial authors: <NAME>, <NAME> and Shyu
Adaptation to Pyrex/Python by <NAME>, 2007/03
:author: <NAME>
:contact: pierregm_at_uga_edu
:date: $Date$
:version: $Id$
"""
__author__ = "<NAME> ($Author$)"
__version__ = '1.0'
__revision__ = "$Revision$"
__date__ = '$Date$'
import numpy
from numpy import bool_, complex_, float_, int_, str_, object_
from numpy import array, recarray, empty, fromiter, logical_not
from . import _lowess, _stl, _loess
#####---------------------------------------------------------------------------
#--- --- FLOWESS ---
#####---------------------------------------------------------------------------
def flowess(x,y,span=0.5,nsteps=2,delta=0):
"""Performs a robust locally weighted regression (lowess).
Outputs a *3xN* array of fitted values, residuals and fit weights.
:Parameters:
x : ndarray
Abscissas of the points on the scatterplot; the values in X must be
ordered from smallest to largest.
y : ndarray
Ordinates of the points on the scatterplot.
span : Float *[0.5]*
Fraction of the total number of points used to compute each fitted value.
As f increases the smoothed values become smoother. Choosing f in the range
.2 to .8 usually results in a good fit.
nsteps : Integer *[2]*
Number of iterations in the robust fit. If nsteps=0, the nonrobust fit
is returned; setting nsteps=2 should serve most purposes.
delta : Integer *[0]*
Nonnegative parameter which may be used to save computations.
If N (the number of elements in x) is less than 100, set delta=0.0;
if N is greater than 100 you should find out how delta works by reading
the additional instructions section.
:Returns:
A recarray of smoothed values ('smooth'), residuals ('residuals') and local
robust weights ('weights').
Additional instructions
-----------------------
    From the original author:
DELTA can be used to save computations. Very roughly the
algorithm is this: on the initial fit and on each of the
NSTEPS iterations locally weighted regression fitted values
are computed at points in X which are spaced, roughly, DELTA
apart; then the fitted values at the remaining points are
computed using linear interpolation. The first locally
weighted regression (l.w.r.) computation is carried out at
X(1) and the last is carried out at X(N). Suppose the
l.w.r. computation is carried out at X(I). If X(I+1) is
greater than or equal to X(I)+DELTA, the next l.w.r.
computation is carried out at X(I+1). If X(I+1) is less
than X(I)+DELTA, the next l.w.r. computation is carried out
at the largest X(J) which is greater than or equal to X(I)
but is not greater than X(I)+DELTA. Then the fitted values
for X(K) between X(I) and X(J), if there are any, are
computed by linear interpolation of the fitted values at
X(I) and X(J). If N is less than 100 then DELTA can be set
to 0.0 since the computation time will not be too great.
For larger N it is typically not necessary to carry out the
l.w.r. computation for all points, so that much computation
time can be saved by taking DELTA to be greater than 0.0.
If DELTA = Range (X)/k then, if the values in X were
uniformly scattered over the range, the full l.w.r.
computation would be carried out at approximately k points.
Taking k to be 50 often works well.
Method
------
The fitted values are computed by using the nearest neighbor
routine and robust locally weighted regression of degree 1
with the tricube weight function. A few additional features
have been added. Suppose r is FN truncated to an integer.
Let h be the distance to the r-th nearest neighbor
from X[i]. All points within h of X[i] are used. Thus if
the r-th nearest neighbor is exactly the same distance as
other points, more than r points can possibly be used for
the smooth at X[i]. There are two cases where robust
locally weighted regression of degree 0 is actually used at
X[i]. One case occurs when h is 0.0. The second case
occurs when the weighted standard error of the X[i] with
respect to the weights w[j] is less than .001 times the
range of the X[i], where w[j] is the weight assigned to the
j-th point of X (the tricube weight times the robustness
weight) divided by the sum of all of the weights. Finally,
if the w[j] are all zero for the smooth at X[i], the fitted
value is taken to be Y[i].
References
----------
<NAME>. 1978. Visual and Computational Considerations in
Smoothing Scatterplots by Locally Weighted Regression. In
Computer Science and Statistics: Eleventh Annual Symposium on the
Interface, pages 96-100. Institute of Statistics, North Carolina
State University, Raleigh, North Carolina, 1978.
<NAME>, 1979. Robust Locally Weighted Regression and
Smoothing Scatterplots. Journal of the American Statistical
Association, 74:829-836, 1979.
<NAME>, 1981. LOWESS: A Program for Smoothing Scatterplots
by Robust Locally Weighted Regression. The American Statistician,
35:54.
"""
x = array(x, copy=False, subok=True, dtype=float_)
y = array(y, copy=False, subok=True, dtype=float_)
if x.size != y.size:
raise ValueError("Incompatible size between observations and response!")
    out_dtype = [('smooth', float_), ('weights', float_), ('residuals', float_)]
    return fromiter(zip(*_lowess.lowess(x, y, span, nsteps, delta)),
                    dtype=out_dtype).view(recarray)
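# --- Usage sketch (illustration only, not part of the original module) ---
# A minimal call to flowess on noisy data; x must be sorted ascending,
# which flowess itself does not do.
def _demo_flowess():
    x = numpy.sort(numpy.random.uniform(0., 10., 200))
    y = numpy.sin(x) + numpy.random.normal(0., 0.3, 200)
    fit = flowess(x, y, span=0.4, nsteps=2, delta=0.)
    return fit.smooth, fit.weights, fit.residuals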
class lowess:
"""An object for robust locally weighted regression.
:IVariables:
inputs : An object storing the inputs.
x : A (n,) ndarray of observations (sorted by increasing values).
y : A (n,) ndarray of responses (sorted by increasing x).
parameters : An object storing the control parameters.
span : Fraction of the total number of points used in the smooth.
nsteps : Number of iterations of the robust fit.
        delta : Parameter used to save computation time.
outputs : An object storing the outputs.
smooth : A (n,) ndarray of fitted values.
residuals : A (n,) ndarray of fitted residuals.
weights : A (n,) ndarray of robust weights.
Method
------
The fitted values are computed by using the nearest neighbor
routine and robust locally weighted regression of degree 1
with the tricube weight function. A few additional features
have been added. Suppose r is FN truncated to an integer.
Let h be the distance to the r-th nearest neighbor
from X[i]. All points within h of X[i] are used. Thus if
the r-th nearest neighbor is exactly the same distance as
other points, more than r points can possibly be used for
the smooth at X[i]. There are two cases where robust
locally weighted regression of degree 0 is actually used at
X[i]. One case occurs when h is 0.0. The second case
occurs when the weighted standard error of the X[i] with
respect to the weights w[j] is less than .001 times the
range of the X[i], where w[j] is the weight assigned to the
j-th point of X (the tricube weight times the robustness
weight) divided by the sum of all of the weights. Finally,
if the w[j] are all zero for the smooth at X[i], the fitted
value is taken to be Y[i].
References
----------
<NAME>. 1978. Visual and Computational Considerations in
Smoothing Scatterplots by Locally Weighted Regression. In
Computer Science and Statistics: Eleventh Annual Symposium on the
Interface, pages 96-100. Institute of Statistics, North Carolina
State University, Raleigh, North Carolina, 1978.
<NAME>, 1979. Robust Locally Weighted Regression and
Smoothing Scatterplots. Journal of the American Statistical
Association, 74:829-836, 1979.
<NAME>, 1981. LOWESS: A Program for Smoothing Scatterplots
by Robust Locally Weighted Regression. The American Statistician,
35:54.
"""
#............................................
class _inputs(object):
"""Inputs of the lowess fit.
:IVariables:
x : ndarray
A (n,) float ndarray of observations (sorted by increasing values).
y : ndarray
A (n,) float ndarray of responses (sorted by increasing x).
"""
def __init__(self, x, y):
x = array(x, copy=False, subok=True, dtype=float_).ravel()
y = array(y, copy=False, subok=True, dtype=float_).ravel()
if x.size != y.size:
msg = "Incompatible size between observations (%s) and response (%s)!"
raise ValueError(msg % (x.size, y.size))
idx = x.argsort()
self._x = x[idx]
self._y = y[idx]
#.....
x = property(fget=lambda self:self._x)
y = property(fget=lambda self:self._y)
#............................................
class _parameters(object):
"""Parameters of the lowess fit.
:IVariables:
span : float *[0.5]*
Fraction of the total number of points used to compute each fitted value.
As f increases the smoothed values become smoother. Choosing f in the range
.2 to .8 usually results in a good fit.
nsteps : integer *[2]*
Number of iterations in the robust fit. If nsteps=0, the nonrobust fit
is returned; setting nsteps=2 should serve most purposes.
delta : integer *[0]*
Nonnegative parameter which may be used to save computations.
If N (the number of observations) is less than 100, set delta=0.0;
if N is greater than 100 you should find out how delta works by reading
the additional instructions section.
"""
def __init__(self, span, nsteps, delta, caller):
self.activated = False
self._span = span
self._nsteps = nsteps
self._delta = delta
self._caller = caller
#.....
def _get_span(self):
"Gets the current span."
return self._span
def _set_span(self, span):
"Sets the current span, and refit if needed."
if span <= 0 or span > 1:
raise ValueError("span should be between zero and one!")
self._span = span
if self.activated:
self._caller.fit()
span = property(fget=_get_span, fset=_set_span)
#.....
def _get_nsteps(self):
"Gets the current number of iterations."
return self._nsteps
def _set_nsteps(self, nsteps):
"Sets the current number of iterations, and refit if needed."
if nsteps < 0:
raise ValueError("nsteps should be positive!")
self._nsteps = nsteps
if self.activated:
self._caller.fit()
nsteps = property(fget=_get_nsteps, fset=_set_nsteps)
#.....
def _get_delta(self):
"Gets the current delta."
return self._delta
def _set_delta(self, delta):
"Sets the current delta, and refit if needed."
if delta < 0:
raise ValueError("delta should be positive!")
self._delta = delta
if self.activated:
self._caller.fit()
delta = property(fget=_get_delta, fset=_set_delta)
#............................................
class _outputs(object):
"""Outputs of the lowess fit.
:IVariables:
fitted_values : ndarray
A (n,) ndarray of fitted values (readonly).
fitted_residuals : ndarray
A (n,) ndarray of residuals (readonly).
weights : ndarray
A (n,) ndarray of robust weights (readonly).
"""
def __init__(self, n):
self._fval = empty((n,), float_)
self._rw = empty((n,), float_)
self._fres = empty((n,), float_)
#.....
fitted_values = property(fget=lambda self:self._fval)
robust_weights = property(fget=lambda self:self._rw)
fitted_residuals = property(fget=lambda self:self._fres)
#............................................
def __init__(self, x, y, span=0.5, nsteps=2, delta=0):
"""
:Parameters:
x : ndarray
Abscissas of the points on the scatterplot; the values in X must be
ordered from smallest to largest.
y : ndarray
Ordinates of the points on the scatterplot.
span : Float *[0.5]*
Fraction of the total number of points used to compute each fitted value.
As span increases the smoothed values become smoother. Choosing span in
the range .2 to .8 usually results in a good fit.
nsteps : Integer *[2]*
Number of iterations in the robust fit. If nsteps=0, the nonrobust fit
is returned; setting nsteps=2 should serve most purposes.
delta : Integer *[0]*
Nonnegative parameter which may be used to save computations.
If N (the number of elements in x) is less than 100, set delta=0.0;
if N is greater than 100 you should find out how delta works by reading
the additional instructions section.
"""
# Chek the input data .........
# Initialize the attributes ...
self.inputs = lowess._inputs(x,y)
self.parameters = lowess._parameters(span, nsteps, delta, self)
self.outputs = lowess._outputs(self.inputs._x.size)
# Force a fit .................
self.fit()
#............................................
def fit(self):
"""Computes the lowess fit. Returns a lowess.outputs object."""
(x, y) = (self.inputs._x, self.inputs._y)
# Get the parameters .....
self.parameters.activated = True
f = self.parameters._span
nsteps = self.parameters._nsteps
delta = self.parameters._delta
(tmp_s, tmp_w, tmp_r) = _lowess.lowess(x, y, f, nsteps, delta)
# Process the outputs .....
#... set the values
self.outputs.fitted_values[:] = tmp_s.flat
self.outputs.robust_weights[:] = tmp_w.flat
self.outputs.fitted_residuals[:] = tmp_r.flat
# Clean up the mess .......
del(tmp_s, tmp_w, tmp_r)
return self.outputs
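# --- Usage sketch (illustration only, not part of the original module) ---
# Demonstrates the lowess object: fit once, then tighten the span, which
# triggers an automatic refit through the parameters property setter.
def _demo_lowess():
    x = numpy.linspace(0., 10., 200)
    y = numpy.sin(x) + numpy.random.normal(0., 0.3, x.size)
    fit = lowess(x, y, span=0.6, nsteps=2, delta=0.)
    before = fit.outputs.fitted_values.copy()
    fit.parameters.span = 0.3     # setter validates and refits immediately
    after = fit.outputs.fitted_values
    return before, after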
#####---------------------------------------------------------------------------
#--- --- STL ---
#####---------------------------------------------------------------------------
def stl(y, np=12, ns=7, nt=None, nl=None, isdeg=1, itdeg=1, ildeg=1,
nsjump=None, ntjump=None, nljump=None, robust=True, ni=None, no=None):
"""Decomposes a time series into seasonal and trend components.
:Parameters:
y : Numerical array
Time Series to be decomposed.
np : Integer *[12]*
Period of the seasonal component.
For example, if the time series is monthly with a yearly cycle, then
np=12.
ns : Integer *[7]*
Length of the seasonal smoother.
The value of ns should be an odd integer greater than or equal to 3.
A value ns>6 is recommended. As ns increases the values of the
seasonal component at a given point in the seasonal cycle (e.g., January
values of a monthly series with a yearly cycle) become smoother.
nt : Integer *[None]*
Length of the trend smoother.
The value of nt should be an odd integer greater than or equal to 3.
A value of nt between 1.5*np and 2*np is recommended. As nt increases,
the values of the trend component become smoother.
If nt is None, it is estimated as the smallest odd integer greater
or equal to (1.5*np)/[1-(1.5/ns)]
nl : Integer *[None]*
Length of the low-pass filter.
The value of nl should be an odd integer greater than or equal to 3.
The smallest odd integer greater than or equal to np is used by default.
isdeg : Integer *[1]*
Degree of locally-fitted polynomial in seasonal smoothing.
The value is 0 or 1.
itdeg : Integer *[1]*
Degree of locally-fitted polynomial in trend smoothing.
The value is 0 or 1.
ildeg : Integer *[1]*
Degree of locally-fitted polynomial in low-pass smoothing.
The value is 0 or 1.
nsjump : Integer *[None]*
Skipping value for seasonal smoothing.
The seasonal smoother skips ahead nsjump points and then linearly
interpolates in between. The value of nsjump should be a positive
integer; if nsjump=1, a seasonal smooth is calculated at all n points.
To make the procedure run faster, a reasonable choice for nsjump is
10%-20% of ns. By default, nsjump= 0.1*ns.
ntjump : Integer *[1]*
Skipping value for trend smoothing. If None, ntjump= 0.1*nt
nljump : Integer *[1]*
Skipping value for low-pass smoothing. If None, nljump= 0.1*nl
robust : Boolean *[True]*
Flag indicating whether robust fitting should be performed.
ni : Integer *[None]*
Number of loops for updating the seasonal and trend components.
The value of ni should be a positive integer.
See the next argument for advice on the choice of ni.
If ni is None, ni is set to 1 for robust fitting, to 5 otherwise.
no : Integer *[None]*
Number of iterations of robust fitting. The value of no should
be a nonnegative integer. If the data are well behaved without
outliers, then robustness iterations are not needed. In this case
set no=0, and set ni=2 to 5 depending on how much security
you want that the seasonal-trend looping converges.
If outliers are present then no=3 is a very secure value unless
the outliers are radical, in which case no=5 or even 10 might
be better. If no>0 then set ni to 1 or 2.
If None, then no is set to 15 for robust fitting, to 0 otherwise.
    :Returns:
A recarray of estimated trend values ('trend'), estimated seasonal
components ('seasonal'), local robust weights ('weights') and fit
residuals ('residuals').
The final local robust weights are all 1 if no=0.
Reference
---------
<NAME>, <NAME>, <NAME> and <NAME>.
1990. STL: A Seasonal-Trend Decomposition Procedure Based on LOESS
(with Discussion). Journal of Official Statistics, 6:3-73.
"""
ns = max(ns, 3)
if ns%2 == 0:
ns += 1
np = max(2, np)
if nt is None:
nt = max(int((1.5*np/(1.-1.5/ns))+0.5), 3)
if not nt%2:
nt += 1
if nl is None:
nl = max(3,np)
if not nl%2:
nl += 1
if nsjump is None:
nsjump = int(0.1*ns + 0.9)
if ntjump is None:
ntjump = int(0.1*nt + 0.9)
if nljump is None:
nljump = int(0.1*nl + 0.9)
if robust:
if ni is None:
ni = 1
if no is None:
no = 15
else:
if ni is None:
ni = 5
if no is None:
no = 0
if hasattr(y,'_mask') and numpy.any(y._mask):
raise ValueError("Missing values should first be filled !")
y = array(y, subok=True, copy=False).ravel()
(rw,szn,trn,work) = _stl.stl(y,np,ns,nt,nl,isdeg,itdeg,ildeg,
nsjump,ntjump,nljump,ni,no,)
dtyp = [('trend', float_), ('seasonal', float_),
('residuals', float_), ('weights', float_)]
result = fromiter(zip(trn,szn,y-trn-szn,rw), dtype=dtyp)
return result.view(recarray)
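# --- Usage sketch (illustration only, not part of the original module) ---
# Decomposes a synthetic monthly series (period np=12) into trend and
# seasonal parts; residuals and robust weights come back in the same record.
def _demo_stl():
    t = numpy.arange(120)
    y = (0.05 * t + numpy.sin(2 * numpy.pi * t / 12)
         + numpy.random.normal(0., 0.2, t.size))
    dec = stl(y, np=12, ns=7, robust=True)
    return dec.trend, dec.seasonal, dec.residuals, dec.weights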
#####---------------------------------------------------------------------------
#--- --- Loess ---
#####---------------------------------------------------------------------------
loess = _loess.loess
"""
loess : locally weighted estimates. Multi-variate version
:Keywords:
x : ndarray
A (n,p) ndarray of independent variables, with n the number of observations
and p the number of variables.
y : ndarray
A (n,) ndarray of observations
weights : ndarray
A (n,) ndarray of weights to be given to individual observations in the
sum of squared residuals that forms the local fitting criterion. If not
None, the weights should be non negative. If the different observations
have non-equal variances, the weights should be inversely proportional
to the variances.
By default, an unweighted fit is carried out (all the weights are one).
surface : string ["interpolate"]
Determines whether the fitted surface is computed directly at all points
("direct") or whether an interpolation method is used ("interpolate").
The default ("interpolate") is what most users should use unless special
circumstances warrant.
statistics : string ["approximate"]
Determines whether the statistical quantities are computed exactly
("exact") or approximately ("approximate"). "exact" should only be used
for testing the approximation in statistical development and is not meant
for routine usage because computation time can be horrendous.
trace_hat : string ["wait.to.decide"]
Determines how the trace of the hat matrix should be computed. The hat
matrix is used in the computation of the statistical quantities.
If "exact", an exact computation is done; this could be slow when the
number of observations n becomes large. If "wait.to.decide" is selected,
then a default is "exact" for n < 500 and "approximate" otherwise.
This option is only useful when the fitted surface is interpolated. If
surface is "exact", an exact computation is always done for the trace.
Setting trace_hat to "approximate" for large dataset will substantially
reduce the computation time.
iterations : integer
Number of iterations of the robust fitting method. If the family is
"gaussian", the number of iterations is set to 0.
cell : integer
Maximum cell size of the kd-tree. Suppose k = floor(n*cell*span),
where n is the number of observations, and span the smoothing parameter.
Then, a cell is further divided if the number of observations within it
is greater than or equal to k. This option is only used if the surface
is interpolated.
span : float [0.75]
Smoothing factor, as a fraction of the number of points to take into
account.
degree : integer [2]
Overall degree of locally-fitted polynomial. 1 is locally-linear
fitting and 2 is locally-quadratic fitting. Degree should be 2 at most.
normalize : boolean [True]
Determines whether the independent variables should be normalized.
If True, the normalization is performed by setting the 10% trimmed
standard deviation to one. If False, no normalization is carried out.
This option is only useful for more than one variable. For spatial
coordinates predictors or variables with a common scale, it should be
set to False.
family : string ["gaussian"]
Determines the assumed distribution of the errors. The values are
"gaussian" or "symmetric". If "gaussian" is selected, the fit is
performed with least-squares. If "symmetric" is selected, the fit
is performed robustly by redescending M-estimators.
parametric_flags : sequence [ [False]*p ]
Indicates which independent variables should be conditionally-parametric
(if there are two or more independent variables). The argument should
be a sequence of booleans, with the same size as the number of independent
variables, specified in the order of the predictor group ordered in x.
drop_square : sequence [ [False]* p]
When there are two or more independent variables and when a 2nd order
polynomial is used, "drop_square_flags" specifies those numeric predictors
whose squares should be dropped from the set of fitting variables.
The method of specification is the same as for parametric.
:Outputs:
fitted_values : ndarray
The (n,) ndarray of fitted values.
fitted_residuals : ndarray
The (n,) ndarray of fitted residuals (observations - fitted values).
enp : float
Equivalent number of parameters.
s : float
Estimate of the scale of residuals.
one_delta: float
Statistical parameter used in the computation of standard errors.
two_delta : float
Statistical parameter used in the computation of standard errors.
pseudovalues : ndarray
The (n,) ndarray of adjusted values of the response when robust estimation
is used.
trace_hat : float
Trace of the operator hat matrix.
diagonal :
Diagonal of the operator hat matrix.
robust : ndarray
The (n,) ndarray of robustness weights for robust fitting.
divisor : ndarray
The (p,) array of normalization divisors for numeric predictors.
newdata : ndarray
The (m,p) array of independent variables where the surface must be estimated.
values : ndarray
The (m,) ndarray of loess values evaluated at newdata
stderr : ndarray
The (m,) ndarray of the estimates of the standard error on the estimated
values.
residual_scale : float
Estimate of the scale of the residuals
df : integer
Degrees of freedom of the t-distribution used to compute pointwise
confidence intervals for the evaluated surface.
nest : integer
Number of new observations.
"""
loess_anova = _loess.anova
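# --- Usage sketch (illustration only, not part of the original module) ---
# A minimal bivariate fit following the keywords documented above.  The
# constructor and attribute names (loess(x, y, ...), .fit(),
# .outputs.fitted_values) are assumed from the docstring and the classic
# pyloess interface exposed by the _loess extension.
def _demo_loess():
    x = numpy.random.uniform(-1., 1., (100, 2))
    y = x[:, 0] ** 2 + x[:, 1] + numpy.random.normal(0., 0.1, 100)
    fit = loess(x, y, span=0.75, degree=2, family="gaussian")
    fit.fit()
    return fit.outputs.fitted_values, fit.outputs.fitted_residuals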
|
[
"numpy.any",
"numpy.empty",
"numpy.array"
] |
[((6139, 6185), 'numpy.array', 'array', (['x'], {'copy': '(False)', 'subok': '(True)', 'dtype': 'float_'}), '(x, copy=False, subok=True, dtype=float_)\n', (6144, 6185), False, 'from numpy import array, recarray, empty, fromiter, logical_not\n'), ((6194, 6240), 'numpy.array', 'array', (['y'], {'copy': '(False)', 'subok': '(True)', 'dtype': 'float_'}), '(y, copy=False, subok=True, dtype=float_)\n', (6199, 6240), False, 'from numpy import array, recarray, empty, fromiter, logical_not\n'), ((20086, 20104), 'numpy.any', 'numpy.any', (['y._mask'], {}), '(y._mask)\n', (20095, 20104), False, 'import numpy\n'), ((12875, 12894), 'numpy.empty', 'empty', (['(n,)', 'float_'], {}), '((n,), float_)\n', (12880, 12894), False, 'from numpy import array, recarray, empty, fromiter, logical_not\n'), ((12918, 12937), 'numpy.empty', 'empty', (['(n,)', 'float_'], {}), '((n,), float_)\n', (12923, 12937), False, 'from numpy import array, recarray, empty, fromiter, logical_not\n'), ((12963, 12982), 'numpy.empty', 'empty', (['(n,)', 'float_'], {}), '((n,), float_)\n', (12968, 12982), False, 'from numpy import array, recarray, empty, fromiter, logical_not\n'), ((20182, 20214), 'numpy.array', 'array', (['y'], {'subok': '(True)', 'copy': '(False)'}), '(y, subok=True, copy=False)\n', (20187, 20214), False, 'from numpy import array, recarray, empty, fromiter, logical_not\n'), ((9448, 9494), 'numpy.array', 'array', (['x'], {'copy': '(False)', 'subok': '(True)', 'dtype': 'float_'}), '(x, copy=False, subok=True, dtype=float_)\n', (9453, 9494), False, 'from numpy import array, recarray, empty, fromiter, logical_not\n'), ((9519, 9565), 'numpy.array', 'array', (['y'], {'copy': '(False)', 'subok': '(True)', 'dtype': 'float_'}), '(y, copy=False, subok=True, dtype=float_)\n', (9524, 9565), False, 'from numpy import array, recarray, empty, fromiter, logical_not\n')]
|
# Generated by Django 2.0.7 on 2018-10-25 16:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hivs_pp', '0011_rename_field_confidential_to_is_confidential_on_service'),
]
operations = [
migrations.AlterField(
model_name='delivery',
name='gender',
field=models.ForeignKey(blank=True, help_text="If client profile is set this can be overwritten based on the client's profile.", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pp_deliveries', to='hivs_utils.Gender', verbose_name='client gender'),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((408, 686), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""If client profile is set this can be overwritten based on the client\'s profile."""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""pp_deliveries"""', 'to': '"""hivs_utils.Gender"""', 'verbose_name': '"""client gender"""'}), '(blank=True, help_text=\n "If client profile is set this can be overwritten based on the client\'s profile."\n , null=True, on_delete=django.db.models.deletion.SET_NULL, related_name\n =\'pp_deliveries\', to=\'hivs_utils.Gender\', verbose_name=\'client gender\')\n', (425, 686), False, 'from django.db import migrations, models\n')]
|
from setuptools import setup, find_packages
version = '0.0.1'
setup(
name="alerta-beacon",
version=version,
description='Alerta plugin for Beacon',
url='https://github.com/ernadhalilovic/alerta-contrib',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
py_modules=['alerta_beacon'],
install_requires=[
'requests'
],
include_package_data=True,
zip_safe=True,
entry_points={
'alerta.plugins': [
'beacon = alerta_beacon:ServiceIntegration'
]
}
)
|
[
"setuptools.find_packages"
] |
[((303, 318), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (316, 318), False, 'from setuptools import setup, find_packages\n')]
|
from math import inf
from typing import Dict, Tuple, List
import torch
from torch import nn, Tensor
import pointneighbor as pn
from ..adj import get_adj_sft_spc, vec_sod
from .. import properties as p
def ravel1(idx: List[Tensor], siz: List[int]):
return pn.fn.ravel1(
torch.stack(idx), torch.tensor(siz, device=idx[0].device), dim=0)
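# Hedged sanity check (illustration only): assuming pn.fn.ravel1 flattens
# multi-indices row-major, ravel1([i, j], [A, B]) equals i * B + j -- the
# same arithmetic SlabCoordination.forward writes inline as
# `n_ * self.n + eij_`.
def _check_ravel1():
    i = torch.tensor([0, 1, 3])
    j = torch.tensor([2, 0, 1])
    assert (ravel1([i, j], [4, 3]) == i * 3 + j).all()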
def smap(a, c, d, rd):
return (1.0 + c * rd ** a) ** d
def smap_c(a, b):
return 2 ** (a / b) - 1
def smap_d(a, b):
return - b / a
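# Property check (illustration only): with c = 2**(a/b) - 1 and d = -b/a,
# the switching function passes through exactly 1/2 at rd = 1, i.e. at
# distance d0 + r0, since (1 + c)**d = (2**(a/b))**(-b/a) = 0.5.
def _check_smap_midpoint():
    a, b = torch.tensor(6.0), torch.tensor(12.0)
    val = smap(a, smap_c(a, b), smap_d(a, b), torch.tensor(1.0))
    assert torch.isclose(val, torch.tensor(0.5))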
class Smap(nn.Module):
a: Tensor
b: Tensor
c: Tensor
d: Tensor
r0: Tensor
d0: Tensor
def __init__(self, d0, r0, a, b):
super().__init__()
self.register_buffer('d0', d0)
self.register_buffer('r0', r0)
self.register_buffer('a', a)
self.register_buffer('b', b)
self.register_buffer('c', smap_c(self.a, self.b))
self.register_buffer('d', smap_d(self.a, self.b))
def forward(self, eij, dst):
rd = (dst - self.d0[eij]) / self.r0[eij]
ret = smap(self.a[eij], self.c[eij], self.d[eij], rd
).masked_fill(eij < 0, 0.0)
return ret.masked_fill(rd < 0, 1.0)
def rational_almost(rd, nn, nd):
num = 1 - rd.pow(nn)
den = 1 - rd.pow(nd)
return num / den
def rational_singularity(rd, nn, nd):
return 0.5 * nn * (2 + (nn - nd) * (rd - 1)) / nd
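# Hedged note (illustration only): rational_almost is 0/0 at rd == 1; its
# limit there is nn/nd, and rational_singularity is the first-order Taylor
# expansion of the ratio around rd = 1, which Rational.forward switches to
# inside the band |rd - 1| < eps.
def _check_rational_limit():
    nn_, nd_ = torch.tensor(6.0), torch.tensor(12.0)
    near = rational_almost(torch.tensor(1.001), nn_, nd_)
    at = rational_singularity(torch.tensor(1.0), nn_, nd_)
    assert torch.isclose(at, nn_ / nd_)
    assert torch.isclose(near, at, atol=1e-2)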
def _no_nan(x: Tensor):
return (x == x).all()
class Rational(nn.Module):
def __init__(self, d0, r0, nn, nd):
super().__init__()
self.register_buffer('d0', d0)
self.register_buffer('r0', r0)
self.register_buffer('nn', nn)
self.register_buffer('nd', nd)
self.eps = 1e-2
def forward(self, eij, dst):
assert _no_nan(dst)
rd = (dst - self.d0[eij]) / self.r0[eij]
nn = self.nn[eij]
nd = self.nd[eij]
rat_almost = rational_almost(rd, nn, nd)
rat_singul = rational_singularity(rd, nn, nd)
sing = (rd < 1 + self.eps) & (rd > 1 - self.eps)
rat = torch.where(sing, rat_singul, rat_almost)
ret = rat.masked_fill(rd < 0, 1.0)
assert _no_nan(ret)
ret.masked_fill_(eij < 0, 0.0)
return ret
def mollifier_inner(x, a, rc):
return (
1
- torch.exp(- a / (rc * rc - (rc - x) ** 2)) / torch.exp(-a / rc / rc)
)
def mollifier_outer(x: Tensor, rc: Tensor) -> Tensor:
return (x <= 0).to(x)
def mollifier(x: Tensor, a: Tensor, rc: Tensor):
mask = (x > 1e-6) & (x < rc - 1e-6)
outer = mollifier_outer(x, rc)
inner = mollifier_inner(x[mask], a[mask], rc[mask])
return outer.masked_scatter(mask, inner)
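# Editor's note (inferred from the expressions above): mollifier() assembles a smooth
# cutoff on (0, rc): mollifier_inner tends to 1 as x -> 0+ and to 0 as x -> rc-, while
# mollifier_outer contributes the constants 1 for x <= 0 and 0 elsewhere; the
# masked_scatter stitches the inner values into the open window (1e-6, rc - 1e-6).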
class Mollifier(nn.Module):
a: Tensor
d0: Tensor
rc: Tensor
def __init__(self, a, d0, rc):
super().__init__()
self.register_buffer('a', a)
self.register_buffer('d0', d0)
self.register_buffer('rc', rc)
def forward(self, eij, dst):
rc = self.rc[eij]
d0 = self.d0[eij]
a = self.a[eij]
return mollifier(dst - d0, a, rc - d0)
class Coordination(nn.Module):
elm: Tensor
coef: Tensor
def __init__(self, mod, numel: int, rc: float,
items: Dict[Tuple[int, int], Dict[str, float]]):
super().__init__()
elm = -torch.ones([numel, numel], dtype=torch.long)
dic: Dict[str, List[float]] = {}
n = 0
for n, ((i, j), prp) in enumerate(items.items()):
elm[i, j] = n
elm[j, i] = n
for key, val in prp.items():
if key not in dic:
dic[key] = []
dic[key].append(val)
self.register_buffer('elm', elm)
self.mod = mod(**{key: torch.tensor(val) for key, val in dic.items()})
self.rc = rc
self.n = n + 1
self.pbc = torch.full([self.n], inf)
def forward(self, inp: Dict[str, Tensor]):
adj = get_adj_sft_spc(inp, p.coo, self.rc)
n, i, j = pn.coo2_n_i_j(adj)
ei = inp[p.elm][n, i]
ej = inp[p.elm][n, j]
eij = self.elm[ei, ej]
adapt = eij >= 0
_, sod = vec_sod(inp, adj)
dis: Tensor = sod[adapt].sqrt()
eij = eij[adapt]
coords = self.mod(eij, dis)
n_bch, _ = inp[p.elm].size()
idx = ravel1([n[adapt], eij], [n_bch, self.n])
coord = torch.zeros([n_bch * self.n],
dtype=sod.dtype, device=sod.device)
coord.index_add_(0, idx, coords)
ret = coord.view([n_bch, self.n]) / 2
return ret
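# Editor's note (assumption): the trailing division by 2 compensates for the coo
# adjacency listing each neighbour pair in both (i, j) and (j, i) order -- elm is
# filled symmetrically in __init__ -- so every physical bond would otherwise be
# counted twice in its batch/pair-type bucket.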
class SlabCoordination(nn.Module):
elm: Tensor
coef: Tensor
wz: Tensor
wr: Tensor
pbc: Tensor
def __init__(self, mod, numel: int, rc: float,
items: Dict[
Tuple[int, int],
Tuple[List[float], Dict[str, float]]], dim: int = 2):
super().__init__()
elm = -torch.ones([numel, numel], dtype=torch.long)
dic: Dict[str, List[float]] = {}
wz = []
wr = []
n = 0
for n, ((i, j), (coef, prp)) in enumerate(items.items()):
elm[i, j] = n
for key, val in prp.items():
if key not in dic:
dic[key] = []
dic[key].append(val)
wz.append(coef[0])
wr.append(coef[1])
self.register_buffer('elm', elm)
self.mod = mod(**{key: torch.tensor(val) for key, val in dic.items()})
self.rc = rc
self.n = n + 1
self.register_buffer('pbc', torch.full([self.n], inf))
self.register_buffer('wz', 0.5 / torch.tensor(wz).pow(2))
self.register_buffer('wr', 0.5 / torch.tensor(wr).pow(2))
self.dim = dim
def forward(self, inp: Dict[str, Tensor]):
num_bch = inp[p.elm].size(0)
adj = get_adj_sft_spc(inp, p.coo, self.rc)
n, i, j = pn.coo2_n_i_j(adj)
ei = inp[p.elm][n, i]
ej = inp[p.elm][n, j]
eij = self.elm[ei, ej]
adapt = eij >= 0
vec, sod = vec_sod(inp, adj)
n, i, j = n[adapt], i[adapt], j[adapt]
vec, sod = vec[adapt], sod[adapt]
ei, ej, eij = ei[adapt], ej[adapt], eij[adapt]
zij = -vec[:, self.dim]
wij = torch.exp(-self.wz[eij] * zij) * torch.exp(-self.wr[eij] * sod)
i_max = i.max() + 5
ni = n * i_max + i
unique, idx, cou = torch.unique_consecutive(
ni, return_inverse=True, return_counts=True)
cum = pn.fn.cumsum_from_zero(cou)
den = torch.zeros_like(unique, dtype=wij.dtype)
den.index_add_(0, idx, wij)
num = torch.zeros_like(unique, dtype=wij.dtype)
num.index_add_(0, idx, wij * zij)
zij_ = num / den
eij_ = eij[cum]
n_ = n[cum]
cij_ = self.mod(eij_, zij_)
idx_ = n_ * self.n + eij_
ret = torch.zeros([num_bch * self.n],
device=n.device, dtype=cij_.dtype)
ret.index_add_(0, idx_, cij_)
return ret.view([num_bch, self.n])
|
[
"torch.ones",
"torch.stack",
"torch.where",
"torch.zeros_like",
"pointneighbor.coo2_n_i_j",
"torch.full",
"torch.exp",
"pointneighbor.fn.cumsum_from_zero",
"torch.unique_consecutive",
"torch.zeros",
"torch.tensor"
] |
[((283, 299), 'torch.stack', 'torch.stack', (['idx'], {}), '(idx)\n', (294, 299), False, 'import torch\n'), ((301, 340), 'torch.tensor', 'torch.tensor', (['siz'], {'device': 'idx[0].device'}), '(siz, device=idx[0].device)\n', (313, 340), False, 'import torch\n'), ((2042, 2083), 'torch.where', 'torch.where', (['sing', 'rat_singul', 'rat_almost'], {}), '(sing, rat_singul, rat_almost)\n', (2053, 2083), False, 'import torch\n'), ((3838, 3863), 'torch.full', 'torch.full', (['[self.n]', 'inf'], {}), '([self.n], inf)\n', (3848, 3863), False, 'import torch\n'), ((3981, 3999), 'pointneighbor.coo2_n_i_j', 'pn.coo2_n_i_j', (['adj'], {}), '(adj)\n', (3994, 3999), True, 'import pointneighbor as pn\n'), ((4360, 4425), 'torch.zeros', 'torch.zeros', (['[n_bch * self.n]'], {'dtype': 'sod.dtype', 'device': 'sod.device'}), '([n_bch * self.n], dtype=sod.dtype, device=sod.device)\n', (4371, 4425), False, 'import torch\n'), ((5882, 5900), 'pointneighbor.coo2_n_i_j', 'pn.coo2_n_i_j', (['adj'], {}), '(adj)\n', (5895, 5900), True, 'import pointneighbor as pn\n'), ((6390, 6459), 'torch.unique_consecutive', 'torch.unique_consecutive', (['ni'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(ni, return_inverse=True, return_counts=True)\n', (6414, 6459), False, 'import torch\n'), ((6487, 6514), 'pointneighbor.fn.cumsum_from_zero', 'pn.fn.cumsum_from_zero', (['cou'], {}), '(cou)\n', (6509, 6514), True, 'import pointneighbor as pn\n'), ((6529, 6570), 'torch.zeros_like', 'torch.zeros_like', (['unique'], {'dtype': 'wij.dtype'}), '(unique, dtype=wij.dtype)\n', (6545, 6570), False, 'import torch\n'), ((6621, 6662), 'torch.zeros_like', 'torch.zeros_like', (['unique'], {'dtype': 'wij.dtype'}), '(unique, dtype=wij.dtype)\n', (6637, 6662), False, 'import torch\n'), ((6858, 6924), 'torch.zeros', 'torch.zeros', (['[num_bch * self.n]'], {'device': 'n.device', 'dtype': 'cij_.dtype'}), '([num_bch * self.n], device=n.device, dtype=cij_.dtype)\n', (6869, 6924), False, 'import torch\n'), ((2279, 2320), 'torch.exp', 'torch.exp', (['(-a / (rc * rc - (rc - x) ** 2))'], {}), '(-a / (rc * rc - (rc - x) ** 2))\n', (2288, 2320), False, 'import torch\n'), ((2324, 2347), 'torch.exp', 'torch.exp', (['(-a / rc / rc)'], {}), '(-a / rc / rc)\n', (2333, 2347), False, 'import torch\n'), ((3298, 3342), 'torch.ones', 'torch.ones', (['[numel, numel]'], {'dtype': 'torch.long'}), '([numel, numel], dtype=torch.long)\n', (3308, 3342), False, 'import torch\n'), ((4913, 4957), 'torch.ones', 'torch.ones', (['[numel, numel]'], {'dtype': 'torch.long'}), '([numel, numel], dtype=torch.long)\n', (4923, 4957), False, 'import torch\n'), ((5546, 5571), 'torch.full', 'torch.full', (['[self.n]', 'inf'], {}), '([self.n], inf)\n', (5556, 5571), False, 'import torch\n'), ((6244, 6274), 'torch.exp', 'torch.exp', (['(-self.wz[eij] * zij)'], {}), '(-self.wz[eij] * zij)\n', (6253, 6274), False, 'import torch\n'), ((6277, 6307), 'torch.exp', 'torch.exp', (['(-self.wr[eij] * sod)'], {}), '(-self.wr[eij] * sod)\n', (6286, 6307), False, 'import torch\n'), ((3727, 3744), 'torch.tensor', 'torch.tensor', (['val'], {}), '(val)\n', (3739, 3744), False, 'import torch\n'), ((5418, 5435), 'torch.tensor', 'torch.tensor', (['val'], {}), '(val)\n', (5430, 5435), False, 'import torch\n'), ((5614, 5630), 'torch.tensor', 'torch.tensor', (['wz'], {}), '(wz)\n', (5626, 5630), False, 'import torch\n'), ((5680, 5696), 'torch.tensor', 'torch.tensor', (['wr'], {}), '(wr)\n', (5692, 5696), False, 'import torch\n')]
|
import argparse
import json
def write_submission_output(dialog_turn_id_data, retrieval_scores, output_submission_format_path):
"""
    Write the model scores in the retrieval submission format and dump them to a JSON file.
"""
    submission_format_output = []
    for dialog in dialog_turn_id_data:
        _dialog = []
        for turn_id in range(len(dialog['turn_info'])):
            _turn = dialog['turn_info'][turn_id]
            assert turn_id == _turn['turn_id']
            if turn_id == dialog["final_turn_id"]:
                _flat_id = _turn['flat_id']
                start_index = _flat_id[0]
                end_index = _flat_id[1]
                round_scores = retrieval_scores[start_index:end_index]
                _dialog.append({"turn_id": turn_id, "scores": round_scores})
        submission_format_output.append({"dialog_id": dialog["dialog_id"],
                                         "candidate_scores": _dialog})
with open(output_submission_format_path, "w") as f_retrieval_submission_format:
json.dump(submission_format_output, f_retrieval_submission_format)
def main(args):
print("Reading: {}".format(args["dialog_turn_id_json_path"]))
with open(args["dialog_turn_id_json_path"], "r") as file_id:
dialog_turn_id_data = json.load(file_id)
print("Reading: {}".format(args["model_flat_score_path"]))
with open(args["model_flat_score_path"], "r") as f_score:
retrieval_scores = f_score.readlines()
retrieval_scores = [-float(x.strip()) for x in retrieval_scores]
write_submission_output(
dialog_turn_id_data, retrieval_scores, args["output_submission_format_path"]
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Response Retrieval Evaluation")
parser.add_argument(
"--dialog_turn_id_json_path",
default="data/furniture_train_retrieval_candidates.json",
help="Data with retrieval candidates, gt",
)
parser.add_argument(
"--model_flat_score_path",
default=None,
help="Candidate scores generated by the model",
)
parser.add_argument(
"--output_submission_format_path",
default=None,
help="generate output_submission_format",
)
try:
parsed_args = vars(parser.parse_args())
    except IOError as msg:
parser.error(str(msg))
main(parsed_args)
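# Example invocation (hypothetical file names, for illustration only):
#   python write_submission.py \
#       --dialog_turn_id_json_path data/furniture_train_retrieval_candidates.json \
#       --model_flat_score_path model_scores.txt \
#       --output_submission_format_path submission.json
# The score file is expected to hold one score per line; scores are negated on load
# and regrouped into per-dialog candidate slices at each dialog's final turn.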
|
[
"json.dump",
"json.load",
"argparse.ArgumentParser"
] |
[((1685, 1753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Response Retrieval Evaluation"""'}), "(description='Response Retrieval Evaluation')\n", (1708, 1753), False, 'import argparse\n'), ((1014, 1080), 'json.dump', 'json.dump', (['submission_format_output', 'f_retrieval_submission_format'], {}), '(submission_format_output, f_retrieval_submission_format)\n', (1023, 1080), False, 'import json\n'), ((1259, 1277), 'json.load', 'json.load', (['file_id'], {}), '(file_id)\n', (1268, 1277), False, 'import json\n')]
|
""" FrankenStrings Service """
import binascii
import hashlib
import mmap
import os
import re
import traceback
from typing import Dict, Iterable, List, Optional, Set, Tuple
import magic
import pefile
from assemblyline.common.net import is_valid_domain, is_valid_email
from assemblyline.common.str_utils import safe_str
from assemblyline_v4_service.common.balbuzard.bbcrack import bbcrack
from assemblyline_v4_service.common.balbuzard.patterns import PatternMatch
from assemblyline_v4_service.common.base import ServiceBase
from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic
from assemblyline_v4_service.common.request import ServiceRequest
from assemblyline_v4_service.common.task import MaxExtractedExceeded
from frankenstrings.flarefloss import strings
class FrankenStrings(ServiceBase):
""" FrankenStrings Service """
FILETYPES = [
'application',
'document',
'exec',
'image',
'Microsoft',
'text',
]
HEXENC_STRINGS = [
b'\\u',
b'%u',
b'\\x',
b'0x',
b'&H', # hex notation in VBA
]
BBCRACK_TO_TAG = {
'NET_FULL_URI': 'network.static.uri',
}
def __init__(self, config: Optional[Dict] = None) -> None:
super().__init__(config)
# Unless patterns are added/adjusted to patterns.py, the following should remain at 7:
self.st_min_length = 7
self.sample_type = ''
self.excess_extracted = 0
def start(self) -> None:
self.log.debug("FrankenStrings service started")
# --- Support Functions ------------------------------------------------------------------------------------------------
def extract_file(self, request, data, file_name, description):
""" Adds data to a request as an extracted file
request: the request
data: the file data
        file_name: the name to give the file
        description: the description of the file to give the request
"""
if self.excess_extracted:
            # Already over maximum number of extracted files
self.excess_extracted += 1
return
try:
# If for some reason the directory doesn't exist, create it
if not os.path.exists(self.working_directory):
os.makedirs(self.working_directory)
file_path = os.path.join(self.working_directory, file_name)
with open(file_path, 'wb') as f:
f.write(data)
request.add_extracted(file_path, file_name, description)
except MaxExtractedExceeded:
self.excess_extracted += 1
except Exception:
self.log.error(f"Error extracting {file_name} from {request.sha256}: {traceback.format_exc(limit=2)}")
def ioc_to_tag(self, data: bytes, patterns: PatternMatch, res: Optional[ResultSection] = None,
taglist: bool = False, check_length: bool = False, strs_max_size: int = 0,
st_max_length: int = 300) -> Dict[str, Set[str]]:
"""Searches data for patterns and adds as AL tag to result output.
Args:
data: Data to be searched.
patterns: FrankenStrings Patterns() object.
res: AL result.
taglist: True if tag list should be returned.
check_length: True if length of string should be compared to st_max_length.
strs_max_size: Maximum size of strings list. If greater then only network IOCs will be searched.
st_max_length: Maximum length of a string from data that can be searched.
Returns: tag list as dictionary (always empty if taglist is false)
"""
tags: Dict[str, Set[str]] = {}
min_length = self.st_min_length if check_length else 4
strs: Set[bytes] = set()
just_network = False
# Flare-FLOSS ascii string extract
for ast in strings.extract_ascii_strings(data, n=min_length):
if not check_length or len(ast.s) < st_max_length:
strs.add(ast.s)
# Flare-FLOSS unicode string extract
for ust in strings.extract_unicode_strings(data, n=min_length):
if not check_length or len(ust.s) < st_max_length:
strs.add(ust.s)
if check_length and len(strs) > strs_max_size:
just_network = True
for s in strs:
st_value: Dict[str, Iterable[bytes]] = patterns.ioc_match(s, bogon_ip=True, just_network=just_network)
for ty, val in st_value.items():
if taglist and ty not in tags:
tags[ty] = set()
for v in val:
if ty == 'network.static.domain' and not is_valid_domain(v.decode('utf-8')):
continue
if ty == 'network.email.address' and not is_valid_email(v.decode('utf-8')):
continue
if len(v) < 1001:
if res:
res.add_tag(ty, safe_str(v))
if taglist:
tags[ty].add(safe_str(v))
return tags
@staticmethod
def decode_bu(data: bytes, size: int) -> bytes:
""" Convert ascii to hex.
Args:
data: Ascii string to be converted.
size: Unit size.
Returns:
Decoded data.
"""
decoded = b''
if size == 2:
while data != b'':
decoded += binascii.a2b_hex(data[2:4])
data = data[4:]
if size == 4:
while data != b'':
decoded += binascii.a2b_hex(data[4:6]) + binascii.a2b_hex(data[2:4])
data = data[6:]
if size == 8:
while data != b'':
decoded += binascii.a2b_hex(data[8:10]) + binascii.a2b_hex(data[6:8]) + \
binascii.a2b_hex(data[4:6]) + binascii.a2b_hex(data[2:4])
data = data[10:]
if size == 16:
while data != b'':
decoded += binascii.a2b_hex(data[16:18]) + binascii.a2b_hex(data[14:16]) + \
binascii.a2b_hex(data[12:14]) + binascii.a2b_hex(data[10:12]) + \
binascii.a2b_hex(data[8:10]) + binascii.a2b_hex(data[6:8]) + \
binascii.a2b_hex(data[4:6]) + binascii.a2b_hex(data[2:4])
data = data[18:]
return decoded
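    # Worked example (editor's sketch, not in the original source): with size=4 the
    # input is consumed in 6-character chunks -- a 2-character prefix such as b'\u'
    # followed by four hex digits -- and the two hex bytes are emitted in swapped
    # (little-endian) order, e.g. decode_bu(b'\\u4142', size=4) == b'BA'.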
@staticmethod
def unicode_longest_string(listdata: List[bytes]) -> bytes:
"""Compare sizes of unicode strings.
Args:
listdata: A list of binary strings
Returns:
Result of test: Do all strings match in length?
If True, returns all strings combined.
If False, returns longest string greater than 50 bytes.
If no string longer than 50 bytes, returns empty string.
"""
maxstr = max(listdata, key=len)
newstr = b""
if all(len(i) == len(maxstr) for i in listdata):
for i in listdata:
newstr += i
return newstr
if len(maxstr) > 50:
return maxstr
return newstr
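    # Editor's example (illustrative only): unicode_longest_string([b'ab', b'cd'])
    # returns b'abcd' because all candidates share a length and are concatenated;
    # with unequal lengths the longest candidate is returned only when it exceeds
    # 50 bytes, and b'' otherwise.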
def decode_encoded_udata(self, request: ServiceRequest, encoding: bytes,
data: bytes, decoded_res: Dict[str, Tuple[bytes, bytes]]) -> List[str]:
"""Compare sizes of unicode strings. Some code taken from bas64dump.py @ https://DidierStevens.com.
Args:
request: AL request object (for submitting extracted files to AL when needed).
encoding: Encoding string used (i.e. '0x').
data: Data to be examined.
Returns:
List of hashes of extracted files submitted to AL and list of decoded unicode data information.
"""
decoded_list: List[Tuple[bytes, bytes]] = []
dropped: List[str] = []
qword = re.compile(rb'(?:'+re.escape(encoding)+b'[A-Fa-f0-9]{16})+')
dword = re.compile(rb'(?:'+re.escape(encoding)+b'[A-Fa-f0-9]{8})+')
word = re.compile(rb'(?:'+re.escape(encoding)+b'[A-Fa-f0-9]{4})+')
byte = re.compile(rb'(?:'+re.escape(encoding)+b'[A-Fa-f0-9]{2})+')
qbu = re.findall(qword, data)
if qbu:
qlstr = self.unicode_longest_string(qbu)
if len(qlstr) > 50:
decoded_list.append((self.decode_bu(qlstr, size=16), qlstr[:200]))
dbu = re.findall(dword, data)
if dbu:
dlstr = self.unicode_longest_string(dbu)
if len(dlstr) > 50:
decoded_list.append((self.decode_bu(dlstr, size=8), dlstr[:200]))
wbu = re.findall(word, data)
if wbu:
wlstr = self.unicode_longest_string(wbu)
if len(wlstr) > 50:
decoded_list.append((self.decode_bu(wlstr, size=4), wlstr[:200]))
bbu = re.findall(byte, data)
if bbu:
blstr = self.unicode_longest_string(bbu)
if len(blstr) > 50:
decoded_list.append((self.decode_bu(blstr, size=2), blstr[:200]))
filtered_list = filter(lambda x: len(x[0]) > 30, decoded_list)
for decoded in filtered_list:
uniq_char = set(decoded[0])
sha256hash = hashlib.sha256(decoded[0]).hexdigest()
if len(decoded[0]) >= 500:
if len(uniq_char) > 20:
dropped.append(sha256hash)
udata_file_name = f"{sha256hash[0:10]}_enchex_{safe_str(encoding)}_decoded"
self.extract_file(request, decoded[0], udata_file_name,
"Extracted unicode file during FrankenStrings analysis")
elif len(uniq_char) > 6:
decoded_res[sha256hash] = decoded
return dropped
# Base64 Parse
def b64(self, request: ServiceRequest, b64_string: bytes,
patterns: PatternMatch) -> Tuple[Dict[str, Tuple[int, bytes, bytes, bytes]], Dict[str, Set[str]]]:
"""Decode B64 data.
Args:
request: AL request object (for submitting extracted files to AL when needed).
b64_string: Possible base64 string.
patterns: FrankenStrings patterns object.
Returns:
Result information.
"""
results: Dict[str, Tuple[int, bytes, bytes, bytes]] = {}
pat: Dict[str, Set[str]] = {}
if len(b64_string) >= 16 and len(b64_string) % 4 == 0:
# noinspection PyBroadException
try:
base64data = binascii.a2b_base64(b64_string)
sha256hash = hashlib.sha256(base64data).hexdigest()
# Search for embedded files of interest
if 200 < len(base64data) < 10000000:
m = magic.Magic(mime=True)
mag = magic.Magic()
ftype = m.from_buffer(base64data)
mag_ftype = mag.from_buffer(base64data)
for file_type in self.FILETYPES:
if (file_type in ftype and 'octet-stream' not in ftype) or file_type in mag_ftype:
b64_file_name = f"{sha256hash[0:10]}_b64_decoded"
self.extract_file(request, base64data, b64_file_name,
"Extracted b64 file during FrankenStrings analysis")
results[sha256hash] = (len(b64_string), b64_string[0:50],
b"[Possible file contents. See extracted files.]", b"")
return results, pat
# See if any IOCs in decoded data
pat = self.ioc_to_tag(base64data, patterns, taglist=True)
# Filter printable characters then put in results
asc_b64 = bytes(i for i in base64data if 31 < i < 127)
if len(asc_b64) > 0:
# If patterns exists, report. If not, report only if string looks interesting
if len(pat) > 0:
results[sha256hash] = (len(b64_string), b64_string[0:50], asc_b64, base64data)
# PDF and Office documents have too many FPS
elif not self.sample_type.startswith('document/office') \
and not self.sample_type.startswith('document/pdf'):
# If data has length greater than 50, and unique character to length ratio is high
uniq_char = set(asc_b64)
if len(uniq_char) > 12 and len(re.sub(b"[^A-Za-z0-9]+", b"", asc_b64)) > 50:
results[sha256hash] = (len(b64_string), b64_string[0:50], asc_b64, base64data)
# If not all printable characters but IOCs discovered, extract to file
elif len(pat) > 0:
b64_file_name = f"{sha256hash[0:10]}_b64_decoded"
self.extract_file(request, base64data, b64_file_name,
"Extracted b64 file during FrankenStrings analysis")
results[sha256hash] = (len(b64_string), b64_string[0:50],
b"[IOCs discovered with other non-printable data. "
b"See extracted files.]", b"")
except Exception:
return results, pat
return results, pat
def unhexlify_ascii(self, request: ServiceRequest, data: bytes, filetype: str,
patterns: PatternMatch) -> Tuple[bool, Dict[str, Set[str]], Dict[str, Tuple[bytes, bytes, str]]]:
"""Plain ascii hex conversion.
Args:
request: AL request object (for submitting extracted files to AL when needed).
data: Data to examine.
filetype: request file type.
patterns: Frankenstrings patterns object.
Returns:
If a file was extracted, tags, and xor results
"""
tags: Dict[str, Set[str]] = {}
xor: Dict[str, Tuple[bytes, bytes, str]] = {}
if len(data) % 2 != 0:
data = data[:-1]
# noinspection PyBroadException
try:
binstr = binascii.unhexlify(data)
except Exception:
return False, tags, xor
# If data has less than 7 uniq chars return
uniq_char = set(binstr)
if len(uniq_char) < 7:
return False, tags, xor
# If data is greater than 500 bytes create extracted file
if len(binstr) > 500:
if len(uniq_char) < 20:
return False, tags, xor
sha256hash = hashlib.sha256(binstr).hexdigest()
asciihex_file_name = f"{sha256hash[0:10]}_asciihex_decoded"
self.extract_file(request, binstr, asciihex_file_name,
"Extracted ascii-hex file during FrankenStrings analysis")
return True, tags, xor
# Else look for patterns
tags = self.ioc_to_tag(binstr, patterns, taglist=True, st_max_length=1000)
if tags:
return False, tags, xor
# Else look for small XOR encoded strings in code files
if 20 < len(binstr) <= 128 and filetype.startswith('code/'):
xresult: List[Tuple[str, str, bytes]] = bbcrack(binstr, level='small_string')
if len(xresult) > 0:
for transform, regex, match in xresult:
if regex.startswith('EXE_'):
# noinspection PyTypeChecker
xor['file.string.blacklisted'] = (data, match, transform)
else:
# noinspection PyTypeChecker
xor[regex] = (data, match, transform)
return False, tags, xor
return False, tags, xor
# Executable extraction
def pe_dump(self, request: ServiceRequest, temp_file: str, offset: int, file_string: str, msg: str,
fail_on_except: bool = False) -> bool:
"""Use PEFile application to find the end of the file (biggest section length wins).
Args:
request: AL request object (for submitting extracted PE AL).
temp_file: Sample file with possible embedded PE.
offset: Offset of temp_file where PE file begins.
file_string: String appended to extracted PE file name.
msg: File extraction message
fail_on_except: When False, if PEFile fails, extract from offset all the way to the end of the initial file.
Returns:
True if PE extracted.
"""
pe_extract = None
mm = None
try:
with open(temp_file, "rb") as f:
mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
pedata = mm[offset:]
# noinspection PyBroadException
try:
peinfo = pefile.PE(data=pedata)
lsize = 0
for section in peinfo.sections:
size = section.PointerToRawData + section.SizeOfRawData
if size > lsize:
lsize = size
if lsize > 0:
pe_extract = pedata[0:lsize]
else:
if not fail_on_except:
pe_extract = pedata
except Exception:
if not fail_on_except:
pe_extract = pedata
if pe_extract:
pe_file_name = f"{hashlib.sha256(pe_extract).hexdigest()[0:10]}_{file_string}"
self.extract_file(request, pe_extract, pe_file_name, msg)
except Exception:
self.log.warning("Dumping PE file failed for {request.sha256}")
finally:
# noinspection PyBroadException
try:
if mm is not None:
mm.close()
except Exception:
pass
return bool(pe_extract)
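    # Editor's note (inferred from the code above): the carved size is the maximum of
    # PointerToRawData + SizeOfRawData over the parsed sections, i.e. the file offset
    # just past the last section on disk; if pefile cannot parse the header and
    # fail_on_except is False, everything from the offset to end of file is kept.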
# --- Results methods ------------------------------------------------------------------------------------------------
def ascii_results(self, request: ServiceRequest, patterns: PatternMatch,
max_length: int, st_max_size: int) -> Optional[ResultSection]:
"""
Finds and reports ASCII & Unicode IOC Strings.
Args:
request: AL request object with result section
patterns: PatternMatch object
Returns:
The created result section (with request.result as its parent)
"""
# Check the maximum length except for code files
chkl = not self.sample_type.startswith('code')
ascii_res = (ResultSection("The following IOC were found in plain text in the file:",
body_format=BODY_FORMAT.MEMORY_DUMP))
file_plainstr_iocs = self.ioc_to_tag(request.file_contents, patterns, ascii_res, taglist=True,
check_length=chkl, strs_max_size=st_max_size,
st_max_length=max_length)
if file_plainstr_iocs:
request.result.add_section(ascii_res)
for k, l in sorted(file_plainstr_iocs.items()):
for i in sorted(l):
ascii_res.add_line(f"Found {k.upper().replace('.', ' ')} string: {safe_str(i)}")
return ascii_res
return None
def embedded_pe_results(self, request: ServiceRequest) -> Optional[ResultSection]:
"""
Finds, extracts and reports embedded executables
Args:
request: AL request object with result section
Returns:
The result section (with request.result as its parent) if one is created
"""
# PE Strings
pat_exedos = rb'(?s)This program cannot be run in DOS mode'
pat_exeheader = rb'(?s)MZ.{32,1024}PE\000\000.+'
embedded_pe = False
for pos_exe in re.findall(pat_exeheader, request.file_contents[1:]):
if re.search(pat_exedos, pos_exe):
pe_sha256 = hashlib.sha256(pos_exe).hexdigest()
temp_file = os.path.join(self.working_directory, "EXE_TEMP_{}".format(pe_sha256))
with open(temp_file, 'wb') as pedata:
pedata.write(pos_exe)
embedded_pe = embedded_pe or self.pe_dump(request, temp_file, offset=0, file_string="embed_pe",
msg="PE header strings discovered in sample",
fail_on_except=True)
# Report embedded PEs if any are found
if embedded_pe:
return ResultSection("Embedded PE header discovered in sample. See extracted files.",
heuristic=Heuristic(3), parent=request.result)
return None
def base64_results(self, request: ServiceRequest, patterns: PatternMatch) -> Optional[ResultSection]:
"""
Finds and reports Base64 encoded text
Args:
request: AL request object with result section
patterns: PatternMatch object
Returns:
The result section (with request.result as its parent) if one is created
"""
b64_al_results = []
b64_matches = set()
# Base64 characters with possible space, newline characters and HTML line feeds (&#(XA|10);)
for b64_match in re.findall(b'([\x20]{0,2}(?:[A-Za-z0-9+/]{10,}={0,2}'
b'(?:&#[x1][A0];)?[\r]?[\n]?){2,})', request.file_contents):
            b64_string = b64_match.replace(b'\n', b'').replace(b'\r', b'').replace(b' ', b'')\
                .replace(b'&#xA;', b'').replace(b'&#10;', b'')
if b64_string in b64_matches:
continue
b64_matches.add(b64_string)
uniq_char = set(b64_string)
if len(uniq_char) > 6:
b64result, tags = self.b64(request, b64_string, patterns)
if len(b64result) > 0:
b64_al_results.append((b64result, tags))
# UTF-16 strings
for ust in strings.extract_unicode_strings(request.file_contents, n=self.st_min_length):
for b64_match in re.findall(b'([\x20]{0,2}(?:[A-Za-z0-9+/]{10,}={0,2}[\r]?[\n]?){2,})', ust.s):
b64_string = b64_match.replace(b'\n', b'').replace(b'\r', b'').replace(b' ', b'')
uniq_char = set(b64_string)
if len(uniq_char) > 6:
b64result, tags = self.b64(request, b64_string, patterns)
if len(b64result) > 0:
b64_al_results.append((b64result, tags))
# Report B64 Results
if len(b64_al_results) > 0:
b64_ascii_content: List[bytes] = []
b64_res = (ResultSection("Base64 Strings:", heuristic=Heuristic(1), parent=request.result))
b64index = 0
for b64dict, tags in b64_al_results:
for ttype, values in tags.items():
for v in values:
b64_res.add_tag(ttype, v)
for b64k, b64l in b64dict.items():
b64index += 1
sub_b64_res = (ResultSection(f"Result {b64index}", parent=b64_res))
sub_b64_res.add_line(f'BASE64 TEXT SIZE: {b64l[0]}')
sub_b64_res.add_line(f'BASE64 SAMPLE TEXT: {safe_str(b64l[1])}[........]')
sub_b64_res.add_line(f'DECODED SHA256: {b64k}')
subb_b64_res = (ResultSection("DECODED ASCII DUMP:",
body_format=BODY_FORMAT.MEMORY_DUMP, parent=sub_b64_res))
subb_b64_res.add_line(safe_str(b64l[2]))
if b64l[2] not in [b"[Possible file contents. See extracted files.]",
b"[IOCs discovered with other non-printable data. See extracted files.]"]:
b64_ascii_content.append(b64l[3])
# Write all non-extracted decoded b64 content to file
if len(b64_ascii_content) > 0:
all_b64 = b"\n".join(b64_ascii_content)
b64_all_sha256 = hashlib.sha256(all_b64).hexdigest()
self.extract_file(request, all_b64, f"all_b64_{b64_all_sha256[:7]}.txt",
"all misc decoded b64 from sample")
return b64_res
return None
def bbcrack_results(self, request: ServiceRequest) -> Optional[ResultSection]:
"""
Balbuzard's bbcrack XOR'd strings to find embedded patterns/PE files of interest
Args:
request: AL request object with result section
Returns:
The result section (with request.result as its parent) if one is created
"""
x_res = (ResultSection("BBCrack XOR'd Strings:", body_format=BODY_FORMAT.MEMORY_DUMP,
heuristic=Heuristic(2)))
if request.deep_scan:
xresult = bbcrack(request.file_contents, level=2)
else:
xresult = bbcrack(request.file_contents, level=1)
xformat_string = '%-20s %-7s %-7s %-50s'
xor_al_results = []
xindex = 0
for transform, regex, offset, score, smatch in xresult:
if regex == 'EXE_HEAD':
xindex += 1
xtemp_file = os.path.join(self.working_directory, f"EXE_HEAD_{xindex}_{offset}_{score}.unXORD")
with open(xtemp_file, 'wb') as xdata:
xdata.write(smatch)
pe_extracted = self.pe_dump(request, xtemp_file, offset, file_string="xorpe_decoded",
msg="Extracted xor file during FrakenStrings analysis.")
if pe_extracted:
xor_al_results.append(xformat_string % (str(transform), offset, score,
"[PE Header Detected. "
"See Extracted files]"))
else:
if not regex.startswith("EXE_"):
x_res.add_tag(self.BBCRACK_TO_TAG.get(regex, regex), smatch)
xor_al_results.append(xformat_string
% (str(transform), offset, score, safe_str(smatch)))
# Result Graph:
if len(xor_al_results) > 0:
xcolumn_names = ('Transform', 'Offset', 'Score', 'Decoded String')
x_res.add_line(xformat_string % xcolumn_names)
x_res.add_line(xformat_string % tuple('-' * len(s) for s in xcolumn_names))
x_res.add_lines(xor_al_results)
request.result.add_section(x_res)
return x_res
return None
def unicode_results(self, request: ServiceRequest, patterns: PatternMatch) -> Optional[ResultSection]:
"""
Finds and report unicode encoded strings
Args:
request: AL request object with result section
patterns: PatternMatch object
Returns:
The result section (with request.result as its parent) if one is created
"""
unicode_al_results: Dict[str, Tuple[bytes, bytes]] = {}
dropped_unicode: List[Tuple[str, str]] = []
for hes in self.HEXENC_STRINGS:
if re.search(re.escape(hes) + b'[A-Fa-f0-9]{2}', request.file_contents):
dropped = self.decode_encoded_udata(request, hes, request.file_contents, unicode_al_results)
for uhash in dropped:
dropped_unicode.append((uhash, safe_str(hes)))
# Report Unicode Encoded Data:
unicode_heur = Heuristic(5, frequency=len(dropped_unicode)) if dropped_unicode else None
unicode_emb_res = ResultSection("Found Unicode-Like Strings in Non-Executable:",
body_format=BODY_FORMAT.MEMORY_DUMP,
heuristic=unicode_heur)
for uhash, uenc in dropped_unicode:
unicode_emb_res.add_line(f"Extracted over 50 bytes of possible embedded unicode with "
f"{uenc} encoding. SHA256: {uhash}. See extracted files.")
for unires_index, (sha256, (decoded, encoded)) in enumerate(unicode_al_results.items()):
sub_uni_res = (ResultSection(f"Result {unires_index}",
parent=unicode_emb_res))
sub_uni_res.add_line(f'ENCODED TEXT SIZE: {len(decoded)}')
sub_uni_res.add_line(f'ENCODED SAMPLE TEXT: {safe_str(encoded)}[........]')
sub_uni_res.add_line(f'DECODED SHA256: {sha256}')
subb_uni_res = (ResultSection("DECODED ASCII DUMP:",
body_format=BODY_FORMAT.MEMORY_DUMP,
parent=sub_uni_res))
subb_uni_res.add_line('{}'.format(safe_str(decoded)))
# Look for IOCs of interest
hits = self.ioc_to_tag(decoded, patterns, sub_uni_res, st_max_length=1000, taglist=True)
if hits:
sub_uni_res.set_heuristic(6)
subb_uni_res.add_line("Suspicious string(s) found in decoded data.")
else:
sub_uni_res.set_heuristic(4)
if unicode_al_results or dropped_unicode:
request.result.add_section(unicode_emb_res)
return unicode_emb_res
return None
def hex_results(self, request: ServiceRequest, patterns: PatternMatch) -> None:
"""
Finds and reports long ascii hex strings
Args:
request: AL request object with result section
patterns: PatternMatch object
"""
asciihex_file_found = False
asciihex_dict: Dict[str, Set[str]] = {}
asciihex_bb_dict: Dict[str, Set[Tuple[bytes, bytes, str]]] = {}
hex_pat = re.compile(b'((?:[0-9a-fA-F]{2}[\r]?[\n]?){16,})')
for hex_match in re.findall(hex_pat, request.file_contents):
hex_string = hex_match.replace(b'\r', b'').replace(b'\n', b'')
afile_found, asciihex_results, xorhex_results = self.unhexlify_ascii(request, hex_string, request.file_type,
patterns)
if afile_found:
asciihex_file_found = True
for ascii_key, ascii_values in asciihex_results.items():
asciihex_dict.setdefault(ascii_key, set())
asciihex_dict[ascii_key].update(ascii_values)
for xor_key, xor_results in xorhex_results.items():
if xor_key.startswith('BB_'):
xor_key = xor_key.split('_', 1)[1]
asciihex_bb_dict.setdefault(xor_key, set())
asciihex_bb_dict[xor_key].add(xor_results)
else:
asciihex_dict.setdefault(xor_key, set())
asciihex_dict[xor_key].add(safe_str(xor_results[1]))
# Report Ascii Hex Encoded Data:
if asciihex_file_found:
asciihex_emb_res = (ResultSection("Found Large Ascii Hex Strings in Non-Executable:",
body_format=BODY_FORMAT.MEMORY_DUMP,
heuristic=Heuristic(7),
parent=request.result))
asciihex_emb_res.add_line("Extracted possible ascii-hex object(s). See extracted files.")
if asciihex_dict:
# Different scores are used depending on whether the file is a document
asciihex_res = (ResultSection("ASCII HEX DECODED IOC Strings:",
body_format=BODY_FORMAT.MEMORY_DUMP,
heuristic=Heuristic(10 if request.file_type.startswith("document") else 8),
parent=request.result))
for key, hex_list in sorted(asciihex_dict.items()):
for h in hex_list:
asciihex_res.add_line(f"Found {key.replace('_', ' ')} decoded HEX string: {safe_str(h)}")
asciihex_res.add_tag(key, h)
if asciihex_bb_dict:
asciihex_bb_res = (ResultSection("ASCII HEX AND XOR DECODED IOC Strings:",
heuristic=Heuristic(9), parent=request.result))
for xindex, (xkey, xset) in enumerate(sorted(asciihex_bb_dict.items())):
for xresult in xset:
data, match, transform = xresult
asx_res = (ResultSection(f"Result {xindex}", parent=asciihex_bb_res))
asx_res.add_line(f"Found {xkey.replace('_', ' ')} decoded HEX string, masked with "
f"transform {safe_str(transform)}:")
asx_res.add_line("Decoded XOR string:")
asx_res.add_line(safe_str(match))
asx_res.add_line("Original ASCII HEX String:")
asx_res.add_line(safe_str(data))
asciihex_bb_res.add_tag(xkey, match)
# --- Execute ----------------------------------------------------------------------------------------------------------
def execute(self, request: ServiceRequest) -> None:
""" Main Module. See README for details."""
request.result = Result()
patterns = PatternMatch()
self.sample_type = request.file_type
self.excess_extracted = 0
# Filters for submission modes. Listed in order of use.
if request.deep_scan:
# Maximum size of submitted file to run this service:
max_size = 8000000
# String length maximum
# Used in basic ASCII and UNICODE modules:
max_length = 1000000
# String list maximum size
# List produced by basic ASCII and UNICODE module results and will determine
# if patterns.py will only evaluate network IOC patterns:
st_max_size = 1000000
# BBcrack maximum size of submitted file to run module:
bb_max_size = 200000
else:
max_size = self.config.get('max_size', 3000000)
max_length = self.config.get('max_length', 5000)
st_max_size = self.config.get('st_max_size', 0)
bb_max_size = self.config.get('bb_max_size', 85000)
# Begin analysis
if (len(request.file_contents) or 0) >= max_size or self.sample_type.startswith("archive/"):
# No analysis is done if the file is an archive or too large
return
self.ascii_results(request, patterns, max_length, st_max_size)
self.embedded_pe_results(request)
# Possible encoded strings -- all sample types except code/* (code is handled by deobfuscripter service)
if not self.sample_type.startswith('code'):
self.base64_results(request, patterns)
if (len(request.file_contents) or 0) < bb_max_size:
self.bbcrack_results(request)
# Other possible encoded strings -- all sample types but code and executables
if not self.sample_type.split('/', 1)[0] in ['executable', 'code']:
self.unicode_results(request, patterns)
# Go over again, looking for long ASCII-HEX character strings
if not self.sample_type.startswith('document/office'):
self.hex_results(request, patterns)
if self.excess_extracted:
self.log.warning(f"Too many files extracted from {request.sha256}, "
f"{self.excess_extracted} files were not extracted")
request.result.add_section(ResultSection(f"Over extraction limit: "
f"{self.excess_extracted} files were not extracted"))
|
[
"os.path.join",
"binascii.a2b_base64",
"os.path.exists",
"magic.Magic",
"re.escape",
"hashlib.sha256",
"assemblyline_v4_service.common.balbuzard.patterns.PatternMatch",
"re.findall",
"traceback.format_exc",
"re.search",
"re.sub",
"assemblyline_v4_service.common.result.ResultSection",
"frankenstrings.flarefloss.strings.extract_unicode_strings",
"assemblyline.common.str_utils.safe_str",
"assemblyline_v4_service.common.result.Result",
"binascii.unhexlify",
"pefile.PE",
"frankenstrings.flarefloss.strings.extract_ascii_strings",
"re.compile",
"assemblyline_v4_service.common.balbuzard.bbcrack.bbcrack",
"os.makedirs",
"binascii.a2b_hex",
"assemblyline_v4_service.common.result.Heuristic"
] |
[((3928, 3977), 'frankenstrings.flarefloss.strings.extract_ascii_strings', 'strings.extract_ascii_strings', (['data'], {'n': 'min_length'}), '(data, n=min_length)\n', (3957, 3977), False, 'from frankenstrings.flarefloss import strings\n'), ((4138, 4189), 'frankenstrings.flarefloss.strings.extract_unicode_strings', 'strings.extract_unicode_strings', (['data'], {'n': 'min_length'}), '(data, n=min_length)\n', (4169, 4189), False, 'from frankenstrings.flarefloss import strings\n'), ((8262, 8285), 're.findall', 're.findall', (['qword', 'data'], {}), '(qword, data)\n', (8272, 8285), False, 'import re\n'), ((8484, 8507), 're.findall', 're.findall', (['dword', 'data'], {}), '(dword, data)\n', (8494, 8507), False, 'import re\n'), ((8705, 8727), 're.findall', 're.findall', (['word', 'data'], {}), '(word, data)\n', (8715, 8727), False, 'import re\n'), ((8925, 8947), 're.findall', 're.findall', (['byte', 'data'], {}), '(byte, data)\n', (8935, 8947), False, 'import re\n'), ((18735, 18848), 'assemblyline_v4_service.common.result.ResultSection', 'ResultSection', (['"""The following IOC were found in plain text in the file:"""'], {'body_format': 'BODY_FORMAT.MEMORY_DUMP'}), "('The following IOC were found in plain text in the file:',\n body_format=BODY_FORMAT.MEMORY_DUMP)\n", (18748, 18848), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((20020, 20072), 're.findall', 're.findall', (['pat_exeheader', 'request.file_contents[1:]'], {}), '(pat_exeheader, request.file_contents[1:])\n', (20030, 20072), False, 'import re\n'), ((21525, 21640), 're.findall', 're.findall', (["b'([ ]{0,2}(?:[A-Za-z0-9+/]{10,}={0,2}(?:&#[x1][A0];)?[\\r]?[\\n]?){2,})'", 'request.file_contents'], {}), "(\n b'([ ]{0,2}(?:[A-Za-z0-9+/]{10,}={0,2}(?:&#[x1][A0];)?[\\r]?[\\n]?){2,})',\n request.file_contents)\n", (21535, 21640), False, 'import re\n'), ((22235, 22311), 'frankenstrings.flarefloss.strings.extract_unicode_strings', 'strings.extract_unicode_strings', (['request.file_contents'], {'n': 'self.st_min_length'}), '(request.file_contents, n=self.st_min_length)\n', (22266, 22311), False, 'from frankenstrings.flarefloss import strings\n'), ((27921, 28049), 'assemblyline_v4_service.common.result.ResultSection', 'ResultSection', (['"""Found Unicode-Like Strings in Non-Executable:"""'], {'body_format': 'BODY_FORMAT.MEMORY_DUMP', 'heuristic': 'unicode_heur'}), "('Found Unicode-Like Strings in Non-Executable:', body_format=\n BODY_FORMAT.MEMORY_DUMP, heuristic=unicode_heur)\n", (27934, 28049), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((30057, 30107), 're.compile', 're.compile', (["b'((?:[0-9a-fA-F]{2}[\\r]?[\\n]?){16,})'"], {}), "(b'((?:[0-9a-fA-F]{2}[\\r]?[\\n]?){16,})')\n", (30067, 30107), False, 'import re\n'), ((30133, 30175), 're.findall', 're.findall', (['hex_pat', 'request.file_contents'], {}), '(hex_pat, request.file_contents)\n', (30143, 30175), False, 'import re\n'), ((33518, 33526), 'assemblyline_v4_service.common.result.Result', 'Result', ([], {}), '()\n', (33524, 33526), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((33546, 33560), 'assemblyline_v4_service.common.balbuzard.patterns.PatternMatch', 'PatternMatch', ([], {}), '()\n', (33558, 33560), False, 'from assemblyline_v4_service.common.balbuzard.patterns import PatternMatch\n'), ((2398, 2445), 'os.path.join', 'os.path.join', (['self.working_directory', 'file_name'], {}), '(self.working_directory, file_name)\n', (2410, 2447), False, 'import os\n'), ((14277, 14301), 'binascii.unhexlify', 'binascii.unhexlify', (['data'], {}), '(data)\n', (14295, 14301), False, 'import binascii\n'), ((15364, 15401), 'assemblyline_v4_service.common.balbuzard.bbcrack.bbcrack', 'bbcrack', (['binstr'], {'level': '"""small_string"""'}), "(binstr, level='small_string')\n", (15371, 15401), False, 'from assemblyline_v4_service.common.balbuzard.bbcrack import bbcrack\n'), ((20089, 20119), 're.search', 're.search', (['pat_exedos', 'pos_exe'], {}), '(pat_exedos, pos_exe)\n', (20098, 20119), False, 'import re\n'), ((22342, 22416), 're.findall', 're.findall', (["b'([ ]{0,2}(?:[A-Za-z0-9+/]{10,}={0,2}[\\r]?[\\n]?){2,})'", 'ust.s'], {}), "(b'([ ]{0,2}(?:[A-Za-z0-9+/]{10,}={0,2}[\\r]?[\\n]?){2,})', ust.s)\n", (22352, 22416), False, 'import re\n'), ((25148, 25187), 'assemblyline_v4_service.common.balbuzard.bbcrack.bbcrack', 'bbcrack', (['request.file_contents'], {'level': '(2)'}), '(request.file_contents, level=2)\n', (25155, 25187), False, 'from assemblyline_v4_service.common.balbuzard.bbcrack import bbcrack\n'), ((25224, 25263), 'assemblyline_v4_service.common.balbuzard.bbcrack.bbcrack', 'bbcrack', (['request.file_contents'], {'level': '(1)'}), '(request.file_contents, level=1)\n', (25231, 25263), False, 'from assemblyline_v4_service.common.balbuzard.bbcrack import bbcrack\n'), ((28491, 28554), 'assemblyline_v4_service.common.result.ResultSection', 'ResultSection', (['f"""Result {unires_index}"""'], {'parent': 'unicode_emb_res'}), "(f'Result {unires_index}', parent=unicode_emb_res)\n", (28504, 28554), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((28846, 28943), 'assemblyline_v4_service.common.result.ResultSection', 'ResultSection', (['"""DECODED ASCII DUMP:"""'], {'body_format': 'BODY_FORMAT.MEMORY_DUMP', 'parent': 'sub_uni_res'}), "('DECODED ASCII DUMP:', body_format=BODY_FORMAT.MEMORY_DUMP,\n parent=sub_uni_res)\n", (28859, 28943), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((2282, 2320), 'os.path.exists', 'os.path.exists', (['self.working_directory'], {}), '(self.working_directory)\n', (2296, 2320), False, 'import os\n'), ((2338, 2373), 'os.makedirs', 'os.makedirs', (['self.working_directory'], {}), '(self.working_directory)\n', (2349, 2373), False, 'import os\n'), ((5524, 5551), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[2:4]'], {}), '(data[2:4])\n', (5540, 5551), False, 'import binascii\n'), ((10597, 10628), 'binascii.a2b_base64', 'binascii.a2b_base64', (['b64_string'], {}), '(b64_string)\n', (10616, 10628), False, 'import binascii\n'), ((16969, 16991), 'pefile.PE', 'pefile.PE', ([], {'data': 'pedata'}), '(data=pedata)\n', (16978, 16991), False, 'import pefile\n'), ((25081, 25093), 'assemblyline_v4_service.common.result.Heuristic', 'Heuristic', (['(2)'], {}), '(2)\n', (25090, 25093), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((25517, 25603), 'os.path.join', 'os.path.join', (['self.working_directory', 'f"""EXE_HEAD_{xindex}_{offset}_{score}.unXORD"""'], {}), "(self.working_directory,\n f'EXE_HEAD_{xindex}_{offset}_{score}.unXORD')\n", (25529, 25603), False, 'import os\n'), ((35852, 35946), 'assemblyline_v4_service.common.result.ResultSection', 'ResultSection', (['f"""Over extraction limit: {self.excess_extracted} files were not extracted"""'], {}), "(\n f'Over extraction limit: {self.excess_extracted} files were not extracted')\n", (35865, 35946), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((5664, 5691), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[4:6]'], {}), '(data[4:6])\n', (5680, 5691), False, 'import binascii\n'), ((5694, 5721), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[2:4]'], {}), '(data[2:4])\n', (5710, 5721), False, 'import binascii\n'), ((5954, 5981), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[2:4]'], {}), '(data[2:4])\n', (5970, 5981), False, 'import binascii\n'), ((6402, 6429), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[2:4]'], {}), '(data[2:4])\n', (6418, 6429), False, 'import binascii\n'), ((7979, 7998), 're.escape', 're.escape', (['encoding'], {}), '(encoding)\n', (7988, 7998), False, 'import re\n'), ((8056, 8075), 're.escape', 're.escape', (['encoding'], {}), '(encoding)\n', (8065, 8075), False, 'import re\n'), ((8131, 8150), 're.escape', 're.escape', (['encoding'], {}), '(encoding)\n', (8140, 8150), False, 'import re\n'), ((8206, 8225), 're.escape', 're.escape', (['encoding'], {}), '(encoding)\n', (8215, 8225), False, 'import re\n'), ((9307, 9333), 'hashlib.sha256', 'hashlib.sha256', (['decoded[0]'], {}), '(decoded[0])\n', (9321, 9333), False, 'import hashlib\n'), ((10830, 10852), 'magic.Magic', 'magic.Magic', ([], {'mime': '(True)'}), '(mime=True)\n', (10841, 10852), False, 'import magic\n'), ((10879, 10892), 'magic.Magic', 'magic.Magic', ([], {}), '()\n', (10890, 10892), False, 'import magic\n'), ((14712, 14734), 'hashlib.sha256', 'hashlib.sha256', (['binstr'], {}), '(binstr)\n', (14726, 14734), False, 'import hashlib\n'), ((20888, 20900), 'assemblyline_v4_service.common.result.Heuristic', 'Heuristic', (['(3)'], {}), '(3)\n', (20897, 20900), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((22968, 22980), 'assemblyline_v4_service.common.result.Heuristic', 'Heuristic', (['(1)'], {}), '(1)\n', (22977, 22980), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((23338, 23389), 'assemblyline_v4_service.common.result.ResultSection', 'ResultSection', (['f"""Result {b64index}"""'], {'parent': 'b64_res'}), "(f'Result {b64index}', parent=b64_res)\n", (23351, 23389), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((23663, 23760), 'assemblyline_v4_service.common.result.ResultSection', 'ResultSection', (['"""DECODED ASCII DUMP:"""'], {'body_format': 'BODY_FORMAT.MEMORY_DUMP', 'parent': 'sub_b64_res'}), "('DECODED ASCII DUMP:', body_format=BODY_FORMAT.MEMORY_DUMP,\n parent=sub_b64_res)\n", (23676, 23760), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((27484, 27498), 're.escape', 're.escape', (['hes'], {}), '(hes)\n', (27493, 27498), False, 'import re\n'), ((29071, 29088), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['decoded'], {}), '(decoded)\n', (29079, 29088), False, 'from assemblyline.common.str_utils import safe_str\n'), ((31423, 31435), 'assemblyline_v4_service.common.result.Heuristic', 'Heuristic', (['(7)'], {}), '(7)\n', (31432, 31435), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((32489, 32501), 'assemblyline_v4_service.common.result.Heuristic', 'Heuristic', (['(9)'], {}), '(9)\n', (32498, 32501), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((32733, 32790), 'assemblyline_v4_service.common.result.ResultSection', 'ResultSection', (['f"""Result {xindex}"""'], {'parent': 'asciihex_bb_res'}), "(f'Result {xindex}', parent=asciihex_bb_res)\n", (32746, 32790), False, 'from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT, Heuristic\n'), ((5924, 5951), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[4:6]'], {}), '(data[4:6])\n', (5940, 5951), False, 'import binascii\n'), ((6372, 6399), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[4:6]'], {}), '(data[4:6])\n', (6388, 6399), False, 'import binascii\n'), ((10658, 10684), 'hashlib.sha256', 'hashlib.sha256', (['base64data'], {}), '(base64data)\n', (10672, 10684), False, 'import hashlib\n'), ((20149, 20172), 'hashlib.sha256', 'hashlib.sha256', (['pos_exe'], {}), '(pos_exe)\n', (20163, 20172), False, 'import hashlib\n'), ((23850, 23867), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['b64l[2]'], {}), '(b64l[2])\n', (23858, 23867), False, 'from assemblyline.common.str_utils import safe_str\n'), ((24330, 24353), 'hashlib.sha256', 'hashlib.sha256', (['all_b64'], {}), '(all_b64)\n', (24344, 24353), False, 'import hashlib\n'), ((28725, 28742), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['encoded'], {}), '(encoded)\n', (28733, 28742), False, 'from assemblyline.common.str_utils import safe_str\n'), ((31086, 31110), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['xor_results[1]'], {}), '(xor_results[1])\n', (31094, 31110), False, 'from assemblyline.common.str_utils import safe_str\n'), ((33067, 33082), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['match'], {}), '(match)\n', (33075, 33082), False, 'from assemblyline.common.str_utils import safe_str\n'), ((33188, 33202), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['data'], {}), '(data)\n', (33196, 33202), False, 'from assemblyline.common.str_utils import safe_str\n'), ((2774, 2803), 'traceback.format_exc', 'traceback.format_exc', ([], {'limit': '(2)'}), '(limit=2)\n', (2794, 2803), False, 'import traceback\n'), ((5834, 5862), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[8:10]'], {}), '(data[8:10])\n', (5850, 5862), False, 'import binascii\n'), ((5865, 5892), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[6:8]'], {}), '(data[6:8])\n', (5881, 5892), False, 'import binascii\n'), ((6313, 6340), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[6:8]'], {}), '(data[6:8])\n', (6329, 6340), False, 'import binascii\n'), ((9539, 9557), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['encoding'], {}), '(encoding)\n', (9547, 9557), False, 'from assemblyline.common.str_utils import safe_str\n'), ((26463, 26479), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['smatch'], {}), '(smatch)\n', (26471, 26479), False, 'from assemblyline.common.str_utils import safe_str\n'), ((27742, 27755), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['hes'], {}), '(hes)\n', (27750, 27755), False, 'from assemblyline.common.str_utils import safe_str\n'), ((5045, 5056), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['v'], {}), '(v)\n', (5053, 5056), False, 'from assemblyline.common.str_utils import safe_str\n'), ((5135, 5146), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['v'], {}), '(v)\n', (5143, 5146), False, 'from assemblyline.common.str_utils import safe_str\n'), ((6282, 6310), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[8:10]'], {}), '(data[8:10])\n', (6298, 6310), False, 'import binascii\n'), ((19411, 19422), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['i'], {}), '(i)\n', (19419, 19422), False, 'from assemblyline.common.str_utils import safe_str\n'), ((23528, 23545), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['b64l[1]'], {}), '(b64l[1])\n', (23536, 23545), False, 'from assemblyline.common.str_utils import safe_str\n'), ((32253, 32264), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['h'], {}), '(h)\n', (32261, 32264), False, 'from assemblyline.common.str_utils import safe_str\n'), ((32946, 32965), 'assemblyline.common.str_utils.safe_str', 'safe_str', (['transform'], {}), '(transform)\n', (32954, 32965), False, 'from assemblyline.common.str_utils import safe_str\n'), ((6221, 6250), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[10:12]'], {}), '(data[10:12])\n', (6237, 6250), False, 'import binascii\n'), ((17575, 17601), 'hashlib.sha256', 'hashlib.sha256', (['pe_extract'], {}), '(pe_extract)\n', (17589, 17601), False, 'import hashlib\n'), ((6189, 6218), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[12:14]'], {}), '(data[12:14])\n', (6205, 6218), False, 'import binascii\n'), ((12639, 12677), 're.sub', 're.sub', (["b'[^A-Za-z0-9]+'", "b''", 'asc_b64'], {}), "(b'[^A-Za-z0-9]+', b'', asc_b64)\n", (12645, 12677), False, 'import re\n'), ((6096, 6125), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[16:18]'], {}), '(data[16:18])\n', (6112, 6125), False, 'import binascii\n'), ((6128, 6157), 'binascii.a2b_hex', 'binascii.a2b_hex', (['data[14:16]'], {}), '(data[14:16])\n', (6144, 6157), False, 'import binascii\n')]
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.types import freeze
DEPS = [
'adb',
'build',
'chromium',
'chromium_android',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
]
BUILDERS = freeze({
'basic_builder': {
'target': 'Release',
'build': True,
},
'restart_usb_builder': {
'restart_usb': True,
'target': 'Release',
'build': True,
},
'coverage_builder': {
'coverage': True,
'target': 'Debug',
'build': True,
},
'tester': {},
'perf_runner': {
'perf_config': 'sharded_perf_tests.json',
},
'perf_runner_user_build': {
'perf_config': 'sharded_perf_tests.json',
'skip_wipe': True,
},
'perf_runner_disable_location': {
'perf_config': 'sharded_perf_tests.json',
'disable_location': True,
},
'perf_runner_allow_low_battery': {
'perf_config': 'sharded_perf_tests.json',
'min_battery_level': 50,
},
'perf_adb_vendor_keys': {
'adb_vendor_keys': True,
},
'perf_runner_allow_high_battery_temp': {
'perf_config': 'sharded_perf_tests.json',
'max_battery_temp': 500,
},
'gerrit_try_builder': {
'build': True,
'skip_wipe': True,
},
'webview_tester': {
'android_apply_config': ['remove_all_system_webviews'],
},
'slow_tester': {
'timeout_scale': 2,
},
'downgrade_install_tester': {
'specific_install': True,
'downgrade': True,
},
'keep_data_install_tester': {
'specific_install': True,
'keep_data': True,
},
'no_strict_mode_tester': {
'strict_mode': 'off',
},
'resource_size_builder': {
'resource_size': True,
},
'webview_cts': {
'run_webview_cts': True,
},
'last_known_devices': {
'perf_config': 'sharded_perf_tests.json',
'last_known_devices': '.last_devices',
},
'device_flags_builder': {
'device_flags': 'device_flags_file',
},
'no_cache_builder': {
'use_git_cache': False,
},
'json_results_file': {
'json_results_file': 'json_results_file',
},
'render_results': {
'render_results_dir': 'chrome/test/data/android/render_tests',
},
'result_details': {
'result_details': True,
'store_tombstones': True,
},
'enable_platform_mode': {
'perf_config': 'sharded_perf_tests.json',
'enable_platform_mode': True,
'write_buildbot_json': True,
},
'timestamp_as_point_id': {
'perf_config': 'sharded_perf_tests.json',
'timestamp_as_point_id': True
},
'telemetry_browser_tests_tester': {
'run_telemetry_browser_tests': True,
},
'use_devil_adb': {
'android_apply_config': ['use_devil_adb'],
},
'remove_system_vrcore': {
'android_apply_config': ['remove_system_vrcore'],
},
'stackwalker': {
'run_stackwalker': True,
},
'asan': {
'chromium_apply_config': ['chromium_asan'],
}
})
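# Editor's note (sketch, not part of the recipe): each key above is a buildername
# consumed by RunSteps below, e.g. properties {'buildername': 'coverage_builder'}
# compiles with coverage reporting enabled, while the empty 'tester' config skips
# the compile entirely and downloads a prebuilt 'build_product.zip' instead.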
from recipe_engine.recipe_api import Property
PROPERTIES = {
'buildername': Property(),
}
def RunSteps(api, buildername):
config = BUILDERS[buildername]
api.chromium_android.configure_from_properties(
'base_config',
REPO_URL='svn://svn.chromium.org/chrome/trunk/src',
REPO_NAME='src/repo',
INTERNAL=True,
BUILD_CONFIG='Release')
api.chromium_android.c.get_app_manifest_vars = True
api.chromium_android.c.coverage = config.get('coverage', False)
api.chromium_android.c.asan_symbolize = True
if config.get('adb_vendor_keys'):
api.chromium.c.env.ADB_VENDOR_KEYS = api.path['start_dir'].join('.adb_key')
for c in config.get('chromium_apply_config', []):
api.chromium.apply_config(c)
for c in config.get('android_apply_config', []):
api.chromium_android.apply_config(c)
api.chromium_android.init_and_sync(
use_bot_update=False, use_git_cache=config.get('use_git_cache', True))
if config.get('build', False):
api.chromium.ensure_goma()
api.chromium.runhooks()
api.chromium_android.run_tree_truth(additional_repos=['foo'])
assert 'MAJOR' in api.chromium.get_version()
api.chromium_android.host_info()
if config.get('build', False):
api.chromium.compile(use_goma_module=True)
api.chromium_android.make_zip_archive(
'zip_build_product', 'archive.zip', include_filters=['*.apk'],
exclude_filters=['*.so', '*.a'])
else:
api.chromium_android.download_build('build-bucket',
'build_product.zip')
api.chromium_android.git_number()
if config.get('specific_install'):
api.chromium_android.adb_install_apk(
'Chrome.apk',
devices=['abc123'],
allow_downgrade=config.get('downgrade', False),
keep_data=config.get('keep_data', False),
)
api.adb.root_devices()
api.chromium_android.spawn_logcat_monitor()
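  # Remember the first step failure but keep running the remaining steps;
  # the failure is re-raised at the end of RunSteps.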
failure = False
try:
# TODO(luqui): remove redundant cruft, need one consistent API.
api.chromium_android.device_status_check()
api.path.mock_add_paths(api.chromium_android.known_devices_file)
api.chromium_android.device_status_check(
restart_usb=config.get('restart_usb', False))
api.chromium_android.provision_devices(
skip_wipe=config.get('skip_wipe', False),
disable_location=config.get('disable_location', False),
min_battery_level=config.get('min_battery_level'),
max_battery_temp=config.get('max_battery_temp'),
reboot_timeout=1800)
api.chromium_android.common_tests_setup_steps(skip_wipe=True)
except api.step.StepFailure as f:
failure = f
api.chromium_android.monkey_test()
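  # Sharded perf tests run only for builders that define a perf_config.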
try:
if config.get('perf_config'):
api.chromium_android.run_sharded_perf_tests(
config='fake_config.json',
flaky_config='flake_fakes.json',
upload_archives_to_bucket='archives-bucket',
known_devices_file=config.get('last_known_devices', None),
enable_platform_mode=config.get('enable_platform_mode', None),
write_buildbot_json=config.get('write_buildbot_json', False),
timestamp_as_point_id=config.get('timestamp_as_point_id', False))
except api.step.StepFailure as f:
failure = f
api.chromium_android.run_instrumentation_suite(
name='WebViewInstrumentationTest',
apk_under_test=api.chromium_android.apk_path(
'WebViewInstrumentation.apk'),
test_apk=api.chromium_android.apk_path('WebViewInstrumentationTest.apk'),
flakiness_dashboard='test-results.appspot.com',
annotation='SmallTest',
except_annotation='FlakyTest',
screenshot=True,
timeout_scale=config.get('timeout_scale'),
strict_mode=config.get('strict_mode'),
additional_apks=['Additional.apk'],
device_flags=config.get('device_flags'),
json_results_file=config.get('json_results_file'),
result_details=config.get('result_details'),
store_tombstones=config.get('store_tombstones'),
render_results_dir=config.get('render_results_dir'))
api.chromium_android.run_test_suite(
'unittests',
gtest_filter='WebRtc*',
result_details=config.get('result_details'),
store_tombstones=config.get('store_tombstones'),
tool='asan')
if not failure:
api.chromium_android.run_bisect_script(extra_src='test.py',
path_to_config='test.py')
if config.get('resource_size'):
api.chromium_android.resource_sizes(
apk_path=api.chromium_android.apk_path('Example.apk'),
chartjson_file=True,
upload_archives_to_bucket='Bucket')
api.chromium_android.create_supersize_archive(
apk_path=api.chromium_android.apk_path('Example.apk'),
size_path=api.chromium_android.apk_path('Example.apk.size'))
if config.get('run_webview_cts'):
api.chromium_android.run_webview_cts(command_line_args=[
'--webview_arg_1', '--webview_arg_2'])
if config.get('run_telemetry_browser_tests'):
api.chromium_android.run_telemetry_browser_test('PopularUrlsTest')
api.chromium_android.logcat_dump()
api.chromium_android.stack_tool_steps()
if config.get('coverage', False):
api.chromium_android.coverage_report()
if config.get('run_stackwalker'):
chrome_breakpad_binary = api.path['checkout'].join(
'out', api.chromium.c.BUILD_CONFIG, 'lib.unstripped', 'libchrome.so')
webview_breakpad_binary = api.path['checkout'].join(
'out', api.chromium.c.BUILD_CONFIG, 'lib.unstripped',
'libwebviewchromium.so')
dump_syms_binary = api.path['checkout'].join(
'out', api.chromium.c.BUILD_CONFIG, 'dump_syms')
microdump_stackwalk_binary = api.path['checkout'].join(
'out', api.chromium.c.BUILD_CONFIG, 'microdump_stackwalk')
api.path.mock_add_paths(chrome_breakpad_binary)
api.path.mock_add_paths(webview_breakpad_binary)
api.path.mock_add_paths(dump_syms_binary)
api.path.mock_add_paths(microdump_stackwalk_binary)
api.chromium_android.common_tests_final_steps(
checkout_dir=api.path['checkout'])
if failure:
# pylint: disable=raising-bad-type
raise failure
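# Simulation tests: one basic case per builder, plus targeted device,
# perf, and failure scenarios.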
def GenTests(api):
def properties_for(buildername):
return api.properties.generic(
buildername=buildername,
bot_id='tehslave',
repo_name='src/repo',
issue='123456789',
patchset='1',
rietveld='http://rietveld.example.com',
repo_url='svn://svn.chromium.org/chrome/trunk/src',
revision='4f4b02f6b7fa20a3a25682c457bbc8ad589c8a00',
internal=True)
for buildername in BUILDERS:
yield api.test('%s_basic' % buildername) + properties_for(buildername)
yield (api.test('tester_no_devices_during_recovery') +
properties_for('tester') +
api.step_data('device_recovery', retcode=1))
yield (api.test('tester_no_devices_during_status') +
properties_for('tester') +
api.step_data('device_status', retcode=1))
yield (api.test('tester_other_device_failure_during_recovery') +
properties_for('tester') +
api.step_data('device_recovery', retcode=2))
yield (api.test('tester_other_device_failure_during_status') +
properties_for('tester') +
api.step_data('device_status', retcode=2))
yield (api.test('tester_with_step_warning') +
properties_for('tester') +
api.step_data('unittests', retcode=88))
yield (api.test('tester_failing_host_info') +
properties_for('tester') +
api.step_data(
'Host Info',
api.json.output({'failures': ['foo', 'bar']}),
retcode=1))
yield (api.test('tester_blacklisted_devices') +
properties_for('tester') +
api.override_step_data('provision_devices',
api.json.output(['abc123', 'def456'])))
yield (api.test('tester_offline_devices') +
properties_for('tester') +
api.override_step_data('device_status',
api.json.output([{}, {}])))
yield (api.test('perf_tests_failure') +
properties_for('perf_runner') +
api.step_data('perf_test.foo', retcode=1))
yield (api.test('perf_tests_infra_failure') +
properties_for('perf_runner') +
api.step_data('perf_test.foo', retcode=87))
yield (api.test('perf_tests_reference_failure') +
properties_for('perf_runner') +
api.step_data('perf_test.foo.reference', retcode=1))
yield (api.test('perf_tests_infra_reference_failure') +
properties_for('perf_runner') +
api.step_data('perf_test.foo.reference', retcode=87))
yield (api.test('gerrit_refs') +
api.properties.generic(
buildername='gerrit_try_builder',
bot_id='testslave',
repo_name='src/repo',
issue='123456789',
patchset='1',
rietveld='http://rietveld.example.com',
repo_url='svn://svn.chromium.org/chrome/trunk/src',
revision='4f4b02f6b7fa20a3a25682c457bbc8ad589c8a00',
internal=True, **({'event.patchSet.ref':'refs/changes/50/176150/1'})))
yield (api.test('tombstones_m53') +
properties_for('tester') +
api.override_step_data(
'get version (2)',
api.raw_io.output_text(
'MAJOR=53\nMINOR=0\nBUILD=2800\nPATCH=0\n')))
yield (api.test('telemetry_browser_tests_failures') +
properties_for('telemetry_browser_tests_tester') +
api.override_step_data('Run telemetry browser_test PopularUrlsTest',
api.json.output({'successes': ['passed_test1', 'passed_test2'],
'failures': ['failed_test_1', 'failed_test_2']}),
retcode=1))
yield (api.test('upload_result_details_failures') +
properties_for('result_details') +
api.override_step_data('unittests: generate result details',
retcode=1))
yield (api.test('asan_setup_failure') +
properties_for('asan') +
api.override_step_data('Set up ASAN on devices.wait_for_devices',
retcode=87))
|
[
"recipe_engine.types.freeze",
"recipe_engine.recipe_api.Property"
] |
[((428, 2750), 'recipe_engine.types.freeze', 'freeze', (["{'basic_builder': {'target': 'Release', 'build': True},\n 'restart_usb_builder': {'restart_usb': True, 'target': 'Release',\n 'build': True}, 'coverage_builder': {'coverage': True, 'target':\n 'Debug', 'build': True}, 'tester': {}, 'perf_runner': {'perf_config':\n 'sharded_perf_tests.json'}, 'perf_runner_user_build': {'perf_config':\n 'sharded_perf_tests.json', 'skip_wipe': True},\n 'perf_runner_disable_location': {'perf_config':\n 'sharded_perf_tests.json', 'disable_location': True},\n 'perf_runner_allow_low_battery': {'perf_config':\n 'sharded_perf_tests.json', 'min_battery_level': 50},\n 'perf_adb_vendor_keys': {'adb_vendor_keys': True},\n 'perf_runner_allow_high_battery_temp': {'perf_config':\n 'sharded_perf_tests.json', 'max_battery_temp': 500},\n 'gerrit_try_builder': {'build': True, 'skip_wipe': True},\n 'webview_tester': {'android_apply_config': [\n 'remove_all_system_webviews']}, 'slow_tester': {'timeout_scale': 2},\n 'downgrade_install_tester': {'specific_install': True, 'downgrade': \n True}, 'keep_data_install_tester': {'specific_install': True,\n 'keep_data': True}, 'no_strict_mode_tester': {'strict_mode': 'off'},\n 'resource_size_builder': {'resource_size': True}, 'webview_cts': {\n 'run_webview_cts': True}, 'last_known_devices': {'perf_config':\n 'sharded_perf_tests.json', 'last_known_devices': '.last_devices'},\n 'device_flags_builder': {'device_flags': 'device_flags_file'},\n 'no_cache_builder': {'use_git_cache': False}, 'json_results_file': {\n 'json_results_file': 'json_results_file'}, 'render_results': {\n 'render_results_dir': 'chrome/test/data/android/render_tests'},\n 'result_details': {'result_details': True, 'store_tombstones': True},\n 'enable_platform_mode': {'perf_config': 'sharded_perf_tests.json',\n 'enable_platform_mode': True, 'write_buildbot_json': True},\n 'timestamp_as_point_id': {'perf_config': 'sharded_perf_tests.json',\n 'timestamp_as_point_id': True}, 'telemetry_browser_tests_tester': {\n 'run_telemetry_browser_tests': True}, 'use_devil_adb': {\n 'android_apply_config': ['use_devil_adb']}, 'remove_system_vrcore': {\n 'android_apply_config': ['remove_system_vrcore']}, 'stackwalker': {\n 'run_stackwalker': True}, 'asan': {'chromium_apply_config': [\n 'chromium_asan']}}"], {}), "({'basic_builder': {'target': 'Release', 'build': True},\n 'restart_usb_builder': {'restart_usb': True, 'target': 'Release',\n 'build': True}, 'coverage_builder': {'coverage': True, 'target':\n 'Debug', 'build': True}, 'tester': {}, 'perf_runner': {'perf_config':\n 'sharded_perf_tests.json'}, 'perf_runner_user_build': {'perf_config':\n 'sharded_perf_tests.json', 'skip_wipe': True},\n 'perf_runner_disable_location': {'perf_config':\n 'sharded_perf_tests.json', 'disable_location': True},\n 'perf_runner_allow_low_battery': {'perf_config':\n 'sharded_perf_tests.json', 'min_battery_level': 50},\n 'perf_adb_vendor_keys': {'adb_vendor_keys': True},\n 'perf_runner_allow_high_battery_temp': {'perf_config':\n 'sharded_perf_tests.json', 'max_battery_temp': 500},\n 'gerrit_try_builder': {'build': True, 'skip_wipe': True},\n 'webview_tester': {'android_apply_config': [\n 'remove_all_system_webviews']}, 'slow_tester': {'timeout_scale': 2},\n 'downgrade_install_tester': {'specific_install': True, 'downgrade': \n True}, 'keep_data_install_tester': {'specific_install': True,\n 'keep_data': True}, 'no_strict_mode_tester': {'strict_mode': 'off'},\n 'resource_size_builder': {'resource_size': True}, 'webview_cts': {\n 'run_webview_cts': True}, 'last_known_devices': {'perf_config':\n 'sharded_perf_tests.json', 'last_known_devices': '.last_devices'},\n 'device_flags_builder': {'device_flags': 'device_flags_file'},\n 'no_cache_builder': {'use_git_cache': False}, 'json_results_file': {\n 'json_results_file': 'json_results_file'}, 'render_results': {\n 'render_results_dir': 'chrome/test/data/android/render_tests'},\n 'result_details': {'result_details': True, 'store_tombstones': True},\n 'enable_platform_mode': {'perf_config': 'sharded_perf_tests.json',\n 'enable_platform_mode': True, 'write_buildbot_json': True},\n 'timestamp_as_point_id': {'perf_config': 'sharded_perf_tests.json',\n 'timestamp_as_point_id': True}, 'telemetry_browser_tests_tester': {\n 'run_telemetry_browser_tests': True}, 'use_devil_adb': {\n 'android_apply_config': ['use_devil_adb']}, 'remove_system_vrcore': {\n 'android_apply_config': ['remove_system_vrcore']}, 'stackwalker': {\n 'run_stackwalker': True}, 'asan': {'chromium_apply_config': [\n 'chromium_asan']}})\n", (434, 2750), False, 'from recipe_engine.types import freeze\n'), ((3380, 3390), 'recipe_engine.recipe_api.Property', 'Property', ([], {}), '()\n', (3388, 3390), False, 'from recipe_engine.recipe_api import Property\n')]
|
# ===========================================================================
# Single process:
# 0.0003s
# Multiprocessing:
# ncpu = 1: ~0.16s
# ncpu = 2: ~0.07s
# ===========================================================================
from __future__ import print_function, division, absolute_import
import os
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
from odin import fuel as F, visual
from odin.ml import MiniBatchPCA
from sklearn.manifold import TSNE
from odin.utils import UnitTimer, TemporaryDirectory
iris = F.load_iris()
print(iris)
pca = MiniBatchPCA()
X = iris['X'][:]
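# Incrementally fit the PCA on mini-batches of 20 samples.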
i = 0
while i < X.shape[0]:
x = X[i:i + 20]
i += 20
pca.partial_fit(x)
print("Fitting PCA ...")
with UnitTimer():
for i in range(8):
x = pca.transform(X)
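# Multi-process variant; keep_order=True preserves the row order of X.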
with UnitTimer():
for i in range(8):
x = pca.transform_mpi(X, keep_order=True, ncpu=1, n_components=2)
print("Output shape:", x.shape)
colors = ['r' if i == 0 else ('b' if i == 1 else 'g')
for i in iris['y'][:]]
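# Scatter the 2-D projection colored by class label and save it as a PDF.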
visual.plot_scatter(x[:, 0], x[:, 1], color=colors, size=8)
visual.plot_save('/tmp/tmp.pdf')
|
[
"odin.visual.plot_save",
"odin.fuel.load_iris",
"odin.utils.UnitTimer",
"matplotlib.use",
"odin.visual.plot_scatter",
"odin.ml.MiniBatchPCA"
] |
[((335, 356), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (349, 356), False, 'import matplotlib\n'), ((577, 590), 'odin.fuel.load_iris', 'F.load_iris', ([], {}), '()\n', (588, 590), True, 'from odin import fuel as F, visual\n'), ((609, 623), 'odin.ml.MiniBatchPCA', 'MiniBatchPCA', ([], {}), '()\n', (621, 623), False, 'from odin.ml import MiniBatchPCA\n'), ((1062, 1121), 'odin.visual.plot_scatter', 'visual.plot_scatter', (['x[:, 0]', 'x[:, 1]'], {'color': 'colors', 'size': '(8)'}), '(x[:, 0], x[:, 1], color=colors, size=8)\n', (1081, 1121), False, 'from odin import fuel as F, visual\n'), ((1122, 1154), 'odin.visual.plot_save', 'visual.plot_save', (['"""/tmp/tmp.pdf"""'], {}), "('/tmp/tmp.pdf')\n", (1138, 1154), False, 'from odin import fuel as F, visual\n'), ((761, 772), 'odin.utils.UnitTimer', 'UnitTimer', ([], {}), '()\n', (770, 772), False, 'from odin.utils import UnitTimer, TemporaryDirectory\n'), ((832, 843), 'odin.utils.UnitTimer', 'UnitTimer', ([], {}), '()\n', (841, 843), False, 'from odin.utils import UnitTimer, TemporaryDirectory\n')]
|