Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 245 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 – 245 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 – 245 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 – 996k |
| avg_line_length | float64 | 1.33 – 58.2k |
| max_line_length | int64 | 2 – 323k |
| alphanum_fraction | float64 | 0 – 0.97 |
| content_no_comment | string | length 0 – 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
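
A minimal sketch of streaming rows with this schema, assuming the table is published as a Hugging Face `datasets` dataset; the dataset id below is a hypothetical placeholder, not the real identifier.

```python
# Minimal sketch: stream and inspect rows matching the schema above.
# "user/python-source-files" is a hypothetical placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("user/python-source-files", split="train", streaming=True)
for row in ds.take(2):
    print(row["hexsha"], row["size"], row["max_stars_repo_name"])
    print(row["content"][:100])  # first 100 characters of the file body
```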

---

Row 1

| field | value |
|---|---|
| hexsha | 7903ad4d5e4d2f4f2f298c54c4105aad64c96d1c |
| size | 1,378 |
| ext / lang | py / Python |
| max_stars | examples/Legend.py · hishizuka/pyqtgraph @ 4820625d93ffb41f324431d0d29b395cf91f339e · ["MIT"] · count 2,762 · 2015-01-02T14:34:10.000Z – 2022-03-30T14:06:07.000Z |
| max_issues | examples/Legend.py · hishizuka/pyqtgraph @ 4820625d93ffb41f324431d0d29b395cf91f339e · ["MIT"] · count 1,901 · 2015-01-12T03:20:30.000Z – 2022-03-31T16:33:36.000Z |
| max_forks | examples/Legend.py · hishizuka/pyqtgraph @ 4820625d93ffb41f324431d0d29b395cf91f339e · ["MIT"] · count 1,038 · 2015-01-01T04:05:49.000Z – 2022-03-31T11:57:51.000Z |

content:

# -*- coding: utf-8 -*-
"""
Demonstrates basic use of LegendItem
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
win = pg.plot()
win.setWindowTitle('pyqtgraph example: BarGraphItem')
# # option1: only for .plot(), following c1,c2 for example-----------------------
# win.addLegend(frame=False, colCount=2)
# bar graph
x = np.arange(10)
y = np.sin(x+2) * 3
bg1 = pg.BarGraphItem(x=x, height=y, width=0.3, brush='b', pen='w', name='bar')
win.addItem(bg1)
# curve
c1 = win.plot([np.random.randint(0,8) for i in range(10)], pen='r', symbol='t', symbolPen='r', symbolBrush='g', name='curve1')
c2 = win.plot([2,1,4,3,1,3,2,4,3,2], pen='g', fillLevel=0, fillBrush=(255,255,255,30), name='curve2')
# scatter plot
s1 = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120), name='scatter')
spots = [{'pos': [i, np.random.randint(-3, 3)], 'data': 1} for i in range(10)]
s1.addPoints(spots)
win.addItem(s1)
# # option2: generic method------------------------------------------------
legend = pg.LegendItem((80,60), offset=(70,20))
legend.setParentItem(win.graphicsItem())
legend.addItem(bg1, 'bar')
legend.addItem(c1, 'curve1')
legend.addItem(c2, 'curve2')
legend.addItem(s1, 'scatter')
if __name__ == '__main__':
pg.exec()

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 31.318182 | 126 | 0.646589 |

content_no_comment:

import initExample
import numpy as np
win = pg.plot()
win.setWindowTitle('pyqtgraph example: BarGraphItem')
width=0.3, brush='b', pen='w', name='bar')
win.addItem(bg1)
c1 = win.plot([np.random.randint(0,8) for i in range(10)], pen='r', symbol='t', symbolPen='r', symbolBrush='g', name='curve1')
c2 = win.plot([2,1,4,3,1,3,2,4,3,2], pen='g', fillLevel=0, fillBrush=(255,255,255,30), name='curve2')
s1 = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120), name='scatter')
spots = [{'pos': [i, np.random.randint(-3, 3)], 'data': 1} for i in range(10)]
s1.addPoints(spots)
win.addItem(s1)
.graphicsItem())
legend.addItem(bg1, 'bar')
legend.addItem(c1, 'curve1')
legend.addItem(c2, 'curve2')
legend.addItem(s1, 'scatter')
if __name__ == '__main__':
pg.exec()

| is_comment_constant_removed | is_sharp_comment_removed |
|---|---|
| true | true |
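
The three trailing statistics columns are plain functions of `content`. The dump does not include the reference implementation, so the sketch below is an assumption inferred from the column names. As a consistency check, Row 1's avg_line_length agrees with size divided by line count (1,378 / 44 = 31.318182), which suggests the original file had 44 lines, including blank lines not preserved in this dump.

```python
# Hedged sketch of how the per-row statistics could be computed from `content`.
# These definitions are inferred from the column names and are assumptions,
# not the dataset's reference implementation.
def file_stats(content: str) -> dict:
    lines = content.splitlines()
    n_chars = len(content)
    return {
        "avg_line_length": n_chars / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(n_chars, 1),
    }
```

Exact equality with the stored values depends on details the dump does not specify, such as whether underscores count as alphanumeric and how trailing newlines are handled.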

---

Row 2

| field | value |
|---|---|
| hexsha | 7903ad83a3c9266683e5189ab1c26a2199f34333 |
| size | 1,389 |
| ext / lang | py / Python |
| max_stars | setup.py · timgates42/pyramid_pages @ 545b1ecb2e5dee5742135ba2a689b9635dd4efa1 · ["MIT"] · count 9 · 2015-12-20T04:23:31.000Z – 2020-11-13T06:23:47.000Z |
| max_issues | setup.py · timgates42/pyramid_pages @ 545b1ecb2e5dee5742135ba2a689b9635dd4efa1 · ["MIT"] · count 21 · 2015-06-01T14:15:38.000Z – 2015-09-14T15:15:30.000Z |
| max_forks | setup.py · timgates42/pyramid_pages @ 545b1ecb2e5dee5742135ba2a689b9635dd4efa1 · ["MIT"] · count 2 · 2017-04-10T18:39:17.000Z – 2020-04-01T11:31:37.000Z |

content:

import os
from setuptools import find_packages, setup
this = os.path.dirname(os.path.realpath(__file__))
def read(name):
with open(os.path.join(this, name)) as f:
return f.read()
setup(
name='pyramid_pages',
version='0.0.5',
url='http://github.com/uralbash/pyramid_pages/',
author='Svintsov Dmitry',
author_email='sacrud@uralbash.ru',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite="nose.collector",
license="MIT",
description='Tree pages for pyramid',
long_description=read('README.rst'),
install_requires=read('requirements.txt'),
tests_require=read('requirements.txt') + read('requirements-test.txt'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Framework :: Pyramid ",
"Topic :: Internet",
"Topic :: Database",
],
)

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 30.866667 | 75 | 0.62491 |

content_no_comment:

import os
from setuptools import find_packages, setup
this = os.path.dirname(os.path.realpath(__file__))
def read(name):
with open(os.path.join(this, name)) as f:
return f.read()
setup(
name='pyramid_pages',
version='0.0.5',
url='http://github.com/uralbash/pyramid_pages/',
author='Svintsov Dmitry',
author_email='sacrud@uralbash.ru',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite="nose.collector",
license="MIT",
description='Tree pages for pyramid',
long_description=read('README.rst'),
install_requires=read('requirements.txt'),
tests_require=read('requirements.txt') + read('requirements-test.txt'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Framework :: Pyramid ",
"Topic :: Internet",
"Topic :: Database",
],
)

| is_comment_constant_removed | is_sharp_comment_removed |
|---|---|
| true | true |
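
The content_no_comment column and the two boolean flags imply a comment-stripping pass over the source ("sharp" comments being `#` comments). The sketch below shows one tokenize-based way to strip `#` comments; it is an assumption about the general technique, not the dataset's actual pipeline, which (as Row 1's content_no_comment shows) also removes other fragments.

```python
# Hedged sketch of sharp ('#') comment removal, the kind of pass implied by
# is_sharp_comment_removed. Tokenizing, rather than splitting on '#', keeps
# '#' characters inside string literals intact. Note that untokenize
# preserves token positions, so removed comments leave trailing whitespace.
import io
import tokenize

def strip_sharp_comments(source: str) -> str:
    tokens = [
        tok
        for tok in tokenize.generate_tokens(io.StringIO(source).readline)
        if tok.type != tokenize.COMMENT
    ]
    return tokenize.untokenize(tokens)
```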

---

Row 3

| field | value |
|---|---|
| hexsha | 7903ae47b010a89c02ae6eaa38fc3169ff383f96 |
| size | 46 |
| ext / lang | py / Python |
| max_stars | pilot/__init__.py · WuraLab/wuralab.github.io @ 9da7bb4312f30e864a87b456528192a139aafecc · ["MIT"] · count null · dates null |
| max_issues | pilot/__init__.py · WuraLab/wuralab.github.io @ 9da7bb4312f30e864a87b456528192a139aafecc · ["MIT"] · count null · dates null |
| max_forks | pilot/__init__.py · WuraLab/wuralab.github.io @ 9da7bb4312f30e864a87b456528192a139aafecc · ["MIT"] · count null · dates null |

content:

from user import User
__all__ =[
"User"
]

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 9.2 | 21 | 0.630435 |

content_no_comment:

from user import User
__all__ =[
"User"
]

| is_comment_constant_removed | is_sharp_comment_removed |
|---|---|
| true | true |

---

Row 4

| field | value |
|---|---|
| hexsha | 7903aec9261beaa0e04cd5547dff04f42ead45ee |
| size | 457 |
| ext / lang | py / Python |
| max_stars | frequently/urls.py · bitlabstudio/django-frequently @ 93c76af62325afd1f09487dd1bb527fdd238ec8e · ["MIT"] · count 5 · 2016-12-08T21:40:54.000Z – 2020-04-08T07:05:22.000Z |
| max_issues | frequently/urls.py · bitlabstudio/django-frequently @ 93c76af62325afd1f09487dd1bb527fdd238ec8e · ["MIT"] · count null · dates null |
| max_forks | frequently/urls.py · bitlabstudio/django-frequently @ 93c76af62325afd1f09487dd1bb527fdd238ec8e · ["MIT"] · count 1 · 2019-11-29T13:35:05.000Z – 2019-11-29T13:35:05.000Z |

content:

"""URLs for the ``django-frequently`` application."""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$',
views.EntryCategoryListView.as_view(),
name='frequently_list'),
url(r'^your-question/$',
views.EntryCreateView.as_view(),
name='frequently_submit_question'),
url(r'^(?P<slug>[a-z-0-9]+)/$',
views.EntryDetailView.as_view(),
name='frequently_entry_detail'),
]

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 22.85 | 53 | 0.625821 |

content_no_comment:

from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$',
views.EntryCategoryListView.as_view(),
name='frequently_list'),
url(r'^your-question/$',
views.EntryCreateView.as_view(),
name='frequently_submit_question'),
url(r'^(?P<slug>[a-z-0-9]+)/$',
views.EntryDetailView.as_view(),
name='frequently_entry_detail'),
]

| is_comment_constant_removed | is_sharp_comment_removed |
|---|---|
| true | true |

---

Row 5

| field | value |
|---|---|
| hexsha | 7903af6f4b451614c6d96b416f5faf95bbe378e2 |
| size | 3,051 |
| ext / lang | py / Python |
| max_stars | controlm_py/models/error_list.py · dcompane/controlm_py @ c521208be2f00303383bb32ca5eb2b7ff91999d3 · ["MIT"] · count 2 · 2020-03-20T18:24:23.000Z – 2021-03-05T22:05:04.000Z |
| max_issues | controlm_py/models/error_list.py · dcompane/controlm_py @ c521208be2f00303383bb32ca5eb2b7ff91999d3 · ["MIT"] · count null · dates null |
| max_forks | controlm_py/models/error_list.py · dcompane/controlm_py @ c521208be2f00303383bb32ca5eb2b7ff91999d3 · ["MIT"] · count 1 · 2021-05-27T15:54:37.000Z – 2021-05-27T15:54:37.000Z |

content:

# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ErrorList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'errors': 'list[ErrorData]'
}
attribute_map = {
'errors': 'errors'
}
def __init__(self, errors=None): # noqa: E501
"""ErrorList - a model defined in Swagger""" # noqa: E501
self._errors = None
self.discriminator = None
if errors is not None:
self.errors = errors
@property
def errors(self):
"""Gets the errors of this ErrorList. # noqa: E501
:return: The errors of this ErrorList. # noqa: E501
:rtype: list[ErrorData]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this ErrorList.
:param errors: The errors of this ErrorList. # noqa: E501
:type: list[ErrorData]
"""
self._errors = errors
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ErrorList, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 27.486486 | 80 | 0.550967 |

content_no_comment:

import pprint
import re
import six
class ErrorList(object):
swagger_types = {
'errors': 'list[ErrorData]'
}
attribute_map = {
'errors': 'errors'
}
def __init__(self, errors=None):
self._errors = None
self.discriminator = None
if errors is not None:
self.errors = errors
@property
def errors(self):
return self._errors
@errors.setter
def errors(self, errors):
self._errors = errors
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ErrorList, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ErrorList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other

| is_comment_constant_removed | is_sharp_comment_removed |
|---|---|
| true | true |

---

Row 6

| field | value |
|---|---|
| hexsha | 7903af900a11fbfd57e6ba7f79d4ea4d8ce692e5 |
| size | 470 |
| ext / lang | py / Python |
| max_stars | nettests/monitor.py · Laighno/evt @ 90b94e831aebb62c6ad19ce59c9089e9f51cfd77 · ["MIT"] · count 1,411 · 2018-04-23T03:57:30.000Z – 2022-02-13T10:34:22.000Z |
| max_issues | nettests/monitor.py · Zhang-Zexi/evt @ e90fe4dbab4b9512d120c79f33ecc62791e088bd · ["Apache-2.0"] · count 27 · 2018-06-11T10:34:42.000Z – 2019-07-27T08:50:02.000Z |
| max_forks | nettests/monitor.py · Zhang-Zexi/evt @ e90fe4dbab4b9512d120c79f33ecc62791e088bd · ["Apache-2.0"] · count 364 · 2018-06-09T12:11:53.000Z – 2020-12-15T03:26:48.000Z |

content:

import docker
if __name__ == '__main__':
client = docker.from_env()
i = -1
name = 'evtd_'
while(True):
try:
i += 1
container = client.containers.get('{}{}'.format(name,i))
print(container.logs(tail=1))
# container.stop()
# container.remove()
# print('free {}{} succeed'.format(name, i))
except docker.errors.NotFound:
if(i >= 10):
break

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 26.111111 | 68 | 0.485106 |

content_no_comment:

import docker
if __name__ == '__main__':
client = docker.from_env()
i = -1
name = 'evtd_'
while(True):
try:
i += 1
container = client.containers.get('{}{}'.format(name,i))
print(container.logs(tail=1))
except docker.errors.NotFound:
if(i >= 10):
break

| is_comment_constant_removed | is_sharp_comment_removed |
|---|---|
| true | true |

---

Row 7

| field | value |
|---|---|
| hexsha | 7903afb6d39cba1be067942cce430c5a52065615 |
| size | 1,583 |
| ext / lang | py / Python |
| max_stars | tests/clickhouse/query_dsl/test_time_range.py · fpacifici/snuba @ cf732b71383c948f9387fbe64e9404ca71f8e9c5 · ["Apache-2.0"] · count null · dates null |
| max_issues | tests/clickhouse/query_dsl/test_time_range.py · fpacifici/snuba @ cf732b71383c948f9387fbe64e9404ca71f8e9c5 · ["Apache-2.0"] · count null · dates null |
| max_forks | tests/clickhouse/query_dsl/test_time_range.py · fpacifici/snuba @ cf732b71383c948f9387fbe64e9404ca71f8e9c5 · ["Apache-2.0"] · count null · dates null |

content:

from datetime import datetime
from snuba.clickhouse.query_dsl.accessors import get_time_range
from snuba.datasets.factory import get_dataset
from snuba.datasets.plans.translator.query import identity_translate
from snuba.query.parser import parse_query
from snuba.query.processors.timeseries_processor import TimeSeriesProcessor
from snuba.request.request_settings import HTTPRequestSettings
def test_get_time_range() -> None:
"""
Test finding the time range of a query.
"""
body = {
"selected_columns": ["event_id"],
"conditions": [
("timestamp", ">=", "2019-09-18T10:00:00"),
("timestamp", ">=", "2000-09-18T10:00:00"),
("timestamp", "<", "2019-09-19T12:00:00"),
[("timestamp", "<", "2019-09-18T12:00:00"), ("project_id", "IN", [1])],
("project_id", "IN", [1]),
],
}
events = get_dataset("events")
query = parse_query(body, events)
processors = events.get_default_entity().get_query_processors()
for processor in processors:
if isinstance(processor, TimeSeriesProcessor):
processor.process_query(query, HTTPRequestSettings())
from_date_ast, to_date_ast = get_time_range(identity_translate(query), "timestamp")
assert (
from_date_ast is not None
and isinstance(from_date_ast, datetime)
and from_date_ast.isoformat() == "2019-09-18T10:00:00"
)
assert (
to_date_ast is not None
and isinstance(to_date_ast, datetime)
and to_date_ast.isoformat() == "2019-09-19T12:00:00"
)

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 35.977273 | 87 | 0.660771 |

content_no_comment:

from datetime import datetime
from snuba.clickhouse.query_dsl.accessors import get_time_range
from snuba.datasets.factory import get_dataset
from snuba.datasets.plans.translator.query import identity_translate
from snuba.query.parser import parse_query
from snuba.query.processors.timeseries_processor import TimeSeriesProcessor
from snuba.request.request_settings import HTTPRequestSettings
def test_get_time_range() -> None:
body = {
"selected_columns": ["event_id"],
"conditions": [
("timestamp", ">=", "2019-09-18T10:00:00"),
("timestamp", ">=", "2000-09-18T10:00:00"),
("timestamp", "<", "2019-09-19T12:00:00"),
[("timestamp", "<", "2019-09-18T12:00:00"), ("project_id", "IN", [1])],
("project_id", "IN", [1]),
],
}
events = get_dataset("events")
query = parse_query(body, events)
processors = events.get_default_entity().get_query_processors()
for processor in processors:
if isinstance(processor, TimeSeriesProcessor):
processor.process_query(query, HTTPRequestSettings())
from_date_ast, to_date_ast = get_time_range(identity_translate(query), "timestamp")
assert (
from_date_ast is not None
and isinstance(from_date_ast, datetime)
and from_date_ast.isoformat() == "2019-09-18T10:00:00"
)
assert (
to_date_ast is not None
and isinstance(to_date_ast, datetime)
and to_date_ast.isoformat() == "2019-09-19T12:00:00"
)

| is_comment_constant_removed | is_sharp_comment_removed |
|---|---|
| true | true |

---

Row 8

| field | value |
|---|---|
| hexsha | 7903b1b4bc3be9b2149443518cf0e1fadc48806d |
| size | 17,589 |
| ext / lang | py / Python |
| max_stars | kotlin-website.py · Tradehunt/kotlin-web-site @ 5c2f88fb72130071746bde2c375acbb4182858c0 · ["Apache-2.0"] · count 1,289 · 2015-01-17T23:02:12.000Z – 2022-03-31T07:05:05.000Z |
| max_issues | kotlin-website.py · Tradehunt/kotlin-web-site @ 5c2f88fb72130071746bde2c375acbb4182858c0 · ["Apache-2.0"] · count 1,230 · 2015-01-04T08:16:08.000Z – 2022-03-25T00:00:42.000Z |
| max_forks | kotlin-website.py · Tradehunt/kotlin-web-site @ 5c2f88fb72130071746bde2c375acbb4182858c0 · ["Apache-2.0"] · count 3,395 · 2015-01-02T20:45:03.000Z – 2022-03-30T21:01:15.000Z |

content:

import copy
import datetime
import glob
import json
import os
import sys
import threading
from os import path
from urllib.parse import urlparse, urljoin, ParseResult
import xmltodict
import yaml
from bs4 import BeautifulSoup
from flask import Flask, render_template, Response, send_from_directory, request
from flask.views import View
from flask.helpers import url_for, send_file, make_response
from flask_frozen import Freezer, walk_directory
from hashlib import md5
from yaml import FullLoader
from src.Feature import Feature
from src.dist import get_dist_pages
from src.github import assert_valid_git_hub_url
from src.navigation import process_video_nav, process_nav, get_current_url
from src.api import get_api_page
from src.encoder import DateAwareEncoder
from src.externals import process_nav_includes
from src.grammar import get_grammar
from src.markdown.makrdown import jinja_aware_markdown
from src.pages.MyFlatPages import MyFlatPages
from src.pdf import generate_pdf
from src.processors.processors import process_code_blocks
from src.processors.processors import set_replace_simple_code
from src.search import build_search_indices
from src.sitemap import generate_sitemap, generate_temporary_sitemap
from src.ktl_components import KTLComponentExtension
app = Flask(__name__, static_folder='_assets')
app.config.from_pyfile('mysettings.py')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
pages = MyFlatPages(app)
freezer = Freezer(app)
ignore_stdlib = False
build_mode = False
build_contenteditable = False
build_check_links = True
build_errors = []
url_adapter = app.create_url_adapter(None)
root_folder = path.join(os.path.dirname(__file__))
data_folder = path.join(os.path.dirname(__file__), "data")
_nav_cache = None
_nav_lock = threading.RLock()
_cached_asset_version = {}
def get_asset_version(filename):
if filename in _cached_asset_version:
return _cached_asset_version[filename]
filepath = (root_folder if root_folder else ".") + filename
if filename and path.exists(filepath):
with open(filepath, 'rb') as file:
digest = md5(file.read()).hexdigest()
_cached_asset_version[filename] = digest
return digest
return None
def get_site_data():
data = {}
for data_file in os.listdir(data_folder):
if data_file.startswith('_'):
continue
if not data_file.endswith(".yml"):
continue
data_file_path = path.join(data_folder, data_file)
with open(data_file_path, encoding="UTF-8") as stream:
try:
file_name_without_extension = data_file[:-4] if data_file.endswith(".yml") else data_file
data[file_name_without_extension] = yaml.load(stream, Loader=FullLoader)
except yaml.YAMLError as exc:
sys.stderr.write('Cant parse data file ' + data_file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
except IOError as exc:
sys.stderr.write('Cant read data file ' + data_file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
return data
site_data = get_site_data()
def get_nav():
global _nav_cache
global _nav_lock
with _nav_lock:
if _nav_cache is not None:
nav = _nav_cache
else:
nav = get_nav_impl()
nav = copy.deepcopy(nav)
if build_mode:
_nav_cache = copy.deepcopy(nav)
# NOTE. This call depends on `request.path`, cannot cache
process_nav(request.path, nav)
return nav
def get_nav_impl():
with open(path.join(data_folder, "_nav.yml")) as stream:
nav = yaml.load(stream, Loader=FullLoader)
nav = process_nav_includes(build_mode, nav)
return nav
def get_kotlin_features():
features_dir = path.join(os.path.dirname(__file__), "kotlin-features")
features = []
for feature_meta in yaml.load(open(path.join(features_dir, "kotlin-features.yml"))):
file_path = path.join(features_dir, feature_meta['content_file'])
with open(file_path, encoding='utf-8') as f:
content = f.read()
content = content.replace("\r\n", "\n")
if file_path.endswith(".md"):
html_content = BeautifulSoup(jinja_aware_markdown(content, pages), 'html.parser')
content = process_code_blocks(html_content)
features.append(Feature(content, feature_meta))
return features
@app.context_processor
def add_year_to_context():
return {
'year': datetime.datetime.now().year
}
app.jinja_env.add_extension(KTLComponentExtension)
@app.context_processor
def add_data_to_context():
nav = get_nav()
return {
'nav': nav,
'data': site_data,
'site': {
'pdf_url': app.config['PDF_URL'],
'forum_url': app.config['FORUM_URL'],
'site_github_url': app.config['SITE_GITHUB_URL'],
'data': site_data,
'text_using_gradle': app.config['TEXT_USING_GRADLE'],
'code_baseurl': app.config['CODE_URL'],
'contenteditable': build_contenteditable
},
'headerCurrentUrl': get_current_url(nav['subnav']['content'])
}
@app.template_filter('get_domain')
def get_domain(url):
return urlparse(url).netloc
app.jinja_env.globals['get_domain'] = get_domain
@app.template_filter('split_chunk')
def split_chunk(list, size):
return [list[i:i+size] for i in range(len(list))[::size]]
app.jinja_env.globals['split_chunk'] = split_chunk
@app.template_filter('autoversion')
def autoversion_filter(filename):
asset_version = get_asset_version(filename)
if asset_version is None: return filename
original = urlparse(filename)._asdict()
original.update(query=original.get('query') + '&v=' + asset_version)
return ParseResult(**original).geturl()
@app.route('/data/events.json')
def get_events():
with open(path.join(data_folder, "events.xml"), encoding="UTF-8") as events_file:
events = xmltodict.parse(events_file.read())['events']['event']
return Response(json.dumps(events, cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/cities.json')
def get_cities():
return Response(json.dumps(site_data['cities'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/kotlinconf.json')
def get_kotlinconf():
return Response(json.dumps(site_data['kotlinconf'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/universities.json')
def get_universities():
return Response(json.dumps(site_data['universities'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/user-groups.json')
def get_user_groups():
return Response(json.dumps(site_data['user-groups'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/docs/reference/grammar.html')
def grammar():
grammar = get_grammar(build_mode)
if grammar is None:
return "Grammar file not found", 404
return render_template('pages/grammar.html', kotlinGrammar=grammar)
@app.route('/docs/videos.html')
def videos_page():
return render_template('pages/videos.html', videos=process_video_nav(site_data['videos']))
@app.route('/docs/kotlin-reference.pdf')
def kotlin_reference_pdf():
return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf"))
@app.route('/docs/kotlin-docs.pdf')
def kotlin_docs_pdf():
return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf"))
@app.route('/community/')
def community_page():
return render_template('pages/community.html')
@app.route('/user-groups/user-group-list.html')
def user_group_list():
return render_template(
'pages/user-groups/user-group-list.html',
user_groups_data=site_data['user-groups'],
number_of_groups=sum(map(lambda section: len(section['groups']), site_data['user-groups'])))
@app.route('/education/')
def education_page():
return render_template('pages/education/index.html')
@app.route('/')
def index_page():
features = get_kotlin_features()
return render_template('pages/index.html',
is_index_page=True,
features=features
)
def process_page(page_path):
# get_nav() has side effect to copy and patch files from the `external` folder
# under site folder. We need it for dev mode to make sure file is up-to-date
# TODO: extract get_nav and implement the explicit way to avoid side-effects
get_nav()
page = pages.get_or_404(page_path)
if 'redirect_path' in page.meta and page.meta['redirect_path'] is not None:
page_path = page.meta['redirect_path']
if page_path.startswith('https://') or page_path.startswith('http://'):
return render_template('redirect.html', url=page_path)
else:
return render_template('redirect.html', url=url_for('page', page_path = page_path))
if 'date' in page.meta and page['date'] is not None:
page.meta['formatted_date'] = page.meta['date'].strftime('%d %B %Y')
if page.meta['formatted_date'].startswith('0'):
page.meta['formatted_date'] = page.meta['formatted_date'][1:]
if 'github_edit_url' in page.meta:
edit_on_github_url = page.meta['github_edit_url']
else:
edit_on_github_url = app.config['EDIT_ON_GITHUB_URL'] + app.config['FLATPAGES_ROOT'] + "/" + page_path + \
app.config['FLATPAGES_EXTENSION']
assert_valid_git_hub_url(edit_on_github_url, page_path)
template = page.meta["layout"] if 'layout' in page.meta else 'default.html'
if not template.endswith(".html"):
template += ".html"
if build_check_links:
validate_links_weak(page, page_path)
return render_template(
template,
page=page,
baseurl="",
edit_on_github_url=edit_on_github_url,
)
def validate_links_weak(page, page_path):
for link in page.parsed_html.select('a'):
if 'href' not in link.attrs:
continue
href = urlparse(urljoin('/' + page_path, link['href']))
if href.scheme != '':
continue
endpoint, params = url_adapter.match(href.path, 'GET', query_args={})
if endpoint != 'page' and endpoint != 'get_index_page':
response = app.test_client().get(href.path)
if response.status_code == 404:
build_errors.append("Broken link: " + str(href.path) + " on page " + page_path)
continue
referenced_page = pages.get(params['page_path'])
if referenced_page is None:
build_errors.append("Broken link: " + str(href.path) + " on page " + page_path)
continue
if href.fragment == '':
continue
ids = []
for x in referenced_page.parsed_html.select('h1,h2,h3,h4'):
try:
ids.append(x['id'])
except KeyError:
pass
for x in referenced_page.parsed_html.select('a'):
try:
ids.append(x['name'])
except KeyError:
pass
if href.fragment not in ids:
build_errors.append("Bad anchor: " + str(href.fragment) + " on page " + page_path)
if not build_mode and len(build_errors) > 0:
errors_copy = []
for item in build_errors:
errors_copy.append(item)
build_errors.clear()
raise Exception("Validation errors " + str(len(errors_copy)) + ":\n\n" +
"\n".join(str(item) for item in errors_copy))
@freezer.register_generator
def page():
for page in pages:
yield {'page_path': page.path}
@app.route('/<path:page_path>.html')
def page(page_path):
return process_page(page_path)
@app.route('/404.html')
def page_404():
return render_template('pages/404.html')
@freezer.register_generator
def api_page():
api_folder = path.join(root_folder, 'api')
for root, dirs, files in os.walk(api_folder):
for file in files:
yield {'page_path': path.join(path.relpath(root, api_folder), file).replace(os.sep, '/')}
class RedirectTemplateView(View):
def __init__(self, url):
self.redirect_url = url
def dispatch_request(self):
return render_template('redirect.html', url=self.redirect_url)
def generate_redirect_pages():
redirects_folder = path.join(root_folder, 'redirects')
for root, dirs, files in os.walk(redirects_folder):
for file in files:
if not file.endswith(".yml"):
continue
redirects_file_path = path.join(redirects_folder, file)
with open(redirects_file_path, encoding="UTF-8") as stream:
try:
redirects = yaml.load(stream, Loader=FullLoader)
for entry in redirects:
url_to = entry["to"]
url_from = entry["from"]
url_list = url_from if isinstance(url_from, list) else [url_from]
for url in url_list:
app.add_url_rule(url, view_func=RedirectTemplateView.as_view(url, url=url_to))
except yaml.YAMLError as exc:
sys.stderr.write('Cant parse data file ' + file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
except IOError as exc:
sys.stderr.write('Cant read data file ' + file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
@app.errorhandler(404)
def page_not_found(e):
return render_template('pages/404.html'), 404
app.register_error_handler(404, page_not_found)
@app.route('/api/<path:page_path>')
def api_page(page_path):
path_other, ext = path.splitext(page_path)
if ext == '.html':
return process_api_page(page_path[:-5])
elif path.basename(page_path) == "package-list" or ext:
return respond_with_package_list(page_path)
elif not page_path.endswith('/'):
page_path += '/'
return process_api_page(page_path + 'index')
def process_api_page(page_path):
return render_template(
'api.html',
page=get_api_page(build_mode, page_path)
)
def respond_with_package_list(page_path):
file_path = path.join(root_folder, 'api', page_path)
if not path.exists(file_path):
return make_response(path.basename(page_path) + " not found", 404)
return send_file(file_path, mimetype="text/plain")
@app.route('/assets/<path:path>')
def asset(path):
return send_from_directory('assets', path)
@app.route('/assets/images/tutorials/<path:filename>')
def tutorial_img(filename):
return send_from_directory(path.join('assets', 'images', 'tutorials'), filename)
@freezer.register_generator
def asset():
for filename in walk_directory(path.join(root_folder, "assets")):
yield {'path': filename}
@app.route('/<path:page_path>')
def get_index_page(page_path):
"""
Handle requests which urls don't end with '.html' (for example, '/doc/')
We don't need any generator here, because such urls are equivalent to the same urls
with 'index.html' at the end.
:param page_path: str
:return: str
"""
if not page_path.endswith('/'):
page_path += '/'
return process_page(page_path + 'index')
generate_redirect_pages()
@app.after_request
def add_header(request):
request.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
request.headers["Pragma"] = "no-cache"
request.headers["Expires"] = "0"
request.headers['Cache-Control'] = 'public, max-age=0'
return request
if __name__ == '__main__':
print("\n\n\nRunning new KotlinWebSite generator/dev-mode:\n")
argv_copy = []
for arg in sys.argv:
print("arg: " + arg)
if arg == "--ignore-stdlib":
ignore_stdlib = True
elif arg == "--no-check-links":
build_check_links = False
elif arg == "--editable":
build_contenteditable = True
else:
argv_copy.append(arg)
print("\n\n")
print("ignore_stdlib: " + str(ignore_stdlib))
print("build_check_links: " + str(build_check_links))
print("build_contenteditable: " + str(build_contenteditable))
print("\n\n")
set_replace_simple_code(build_contenteditable)
with (open(path.join(root_folder, "_nav-mapped.yml"), 'w')) as output:
yaml.dump(get_nav_impl(), output)
if len(argv_copy) > 1:
if argv_copy[1] == "build":
build_mode = True
urls = freezer.freeze()
if len(build_errors) > 0:
for error in build_errors:
sys.stderr.write(error + '\n')
sys.exit(-1)
elif argv_copy[1] == "sitemap":
generate_sitemap(get_dist_pages())
# temporary sitemap
generate_temporary_sitemap()
elif argv_copy[1] == "index":
build_search_indices(get_dist_pages())
elif argv_copy[1] == "reference-pdf":
generate_pdf("kotlin-docs.pdf", site_data)
else:
print("Unknown argument: " + argv_copy[1])
sys.exit(1)
else:
app.run(host="0.0.0.0", debug=True, threaded=True, **{"extra_files": {
'/src/data/_nav.yml',
*glob.glob("/src/pages-includes/**/*", recursive=True),
}})

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 32.096715 | 114 | 0.648815 |

content_no_comment:

import copy
import datetime
import glob
import json
import os
import sys
import threading
from os import path
from urllib.parse import urlparse, urljoin, ParseResult
import xmltodict
import yaml
from bs4 import BeautifulSoup
from flask import Flask, render_template, Response, send_from_directory, request
from flask.views import View
from flask.helpers import url_for, send_file, make_response
from flask_frozen import Freezer, walk_directory
from hashlib import md5
from yaml import FullLoader
from src.Feature import Feature
from src.dist import get_dist_pages
from src.github import assert_valid_git_hub_url
from src.navigation import process_video_nav, process_nav, get_current_url
from src.api import get_api_page
from src.encoder import DateAwareEncoder
from src.externals import process_nav_includes
from src.grammar import get_grammar
from src.markdown.makrdown import jinja_aware_markdown
from src.pages.MyFlatPages import MyFlatPages
from src.pdf import generate_pdf
from src.processors.processors import process_code_blocks
from src.processors.processors import set_replace_simple_code
from src.search import build_search_indices
from src.sitemap import generate_sitemap, generate_temporary_sitemap
from src.ktl_components import KTLComponentExtension
app = Flask(__name__, static_folder='_assets')
app.config.from_pyfile('mysettings.py')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
pages = MyFlatPages(app)
freezer = Freezer(app)
ignore_stdlib = False
build_mode = False
build_contenteditable = False
build_check_links = True
build_errors = []
url_adapter = app.create_url_adapter(None)
root_folder = path.join(os.path.dirname(__file__))
data_folder = path.join(os.path.dirname(__file__), "data")
_nav_cache = None
_nav_lock = threading.RLock()
_cached_asset_version = {}
def get_asset_version(filename):
if filename in _cached_asset_version:
return _cached_asset_version[filename]
filepath = (root_folder if root_folder else ".") + filename
if filename and path.exists(filepath):
with open(filepath, 'rb') as file:
digest = md5(file.read()).hexdigest()
_cached_asset_version[filename] = digest
return digest
return None
def get_site_data():
data = {}
for data_file in os.listdir(data_folder):
if data_file.startswith('_'):
continue
if not data_file.endswith(".yml"):
continue
data_file_path = path.join(data_folder, data_file)
with open(data_file_path, encoding="UTF-8") as stream:
try:
file_name_without_extension = data_file[:-4] if data_file.endswith(".yml") else data_file
data[file_name_without_extension] = yaml.load(stream, Loader=FullLoader)
except yaml.YAMLError as exc:
sys.stderr.write('Cant parse data file ' + data_file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
except IOError as exc:
sys.stderr.write('Cant read data file ' + data_file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
return data
site_data = get_site_data()
def get_nav():
global _nav_cache
global _nav_lock
with _nav_lock:
if _nav_cache is not None:
nav = _nav_cache
else:
nav = get_nav_impl()
nav = copy.deepcopy(nav)
if build_mode:
_nav_cache = copy.deepcopy(nav)
process_nav(request.path, nav)
return nav
def get_nav_impl():
with open(path.join(data_folder, "_nav.yml")) as stream:
nav = yaml.load(stream, Loader=FullLoader)
nav = process_nav_includes(build_mode, nav)
return nav
def get_kotlin_features():
features_dir = path.join(os.path.dirname(__file__), "kotlin-features")
features = []
for feature_meta in yaml.load(open(path.join(features_dir, "kotlin-features.yml"))):
file_path = path.join(features_dir, feature_meta['content_file'])
with open(file_path, encoding='utf-8') as f:
content = f.read()
content = content.replace("\r\n", "\n")
if file_path.endswith(".md"):
html_content = BeautifulSoup(jinja_aware_markdown(content, pages), 'html.parser')
content = process_code_blocks(html_content)
features.append(Feature(content, feature_meta))
return features
@app.context_processor
def add_year_to_context():
return {
'year': datetime.datetime.now().year
}
app.jinja_env.add_extension(KTLComponentExtension)
@app.context_processor
def add_data_to_context():
nav = get_nav()
return {
'nav': nav,
'data': site_data,
'site': {
'pdf_url': app.config['PDF_URL'],
'forum_url': app.config['FORUM_URL'],
'site_github_url': app.config['SITE_GITHUB_URL'],
'data': site_data,
'text_using_gradle': app.config['TEXT_USING_GRADLE'],
'code_baseurl': app.config['CODE_URL'],
'contenteditable': build_contenteditable
},
'headerCurrentUrl': get_current_url(nav['subnav']['content'])
}
@app.template_filter('get_domain')
def get_domain(url):
return urlparse(url).netloc
app.jinja_env.globals['get_domain'] = get_domain
@app.template_filter('split_chunk')
def split_chunk(list, size):
return [list[i:i+size] for i in range(len(list))[::size]]
app.jinja_env.globals['split_chunk'] = split_chunk
@app.template_filter('autoversion')
def autoversion_filter(filename):
asset_version = get_asset_version(filename)
if asset_version is None: return filename
original = urlparse(filename)._asdict()
original.update(query=original.get('query') + '&v=' + asset_version)
return ParseResult(**original).geturl()
@app.route('/data/events.json')
def get_events():
with open(path.join(data_folder, "events.xml"), encoding="UTF-8") as events_file:
events = xmltodict.parse(events_file.read())['events']['event']
return Response(json.dumps(events, cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/cities.json')
def get_cities():
return Response(json.dumps(site_data['cities'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/kotlinconf.json')
def get_kotlinconf():
return Response(json.dumps(site_data['kotlinconf'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/universities.json')
def get_universities():
return Response(json.dumps(site_data['universities'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/data/user-groups.json')
def get_user_groups():
return Response(json.dumps(site_data['user-groups'], cls=DateAwareEncoder), mimetype='application/json')
@app.route('/docs/reference/grammar.html')
def grammar():
grammar = get_grammar(build_mode)
if grammar is None:
return "Grammar file not found", 404
return render_template('pages/grammar.html', kotlinGrammar=grammar)
@app.route('/docs/videos.html')
def videos_page():
return render_template('pages/videos.html', videos=process_video_nav(site_data['videos']))
@app.route('/docs/kotlin-reference.pdf')
def kotlin_reference_pdf():
return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf"))
@app.route('/docs/kotlin-docs.pdf')
def kotlin_docs_pdf():
return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf"))
@app.route('/community/')
def community_page():
return render_template('pages/community.html')
@app.route('/user-groups/user-group-list.html')
def user_group_list():
return render_template(
'pages/user-groups/user-group-list.html',
user_groups_data=site_data['user-groups'],
number_of_groups=sum(map(lambda section: len(section['groups']), site_data['user-groups'])))
@app.route('/education/')
def education_page():
return render_template('pages/education/index.html')
@app.route('/')
def index_page():
features = get_kotlin_features()
return render_template('pages/index.html',
is_index_page=True,
features=features
)
def process_page(page_path):
get_nav()
page = pages.get_or_404(page_path)
if 'redirect_path' in page.meta and page.meta['redirect_path'] is not None:
page_path = page.meta['redirect_path']
if page_path.startswith('https://') or page_path.startswith('http://'):
return render_template('redirect.html', url=page_path)
else:
return render_template('redirect.html', url=url_for('page', page_path = page_path))
if 'date' in page.meta and page['date'] is not None:
page.meta['formatted_date'] = page.meta['date'].strftime('%d %B %Y')
if page.meta['formatted_date'].startswith('0'):
page.meta['formatted_date'] = page.meta['formatted_date'][1:]
if 'github_edit_url' in page.meta:
edit_on_github_url = page.meta['github_edit_url']
else:
edit_on_github_url = app.config['EDIT_ON_GITHUB_URL'] + app.config['FLATPAGES_ROOT'] + "/" + page_path + \
app.config['FLATPAGES_EXTENSION']
assert_valid_git_hub_url(edit_on_github_url, page_path)
template = page.meta["layout"] if 'layout' in page.meta else 'default.html'
if not template.endswith(".html"):
template += ".html"
if build_check_links:
validate_links_weak(page, page_path)
return render_template(
template,
page=page,
baseurl="",
edit_on_github_url=edit_on_github_url,
)
def validate_links_weak(page, page_path):
for link in page.parsed_html.select('a'):
if 'href' not in link.attrs:
continue
href = urlparse(urljoin('/' + page_path, link['href']))
if href.scheme != '':
continue
endpoint, params = url_adapter.match(href.path, 'GET', query_args={})
if endpoint != 'page' and endpoint != 'get_index_page':
response = app.test_client().get(href.path)
if response.status_code == 404:
build_errors.append("Broken link: " + str(href.path) + " on page " + page_path)
continue
referenced_page = pages.get(params['page_path'])
if referenced_page is None:
build_errors.append("Broken link: " + str(href.path) + " on page " + page_path)
continue
if href.fragment == '':
continue
ids = []
for x in referenced_page.parsed_html.select('h1,h2,h3,h4'):
try:
ids.append(x['id'])
except KeyError:
pass
for x in referenced_page.parsed_html.select('a'):
try:
ids.append(x['name'])
except KeyError:
pass
if href.fragment not in ids:
build_errors.append("Bad anchor: " + str(href.fragment) + " on page " + page_path)
if not build_mode and len(build_errors) > 0:
errors_copy = []
for item in build_errors:
errors_copy.append(item)
build_errors.clear()
raise Exception("Validation errors " + str(len(errors_copy)) + ":\n\n" +
"\n".join(str(item) for item in errors_copy))
@freezer.register_generator
def page():
for page in pages:
yield {'page_path': page.path}
@app.route('/<path:page_path>.html')
def page(page_path):
return process_page(page_path)
@app.route('/404.html')
def page_404():
return render_template('pages/404.html')
@freezer.register_generator
def api_page():
api_folder = path.join(root_folder, 'api')
for root, dirs, files in os.walk(api_folder):
for file in files:
yield {'page_path': path.join(path.relpath(root, api_folder), file).replace(os.sep, '/')}
class RedirectTemplateView(View):
def __init__(self, url):
self.redirect_url = url
def dispatch_request(self):
return render_template('redirect.html', url=self.redirect_url)
def generate_redirect_pages():
redirects_folder = path.join(root_folder, 'redirects')
for root, dirs, files in os.walk(redirects_folder):
for file in files:
if not file.endswith(".yml"):
continue
redirects_file_path = path.join(redirects_folder, file)
with open(redirects_file_path, encoding="UTF-8") as stream:
try:
redirects = yaml.load(stream, Loader=FullLoader)
for entry in redirects:
url_to = entry["to"]
url_from = entry["from"]
url_list = url_from if isinstance(url_from, list) else [url_from]
for url in url_list:
app.add_url_rule(url, view_func=RedirectTemplateView.as_view(url, url=url_to))
except yaml.YAMLError as exc:
sys.stderr.write('Cant parse data file ' + file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
except IOError as exc:
sys.stderr.write('Cant read data file ' + file + ': ')
sys.stderr.write(str(exc))
sys.exit(-1)
@app.errorhandler(404)
def page_not_found(e):
return render_template('pages/404.html'), 404
app.register_error_handler(404, page_not_found)
@app.route('/api/<path:page_path>')
def api_page(page_path):
path_other, ext = path.splitext(page_path)
if ext == '.html':
return process_api_page(page_path[:-5])
elif path.basename(page_path) == "package-list" or ext:
return respond_with_package_list(page_path)
elif not page_path.endswith('/'):
page_path += '/'
return process_api_page(page_path + 'index')
def process_api_page(page_path):
return render_template(
'api.html',
page=get_api_page(build_mode, page_path)
)
def respond_with_package_list(page_path):
file_path = path.join(root_folder, 'api', page_path)
if not path.exists(file_path):
return make_response(path.basename(page_path) + " not found", 404)
return send_file(file_path, mimetype="text/plain")
@app.route('/assets/<path:path>')
def asset(path):
return send_from_directory('assets', path)
@app.route('/assets/images/tutorials/<path:filename>')
def tutorial_img(filename):
return send_from_directory(path.join('assets', 'images', 'tutorials'), filename)
@freezer.register_generator
def asset():
for filename in walk_directory(path.join(root_folder, "assets")):
yield {'path': filename}
@app.route('/<path:page_path>')
def get_index_page(page_path):
if not page_path.endswith('/'):
page_path += '/'
return process_page(page_path + 'index')
generate_redirect_pages()
@app.after_request
def add_header(request):
request.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
request.headers["Pragma"] = "no-cache"
request.headers["Expires"] = "0"
request.headers['Cache-Control'] = 'public, max-age=0'
return request
if __name__ == '__main__':
print("\n\n\nRunning new KotlinWebSite generator/dev-mode:\n")
argv_copy = []
for arg in sys.argv:
print("arg: " + arg)
if arg == "--ignore-stdlib":
ignore_stdlib = True
elif arg == "--no-check-links":
build_check_links = False
elif arg == "--editable":
build_contenteditable = True
else:
argv_copy.append(arg)
print("\n\n")
print("ignore_stdlib: " + str(ignore_stdlib))
print("build_check_links: " + str(build_check_links))
print("build_contenteditable: " + str(build_contenteditable))
print("\n\n")
set_replace_simple_code(build_contenteditable)
with (open(path.join(root_folder, "_nav-mapped.yml"), 'w')) as output:
yaml.dump(get_nav_impl(), output)
if len(argv_copy) > 1:
if argv_copy[1] == "build":
build_mode = True
urls = freezer.freeze()
if len(build_errors) > 0:
for error in build_errors:
sys.stderr.write(error + '\n')
sys.exit(-1)
elif argv_copy[1] == "sitemap":
generate_sitemap(get_dist_pages())
generate_temporary_sitemap()
elif argv_copy[1] == "index":
build_search_indices(get_dist_pages())
elif argv_copy[1] == "reference-pdf":
generate_pdf("kotlin-docs.pdf", site_data)
else:
print("Unknown argument: " + argv_copy[1])
sys.exit(1)
else:
app.run(host="0.0.0.0", debug=True, threaded=True, **{"extra_files": {
'/src/data/_nav.yml',
*glob.glob("/src/pages-includes/**/*", recursive=True),
}})

| is_comment_constant_removed | is_sharp_comment_removed |
|---|---|
| true | true |

---

Row 9

| field | value |
|---|---|
| hexsha | 7903b2cc28f548b837773d028e06bc2268565a94 |
| size | 24,145 |
| ext / lang | py / Python |
| max_stars | .venv/Lib/site-packages/rich/pretty.py · jefferdo/gpt-3-client @ 7acbc5f518fe3fcb55d0bdcbf93fc87b103b1148 · ["MIT"] · count null · dates null |
| max_issues | .venv/Lib/site-packages/rich/pretty.py · jefferdo/gpt-3-client @ 7acbc5f518fe3fcb55d0bdcbf93fc87b103b1148 · ["MIT"] · count 76 · 2020-07-31T05:33:39.000Z – 2022-03-28T05:04:17.000Z |
| max_forks | rich/pretty.py · shyovn/rich @ a05a5a1c2f95f25db70ac3657e99f0bab652e2cd · ["MIT"] · count null · dates null |

content:

import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from rich.highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on jupyter rich display, if using one of the special representations dont use rich
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
"""Get separator between items."""
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
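# Editor's hedged sketch (not in the original module; helper name is a
# hypothetical addition): build a tiny list node by hand and render it; a
# narrow max_width forces the expanded, multi-line form.
def _demo_node_render() -> None:
    first = Node(value_repr="1")
    second = Node(value_repr="2", last=True)
    root = Node(open_brace="[", close_brace="]", children=[first, second], last=True)
    assert str(root) == "[1, 2]"  # flat form when it fits
    assert root.render(max_width=3) == "[\n    1,\n    2\n]"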
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(
    _object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None
) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
            )  # Check if __repr__ wasn't overridden
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
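# Editor's hedged sketch (not in the original module; helper name is a
# hypothetical addition): max_string truncates long strings in the tree and
# records how many characters were dropped.
def _demo_traverse() -> None:
    node = traverse({"key": "x" * 10}, max_string=4)
    assert str(node) == "{'key': 'xxxx'+6}"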
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
    max_length: Optional[int] = None,
    max_string: Optional[int] = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
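# Editor's hedged sketch (not in the original module; helper name is a
# hypothetical addition): a narrow max_width forces the container onto
# multiple indented lines.
def _demo_pretty_repr() -> None:
    assert pretty_repr([1, 2, 3], max_width=5) == "[\n    1,\n    2,\n    3\n]"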
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
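# Editor's hedged sketch (not in the original module; helper name is a
# hypothetical addition): pprint writes to the global console by default, and
# max_length abbreviates long containers with a "... +N" tail. Defined but not
# called, so importing the module stays side-effect free.
def _demo_pprint() -> None:
    pprint(list(range(100)), max_length=10)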
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
| 34.741007
| 134
| 0.566163
|
import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
from rich import get_console
from .console import ConsoleRenderable
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
if value is not None:
assert console is not None
builtins._ = None
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value
def ipy_display_hook(value: Any) -> None:
assert console is not None
if isinstance(value, JupyterRenderable) or value is None:
return
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try:
ip = get_ipython()
from IPython.core.formatters import BaseFormatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
class Pretty(JupyterMixin):
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
@dataclass
class Node:
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(
    _object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None
) -> Node:
def to_repr(obj: Any) -> str:
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
)
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
    max_length: Optional[int] = None,
    max_string: Optional[int] = None,
expand_all: bool = False,
) -> str:
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
| true
| true
|
7903b3457356b7d719cf8cfe5244c130b277cf39
| 626
|
py
|
Python
|
kappa/lattice/ammonia.py
|
ajkerr0/kappa
|
7a74582596f96b6a9a1488df5a4777c7b723c919
|
[
"MIT"
] | 6
|
2016-05-30T19:56:54.000Z
|
2021-01-21T19:42:24.000Z
|
kappa/lattice/ammonia.py
|
ajkerr0/kappa
|
7a74582596f96b6a9a1488df5a4777c7b723c919
|
[
"MIT"
] | 92
|
2016-05-26T19:50:51.000Z
|
2019-01-08T22:15:09.000Z
|
kappa/lattice/ammonia.py
|
ajkerr0/kappa
|
7a74582596f96b6a9a1488df5a4777c7b723c919
|
[
"MIT"
] | 4
|
2016-05-28T22:07:25.000Z
|
2021-02-26T00:12:51.000Z
|
# -*- coding: utf-8 -*-
"""
@author: alex
"""
import numpy as np
def main():
"""Main program execution."""
n,h1,h2,h3 = generate_ammonia_sites()
nList = [[1,2,3],[0],[0],[0]]
return [n,h1,h2,h3], nList
def generate_ammonia_sites():
"""Generate the locations for the atoms in the ammonia molecule"""
x,y = np.array([1.,0.,0.]), np.array([0.,1.,0.])
    # atomic distance (angstroms)
a = 1.40
n = np.array([0.,0.,0.])
h1 = n + a*y
h2 = n - a*y/2. + a*x*(np.sqrt(3)/2)
h3 = h2 - a*x*np.sqrt(3)
return n,h1,h2,h3
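def check_bond_lengths():
    """Editor's hedged sketch (not in the original file): each N-H distance
    should equal the chosen atomic distance of 1.40 angstroms."""
    n, h1, h2, h3 = generate_ammonia_sites()
    for h in (h1, h2, h3):
        assert np.isclose(np.linalg.norm(h - n), 1.40)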
| 17.885714
| 70
| 0.484026
|
import numpy as np
def main():
n,h1,h2,h3 = generate_ammonia_sites()
nList = [[1,2,3],[0],[0],[0]]
return [n,h1,h2,h3], nList
def generate_ammonia_sites():
x,y = np.array([1.,0.,0.]), np.array([0.,1.,0.])
a = 1.40
n = np.array([0.,0.,0.])
h1 = n + a*y
h2 = n - a*y/2. + a*x*(np.sqrt(3)/2)
h3 = h2 - a*x*np.sqrt(3)
return n,h1,h2,h3
| true
| true
|
7903b37082debbd494ccf88252b5603b2a386f2c
| 2,994
|
py
|
Python
|
vendor/packages/click/tests/test_testing.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | 2
|
2019-06-06T06:56:09.000Z
|
2019-06-19T06:13:33.000Z
|
vendor/packages/click/tests/test_testing.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/packages/click/tests/test_testing.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import click
from click.testing import CliRunner
from click._compat import PY2
# Use the most reasonable io type that users would use for the Python version.
if PY2:
from cStringIO import StringIO as ReasonableBytesIO
else:
from io import BytesIO as ReasonableBytesIO
def test_runner():
@click.command()
def test():
i = click.get_binary_stream('stdin')
o = click.get_binary_stream('stdout')
while 1:
chunk = i.read(4096)
if not chunk:
break
o.write(chunk)
o.flush()
runner = CliRunner()
result = runner.invoke(test, input='Hello World!\n')
assert not result.exception
assert result.output == 'Hello World!\n'
runner = CliRunner(echo_stdin=True)
result = runner.invoke(test, input='Hello World!\n')
assert not result.exception
assert result.output == 'Hello World!\nHello World!\n'
def test_runner_with_stream():
@click.command()
def test():
i = click.get_binary_stream('stdin')
o = click.get_binary_stream('stdout')
while 1:
chunk = i.read(4096)
if not chunk:
break
o.write(chunk)
o.flush()
runner = CliRunner()
result = runner.invoke(test, input=ReasonableBytesIO(b'Hello World!\n'))
assert not result.exception
assert result.output == 'Hello World!\n'
runner = CliRunner(echo_stdin=True)
result = runner.invoke(test, input=ReasonableBytesIO(b'Hello World!\n'))
assert not result.exception
assert result.output == 'Hello World!\nHello World!\n'
def test_prompts():
@click.command()
@click.option('--foo', prompt=True)
def test(foo):
click.echo('foo=%s' % foo)
runner = CliRunner()
result = runner.invoke(test, input='wau wau\n')
assert not result.exception
assert result.output == 'Foo: wau wau\nfoo=wau wau\n'
@click.command()
@click.option('--foo', prompt=True, hide_input=True)
def test(foo):
click.echo('foo=%s' % foo)
runner = CliRunner()
result = runner.invoke(test, input='wau wau\n')
assert not result.exception
assert result.output == 'Foo: \nfoo=wau wau\n'
def test_getchar():
@click.command()
def continue_it():
click.echo(click.getchar())
runner = CliRunner()
result = runner.invoke(continue_it, input='y')
assert not result.exception
assert result.output == 'y\n'
def test_catch_exceptions():
class CustomError(Exception):
pass
@click.command()
def cli():
raise CustomError(1)
runner = CliRunner()
result = runner.invoke(cli)
assert isinstance(result.exception, CustomError)
assert type(result.exc_info) is tuple
assert len(result.exc_info) == 3
with pytest.raises(CustomError):
runner.invoke(cli, catch_exceptions=False)
CustomError = SystemExit
result = runner.invoke(cli)
assert result.exit_code == 1
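def test_exit_code_sketch():
    # Editor's hedged sketch (not part of the original suite): the runner
    # catches SystemExit and records its code as the result's exit_code.
    @click.command()
    def cli():
        raise SystemExit(2)

    runner = CliRunner()
    result = runner.invoke(cli)
    assert result.exit_code == 2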
| 25.810345
| 76
| 0.637609
|
import pytest
import click
from click.testing import CliRunner
from click._compat import PY2
if PY2:
from cStringIO import StringIO as ReasonableBytesIO
else:
from io import BytesIO as ReasonableBytesIO
def test_runner():
@click.command()
def test():
i = click.get_binary_stream('stdin')
o = click.get_binary_stream('stdout')
while 1:
chunk = i.read(4096)
if not chunk:
break
o.write(chunk)
o.flush()
runner = CliRunner()
result = runner.invoke(test, input='Hello World!\n')
assert not result.exception
assert result.output == 'Hello World!\n'
runner = CliRunner(echo_stdin=True)
result = runner.invoke(test, input='Hello World!\n')
assert not result.exception
assert result.output == 'Hello World!\nHello World!\n'
def test_runner_with_stream():
@click.command()
def test():
i = click.get_binary_stream('stdin')
o = click.get_binary_stream('stdout')
while 1:
chunk = i.read(4096)
if not chunk:
break
o.write(chunk)
o.flush()
runner = CliRunner()
result = runner.invoke(test, input=ReasonableBytesIO(b'Hello World!\n'))
assert not result.exception
assert result.output == 'Hello World!\n'
runner = CliRunner(echo_stdin=True)
result = runner.invoke(test, input=ReasonableBytesIO(b'Hello World!\n'))
assert not result.exception
assert result.output == 'Hello World!\nHello World!\n'
def test_prompts():
@click.command()
@click.option('--foo', prompt=True)
def test(foo):
click.echo('foo=%s' % foo)
runner = CliRunner()
result = runner.invoke(test, input='wau wau\n')
assert not result.exception
assert result.output == 'Foo: wau wau\nfoo=wau wau\n'
@click.command()
@click.option('--foo', prompt=True, hide_input=True)
def test(foo):
click.echo('foo=%s' % foo)
runner = CliRunner()
result = runner.invoke(test, input='wau wau\n')
assert not result.exception
assert result.output == 'Foo: \nfoo=wau wau\n'
def test_getchar():
@click.command()
def continue_it():
click.echo(click.getchar())
runner = CliRunner()
result = runner.invoke(continue_it, input='y')
assert not result.exception
assert result.output == 'y\n'
def test_catch_exceptions():
class CustomError(Exception):
pass
@click.command()
def cli():
raise CustomError(1)
runner = CliRunner()
result = runner.invoke(cli)
assert isinstance(result.exception, CustomError)
assert type(result.exc_info) is tuple
assert len(result.exc_info) == 3
with pytest.raises(CustomError):
runner.invoke(cli, catch_exceptions=False)
CustomError = SystemExit
result = runner.invoke(cli)
assert result.exit_code == 1
| true
| true
|
7903b3c6c1a37be6b9eaf4ac608b56c9e2678754
| 10,126
|
py
|
Python
|
tests/unit/test_private.py
|
Cattes/coinbasepro-python
|
0a9c9ba2188f6bfa08a842a666ab12fe1cc02276
|
[
"MIT"
] | null | null | null |
tests/unit/test_private.py
|
Cattes/coinbasepro-python
|
0a9c9ba2188f6bfa08a842a666ab12fe1cc02276
|
[
"MIT"
] | null | null | null |
tests/unit/test_private.py
|
Cattes/coinbasepro-python
|
0a9c9ba2188f6bfa08a842a666ab12fe1cc02276
|
[
"MIT"
] | null | null | null |
from itertools import islice
from tests.unit.utils import Teardown
import inspect
import pytest
import time
import cbpro.messenger
import cbpro.public
import cbpro.private
class TestPrivateClient(object):
def test_private_attr(self, private_client):
assert isinstance(private_client, cbpro.public.PublicClient)
assert hasattr(private_client, 'accounts')
assert hasattr(private_client, 'orders')
assert hasattr(private_client, 'fills')
assert hasattr(private_client, 'limits')
assert hasattr(private_client, 'deposits')
assert hasattr(private_client, 'withdrawals')
assert hasattr(private_client, 'conversions')
assert hasattr(private_client, 'payments')
assert hasattr(private_client, 'coinbase')
assert hasattr(private_client, 'fees')
assert hasattr(private_client, 'reports')
assert hasattr(private_client, 'profiles')
assert hasattr(private_client, 'oracle')
def test_private_accounts(self, private_client):
accounts = private_client.accounts
assert isinstance(accounts, cbpro.messenger.Subscriber)
assert isinstance(accounts, cbpro.private.Accounts)
assert hasattr(accounts, 'list')
assert hasattr(accounts, 'get')
assert hasattr(accounts, 'history')
assert hasattr(accounts, 'holds')
def test_private_orders(self, private_client):
orders = private_client.orders
assert isinstance(orders, cbpro.messenger.Subscriber)
assert isinstance(orders, cbpro.private.Orders)
assert hasattr(orders, 'post')
assert hasattr(orders, 'cancel')
assert hasattr(orders, 'list')
assert hasattr(orders, 'get')
def test_private_fills(self, private_client):
fills = private_client.fills
assert isinstance(fills, cbpro.messenger.Subscriber)
assert isinstance(fills, cbpro.private.Fills)
assert hasattr(fills, 'list')
def test_private_limits(self, private_client):
limits = private_client.limits
assert isinstance(limits, cbpro.messenger.Subscriber)
assert isinstance(limits, cbpro.private.Limits)
assert hasattr(limits, 'get')
def test_private_deposits(self, private_client):
deposits = private_client.deposits
assert isinstance(deposits, cbpro.messenger.Subscriber)
assert isinstance(deposits, cbpro.private.Deposits)
assert hasattr(deposits, 'list')
assert hasattr(deposits, 'get')
assert hasattr(deposits, 'payment')
assert hasattr(deposits, 'coinbase')
assert hasattr(deposits, 'generate')
def test_private_withdrawals(self, private_client):
withdrawals = private_client.withdrawals
assert isinstance(withdrawals, cbpro.messenger.Subscriber)
assert isinstance(withdrawals, cbpro.private.Deposits)
assert isinstance(withdrawals, cbpro.private.Withdrawals)
assert hasattr(withdrawals, 'list')
assert hasattr(withdrawals, 'get')
assert hasattr(withdrawals, 'payment')
assert hasattr(withdrawals, 'coinbase')
assert hasattr(withdrawals, 'generate')
assert hasattr(withdrawals, 'crypto')
assert hasattr(withdrawals, 'estimate')
def test_private_conversions(self, private_client):
conversions = private_client.conversions
assert isinstance(conversions, cbpro.messenger.Subscriber)
assert isinstance(conversions, cbpro.private.Conversions)
assert hasattr(conversions, 'post')
def test_private_payments(self, private_client):
payments = private_client.payments
assert isinstance(payments, cbpro.messenger.Subscriber)
assert isinstance(payments, cbpro.private.Payments)
assert hasattr(payments, 'list')
def test_private_coinbase(self, private_client):
coinbase = private_client.coinbase
assert isinstance(coinbase, cbpro.messenger.Subscriber)
assert isinstance(coinbase, cbpro.private.Coinbase)
assert hasattr(coinbase, 'list')
def test_private_fees(self, private_client):
fees = private_client.fees
assert isinstance(fees, cbpro.messenger.Subscriber)
assert isinstance(fees, cbpro.private.Fees)
assert hasattr(fees, 'list')
def test_private_reports(self, private_client):
reports = private_client.reports
assert isinstance(reports, cbpro.messenger.Subscriber)
assert isinstance(reports, cbpro.private.Reports)
def test_private_profiles(self, private_client):
profiles = private_client.profiles
assert isinstance(profiles, cbpro.messenger.Subscriber)
assert isinstance(profiles, cbpro.private.Profiles)
assert hasattr(profiles, 'list')
assert hasattr(profiles, 'get')
assert hasattr(profiles, 'transfer')
def test_private_oracle(self, private_client):
oracle = private_client.oracle
assert isinstance(oracle, cbpro.messenger.Subscriber)
assert isinstance(oracle, cbpro.private.Oracle)
@pytest.mark.skip
class TestPrivateAccounts(Teardown):
def test_list(self, private_client):
response = private_client.accounts.list()
assert isinstance(response, list)
assert 'currency' in response[0]
def test_get(self, private_client, account_id):
response = private_client.accounts.get(account_id)
assert isinstance(response, dict)
assert 'currency' in response
def test_history(self, private_client, account_id):
response = private_client.accounts.history(account_id)
assert inspect.isgenerator(response)
accounts = list(islice(response, 5))
assert 'amount' in accounts[0]
assert 'details' in accounts[0]
def test_holds(self, private_client, account_id):
response = private_client.accounts.holds(account_id)
assert inspect.isgenerator(response)
holds = list(islice(response, 5))
assert 'type' in holds[0]
assert 'ref' in holds[0]
@pytest.mark.skip
class TestPrivateOrders(Teardown):
def test_post_limit_order(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert response['type'] == 'limit'
def test_post_market_order(self, private_client, private_model):
json = private_model.orders.market('buy', 'BTC-USD', size=0.001)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert 'status' in response
assert response['type'] == 'market'
@pytest.mark.parametrize('stop', ['entry', 'loss'])
def test_post_stop_order(self, private_client, private_model, stop):
json = private_model.orders.market(
'buy', 'BTC-USD', size=0.001, stop=stop, stop_price=30000
)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert response['stop'] == stop
assert response['type'] == 'market'
def test_cancel(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
order = private_client.orders.post(json)
time.sleep(0.2)
params = private_model.orders.cancel('BTC-USD')
response = private_client.orders.cancel(order['id'], params)
assert isinstance(response, list)
assert response[0] == order['id']
def test_list(self, private_client, private_model):
params = private_model.orders.list('pending')
response = private_client.orders.list(params)
assert inspect.isgenerator(response)
orders = list(islice(response, 10))
assert isinstance(orders, list)
assert 'created_at' in orders[0]
def test_get(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
order = private_client.orders.post(json)
time.sleep(0.2)
response = private_client.orders.get(order['id'])
assert response['id'] == order['id']
@pytest.mark.skip
class TestPrivateFills(Teardown):
def test_list(self, private_client, private_model):
params = private_model.fills.list('BTC-USD')
response = private_client.fills.list(params)
assert inspect.isgenerator(response)
fills = list(islice(response, 10))
assert isinstance(fills, list)
assert 'fill_fees' in fills[0]
@pytest.mark.skip
class TestPrivateLimits(Teardown):
def test_get(self, private_client):
response = private_client.limits.get()
assert isinstance(response, dict)
@pytest.mark.skip
class TestPrivateDeposits(Teardown):
pass
@pytest.mark.skip
class TestPrivateWithdrawals(Teardown):
pass
@pytest.mark.skip
class TestPrivateConversions(Teardown):
def test_post(self, private_client, private_model):
json = private_model.conversions.post('USD', 'USDC', 10.0)
response = private_client.conversions.post(json)
assert isinstance(response, dict)
assert 'id' in response
assert 'amount' in response
assert response['from'] == 'USD'
assert response['to'] == 'USDC'
@pytest.mark.skip
class TestPrivatePayments(Teardown):
def test_list(self, private_client):
response = private_client.payments.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateCoinbase(Teardown):
def test_list(self, private_client):
response = private_client.coinbase.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateFees(Teardown):
def test_list(self, private_client):
response = private_client.fees.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateReports(Teardown):
pass
@pytest.mark.skip
class TestPrivateProfiles(Teardown):
pass
@pytest.mark.skip
class TestPrivateOracle(Teardown):
pass
| 35.038062
| 75
| 0.693067
|
from itertools import islice
from tests.unit.utils import Teardown
import inspect
import pytest
import time
import cbpro.messenger
import cbpro.public
import cbpro.private
class TestPrivateClient(object):
def test_private_attr(self, private_client):
assert isinstance(private_client, cbpro.public.PublicClient)
assert hasattr(private_client, 'accounts')
assert hasattr(private_client, 'orders')
assert hasattr(private_client, 'fills')
assert hasattr(private_client, 'limits')
assert hasattr(private_client, 'deposits')
assert hasattr(private_client, 'withdrawals')
assert hasattr(private_client, 'conversions')
assert hasattr(private_client, 'payments')
assert hasattr(private_client, 'coinbase')
assert hasattr(private_client, 'fees')
assert hasattr(private_client, 'reports')
assert hasattr(private_client, 'profiles')
assert hasattr(private_client, 'oracle')
def test_private_accounts(self, private_client):
accounts = private_client.accounts
assert isinstance(accounts, cbpro.messenger.Subscriber)
assert isinstance(accounts, cbpro.private.Accounts)
assert hasattr(accounts, 'list')
assert hasattr(accounts, 'get')
assert hasattr(accounts, 'history')
assert hasattr(accounts, 'holds')
def test_private_orders(self, private_client):
orders = private_client.orders
assert isinstance(orders, cbpro.messenger.Subscriber)
assert isinstance(orders, cbpro.private.Orders)
assert hasattr(orders, 'post')
assert hasattr(orders, 'cancel')
assert hasattr(orders, 'list')
assert hasattr(orders, 'get')
def test_private_fills(self, private_client):
fills = private_client.fills
assert isinstance(fills, cbpro.messenger.Subscriber)
assert isinstance(fills, cbpro.private.Fills)
assert hasattr(fills, 'list')
def test_private_limits(self, private_client):
limits = private_client.limits
assert isinstance(limits, cbpro.messenger.Subscriber)
assert isinstance(limits, cbpro.private.Limits)
assert hasattr(limits, 'get')
def test_private_deposits(self, private_client):
deposits = private_client.deposits
assert isinstance(deposits, cbpro.messenger.Subscriber)
assert isinstance(deposits, cbpro.private.Deposits)
assert hasattr(deposits, 'list')
assert hasattr(deposits, 'get')
assert hasattr(deposits, 'payment')
assert hasattr(deposits, 'coinbase')
assert hasattr(deposits, 'generate')
def test_private_withdrawals(self, private_client):
withdrawals = private_client.withdrawals
assert isinstance(withdrawals, cbpro.messenger.Subscriber)
assert isinstance(withdrawals, cbpro.private.Deposits)
assert isinstance(withdrawals, cbpro.private.Withdrawals)
assert hasattr(withdrawals, 'list')
assert hasattr(withdrawals, 'get')
assert hasattr(withdrawals, 'payment')
assert hasattr(withdrawals, 'coinbase')
assert hasattr(withdrawals, 'generate')
assert hasattr(withdrawals, 'crypto')
assert hasattr(withdrawals, 'estimate')
def test_private_conversions(self, private_client):
conversions = private_client.conversions
assert isinstance(conversions, cbpro.messenger.Subscriber)
assert isinstance(conversions, cbpro.private.Conversions)
assert hasattr(conversions, 'post')
def test_private_payments(self, private_client):
payments = private_client.payments
assert isinstance(payments, cbpro.messenger.Subscriber)
assert isinstance(payments, cbpro.private.Payments)
assert hasattr(payments, 'list')
def test_private_coinbase(self, private_client):
coinbase = private_client.coinbase
assert isinstance(coinbase, cbpro.messenger.Subscriber)
assert isinstance(coinbase, cbpro.private.Coinbase)
assert hasattr(coinbase, 'list')
def test_private_fees(self, private_client):
fees = private_client.fees
assert isinstance(fees, cbpro.messenger.Subscriber)
assert isinstance(fees, cbpro.private.Fees)
assert hasattr(fees, 'list')
def test_private_reports(self, private_client):
reports = private_client.reports
assert isinstance(reports, cbpro.messenger.Subscriber)
assert isinstance(reports, cbpro.private.Reports)
def test_private_profiles(self, private_client):
profiles = private_client.profiles
assert isinstance(profiles, cbpro.messenger.Subscriber)
assert isinstance(profiles, cbpro.private.Profiles)
assert hasattr(profiles, 'list')
assert hasattr(profiles, 'get')
assert hasattr(profiles, 'transfer')
def test_private_oracle(self, private_client):
oracle = private_client.oracle
assert isinstance(oracle, cbpro.messenger.Subscriber)
assert isinstance(oracle, cbpro.private.Oracle)
@pytest.mark.skip
class TestPrivateAccounts(Teardown):
def test_list(self, private_client):
response = private_client.accounts.list()
assert isinstance(response, list)
assert 'currency' in response[0]
def test_get(self, private_client, account_id):
response = private_client.accounts.get(account_id)
assert isinstance(response, dict)
assert 'currency' in response
def test_history(self, private_client, account_id):
response = private_client.accounts.history(account_id)
assert inspect.isgenerator(response)
accounts = list(islice(response, 5))
assert 'amount' in accounts[0]
assert 'details' in accounts[0]
def test_holds(self, private_client, account_id):
response = private_client.accounts.holds(account_id)
assert inspect.isgenerator(response)
holds = list(islice(response, 5))
assert 'type' in holds[0]
assert 'ref' in holds[0]
@pytest.mark.skip
class TestPrivateOrders(Teardown):
def test_post_limit_order(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert response['type'] == 'limit'
def test_post_market_order(self, private_client, private_model):
json = private_model.orders.market('buy', 'BTC-USD', size=0.001)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert 'status' in response
assert response['type'] == 'market'
@pytest.mark.parametrize('stop', ['entry', 'loss'])
def test_post_stop_order(self, private_client, private_model, stop):
json = private_model.orders.market(
'buy', 'BTC-USD', size=0.001, stop=stop, stop_price=30000
)
response = private_client.orders.post(json)
assert isinstance(response, dict)
assert response['stop'] == stop
assert response['type'] == 'market'
def test_cancel(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
order = private_client.orders.post(json)
time.sleep(0.2)
params = private_model.orders.cancel('BTC-USD')
response = private_client.orders.cancel(order['id'], params)
assert isinstance(response, list)
assert response[0] == order['id']
def test_list(self, private_client, private_model):
params = private_model.orders.list('pending')
response = private_client.orders.list(params)
assert inspect.isgenerator(response)
orders = list(islice(response, 10))
assert isinstance(orders, list)
assert 'created_at' in orders[0]
def test_get(self, private_client, private_model):
json = private_model.orders.limit('buy', 'BTC-USD', 40000.0, 0.001)
order = private_client.orders.post(json)
time.sleep(0.2)
response = private_client.orders.get(order['id'])
assert response['id'] == order['id']
@pytest.mark.skip
class TestPrivateFills(Teardown):
def test_list(self, private_client, private_model):
params = private_model.fills.list('BTC-USD')
response = private_client.fills.list(params)
assert inspect.isgenerator(response)
fills = list(islice(response, 10))
assert isinstance(fills, list)
assert 'fill_fees' in fills[0]
@pytest.mark.skip
class TestPrivateLimits(Teardown):
def test_get(self, private_client):
response = private_client.limits.get()
assert isinstance(response, dict)
@pytest.mark.skip
class TestPrivateDeposits(Teardown):
pass
@pytest.mark.skip
class TestPrivateWithdrawals(Teardown):
pass
@pytest.mark.skip
class TestPrivateConversions(Teardown):
def test_post(self, private_client, private_model):
json = private_model.conversions.post('USD', 'USDC', 10.0)
response = private_client.conversions.post(json)
assert isinstance(response, dict)
assert 'id' in response
assert 'amount' in response
assert response['from'] == 'USD'
assert response['to'] == 'USDC'
@pytest.mark.skip
class TestPrivatePayments(Teardown):
def test_list(self, private_client):
response = private_client.payments.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateCoinbase(Teardown):
def test_list(self, private_client):
response = private_client.coinbase.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateFees(Teardown):
def test_list(self, private_client):
response = private_client.fees.list()
assert isinstance(response, list)
@pytest.mark.skip
class TestPrivateReports(Teardown):
pass
@pytest.mark.skip
class TestPrivateProfiles(Teardown):
pass
@pytest.mark.skip
class TestPrivateOracle(Teardown):
pass
| true
| true
|
7903b51a1ee8b4016a4a7003dbe0d29a14d08635
| 918
|
py
|
Python
|
credentials_test.py
|
paulmunyao/Password-Locker
|
918aa30ecadc1ea09cd09b2945e57e0f3ac67b7e
|
[
"Unlicense"
] | null | null | null |
credentials_test.py
|
paulmunyao/Password-Locker
|
918aa30ecadc1ea09cd09b2945e57e0f3ac67b7e
|
[
"Unlicense"
] | null | null | null |
credentials_test.py
|
paulmunyao/Password-Locker
|
918aa30ecadc1ea09cd09b2945e57e0f3ac67b7e
|
[
"Unlicense"
] | null | null | null |
from credentials import credentials
import unittest
import pyperclip
class TestUser(unittest.TestCase):
    '''
    Test class that defines test cases for the credentials class.
    Args:
        unittest.TestCase: TestCase class that helps in creating test cases.
    '''
def setUp(self):
'''
Set up method to run before each test case
'''
self.new_user = credentials("Paul", "123")
def test__init__(self):
'''
test__init__ test case to test if the object is initialized properly
'''
self.assertEqual(self.new_user.user_name, "Paul")
self.assertEqual(self.new_user.password, "123")
def test__save_user(self):
'''
test to see if the user is saved
'''
        self.new_user.save_credentials()
self.assertEqual(len(credentials.user_list), 1)
if __name__ == "__main__":
unittest.main()
| 26.228571
| 84
| 0.62963
|
from credentials import credentials
import unittest
import pyperclip
class TestUser(unittest.TestCase):
def setUp(self):
self.new_user = credentials("Paul", "123")
def test__init__(self):
self.assertEqual(self.new_user.user_name, "Paul")
self.assertEqual(self.new_user.password, "123")
def test__save_user(self):
        self.new_user.save_credentials()
self.assertEqual(len(credentials.user_list), 1)
if __name__ == "__main__":
unittest.main()
| true
| true
|
7903b526f8e93b915a778b6c9b1353a988055576
| 1,084
|
py
|
Python
|
aria/parser/__init__.py
|
enricorusso/incubator-ariatosca
|
3748b1962697712bde29c9de781d867c6c5ffad1
|
[
"Apache-2.0"
] | 1
|
2018-10-13T06:32:10.000Z
|
2018-10-13T06:32:10.000Z
|
aria/parser/__init__.py
|
enricorusso/incubator-ariatosca
|
3748b1962697712bde29c9de781d867c6c5ffad1
|
[
"Apache-2.0"
] | null | null | null |
aria/parser/__init__.py
|
enricorusso/incubator-ariatosca
|
3748b1962697712bde29c9de781d867c6c5ffad1
|
[
"Apache-2.0"
] | 1
|
2020-06-16T15:13:06.000Z
|
2020-06-16T15:13:06.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parser package.
"""
from .specification import implements_specification, iter_specifications
MODULES = (
'consumption',
'loading',
'modeling',
'presentation',
'reading',
'validation')
__all__ = (
'MODULES',
'implements_specification',
'iter_specifications')
| 30.971429
| 74
| 0.74262
|
from .specification import implements_specification, iter_specifications
MODULES = (
'consumption',
'loading',
'modeling',
'presentation',
'reading',
'validation')
__all__ = (
'MODULES',
'implements_specification',
'iter_specifications')
| true
| true
|
7903b593ae23f1e169e9267b4afb8f03f0da3218
| 3,158
|
py
|
Python
|
pvfactors/report.py
|
tcapelle/pvfactors
|
1aaf6cdd3066a3a68d93db4ad7abcf10e97b5620
|
[
"BSD-3-Clause"
] | null | null | null |
pvfactors/report.py
|
tcapelle/pvfactors
|
1aaf6cdd3066a3a68d93db4ad7abcf10e97b5620
|
[
"BSD-3-Clause"
] | null | null | null |
pvfactors/report.py
|
tcapelle/pvfactors
|
1aaf6cdd3066a3a68d93db4ad7abcf10e97b5620
|
[
"BSD-3-Clause"
] | null | null | null |
"""Module containing examples of report builder functions and classes."""
from collections import OrderedDict
import numpy as np
def example_fn_build_report(report, pvarray):
"""Example function that builds a report when used in the
:py:class:`~pvfactors.engine.PVEngine` with full mode simulations.
Here it will be a dictionary with lists of calculated values.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
# Initialize the report
if report is None:
list_keys = ['qinc_front', 'qinc_back', 'iso_front', 'iso_back']
report = OrderedDict({key: [] for key in list_keys})
# Add elements to the report
if pvarray is not None:
pvrow = pvarray.pvrows[1] # use center pvrow
report['qinc_front'].append(
pvrow.front.get_param_weighted('qinc'))
report['qinc_back'].append(
pvrow.back.get_param_weighted('qinc'))
report['iso_front'].append(
pvrow.front.get_param_weighted('isotropic'))
report['iso_back'].append(
pvrow.back.get_param_weighted('isotropic'))
else:
# No calculation was performed, because sun was down
report['qinc_front'].append(np.nan)
report['qinc_back'].append(np.nan)
report['iso_front'].append(np.nan)
report['iso_back'].append(np.nan)
return report
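def _demo_build_report():
    """Editor's hedged sketch (not in the original module): with ``pvarray``
    set to None (sun down), NaNs are appended so all lists stay aligned per
    timestep."""
    report = example_fn_build_report(None, None)
    assert all(len(values) == 1 for values in report.values())
    assert np.isnan(report['qinc_front'][0])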
class ExampleReportBuilder(object):
"""A class is required to build reports when running calculations with
multiprocessing because of python constraints"""
@staticmethod
def build(report, pvarray):
"""Method that will build the simulation report. Here we're using the
previously defined
        :py:func:`~pvfactors.report.example_fn_build_report`.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
return example_fn_build_report(report, pvarray)
@staticmethod
def merge(reports):
"""Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values
"""
report = reports[0]
# Merge only if more than 1 report
if len(reports) > 1:
keys_report = list(reports[0].keys())
for other_report in reports[1:]:
for key in keys_report:
report[key] += other_report[key]
return report
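def _demo_merge_reports():
    """Editor's hedged sketch (not in the original module): merging simply
    concatenates the per-key lists of the individual reports."""
    first = {'qinc_front': [1.0], 'qinc_back': [2.0]}
    second = {'qinc_front': [3.0], 'qinc_back': [4.0]}
    merged = ExampleReportBuilder.merge([first, second])
    assert merged == {'qinc_front': [1.0, 3.0], 'qinc_back': [2.0, 4.0]}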
| 32.895833
| 79
| 0.622863
|
from collections import OrderedDict
import numpy as np
def example_fn_build_report(report, pvarray):
if report is None:
list_keys = ['qinc_front', 'qinc_back', 'iso_front', 'iso_back']
report = OrderedDict({key: [] for key in list_keys})
if pvarray is not None:
pvrow = pvarray.pvrows[1]
report['qinc_front'].append(
pvrow.front.get_param_weighted('qinc'))
report['qinc_back'].append(
pvrow.back.get_param_weighted('qinc'))
report['iso_front'].append(
pvrow.front.get_param_weighted('isotropic'))
report['iso_back'].append(
pvrow.back.get_param_weighted('isotropic'))
else:
report['qinc_front'].append(np.nan)
report['qinc_back'].append(np.nan)
report['iso_front'].append(np.nan)
report['iso_back'].append(np.nan)
return report
class ExampleReportBuilder(object):
@staticmethod
def build(report, pvarray):
return example_fn_build_report(report, pvarray)
@staticmethod
def merge(reports):
report = reports[0]
if len(reports) > 1:
keys_report = list(reports[0].keys())
for other_report in reports[1:]:
for key in keys_report:
report[key] += other_report[key]
return report
| true
| true
|
7903b5960f7b65c5eae95333461a9ee1d4fd86e9
| 8,242
|
py
|
Python
|
CarlaDriving/server/lane_detection/utils.py
|
eamorgado/Car-Self-driving-Simulator
|
498d54a30c665b38ae6e120d8ae8311e77ad61f2
|
[
"BSD-3-Clause"
] | 1
|
2021-01-25T02:08:55.000Z
|
2021-01-25T02:08:55.000Z
|
CarlaDriving/server/lane_detection/utils.py
|
eamorgado/Car-Self-driving-Simulator
|
498d54a30c665b38ae6e120d8ae8311e77ad61f2
|
[
"BSD-3-Clause"
] | null | null | null |
CarlaDriving/server/lane_detection/utils.py
|
eamorgado/Car-Self-driving-Simulator
|
498d54a30c665b38ae6e120d8ae8311e77ad61f2
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import cv2 as cv
import math
from server.cv_utils import *
def filterGaussian(img,size=(5,5),stdv=0):
"""Summary of filterGaussian
This will apply a noise reduction filter, we will use s 5x5 Gaussian filter to smooth
the image to lower the sensitivity to noise. (The smaller the size the less visible the blur)
To populate the Gaussian matrix we will use a kernel of normally distributed[stdv=1] numbers which will
set each pixel value equal to the weighted average of its neighboor pixels
The Gaussian distribution:
Gd = (1/2pi*stdv^2)exp(-((i-(k+1)^2) + (j - (k+1)^2))/(2*stdv^2))
i,j E [1,2k+1] for the kernel of size: (2k+1)x(2k+1)
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if not isinstance(size,tuple):
raise ValueError('filterGaussian: Size for Gaussian filter not tuple')
return cv.GaussianBlur(img,size,stdv)
def filterCanny(img,min_val=50,max_val=150,size=(5,5),stdv=0):
"""
The Canny detector is a multi-stage algorithm optimized for fast real-time edge detection,
which will reduce complexity of the image much further.
The algorithm will detect sharp changes in luminosity and will define them as edges.
The algorithm has the following stages:
- Noise reduction
- Intensity gradient - here it will apply a Sobel filter along the x and y axis to detect if edges are horizontal vertical or diagonal
- Non-maximum suppression - this shortens the frequency bandwith of the signal to sharpen it
- Hysteresis thresholding
"""
if not isCV(img):
raise ValueError("Image not in np.array format")
if min_val >= max_val:
raise ValueError('filterCanny: Value order incorrect')
gray_scale = toGrayScale(img)
#cv.imshow('Gray Scale image',gray_scale)
gaussian = filterGaussian(gray_scale,size=size,stdv=stdv)
#cv.imshow('Gaussian filter',gaussian)
return cv.Canny(gaussian,min_val,max_val)
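# A minimal usage sketch of the two filters above, assuming a BGR frame loaded
# with OpenCV; 'frame.jpg' and the function name are hypothetical and only for
# illustration.
def _demo_canny_pipeline(path='frame.jpg'):
    frame = cv.imread(path)  # BGR image as np.array
    # smoothing happens inside filterCanny via filterGaussian
    return filterCanny(frame, min_val=50, max_val=150, size=(5, 5))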
def segmentRegionOfInterest(img):
height = img.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
mask = np.zeros_like(img)
    # fillPoly can fill multiple polygons at once
cv.fillPoly(mask, polygons, 255)
# Bitwise operation between canny image and mask image
masked_image = cv.bitwise_and(img, mask)
return masked_image
def houghFilter(frame,distance_resolution=2,angle_resolution=np.pi/180,min_n_intersections=50,min_line_size=30,max_line_gap=5):
"""
Params:
frame
distance_resolution: distance resolution of accumulator in pixels, larger ==> less precision
angle_resolution: angle of accumulator in radians, larger ==> less precision
min_n_intersections: minimum number of intersections
min_line_size: minimum length of line in pixels
max_line_gap: maximum distance in pixels between disconnected lines
"""
placeholder = np.array([])
hough = cv.HoughLinesP(frame,distance_resolution,angle_resolution,min_n_intersections,placeholder,min_line_size,max_line_gap)
return hough
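# A minimal sketch of the intended end-to-end flow, chaining the helpers in
# this module: edge map, region mask, Hough segments, then averaged lane lines
# (calculateLines is defined just below; the function name here is hypothetical).
def _demo_lane_pipeline(frame):
    edges = filterCanny(frame)
    roi = segmentRegionOfInterest(edges)
    segments = houghFilter(roi)              # raw HoughLinesP segments
    return calculateLines(frame, segments)   # one or two averaged lanes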
def calculateLines(img,lines):
"""
Combines line segments into one or two lanes
Note: By looking at the slop of a line we can see if it is on the left side (m<0) or right (m>0)
"""
def calculateCoordinates(img,line_params):
"""
Calculates the coordinates for a road lane
"""
#y = m*x +b, m= slope, b=intercept
height, width, _ = img.shape
m, b = line_params
y1 = height
y2 = int(y1 * (1/2)) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - b) / m)))
x2 = max(-width, min(2 * width, int((y2 - b) / m)))
return np.array([x1,y1, x2,y2])
lane_lines = []
if lines is None:
return np.array(lane_lines)
height, width, _ = img.shape
left_lines, right_lines = [], []
boundary = 1/3
    left_region_boundary = width * (1 - boundary)  # left lane line segments should be on the left 2/3 of the screen
    right_region_boundary = width * boundary  # right lane line segments should be on the right 2/3 of the screen
for line in lines:
x1,y1, x2,y2 = line.reshape(4)
if x1 == x2:
#Vertical line
continue
#Fit a polynomial to the points to get the slope and intercept
line_params = np.polyfit((x1,x2), (y1,y2), 1)
slope,intercept = line_params[0], line_params[1]
if slope < 0: #left side
if x1 < left_region_boundary and x2 < left_region_boundary:
left_lines.append((slope,intercept))
else: #right
if x1 > right_region_boundary and x2 > right_region_boundary:
right_lines.append((slope,intercept))
    # average only when segments were found (np.average of an empty list
    # would produce NaN with a RuntimeWarning)
    if len(left_lines) > 0:
        left_lines_avg = np.average(left_lines,axis=0)
        left_line = calculateCoordinates(img,left_lines_avg)
        lane_lines.append(left_line)
    if len(right_lines) > 0:
        right_lines_avg = np.average(right_lines,axis=0)
        right_line = calculateCoordinates(img,right_lines_avg)
        lane_lines.append(right_line)
return np.array(lane_lines)
def showMidLine(img,steering_angle,color=(0, 255, 0),thickness=5):
line_image = np.zeros_like(img)
height, width, _ = img.shape
# Note: the steering angle of:
# 0-89 degree: turn left
# 90 degree: going straight
# 91-180 degree: turn right
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv.line(line_image, (x1, y1), (x2, y2), color, thickness)
return line_image
def showLines(img,lines,color=(255,0,0),thickness=5):
line_img = np.zeros(img.shape, dtype=np.uint8)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv.line(line_img, (x1,y1), (x2,y2), color, thickness)
return line_img
def calculateSteeringAngle(img,lines):
if len(lines) == 0:
return -90
height, width, _ = img.shape
if len(lines) == 1:
x1, _, x2, _ = lines[0]
x_offset = x2 - x1
else: #2 lines
_, _, left_x2, _ = lines[0]
_, _, right_x2, _ = lines[1]
        camera_mid_offset_percent = 0.0  # 0.0: car points to the center; -0.03: car is centered to the left; +0.03: car points to the right
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
    # find the steering angle, i.e. the angle between the navigation direction and the end of the center line
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
return steering_angle
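# Worked example (illustrative numbers, not from the original file): on a
# 480x640 frame with two detected lanes ending at left_x2=200 and right_x2=500,
# mid = 320, so x_offset = (200 + 500) / 2 - 320 = 30 and y_offset = 240;
# atan(30 / 240) ~= 7 degrees, hence steering_angle ~= 97 (a slight right turn).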
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1):
"""
Using last steering angle to stabilize the steering angle
This can be improved to use last N angles, etc
if new angle is too different from current angle, only turn by max_angle_deviation degrees
"""
if num_of_lane_lines == 1:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
else:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
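# A minimal usage sketch (illustration only): clamp frame-to-frame changes so
# the steering command cannot jump abruptly:
#     angle = calculateSteeringAngle(frame, lanes)
#     angle = stabilizeSteeringAngle(prev_angle, angle, len(lanes))
# e.g. with prev_angle=90, a new angle of 97 and two lanes (max deviation 2),
# the result is clamped to 92 instead of jumping straight to 97.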
| 36.149123
| 152
| 0.665372
|
import numpy as np
import cv2 as cv
import math
from server.cv_utils import *
def filterGaussian(img,size=(5,5),stdv=0):
if not isCV(img):
raise ValueError("Image not in np.array format")
if not isinstance(size,tuple):
raise ValueError('filterGaussian: Size for Gaussian filter not tuple')
return cv.GaussianBlur(img,size,stdv)
def filterCanny(img,min_val=50,max_val=150,size=(5,5),stdv=0):
if not isCV(img):
raise ValueError("Image not in np.array format")
if min_val >= max_val:
raise ValueError('filterCanny: Value order incorrect')
gray_scale = toGrayScale(img)
gaussian = filterGaussian(gray_scale,size=size,stdv=stdv)
return cv.Canny(gaussian,min_val,max_val)
def segmentRegionOfInterest(img):
height = img.shape[0]
polygons = np.array([
[(200, height), (1100, height), (550, 250)]
])
mask = np.zeros_like(img)
cv.fillPoly(mask, polygons, 255)
masked_image = cv.bitwise_and(img, mask)
return masked_image
def houghFilter(frame,distance_resolution=2,angle_resolution=np.pi/180,min_n_intersections=50,min_line_size=30,max_line_gap=5):
placeholder = np.array([])
hough = cv.HoughLinesP(frame,distance_resolution,angle_resolution,min_n_intersections,placeholder,min_line_size,max_line_gap)
return hough
def calculateLines(img,lines):
def calculateCoordinates(img,line_params):
height, width, _ = img.shape
m, b = line_params
y1 = height
y2 = int(y1 * (1/2))
x1 = max(-width, min(2 * width, int((y1 - b) / m)))
x2 = max(-width, min(2 * width, int((y2 - b) / m)))
return np.array([x1,y1, x2,y2])
lane_lines = []
if lines is None:
return np.array(lane_lines)
height, width, _ = img.shape
left_lines, right_lines = [], []
boundary = 1/3
left_region_boundary = width * (1 - boundary)
right_region_boundary = width * boundary
for line in lines:
x1,y1, x2,y2 = line.reshape(4)
if x1 == x2:
continue
line_params = np.polyfit((x1,x2), (y1,y2), 1)
slope,intercept = line_params[0], line_params[1]
if slope < 0:
if x1 < left_region_boundary and x2 < left_region_boundary:
left_lines.append((slope,intercept))
else:
if x1 > right_region_boundary and x2 > right_region_boundary:
right_lines.append((slope,intercept))
    if len(left_lines) > 0:
        left_lines_avg = np.average(left_lines,axis=0)
        left_line = calculateCoordinates(img,left_lines_avg)
        lane_lines.append(left_line)
    if len(right_lines) > 0:
        right_lines_avg = np.average(right_lines,axis=0)
        right_line = calculateCoordinates(img,right_lines_avg)
        lane_lines.append(right_line)
return np.array(lane_lines)
def showMidLine(img,steering_angle,color=(0, 255, 0),thickness=5):
line_image = np.zeros_like(img)
height, width, _ = img.shape
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv.line(line_image, (x1, y1), (x2, y2), color, thickness)
return line_image
def showLines(img,lines,color=(255,0,0),thickness=5):
line_img = np.zeros(img.shape, dtype=np.uint8)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv.line(line_img, (x1,y1), (x2,y2), color, thickness)
return line_img
def calculateSteeringAngle(img,lines):
if len(lines) == 0:
return -90
height, width, _ = img.shape
if len(lines) == 1:
x1, _, x2, _ = lines[0]
x_offset = x2 - x1
else:
_, _, left_x2, _ = lines[0]
_, _, right_x2, _ = lines[1]
camera_mid_offset_percent = 0.0
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset)
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi)
steering_angle = angle_to_mid_deg + 90
return steering_angle
def stabilizeSteeringAngle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=2, max_angle_deviation_one_lane=1):
if num_of_lane_lines == 1:
max_angle_deviation = max_angle_deviation_one_lane
else:
max_angle_deviation = max_angle_deviation_two_lines
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
return stabilized_steering_angle
| true
| true
|
7903b5d9d0dabfd8434bcbad2f0fc8d602ebdb81
| 179
|
py
|
Python
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_select_column_name.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 6,098
|
2015-05-22T02:46:12.000Z
|
2022-03-31T16:54:51.000Z
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_select_column_name.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2,517
|
2015-05-23T02:10:54.000Z
|
2022-03-30T17:03:39.000Z
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_select_column_name.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2,199
|
2015-05-22T04:09:55.000Z
|
2022-03-28T22:20:45.000Z
|
df['A']
# A
# ---------
# -0.613035
# -1.265520
# 0.763851
# -1.248425
# 2.105805
# 1.763502
# -0.781973
# 1.400853
# -0.746025
# -1.120648
#
# [100 rows x 1 column]
| 11.1875
| 23
| 0.497207
|
df['A']
| true
| true
|
7903b6540a65f0d4a3d52b31ef4ff55f2ef730ce
| 13,457
|
py
|
Python
|
awx_collection/plugins/modules/tower_job_template.py
|
mlyahmed/awx2
|
a474762e81b90752dbac39dcefed3224ad65df1f
|
[
"Apache-2.0"
] | null | null | null |
awx_collection/plugins/modules/tower_job_template.py
|
mlyahmed/awx2
|
a474762e81b90752dbac39dcefed3224ad65df1f
|
[
"Apache-2.0"
] | null | null | null |
awx_collection/plugins/modules/tower_job_template.py
|
mlyahmed/awx2
|
a474762e81b90752dbac39dcefed3224ad65df1f
|
[
"Apache-2.0"
] | 1
|
2021-02-07T21:08:44.000Z
|
2021-02-07T21:08:44.000Z
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_template
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower job template.
description:
- Create, update, or destroy Ansible Tower job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the job template.
required: True
type: str
description:
description:
- Description to use for the job template.
type: str
job_type:
description:
- The job type to use for the job template.
required: False
choices: ["run", "check"]
type: str
inventory:
description:
- Name of the inventory to use for the job template.
type: str
project:
description:
- Name of the project to use for the job template.
required: True
type: str
playbook:
description:
- Path to the playbook to use for the job template within the project provided.
required: True
type: str
credential:
description:
- Name of the credential to use for the job template.
- Deprecated, mutually exclusive with 'credentials'.
version_added: 2.7
type: str
credentials:
description:
- List of credentials to use for the job template.
- Will not remove any existing credentials. This may change in the future.
version_added: 2.8
type: list
default: []
vault_credential:
description:
- Name of the vault credential to use for the job template.
- Deprecated, mutually exclusive with 'credential'.
version_added: 2.7
type: str
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook.
type: int
limit:
description:
- A host pattern to further constrain the list of hosts managed or affected by the playbook
type: str
verbosity:
description:
- Control the output level Ansible produces as the playbook runs. 0 - Normal, 1 - Verbose, 2 - More Verbose, 3 - Debug, 4 - Connection Debug.
choices: [0, 1, 2, 3, 4]
default: 0
type: int
extra_vars:
description:
- Specify C(extra_vars) for the template.
type: dict
version_added: 3.7
extra_vars_path:
description:
- This parameter has been deprecated, please use 'extra_vars' instead.
- Path to the C(extra_vars) YAML file.
type: path
job_tags:
description:
- Comma separated list of the tags to use for the job template.
type: str
force_handlers_enabled:
description:
- Enable forcing playbook handlers to run even if a task fails.
version_added: 2.7
type: bool
default: 'no'
skip_tags:
description:
- Comma separated list of the tags to skip for the job template.
type: str
start_at_task:
description:
- Start the playbook at the task matching this name.
version_added: 2.7
type: str
diff_mode_enabled:
description:
- Enable diff mode for the job template.
version_added: 2.7
type: bool
default: 'no'
fact_caching_enabled:
description:
- Enable use of fact caching for the job template.
version_added: 2.7
type: bool
default: 'no'
host_config_key:
description:
- Allow provisioning callbacks using this host config key.
type: str
ask_diff_mode:
description:
- Prompt user to enable diff mode (show changes) to files when supported by modules.
version_added: 2.7
type: bool
default: 'no'
ask_extra_vars:
description:
- Prompt user for (extra_vars) on launch.
type: bool
default: 'no'
ask_limit:
description:
- Prompt user for a limit on launch.
version_added: 2.7
type: bool
default: 'no'
ask_tags:
description:
- Prompt user for job tags on launch.
type: bool
default: 'no'
ask_skip_tags:
description:
- Prompt user for job tags to skip on launch.
version_added: 2.7
type: bool
default: 'no'
ask_job_type:
description:
- Prompt user for job type on launch.
type: bool
default: 'no'
ask_verbosity:
description:
- Prompt user to choose a verbosity level on launch.
version_added: 2.7
type: bool
default: 'no'
ask_inventory:
description:
- Prompt user for inventory on launch.
type: bool
default: 'no'
ask_credential:
description:
- Prompt user for credential on launch.
type: bool
default: 'no'
survey_enabled:
description:
- Enable a survey on the job template.
version_added: 2.7
type: bool
default: 'no'
survey_spec:
description:
- JSON/YAML dict formatted survey definition.
version_added: 2.8
type: dict
required: False
become_enabled:
description:
- Activate privilege escalation.
type: bool
default: 'no'
concurrent_jobs_enabled:
description:
- Allow simultaneous runs of the job template.
version_added: 2.7
type: bool
default: 'no'
timeout:
description:
- Maximum time in seconds to wait for a job to finish (server-side).
type: int
custom_virtualenv:
version_added: "2.9"
description:
- Local absolute file path containing a custom Python virtualenv to use.
type: str
required: False
default: ''
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
type: str
extends_documentation_fragment: awx.awx.auth
notes:
- JSON for survey_spec can be found in Tower API Documentation. See
U(https://docs.ansible.com/ansible-tower/latest/html/towerapi/api_ref.html#/Job_Templates/Job_Templates_job_templates_survey_spec_create)
for POST operation payload example.
'''
EXAMPLES = '''
- name: Create tower Ping job template
tower_job_template:
name: "Ping"
job_type: "run"
inventory: "Local"
project: "Demo"
playbook: "ping.yml"
credential: "Local"
state: "present"
tower_config_file: "~/tower_cli.cfg"
survey_enabled: yes
survey_spec: "{{ lookup('file', 'my_survey.json') }}"
custom_virtualenv: "/var/lib/awx/venv/custom-venv/"
'''
from ..module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
import json
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def update_fields(module, p):
    '''Update the module field names to match the field names
    tower-cli expects, making calls to the modify/delete methods easier.
    '''
params = p.copy()
field_map = {
'fact_caching_enabled': 'use_fact_cache',
'ask_diff_mode': 'ask_diff_mode_on_launch',
'ask_extra_vars': 'ask_variables_on_launch',
'ask_limit': 'ask_limit_on_launch',
'ask_tags': 'ask_tags_on_launch',
'ask_skip_tags': 'ask_skip_tags_on_launch',
'ask_verbosity': 'ask_verbosity_on_launch',
'ask_inventory': 'ask_inventory_on_launch',
'ask_credential': 'ask_credential_on_launch',
'ask_job_type': 'ask_job_type_on_launch',
'diff_mode_enabled': 'diff_mode',
'concurrent_jobs_enabled': 'allow_simultaneous',
'force_handlers_enabled': 'force_handlers',
}
params_update = {}
for old_k, new_k in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars')
extra_vars_path = params.get('extra_vars_path')
if extra_vars:
params_update['extra_vars'] = [json.dumps(extra_vars)]
elif extra_vars_path is not None:
params_update['extra_vars'] = ['@' + extra_vars_path]
module.deprecate(
msg='extra_vars_path should not be used anymore. Use \'extra_vars: "{{ lookup(\'file\', \'/path/to/file\') | from_yaml }}"\' instead',
version="3.8"
)
params.update(params_update)
return params
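# Illustrative note (not in the original module): with extra_vars={'limit': 10},
# params_update['extra_vars'] becomes ['{"limit": 10}'] -- tower-cli expects a
# list whose entries are JSON strings or '@/path/to/file' references.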
def update_resources(module, p):
params = p.copy()
identity_map = {
'project': 'name',
'inventory': 'name',
'credential': 'name',
'vault_credential': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'credential' if '_credential' in k else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
elif k in params:
# unset empty parameters to avoid ValueError: invalid literal for int() with base 10: ''
del(params[k])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
return params
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(default=''),
job_type=dict(choices=['run', 'check']),
inventory=dict(default=''),
project=dict(required=True),
playbook=dict(required=True),
credential=dict(default=''),
vault_credential=dict(default=''),
custom_virtualenv=dict(type='str', required=False),
credentials=dict(type='list', default=[]),
forks=dict(type='int'),
limit=dict(default=''),
verbosity=dict(type='int', choices=[0, 1, 2, 3, 4], default=0),
extra_vars=dict(type='dict', required=False),
extra_vars_path=dict(type='path', required=False),
job_tags=dict(default=''),
force_handlers_enabled=dict(type='bool', default=False),
skip_tags=dict(default=''),
start_at_task=dict(default=''),
timeout=dict(type='int', default=0),
fact_caching_enabled=dict(type='bool', default=False),
host_config_key=dict(default=''),
ask_diff_mode=dict(type='bool', default=False),
ask_extra_vars=dict(type='bool', default=False),
ask_limit=dict(type='bool', default=False),
ask_tags=dict(type='bool', default=False),
ask_skip_tags=dict(type='bool', default=False),
ask_job_type=dict(type='bool', default=False),
ask_verbosity=dict(type='bool', default=False),
ask_inventory=dict(type='bool', default=False),
ask_credential=dict(type='bool', default=False),
survey_enabled=dict(type='bool', default=False),
survey_spec=dict(type='dict', required=False),
become_enabled=dict(type='bool', default=False),
diff_mode_enabled=dict(type='bool', default=False),
concurrent_jobs_enabled=dict(type='bool', default=False),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
('credential', 'credentials'),
('vault_credential', 'credentials'),
('extra_vars_path', 'extra_vars'),
]
)
name = module.params.get('name')
state = module.params.pop('state')
json_output = {'job_template': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
jt = tower_cli.get_resource('job_template')
params = update_resources(module, module.params)
params = update_fields(module, params)
params['create_on_missing'] = True
try:
if state == 'present':
result = jt.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = jt.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
cred_list = module.params.get('credentials')
if cred_list:
cred = tower_cli.get_resource('credential')
for cred_name in cred_list:
try:
cred_id = cred.get(name=cred_name)['id']
r = jt.associate_credential(result['id'], cred_id)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to add credential to job template: {0}'.format(excinfo), changed=False)
if r.get('changed'):
result['changed'] = True
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| 32.348558
| 149
| 0.623021
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_template
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower job template.
description:
- Create, update, or destroy Ansible Tower job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the job template.
required: True
type: str
description:
description:
- Description to use for the job template.
type: str
job_type:
description:
- The job type to use for the job template.
required: False
choices: ["run", "check"]
type: str
inventory:
description:
- Name of the inventory to use for the job template.
type: str
project:
description:
- Name of the project to use for the job template.
required: True
type: str
playbook:
description:
- Path to the playbook to use for the job template within the project provided.
required: True
type: str
credential:
description:
- Name of the credential to use for the job template.
- Deprecated, mutually exclusive with 'credentials'.
version_added: 2.7
type: str
credentials:
description:
- List of credentials to use for the job template.
- Will not remove any existing credentials. This may change in the future.
version_added: 2.8
type: list
default: []
vault_credential:
description:
- Name of the vault credential to use for the job template.
- Deprecated, mutually exclusive with 'credential'.
version_added: 2.7
type: str
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook.
type: int
limit:
description:
- A host pattern to further constrain the list of hosts managed or affected by the playbook
type: str
verbosity:
description:
- Control the output level Ansible produces as the playbook runs. 0 - Normal, 1 - Verbose, 2 - More Verbose, 3 - Debug, 4 - Connection Debug.
choices: [0, 1, 2, 3, 4]
default: 0
type: int
extra_vars:
description:
- Specify C(extra_vars) for the template.
type: dict
version_added: 3.7
extra_vars_path:
description:
- This parameter has been deprecated, please use 'extra_vars' instead.
- Path to the C(extra_vars) YAML file.
type: path
job_tags:
description:
- Comma separated list of the tags to use for the job template.
type: str
force_handlers_enabled:
description:
- Enable forcing playbook handlers to run even if a task fails.
version_added: 2.7
type: bool
default: 'no'
skip_tags:
description:
- Comma separated list of the tags to skip for the job template.
type: str
start_at_task:
description:
- Start the playbook at the task matching this name.
version_added: 2.7
type: str
diff_mode_enabled:
description:
- Enable diff mode for the job template.
version_added: 2.7
type: bool
default: 'no'
fact_caching_enabled:
description:
- Enable use of fact caching for the job template.
version_added: 2.7
type: bool
default: 'no'
host_config_key:
description:
- Allow provisioning callbacks using this host config key.
type: str
ask_diff_mode:
description:
- Prompt user to enable diff mode (show changes) to files when supported by modules.
version_added: 2.7
type: bool
default: 'no'
ask_extra_vars:
description:
- Prompt user for (extra_vars) on launch.
type: bool
default: 'no'
ask_limit:
description:
- Prompt user for a limit on launch.
version_added: 2.7
type: bool
default: 'no'
ask_tags:
description:
- Prompt user for job tags on launch.
type: bool
default: 'no'
ask_skip_tags:
description:
- Prompt user for job tags to skip on launch.
version_added: 2.7
type: bool
default: 'no'
ask_job_type:
description:
- Prompt user for job type on launch.
type: bool
default: 'no'
ask_verbosity:
description:
- Prompt user to choose a verbosity level on launch.
version_added: 2.7
type: bool
default: 'no'
ask_inventory:
description:
- Prompt user for inventory on launch.
type: bool
default: 'no'
ask_credential:
description:
- Prompt user for credential on launch.
type: bool
default: 'no'
survey_enabled:
description:
- Enable a survey on the job template.
version_added: 2.7
type: bool
default: 'no'
survey_spec:
description:
- JSON/YAML dict formatted survey definition.
version_added: 2.8
type: dict
required: False
become_enabled:
description:
- Activate privilege escalation.
type: bool
default: 'no'
concurrent_jobs_enabled:
description:
- Allow simultaneous runs of the job template.
version_added: 2.7
type: bool
default: 'no'
timeout:
description:
- Maximum time in seconds to wait for a job to finish (server-side).
type: int
custom_virtualenv:
version_added: "2.9"
description:
- Local absolute file path containing a custom Python virtualenv to use.
type: str
required: False
default: ''
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
type: str
extends_documentation_fragment: awx.awx.auth
notes:
- JSON for survey_spec can be found in Tower API Documentation. See
U(https://docs.ansible.com/ansible-tower/latest/html/towerapi/api_ref.html#/Job_Templates/Job_Templates_job_templates_survey_spec_create)
for POST operation payload example.
'''
EXAMPLES = '''
- name: Create tower Ping job template
tower_job_template:
name: "Ping"
job_type: "run"
inventory: "Local"
project: "Demo"
playbook: "ping.yml"
credential: "Local"
state: "present"
tower_config_file: "~/tower_cli.cfg"
survey_enabled: yes
survey_spec: "{{ lookup('file', 'my_survey.json') }}"
custom_virtualenv: "/var/lib/awx/venv/custom-venv/"
'''
from ..module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
import json
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def update_fields(module, p):
params = p.copy()
field_map = {
'fact_caching_enabled': 'use_fact_cache',
'ask_diff_mode': 'ask_diff_mode_on_launch',
'ask_extra_vars': 'ask_variables_on_launch',
'ask_limit': 'ask_limit_on_launch',
'ask_tags': 'ask_tags_on_launch',
'ask_skip_tags': 'ask_skip_tags_on_launch',
'ask_verbosity': 'ask_verbosity_on_launch',
'ask_inventory': 'ask_inventory_on_launch',
'ask_credential': 'ask_credential_on_launch',
'ask_job_type': 'ask_job_type_on_launch',
'diff_mode_enabled': 'diff_mode',
'concurrent_jobs_enabled': 'allow_simultaneous',
'force_handlers_enabled': 'force_handlers',
}
params_update = {}
for old_k, new_k in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars')
extra_vars_path = params.get('extra_vars_path')
if extra_vars:
params_update['extra_vars'] = [json.dumps(extra_vars)]
elif extra_vars_path is not None:
params_update['extra_vars'] = ['@' + extra_vars_path]
module.deprecate(
msg='extra_vars_path should not be used anymore. Use \'extra_vars: "{{ lookup(\'file\', \'/path/to/file\') | from_yaml }}"\' instead',
version="3.8"
)
params.update(params_update)
return params
def update_resources(module, p):
params = p.copy()
identity_map = {
'project': 'name',
'inventory': 'name',
'credential': 'name',
'vault_credential': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'credential' if '_credential' in k else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
elif k in params:
del(params[k])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
return params
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(default=''),
job_type=dict(choices=['run', 'check']),
inventory=dict(default=''),
project=dict(required=True),
playbook=dict(required=True),
credential=dict(default=''),
vault_credential=dict(default=''),
custom_virtualenv=dict(type='str', required=False),
credentials=dict(type='list', default=[]),
forks=dict(type='int'),
limit=dict(default=''),
verbosity=dict(type='int', choices=[0, 1, 2, 3, 4], default=0),
extra_vars=dict(type='dict', required=False),
extra_vars_path=dict(type='path', required=False),
job_tags=dict(default=''),
force_handlers_enabled=dict(type='bool', default=False),
skip_tags=dict(default=''),
start_at_task=dict(default=''),
timeout=dict(type='int', default=0),
fact_caching_enabled=dict(type='bool', default=False),
host_config_key=dict(default=''),
ask_diff_mode=dict(type='bool', default=False),
ask_extra_vars=dict(type='bool', default=False),
ask_limit=dict(type='bool', default=False),
ask_tags=dict(type='bool', default=False),
ask_skip_tags=dict(type='bool', default=False),
ask_job_type=dict(type='bool', default=False),
ask_verbosity=dict(type='bool', default=False),
ask_inventory=dict(type='bool', default=False),
ask_credential=dict(type='bool', default=False),
survey_enabled=dict(type='bool', default=False),
survey_spec=dict(type='dict', required=False),
become_enabled=dict(type='bool', default=False),
diff_mode_enabled=dict(type='bool', default=False),
concurrent_jobs_enabled=dict(type='bool', default=False),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
('credential', 'credentials'),
('vault_credential', 'credentials'),
('extra_vars_path', 'extra_vars'),
]
)
name = module.params.get('name')
state = module.params.pop('state')
json_output = {'job_template': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
jt = tower_cli.get_resource('job_template')
params = update_resources(module, module.params)
params = update_fields(module, params)
params['create_on_missing'] = True
try:
if state == 'present':
result = jt.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = jt.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
cred_list = module.params.get('credentials')
if cred_list:
cred = tower_cli.get_resource('credential')
for cred_name in cred_list:
try:
cred_id = cred.get(name=cred_name)['id']
r = jt.associate_credential(result['id'], cred_id)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to add credential to job template: {0}'.format(excinfo), changed=False)
if r.get('changed'):
result['changed'] = True
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| true
| true
|
7903b71f344fdae0aaa535b9d1dc6746718b0d4e
| 6,074
|
py
|
Python
|
models/fdconv1d_lstm/train.py
|
rovo98/model-unkown-dfa-diagnosis-based-on-running-logs
|
f80c838dea6a8313165fbf10d64d5dc935cc036c
|
[
"Apache-2.0"
] | null | null | null |
models/fdconv1d_lstm/train.py
|
rovo98/model-unkown-dfa-diagnosis-based-on-running-logs
|
f80c838dea6a8313165fbf10d64d5dc935cc036c
|
[
"Apache-2.0"
] | 4
|
2020-04-30T07:57:42.000Z
|
2020-09-27T06:52:00.000Z
|
models/fdconv1d_lstm/train.py
|
rovo98/model-unkown-dfa-diagnosis-based-on-running-logs
|
f80c838dea6a8313165fbf10d64d5dc935cc036c
|
[
"Apache-2.0"
] | null | null | null |
# author rovo98
import os
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping
from model_data_input import load_processed_dataset
from models.fdconv1d_lstm.model import build_fdconv1d_lstm
from models.utils.misc import running_timer
from models.utils.misc import plot_training_history
# filter warning logs of tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Enable memory growth for every GPU.
# Using GPU devices to train the models is recommended.
# Comment out the following lines to disable forcing GPU usage.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, 'Not enough GPU hardware available'
for gpu in physical_devices:
tf.config.experimental.set_memory_growth(gpu, True)
# noinspection DuplicatedCode
@running_timer
def train_model(epochs=10,
batch_size=32,
training_verbose=1,
print_model_summary=False,
using_validation=False,
validation_split=0.2,
plot_history_data=False,
history_fig_name='default',
plot_model_arch=False,
plot_model_name='default',
save_model=False,
save_model_name='default'):
# num_of_faulty_type = 3
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-02-22 20:34:10_czE4OmZzNDphczE2OmZlczI=_processed_logs_rnn', num_of_faulty_type,
# location='../../dataset', for_rnn=True)
#
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2019-12-28 00:46:37_czc1OmZzNzphczE1OmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 1. single faulty mode(small state size): short logs (10 - 50)
num_of_faulty_type = 3
train_x, train_y, test_x, test_y = load_processed_dataset(
'2020-03-17 15:55:22_czE4OmZzNDphczE2OmZlczI=_processed_logs', num_of_faulty_type,
location='../../dataset')
# 2. single faulty mode(small state size): long logs (60 - 100)
# num_of_faulty_type = 3
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:00:22_czE4OmZzNDphczE2OmZlczI=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
# 3. single faulty mode(big state size): short logs (10 - 50)
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:16:04_czgwOmZzODphczE4OmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 4. single faulty mode(big state size): long logs (60 - 100)
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-19 17:09:05_czgwOmZzODphczE4OmZlczQ=_processed_logs_b_rg', num_of_faulty_type,
# location='../../dataset')
# 5. multi faulty mode (small state size): short logs
# num_of_faulty_type = 4
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:34:50_czE3OmZzNDphczE0OmZlczI=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 6. multi faulty mode (small state size): long logs
# num_of_faulty_type = 4
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:36:40_czE3OmZzNDphczE0OmZlczI=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
# 7. multi faulty mode (big state size): short logs
# num_of_faulty_type = 16
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:40:03_czgwOmZzODphczIwOmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 8. multi faulty mode (big state size): long logs
# num_of_faulty_type = 16
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:41:29_czgwOmZzODphczIwOmZlczQ=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
n_timesteps, n_features = train_x.shape[1], train_x.shape[2]
# building the model.
model = build_fdconv1d_lstm((n_timesteps, n_features), num_of_faulty_type, kernel_size=31)
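    # Note: the fit/evaluate calls below feed the same tensor twice
    # (x=[train_x, train_x]), which suggests a two-branch network (a Conv1D
    # branch and an LSTM branch sharing one input sequence) -- an assumption
    # based on the model name, not verified against the model definition.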
# print out the model summary
if print_model_summary:
model.summary()
# plot and save the model architecture.
if plot_model_arch:
plot_model(model, to_file=plot_model_name, show_shapes=True)
# fit network
if plot_history_data:
history = model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size,
verbose=training_verbose, validation_split=validation_split)
plot_training_history(history, 'fdconv1d-lstm', history_fig_name, '../exper_imgs')
elif using_validation:
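        # EarlyStopping positional args used here:
        # monitor='val_categorical_accuracy', min_delta=1e-4, patience=3,
        # verbose=1, mode='max'.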
es = EarlyStopping('val_categorical_accuracy', 1e-4, 3, 1, 'max')
history = model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size,
verbose=training_verbose, validation_split=validation_split, callbacks=[es])
plot_training_history(history, 'fdconv1d-lstm', history_fig_name, '../exper_imgs')
else:
model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size, verbose=training_verbose)
_, accuracy = model.evaluate(x=[test_x, test_x], y=test_y, batch_size=batch_size, verbose=0)
# saving the model
if save_model:
model.save(save_model_name)
print('>>> model saved: {}'.format(save_model_name))
print('\n>>> Accuracy on testing given testing dataset: {}'.format(accuracy * 100))
# Driver program to test the methods above.
if __name__ == '__main__':
train_model(50,
print_model_summary=True,
using_validation=True,
history_fig_name='fdConv1d-lstm_czE4OmZzNDphczE2OmZlczI=_small.png',
save_model=True,
save_model_name='../trained_saved/fdConv1d-lstm_czE4OmZzNDphczE2OmZlczI=_small.h5')
| 43.697842
| 114
| 0.689167
|
import os
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping
from model_data_input import load_processed_dataset
from models.fdconv1d_lstm.model import build_fdconv1d_lstm
from models.utils.misc import running_timer
from models.utils.misc import plot_training_history
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, 'Not enough GPU hardware available'
for gpu in physical_devices:
tf.config.experimental.set_memory_growth(gpu, True)
@running_timer
def train_model(epochs=10,
batch_size=32,
training_verbose=1,
print_model_summary=False,
using_validation=False,
validation_split=0.2,
plot_history_data=False,
history_fig_name='default',
plot_model_arch=False,
plot_model_name='default',
save_model=False,
save_model_name='default'):
num_of_faulty_type = 3
train_x, train_y, test_x, test_y = load_processed_dataset(
'2020-03-17 15:55:22_czE4OmZzNDphczE2OmZlczI=_processed_logs', num_of_faulty_type,
location='../../dataset')
n_timesteps, n_features = train_x.shape[1], train_x.shape[2]
model = build_fdconv1d_lstm((n_timesteps, n_features), num_of_faulty_type, kernel_size=31)
if print_model_summary:
model.summary()
if plot_model_arch:
plot_model(model, to_file=plot_model_name, show_shapes=True)
if plot_history_data:
history = model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size,
verbose=training_verbose, validation_split=validation_split)
plot_training_history(history, 'fdconv1d-lstm', history_fig_name, '../exper_imgs')
elif using_validation:
es = EarlyStopping('val_categorical_accuracy', 1e-4, 3, 1, 'max')
history = model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size,
verbose=training_verbose, validation_split=validation_split, callbacks=[es])
plot_training_history(history, 'fdconv1d-lstm', history_fig_name, '../exper_imgs')
else:
model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size, verbose=training_verbose)
_, accuracy = model.evaluate(x=[test_x, test_x], y=test_y, batch_size=batch_size, verbose=0)
if save_model:
model.save(save_model_name)
print('>>> model saved: {}'.format(save_model_name))
print('\n>>> Accuracy on testing given testing dataset: {}'.format(accuracy * 100))
if __name__ == '__main__':
train_model(50,
print_model_summary=True,
using_validation=True,
history_fig_name='fdConv1d-lstm_czE4OmZzNDphczE2OmZlczI=_small.png',
save_model=True,
save_model_name='../trained_saved/fdConv1d-lstm_czE4OmZzNDphczE2OmZlczI=_small.h5')
| true
| true
|
7903b7a6bfd5a38a00aa106b422ceee6f3169781
| 89,814
|
py
|
Python
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
jo2y/google-cloud-python
|
1b76727be16bc4335276f793340bb72d32be7166
|
[
"Apache-2.0"
] | 2
|
2018-02-01T06:30:24.000Z
|
2018-04-12T15:39:56.000Z
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
jo2y/google-cloud-python
|
1b76727be16bc4335276f793340bb72d32be7166
|
[
"Apache-2.0"
] | 7
|
2020-03-24T15:50:06.000Z
|
2021-06-08T19:57:39.000Z
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
jo2y/google-cloud-python
|
1b76727be16bc4335276f793340bb72d32be7166
|
[
"Apache-2.0"
] | 1
|
2018-09-19T05:55:27.000Z
|
2018-09-19T05:55:27.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/vision_v1p1beta1/proto/image_annotator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.vision_v1p1beta1.proto import geometry_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2
from google.cloud.vision_v1p1beta1.proto import text_annotation_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2
from google.cloud.vision_v1p1beta1.proto import web_detection_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from google.type import color_pb2 as google_dot_type_dot_color__pb2
from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/vision_v1p1beta1/proto/image_annotator.proto',
package='google.cloud.vision.v1p1beta1',
syntax='proto3',
serialized_pb=_b('\n9google/cloud/vision_v1p1beta1/proto/image_annotator.proto\x12\x1dgoogle.cloud.vision.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x32google/cloud/vision_v1p1beta1/proto/geometry.proto\x1a\x39google/cloud/vision_v1p1beta1/proto/text_annotation.proto\x1a\x37google/cloud/vision_v1p1beta1/proto/web_detection.proto\x1a\x17google/rpc/status.proto\x1a\x17google/type/color.proto\x1a\x18google/type/latlng.proto\"\xe1\x02\n\x07\x46\x65\x61ture\x12\x39\n\x04type\x18\x01 \x01(\x0e\x32+.google.cloud.vision.v1p1beta1.Feature.Type\x12\x13\n\x0bmax_results\x18\x02 \x01(\x05\x12\r\n\x05model\x18\x03 \x01(\t\"\xf6\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x01\x12\x16\n\x12LANDMARK_DETECTION\x10\x02\x12\x12\n\x0eLOGO_DETECTION\x10\x03\x12\x13\n\x0fLABEL_DETECTION\x10\x04\x12\x12\n\x0eTEXT_DETECTION\x10\x05\x12\x1b\n\x17\x44OCUMENT_TEXT_DETECTION\x10\x0b\x12\x19\n\x15SAFE_SEARCH_DETECTION\x10\x06\x12\x14\n\x10IMAGE_PROPERTIES\x10\x07\x12\x0e\n\nCROP_HINTS\x10\t\x12\x11\n\rWEB_DETECTION\x10\n\"7\n\x0bImageSource\x12\x15\n\rgcs_image_uri\x18\x01 \x01(\t\x12\x11\n\timage_uri\x18\x02 \x01(\t\"T\n\x05Image\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\x0c\x12:\n\x06source\x18\x02 \x01(\x0b\x32*.google.cloud.vision.v1p1beta1.ImageSource\"\x9b\x0e\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x42\n\rbounding_poly\x18\x01 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12\x45\n\x10\x66\x64_bounding_poly\x18\x02 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12I\n\tlandmarks\x18\x03 \x03(\x0b\x32\x36.google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark\x12\x12\n\nroll_angle\x18\x04 \x01(\x02\x12\x11\n\tpan_angle\x18\x05 \x01(\x02\x12\x12\n\ntilt_angle\x18\x06 \x01(\x02\x12\x1c\n\x14\x64\x65tection_confidence\x18\x07 \x01(\x02\x12\x1e\n\x16landmarking_confidence\x18\x08 \x01(\x02\x12\x41\n\x0ejoy_likelihood\x18\t \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x44\n\x11sorrow_likelihood\x18\n \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x43\n\x10\x61nger_likelihood\x18\x0b \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x46\n\x13surprise_likelihood\x18\x0c \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12K\n\x18under_exposed_likelihood\x18\r \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x45\n\x12\x62lurred_likelihood\x18\x0e \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x46\n\x13headwear_likelihood\x18\x0f \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x1a\xc7\x07\n\x08Landmark\x12I\n\x04type\x18\x03 \x01(\x0e\x32;.google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.Type\x12\x39\n\x08position\x18\x04 \x01(\x0b\x32\'.google.cloud.vision.v1p1beta1.Position\"\xb4\x06\n\x04Type\x12\x14\n\x10UNKNOWN_LANDMARK\x10\x00\x12\x0c\n\x08LEFT_EYE\x10\x01\x12\r\n\tRIGHT_EYE\x10\x02\x12\x18\n\x14LEFT_OF_LEFT_EYEBROW\x10\x03\x12\x19\n\x15RIGHT_OF_LEFT_EYEBROW\x10\x04\x12\x19\n\x15LEFT_OF_RIGHT_EYEBROW\x10\x05\x12\x1a\n\x16RIGHT_OF_RIGHT_EYEBROW\x10\x06\x12\x19\n\x15MIDPOINT_BETWEEN_EYES\x10\x07\x12\x0c\n\x08NOSE_TIP\x10\x08\x12\r\n\tUPPER_LIP\x10\t\x12\r\n\tLOWER_LIP\x10\n\x12\x0e\n\nMOUTH_LEFT\x10\x0b\x12\x0f\n\x0bMOUTH_RIGHT\x10\x0c\x12\x10\n\x0cMOUTH_CENTER\x10\r\x12\x15\n\x11NOSE_BOTTOM_RIGHT\x10\x0e\x12\x14\n\x10NOSE_BOTTOM_LEFT\x10\x0f\x12\x16\n\x12NOSE_BOTTOM_CENTER\x10\x10\x12\x19\n\x15LEFT_EYE_TOP_BOUNDARY\x10\x11\x12\x19\n\x15LEFT_EYE_RIGHT_CORNER\x10\x12\x12\x1c\n\x18LEFT_EYE_BOTTOM_BOUNDARY\x10\x13\x12\x18\n\x14LEFT_EYE_LEFT_CORNER\x10\x14\x12\x1a\n\x16RIGHT_EYE_TOP_BOUNDARY\x10\x15\x12\x1a\n\x16RIGHT_EYE_RIGHT_CORNER\x10\x16\x12\x1d\n\x19RIGHT_EYE_BOTTOM_BOUNDARY\x10\x17\x12\x19\n\x15RIGHT_EYE_LEFT_CORNER\x10\x18\x12\x1f\n\x1bLEFT_EYEBROW_UPPER_MIDPOINT\x10\x19\x12 \n\x1cRIGHT_EYEBROW_UPPER_MIDPOINT\x10\x1a\x12\x14\n\x10LEFT_EAR_TRAGION\x10\x1b\x12\x15\n\x11RIGHT_EAR_TRAGION\x10\x1c\x12\x12\n\x0eLEFT_EYE_PUPIL\x10\x1d\x12\x13\n\x0fRIGHT_EYE_PUPIL\x10\x1e\x12\x15\n\x11\x46OREHEAD_GLABELLA\x10\x1f\x12\x11\n\rCHIN_GNATHION\x10 \x12\x14\n\x10\x43HIN_LEFT_GONION\x10!\x12\x15\n\x11\x43HIN_RIGHT_GONION\x10\"\"4\n\x0cLocationInfo\x12$\n\x07lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\"=\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x14\n\x0cuint64_value\x18\x03 \x01(\x04\"\xbc\x02\n\x10\x45ntityAnnotation\x12\x0b\n\x03mid\x18\x01 \x01(\t\x12\x0e\n\x06locale\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\r\n\x05score\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\x12\x12\n\ntopicality\x18\x06 \x01(\x02\x12\x42\n\rbounding_poly\x18\x07 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12>\n\tlocations\x18\x08 \x03(\x0b\x32+.google.cloud.vision.v1p1beta1.LocationInfo\x12;\n\nproperties\x18\t \x03(\x0b\x32\'.google.cloud.vision.v1p1beta1.Property\"\xbc\x02\n\x14SafeSearchAnnotation\x12\x38\n\x05\x61\x64ult\x18\x01 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x38\n\x05spoof\x18\x02 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12:\n\x07medical\x18\x03 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12;\n\x08violence\x18\x04 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x37\n\x04racy\x18\t \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\"a\n\x0bLatLongRect\x12(\n\x0bmin_lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\x12(\n\x0bmax_lat_lng\x18\x02 \x01(\x0b\x32\x13.google.type.LatLng\"U\n\tColorInfo\x12!\n\x05\x63olor\x18\x01 \x01(\x0b\x32\x12.google.type.Color\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x16\n\x0epixel_fraction\x18\x03 \x01(\x02\"T\n\x18\x44ominantColorsAnnotation\x12\x38\n\x06\x63olors\x18\x01 \x03(\x0b\x32(.google.cloud.vision.v1p1beta1.ColorInfo\"c\n\x0fImageProperties\x12P\n\x0f\x64ominant_colors\x18\x01 \x01(\x0b\x32\x37.google.cloud.vision.v1p1beta1.DominantColorsAnnotation\"\x7f\n\x08\x43ropHint\x12\x42\n\rbounding_poly\x18\x01 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x1b\n\x13importance_fraction\x18\x03 \x01(\x02\"R\n\x13\x43ropHintsAnnotation\x12;\n\ncrop_hints\x18\x01 \x03(\x0b\x32\'.google.cloud.vision.v1p1beta1.CropHint\"(\n\x0f\x43ropHintsParams\x12\x15\n\raspect_ratios\x18\x01 \x03(\x02\"1\n\x12WebDetectionParams\x12\x1b\n\x13include_geo_results\x18\x02 \x01(\x08\"\x85\x02\n\x0cImageContext\x12\x41\n\rlat_long_rect\x18\x01 \x01(\x0b\x32*.google.cloud.vision.v1p1beta1.LatLongRect\x12\x16\n\x0elanguage_hints\x18\x02 \x03(\t\x12I\n\x11\x63rop_hints_params\x18\x04 \x01(\x0b\x32..google.cloud.vision.v1p1beta1.CropHintsParams\x12O\n\x14web_detection_params\x18\x06 \x01(\x0b\x32\x31.google.cloud.vision.v1p1beta1.WebDetectionParams\"\xc9\x01\n\x14\x41nnotateImageRequest\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32$.google.cloud.vision.v1p1beta1.Image\x12\x38\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0b\x32&.google.cloud.vision.v1p1beta1.Feature\x12\x42\n\rimage_context\x18\x03 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.ImageContext\"\xc2\x06\n\x15\x41nnotateImageResponse\x12G\n\x10\x66\x61\x63\x65_annotations\x18\x01 \x03(\x0b\x32-.google.cloud.vision.v1p1beta1.FaceAnnotation\x12M\n\x14landmark_annotations\x18\x02 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12I\n\x10logo_annotations\x18\x03 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12J\n\x11label_annotations\x18\x04 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12I\n\x10text_annotations\x18\x05 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12K\n\x14\x66ull_text_annotation\x18\x0c \x01(\x0b\x32-.google.cloud.vision.v1p1beta1.TextAnnotation\x12S\n\x16safe_search_annotation\x18\x06 \x01(\x0b\x32\x33.google.cloud.vision.v1p1beta1.SafeSearchAnnotation\x12S\n\x1bimage_properties_annotation\x18\x08 \x01(\x0b\x32..google.cloud.vision.v1p1beta1.ImageProperties\x12Q\n\x15\x63rop_hints_annotation\x18\x0b \x01(\x0b\x32\x32.google.cloud.vision.v1p1beta1.CropHintsAnnotation\x12\x42\n\rweb_detection\x18\r \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.WebDetection\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status\"c\n\x1a\x42\x61tchAnnotateImagesRequest\x12\x45\n\x08requests\x18\x01 \x03(\x0b\x32\x33.google.cloud.vision.v1p1beta1.AnnotateImageRequest\"f\n\x1b\x42\x61tchAnnotateImagesResponse\x12G\n\tresponses\x18\x01 \x03(\x0b\x32\x34.google.cloud.vision.v1p1beta1.AnnotateImageResponse*e\n\nLikelihood\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc6\x01\n\x0eImageAnnotator\x12\xb3\x01\n\x13\x42\x61tchAnnotateImages\x12\x39.google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest\x1a:.google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/v1p1beta1/images:annotate:\x01*B\x82\x01\n!com.google.cloud.vision.v1p1beta1B\x13ImageAnnotatorProtoP\x01ZCgoogle.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision\xf8\x01\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,google_dot_type_dot_color__pb2.DESCRIPTOR,google_dot_type_dot_latlng__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LIKELIHOOD = _descriptor.EnumDescriptor(
name='Likelihood',
full_name='google.cloud.vision.v1p1beta1.Likelihood',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERY_UNLIKELY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNLIKELY', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POSSIBLE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LIKELY', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERY_LIKELY', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5631,
serialized_end=5732,
)
_sym_db.RegisterEnumDescriptor(_LIKELIHOOD)
Likelihood = enum_type_wrapper.EnumTypeWrapper(_LIKELIHOOD)
UNKNOWN = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
POSSIBLE = 3
LIKELY = 4
VERY_LIKELY = 5
_FEATURE_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='google.cloud.vision.v1p1beta1.Feature.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FACE_DETECTION', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LANDMARK_DETECTION', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOGO_DETECTION', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_DETECTION', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEXT_DETECTION', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DOCUMENT_TEXT_DETECTION', index=6, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SAFE_SEARCH_DETECTION', index=7, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMAGE_PROPERTIES', index=8, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CROP_HINTS', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEB_DETECTION', index=10, number=10,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=474,
serialized_end=720,
)
_sym_db.RegisterEnumDescriptor(_FEATURE_TYPE)
_FACEANNOTATION_LANDMARK_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_LANDMARK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_OF_LEFT_EYEBROW', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_OF_LEFT_EYEBROW', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_OF_RIGHT_EYEBROW', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_OF_RIGHT_EYEBROW', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MIDPOINT_BETWEEN_EYES', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_TIP', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPPER_LIP', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOWER_LIP', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_LEFT', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_RIGHT', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_CENTER', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_RIGHT', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_LEFT', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_CENTER', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_TOP_BOUNDARY', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_RIGHT_CORNER', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_BOTTOM_BOUNDARY', index=19, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_LEFT_CORNER', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_TOP_BOUNDARY', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_RIGHT_CORNER', index=22, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_BOTTOM_BOUNDARY', index=23, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_LEFT_CORNER', index=24, number=24,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYEBROW_UPPER_MIDPOINT', index=25, number=25,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYEBROW_UPPER_MIDPOINT', index=26, number=26,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EAR_TRAGION', index=27, number=27,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EAR_TRAGION', index=28, number=28,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_PUPIL', index=29, number=29,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_PUPIL', index=30, number=30,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FOREHEAD_GLABELLA', index=31, number=31,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_GNATHION', index=32, number=32,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_LEFT_GONION', index=33, number=33,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_RIGHT_GONION', index=34, number=34,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1865,
serialized_end=2685,
)
_sym_db.RegisterEnumDescriptor(_FACEANNOTATION_LANDMARK_TYPE)
_FEATURE = _descriptor.Descriptor(
name='Feature',
full_name='google.cloud.vision.v1p1beta1.Feature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='google.cloud.vision.v1p1beta1.Feature.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_results', full_name='google.cloud.vision.v1p1beta1.Feature.max_results', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model', full_name='google.cloud.vision.v1p1beta1.Feature.model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FEATURE_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=367,
serialized_end=720,
)
_IMAGESOURCE = _descriptor.Descriptor(
name='ImageSource',
full_name='google.cloud.vision.v1p1beta1.ImageSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='gcs_image_uri', full_name='google.cloud.vision.v1p1beta1.ImageSource.gcs_image_uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_uri', full_name='google.cloud.vision.v1p1beta1.ImageSource.image_uri', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=722,
serialized_end=777,
)
_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='google.cloud.vision.v1p1beta1.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='content', full_name='google.cloud.vision.v1p1beta1.Image.content', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source', full_name='google.cloud.vision.v1p1beta1.Image.source', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=779,
serialized_end=863,
)
_FACEANNOTATION_LANDMARK = _descriptor.Descriptor(
name='Landmark',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.type', index=0,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.position', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FACEANNOTATION_LANDMARK_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1718,
serialized_end=2685,
)
_FACEANNOTATION = _descriptor.Descriptor(
name='FaceAnnotation',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.bounding_poly', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fd_bounding_poly', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.fd_bounding_poly', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmarks', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.landmarks', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='roll_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.roll_angle', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pan_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.pan_angle', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tilt_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.tilt_angle', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='detection_confidence', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.detection_confidence', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmarking_confidence', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.landmarking_confidence', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='joy_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.joy_likelihood', index=8,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sorrow_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.sorrow_likelihood', index=9,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='anger_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.anger_likelihood', index=10,
number=11, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='surprise_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.surprise_likelihood', index=11,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='under_exposed_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.under_exposed_likelihood', index=12,
number=13, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blurred_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.blurred_likelihood', index=13,
number=14, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headwear_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.headwear_likelihood', index=14,
number=15, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_FACEANNOTATION_LANDMARK, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=866,
serialized_end=2685,
)
_LOCATIONINFO = _descriptor.Descriptor(
name='LocationInfo',
full_name='google.cloud.vision.v1p1beta1.LocationInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lat_lng', full_name='google.cloud.vision.v1p1beta1.LocationInfo.lat_lng', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2687,
serialized_end=2739,
)
_PROPERTY = _descriptor.Descriptor(
name='Property',
full_name='google.cloud.vision.v1p1beta1.Property',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.cloud.vision.v1p1beta1.Property.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.cloud.vision.v1p1beta1.Property.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uint64_value', full_name='google.cloud.vision.v1p1beta1.Property.uint64_value', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2741,
serialized_end=2802,
)
_ENTITYANNOTATION = _descriptor.Descriptor(
name='EntityAnnotation',
full_name='google.cloud.vision.v1p1beta1.EntityAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mid', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.mid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.locale', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.score', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='confidence', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.confidence', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='topicality', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.topicality', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.bounding_poly', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locations', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.locations', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='properties', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.properties', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2805,
serialized_end=3121,
)
_SAFESEARCHANNOTATION = _descriptor.Descriptor(
name='SafeSearchAnnotation',
full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='adult', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.adult', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='spoof', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.spoof', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='medical', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.medical', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='violence', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.violence', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='racy', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.racy', index=4,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3124,
serialized_end=3440,
)
_LATLONGRECT = _descriptor.Descriptor(
name='LatLongRect',
full_name='google.cloud.vision.v1p1beta1.LatLongRect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_lat_lng', full_name='google.cloud.vision.v1p1beta1.LatLongRect.min_lat_lng', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_lat_lng', full_name='google.cloud.vision.v1p1beta1.LatLongRect.max_lat_lng', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3442,
serialized_end=3539,
)
_COLORINFO = _descriptor.Descriptor(
name='ColorInfo',
full_name='google.cloud.vision.v1p1beta1.ColorInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='color', full_name='google.cloud.vision.v1p1beta1.ColorInfo.color', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='google.cloud.vision.v1p1beta1.ColorInfo.score', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pixel_fraction', full_name='google.cloud.vision.v1p1beta1.ColorInfo.pixel_fraction', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3541,
serialized_end=3626,
)
_DOMINANTCOLORSANNOTATION = _descriptor.Descriptor(
name='DominantColorsAnnotation',
full_name='google.cloud.vision.v1p1beta1.DominantColorsAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='colors', full_name='google.cloud.vision.v1p1beta1.DominantColorsAnnotation.colors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3628,
serialized_end=3712,
)
_IMAGEPROPERTIES = _descriptor.Descriptor(
name='ImageProperties',
full_name='google.cloud.vision.v1p1beta1.ImageProperties',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dominant_colors', full_name='google.cloud.vision.v1p1beta1.ImageProperties.dominant_colors', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3714,
serialized_end=3813,
)
_CROPHINT = _descriptor.Descriptor(
name='CropHint',
full_name='google.cloud.vision.v1p1beta1.CropHint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.CropHint.bounding_poly', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='confidence', full_name='google.cloud.vision.v1p1beta1.CropHint.confidence', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='importance_fraction', full_name='google.cloud.vision.v1p1beta1.CropHint.importance_fraction', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3815,
serialized_end=3942,
)
_CROPHINTSANNOTATION = _descriptor.Descriptor(
name='CropHintsAnnotation',
full_name='google.cloud.vision.v1p1beta1.CropHintsAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='crop_hints', full_name='google.cloud.vision.v1p1beta1.CropHintsAnnotation.crop_hints', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3944,
serialized_end=4026,
)
_CROPHINTSPARAMS = _descriptor.Descriptor(
name='CropHintsParams',
full_name='google.cloud.vision.v1p1beta1.CropHintsParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aspect_ratios', full_name='google.cloud.vision.v1p1beta1.CropHintsParams.aspect_ratios', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4028,
serialized_end=4068,
)
_WEBDETECTIONPARAMS = _descriptor.Descriptor(
name='WebDetectionParams',
full_name='google.cloud.vision.v1p1beta1.WebDetectionParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='include_geo_results', full_name='google.cloud.vision.v1p1beta1.WebDetectionParams.include_geo_results', index=0,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4070,
serialized_end=4119,
)
_IMAGECONTEXT = _descriptor.Descriptor(
name='ImageContext',
full_name='google.cloud.vision.v1p1beta1.ImageContext',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lat_long_rect', full_name='google.cloud.vision.v1p1beta1.ImageContext.lat_long_rect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='language_hints', full_name='google.cloud.vision.v1p1beta1.ImageContext.language_hints', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_hints_params', full_name='google.cloud.vision.v1p1beta1.ImageContext.crop_hints_params', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='web_detection_params', full_name='google.cloud.vision.v1p1beta1.ImageContext.web_detection_params', index=3,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4122,
serialized_end=4383,
)
_ANNOTATEIMAGEREQUEST = _descriptor.Descriptor(
name='AnnotateImageRequest',
full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.image', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='features', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.features', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_context', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.image_context', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4386,
serialized_end=4587,
)
_ANNOTATEIMAGERESPONSE = _descriptor.Descriptor(
name='AnnotateImageResponse',
full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='face_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.face_annotations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmark_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.landmark_annotations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='logo_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.logo_annotations', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.label_annotations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.text_annotations', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='full_text_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.full_text_annotation', index=5,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='safe_search_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.safe_search_annotation', index=6,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_properties_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.image_properties_annotation', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_hints_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.crop_hints_annotation', index=8,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='web_detection', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.web_detection', index=9,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.error', index=10,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4590,
serialized_end=5424,
)
_BATCHANNOTATEIMAGESREQUEST = _descriptor.Descriptor(
name='BatchAnnotateImagesRequest',
full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='requests', full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest.requests', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5426,
serialized_end=5525,
)
_BATCHANNOTATEIMAGESRESPONSE = _descriptor.Descriptor(
name='BatchAnnotateImagesResponse',
full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='responses', full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse.responses', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5527,
serialized_end=5629,
)
_FEATURE.fields_by_name['type'].enum_type = _FEATURE_TYPE
_FEATURE_TYPE.containing_type = _FEATURE
_IMAGE.fields_by_name['source'].message_type = _IMAGESOURCE
_FACEANNOTATION_LANDMARK.fields_by_name['type'].enum_type = _FACEANNOTATION_LANDMARK_TYPE
_FACEANNOTATION_LANDMARK.fields_by_name['position'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._POSITION
_FACEANNOTATION_LANDMARK.containing_type = _FACEANNOTATION
_FACEANNOTATION_LANDMARK_TYPE.containing_type = _FACEANNOTATION_LANDMARK
_FACEANNOTATION.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_FACEANNOTATION.fields_by_name['fd_bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_FACEANNOTATION.fields_by_name['landmarks'].message_type = _FACEANNOTATION_LANDMARK
_FACEANNOTATION.fields_by_name['joy_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['sorrow_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['anger_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['surprise_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['under_exposed_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['blurred_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['headwear_likelihood'].enum_type = _LIKELIHOOD
_LOCATIONINFO.fields_by_name['lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_ENTITYANNOTATION.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_ENTITYANNOTATION.fields_by_name['locations'].message_type = _LOCATIONINFO
_ENTITYANNOTATION.fields_by_name['properties'].message_type = _PROPERTY
_SAFESEARCHANNOTATION.fields_by_name['adult'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['spoof'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['medical'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['violence'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['racy'].enum_type = _LIKELIHOOD
_LATLONGRECT.fields_by_name['min_lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_LATLONGRECT.fields_by_name['max_lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_COLORINFO.fields_by_name['color'].message_type = google_dot_type_dot_color__pb2._COLOR
_DOMINANTCOLORSANNOTATION.fields_by_name['colors'].message_type = _COLORINFO
_IMAGEPROPERTIES.fields_by_name['dominant_colors'].message_type = _DOMINANTCOLORSANNOTATION
_CROPHINT.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_CROPHINTSANNOTATION.fields_by_name['crop_hints'].message_type = _CROPHINT
_IMAGECONTEXT.fields_by_name['lat_long_rect'].message_type = _LATLONGRECT
_IMAGECONTEXT.fields_by_name['crop_hints_params'].message_type = _CROPHINTSPARAMS
_IMAGECONTEXT.fields_by_name['web_detection_params'].message_type = _WEBDETECTIONPARAMS
_ANNOTATEIMAGEREQUEST.fields_by_name['image'].message_type = _IMAGE
_ANNOTATEIMAGEREQUEST.fields_by_name['features'].message_type = _FEATURE
_ANNOTATEIMAGEREQUEST.fields_by_name['image_context'].message_type = _IMAGECONTEXT
_ANNOTATEIMAGERESPONSE.fields_by_name['face_annotations'].message_type = _FACEANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['landmark_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['logo_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['label_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['text_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['full_text_annotation'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2._TEXTANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['safe_search_annotation'].message_type = _SAFESEARCHANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['image_properties_annotation'].message_type = _IMAGEPROPERTIES
_ANNOTATEIMAGERESPONSE.fields_by_name['crop_hints_annotation'].message_type = _CROPHINTSANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['web_detection'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2._WEBDETECTION
_ANNOTATEIMAGERESPONSE.fields_by_name['error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_BATCHANNOTATEIMAGESREQUEST.fields_by_name['requests'].message_type = _ANNOTATEIMAGEREQUEST
_BATCHANNOTATEIMAGESRESPONSE.fields_by_name['responses'].message_type = _ANNOTATEIMAGERESPONSE
DESCRIPTOR.message_types_by_name['Feature'] = _FEATURE
DESCRIPTOR.message_types_by_name['ImageSource'] = _IMAGESOURCE
DESCRIPTOR.message_types_by_name['Image'] = _IMAGE
DESCRIPTOR.message_types_by_name['FaceAnnotation'] = _FACEANNOTATION
DESCRIPTOR.message_types_by_name['LocationInfo'] = _LOCATIONINFO
DESCRIPTOR.message_types_by_name['Property'] = _PROPERTY
DESCRIPTOR.message_types_by_name['EntityAnnotation'] = _ENTITYANNOTATION
DESCRIPTOR.message_types_by_name['SafeSearchAnnotation'] = _SAFESEARCHANNOTATION
DESCRIPTOR.message_types_by_name['LatLongRect'] = _LATLONGRECT
DESCRIPTOR.message_types_by_name['ColorInfo'] = _COLORINFO
DESCRIPTOR.message_types_by_name['DominantColorsAnnotation'] = _DOMINANTCOLORSANNOTATION
DESCRIPTOR.message_types_by_name['ImageProperties'] = _IMAGEPROPERTIES
DESCRIPTOR.message_types_by_name['CropHint'] = _CROPHINT
DESCRIPTOR.message_types_by_name['CropHintsAnnotation'] = _CROPHINTSANNOTATION
DESCRIPTOR.message_types_by_name['CropHintsParams'] = _CROPHINTSPARAMS
DESCRIPTOR.message_types_by_name['WebDetectionParams'] = _WEBDETECTIONPARAMS
DESCRIPTOR.message_types_by_name['ImageContext'] = _IMAGECONTEXT
DESCRIPTOR.message_types_by_name['AnnotateImageRequest'] = _ANNOTATEIMAGEREQUEST
DESCRIPTOR.message_types_by_name['AnnotateImageResponse'] = _ANNOTATEIMAGERESPONSE
DESCRIPTOR.message_types_by_name['BatchAnnotateImagesRequest'] = _BATCHANNOTATEIMAGESREQUEST
DESCRIPTOR.message_types_by_name['BatchAnnotateImagesResponse'] = _BATCHANNOTATEIMAGESRESPONSE
DESCRIPTOR.enum_types_by_name['Likelihood'] = _LIKELIHOOD
Feature = _reflection.GeneratedProtocolMessageType('Feature', (_message.Message,), dict(
DESCRIPTOR = _FEATURE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Users describe the type of Google Cloud Vision API tasks to perform over
images by using *Feature*\ s. Each Feature indicates a type of image
detection task to perform. Features encode the Cloud Vision API vertical
to operate on and the number of top-scoring results to return.
Attributes:
type:
The feature type.
max_results:
Maximum number of results of this type.
model:
Model to use for the feature. Supported values:
"builtin/stable" (the default if unset) and "builtin/latest".
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Feature)
))
_sym_db.RegisterMessage(Feature)
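# Illustrative sketch (hand-added, not protoc output): constructing a Feature
# once the class is registered. LABEL_DETECTION is exposed as a class
# attribute because Type is a nested enum of Feature.
#
#     feature = Feature(type=Feature.LABEL_DETECTION, max_results=10)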
ImageSource = _reflection.GeneratedProtocolMessageType('ImageSource', (_message.Message,), dict(
DESCRIPTOR = _IMAGESOURCE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """External image source (Google Cloud Storage image location).
Attributes:
gcs_image_uri:
NOTE: For new code ``image_uri`` below is preferred. Google
Cloud Storage image URI, which must be in the following form:
``gs://bucket_name/object_name`` (for details, see `Google
Cloud Storage Request URIs
<https://cloud.google.com/storage/docs/reference-uris>`__).
NOTE: Cloud Storage object versioning is not supported.
image_uri:
Image URI which supports: 1) Google Cloud Storage image URI,
which must be in the following form:
``gs://bucket_name/object_name`` (for details, see `Google
Cloud Storage Request URIs
<https://cloud.google.com/storage/docs/reference-uris>`__).
NOTE: Cloud Storage object versioning is not supported. 2)
Publicly accessible image HTTP/HTTPS URL. This is preferred
over the legacy ``gcs_image_uri`` above. When both
``gcs_image_uri`` and ``image_uri`` are specified,
``image_uri`` takes precedence.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ImageSource)
))
_sym_db.RegisterMessage(ImageSource)
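# Illustrative sketch (hand-added): pointing an ImageSource at a Cloud Storage
# object; ``image_uri`` is preferred over the legacy ``gcs_image_uri`` per the
# docstring above. The bucket and object names are placeholders.
#
#     source = ImageSource(image_uri='gs://my-bucket/photo.jpg')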
Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), dict(
DESCRIPTOR = _IMAGE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Client image to perform Google Cloud Vision API tasks over.
Attributes:
content:
Image content, represented as a stream of bytes. Note: as with
all ``bytes`` fields, protobuffers use a pure binary
representation, whereas JSON representations use base64.
source:
Google Cloud Storage image location. If both ``content`` and
``source`` are provided for an image, ``content`` takes
precedence and is used to perform the image annotation
request.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Image)
))
_sym_db.RegisterMessage(Image)
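# Illustrative sketch (hand-added): an Image carries either raw bytes or a
# reference; ``content`` takes precedence when both are set, per the docstring
# above.
#
#     img = Image(source=ImageSource(image_uri='gs://my-bucket/photo.jpg'))
#     # or, with inline bytes:
#     # img = Image(content=open('photo.jpg', 'rb').read())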
FaceAnnotation = _reflection.GeneratedProtocolMessageType('FaceAnnotation', (_message.Message,), dict(
Landmark = _reflection.GeneratedProtocolMessageType('Landmark', (_message.Message,), dict(
DESCRIPTOR = _FACEANNOTATION_LANDMARK,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A face-specific landmark (for example, a face feature).
Attributes:
type:
Face landmark type.
position:
Face landmark position.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark)
))
,
DESCRIPTOR = _FACEANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A face annotation object contains the results of face detection.
Attributes:
bounding_poly:
The bounding polygon around the face. The coordinates of the
bounding box are in the original image's scale, as returned in
``ImageParams``. The bounding box is computed to "frame" the
face in accordance with human expectations. It is based on the
landmarker results. Note that one or more x and/or y
coordinates may not be generated in the ``BoundingPoly`` (the
polygon will be unbounded) if only a partial face appears in
the image to be annotated.
fd_bounding_poly:
The ``fd_bounding_poly`` bounding polygon is tighter than the
``boundingPoly``, and encloses only the skin part of the face.
Typically, it is used to eliminate the face from any image
analysis that detects the "amount of skin" visible in an
image. It is not based on the landmarker results, only on the
initial face detection, hence the fd (face detection) prefix.
landmarks:
Detected face landmarks.
roll_angle:
Roll angle, which indicates the amount of clockwise/anti-
clockwise rotation of the face relative to the image vertical
about the axis perpendicular to the face. Range [-180,180].
pan_angle:
Yaw angle, which indicates the leftward/rightward angle that
the face is pointing relative to the vertical plane
perpendicular to the image. Range [-180,180].
tilt_angle:
Pitch angle, which indicates the upwards/downwards angle that
the face is pointing relative to the image's horizontal plane.
Range [-180,180].
detection_confidence:
Detection confidence. Range [0, 1].
landmarking_confidence:
Face landmarking confidence. Range [0, 1].
joy_likelihood:
Joy likelihood.
sorrow_likelihood:
Sorrow likelihood.
anger_likelihood:
Anger likelihood.
surprise_likelihood:
Surprise likelihood.
under_exposed_likelihood:
Under-exposed likelihood.
blurred_likelihood:
Blurred likelihood.
headwear_likelihood:
Headwear likelihood.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.FaceAnnotation)
))
_sym_db.RegisterMessage(FaceAnnotation)
_sym_db.RegisterMessage(FaceAnnotation.Landmark)
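# Illustrative sketch (hand-added): reading landmarks off a parsed
# FaceAnnotation; ``face`` is a placeholder for a message received from the
# service.
#
#     for lm in face.landmarks:
#         print(FaceAnnotation.Landmark.Type.Name(lm.type),
#               lm.position.x, lm.position.y)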
LocationInfo = _reflection.GeneratedProtocolMessageType('LocationInfo', (_message.Message,), dict(
DESCRIPTOR = _LOCATIONINFO,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Detected entity location information.
Attributes:
lat_lng:
lat/long location coordinates.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.LocationInfo)
))
_sym_db.RegisterMessage(LocationInfo)
Property = _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), dict(
DESCRIPTOR = _PROPERTY,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A ``Property`` consists of a user-supplied name/value pair.
Attributes:
name:
Name of the property.
value:
Value of the property.
uint64_value:
Value of numeric properties.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Property)
))
_sym_db.RegisterMessage(Property)
EntityAnnotation = _reflection.GeneratedProtocolMessageType('EntityAnnotation', (_message.Message,), dict(
DESCRIPTOR = _ENTITYANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of detected entity features.
Attributes:
mid:
Opaque entity ID. Some IDs may be available in `Google
Knowledge Graph Search API
<https://developers.google.com/knowledge-graph/>`__.
locale:
The language code for the locale in which the entity textual
``description`` is expressed.
description:
Entity textual description, expressed in its ``locale``
language.
score:
Overall score of the result. Range [0, 1].
confidence:
The accuracy of the entity detection in an image. For example,
for an image in which the "Eiffel Tower" entity is detected,
this field represents the confidence that there is a tower in
the query image. Range [0, 1].
topicality:
The relevancy of the ICA (Image Content Annotation) label to
the image. For example, the relevancy of "tower" is likely
higher to an image containing the detected "Eiffel Tower" than
to an image containing a detected distant towering building,
even though the confidence that there is a tower in each image
may be the same. Range [0, 1].
bounding_poly:
Image region to which this entity belongs. Not produced for
``LABEL_DETECTION`` features.
locations:
The location information for the detected entity. Multiple
``LocationInfo`` elements can be present because one location
may indicate the location of the scene in the image, and
another location may indicate the location of the place where
the image was taken. Location information is usually present
for landmarks.
properties:
Some entities may have optional user-supplied ``Property``
      (name/value) fields, such as a score or string that qualifies the
entity.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.EntityAnnotation)
))
_sym_db.RegisterMessage(EntityAnnotation)
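# Illustrative sketch (hand-added): a typical read of label results;
# ``response`` stands in for an AnnotateImageResponse returned by the service.
#
#     for label in response.label_annotations:
#         print(label.description, round(label.score, 3))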
SafeSearchAnnotation = _reflection.GeneratedProtocolMessageType('SafeSearchAnnotation', (_message.Message,), dict(
DESCRIPTOR = _SAFESEARCHANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of features pertaining to the image, computed by computer vision
methods over safe-search verticals (for example, adult, spoof, medical,
violence).
Attributes:
adult:
Represents the adult content likelihood for the image. Adult
content may contain elements such as nudity, pornographic
images or cartoons, or sexual activities.
spoof:
      Spoof likelihood. The likelihood that a modification was made
to the image's canonical version to make it appear funny or
offensive.
medical:
Likelihood that this is a medical image.
violence:
Likelihood that this image contains violent content.
racy:
Likelihood that the request image contains racy content. Racy
content may include (but is not limited to) skimpy or sheer
clothing, strategically covered nudity, lewd or provocative
poses, or close-ups of sensitive body areas.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.SafeSearchAnnotation)
))
_sym_db.RegisterMessage(SafeSearchAnnotation)
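# Illustrative sketch (hand-added): the likelihood fields compare directly
# against the module-level enum constants defined above.
#
#     if response.safe_search_annotation.adult >= LIKELY:
#         print('image flagged as likely adult content')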
LatLongRect = _reflection.GeneratedProtocolMessageType('LatLongRect', (_message.Message,), dict(
DESCRIPTOR = _LATLONGRECT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Rectangle determined by min and max ``LatLng`` pairs.
Attributes:
min_lat_lng:
Min lat/long pair.
max_lat_lng:
Max lat/long pair.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.LatLongRect)
))
_sym_db.RegisterMessage(LatLongRect)
ColorInfo = _reflection.GeneratedProtocolMessageType('ColorInfo', (_message.Message,), dict(
DESCRIPTOR = _COLORINFO,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Color information consists of RGB channels, score, and the fraction of
the image that the color occupies in the image.
Attributes:
color:
RGB components of the color.
score:
Image-specific score for this color. Value in range [0, 1].
pixel_fraction:
The fraction of pixels the color occupies in the image. Value
in range [0, 1].
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ColorInfo)
))
_sym_db.RegisterMessage(ColorInfo)
DominantColorsAnnotation = _reflection.GeneratedProtocolMessageType('DominantColorsAnnotation', (_message.Message,), dict(
DESCRIPTOR = _DOMINANTCOLORSANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of dominant colors and their corresponding scores.
Attributes:
colors:
RGB color values with their score and pixel fraction.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.DominantColorsAnnotation)
))
_sym_db.RegisterMessage(DominantColorsAnnotation)
ImageProperties = _reflection.GeneratedProtocolMessageType('ImageProperties', (_message.Message,), dict(
DESCRIPTOR = _IMAGEPROPERTIES,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Stores image properties, such as dominant colors.
Attributes:
dominant_colors:
      If present, dominant colors detection has completed successfully.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ImageProperties)
))
_sym_db.RegisterMessage(ImageProperties)
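# Illustrative sketch (hand-added): walking the dominant-color results; each
# entry is a ColorInfo holding a google.type.Color plus score fields.
#
#     for c in response.image_properties_annotation.dominant_colors.colors:
#         print(c.color.red, c.color.green, c.color.blue,
#               c.score, c.pixel_fraction)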
CropHint = _reflection.GeneratedProtocolMessageType('CropHint', (_message.Message,), dict(
DESCRIPTOR = _CROPHINT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Single crop hint that is used to generate a new crop when serving an
image.
Attributes:
bounding_poly:
The bounding polygon for the crop region. The coordinates of
the bounding box are in the original image's scale, as
returned in ``ImageParams``.
confidence:
Confidence of this being a salient region. Range [0, 1].
importance_fraction:
Fraction of importance of this salient region with respect to
the original image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHint)
))
_sym_db.RegisterMessage(CropHint)
CropHintsAnnotation = _reflection.GeneratedProtocolMessageType('CropHintsAnnotation', (_message.Message,), dict(
DESCRIPTOR = _CROPHINTSANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of crop hints that are used to generate new crops when serving
images.
Attributes:
crop_hints:
Crop hint results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHintsAnnotation)
))
_sym_db.RegisterMessage(CropHintsAnnotation)
CropHintsParams = _reflection.GeneratedProtocolMessageType('CropHintsParams', (_message.Message,), dict(
DESCRIPTOR = _CROPHINTSPARAMS,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Parameters for crop hints annotation request.
Attributes:
aspect_ratios:
Aspect ratios in floats, representing the ratio of the width
to the height of the image. For example, if the desired aspect
ratio is 4/3, the corresponding float value should be 1.33333.
If not specified, the best possible crop is returned. The
number of provided aspect ratios is limited to a maximum of
16; any aspect ratios provided after the 16th are ignored.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHintsParams)
))
_sym_db.RegisterMessage(CropHintsParams)
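# Illustrative sketch (hand-added): requesting 4:3 and 16:9 crops, per the
# ``aspect_ratios`` docstring above (width/height as floats, at most 16).
#
#     params = CropHintsParams(aspect_ratios=[1.33333, 1.77778])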
WebDetectionParams = _reflection.GeneratedProtocolMessageType('WebDetectionParams', (_message.Message,), dict(
DESCRIPTOR = _WEBDETECTIONPARAMS,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Parameters for web detection request.
Attributes:
include_geo_results:
Whether to include results derived from the geo information in
the image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.WebDetectionParams)
))
_sym_db.RegisterMessage(WebDetectionParams)
ImageContext = _reflection.GeneratedProtocolMessageType('ImageContext', (_message.Message,), dict(
DESCRIPTOR = _IMAGECONTEXT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Image context and/or feature-specific parameters.
Attributes:
lat_long_rect:
lat/long rectangle that specifies the location of the image.
language_hints:
List of languages to use for TEXT\_DETECTION. In most cases,
an empty value yields the best results since it enables
automatic language detection. For languages based on the Latin
alphabet, setting ``language_hints`` is not needed. In rare
cases, when the language of the text in the image is known,
setting a hint will help get better results (although it will
be a significant hindrance if the hint is wrong). Text
detection returns an error if one or more of the specified
languages is not one of the `supported languages
</vision/docs/languages>`__.
crop_hints_params:
Parameters for crop hints annotation request.
web_detection_params:
Parameters for web detection.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ImageContext)
))
_sym_db.RegisterMessage(ImageContext)
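# Hand-written illustrative sketch (not protoc output): an ImageContext that
# pins OCR to English, asks for 4:3 crop hints, and enables geo-derived web
# results. As the docstring notes, language_hints is usually best left empty.
def _example_image_context():
    return ImageContext(
        language_hints=['en'],
        crop_hints_params=CropHintsParams(aspect_ratios=[1.33333]),
        web_detection_params=WebDetectionParams(include_geo_results=True),
    )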
AnnotateImageRequest = _reflection.GeneratedProtocolMessageType('AnnotateImageRequest', (_message.Message,), dict(
DESCRIPTOR = _ANNOTATEIMAGEREQUEST,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Request for performing Google Cloud Vision API tasks over a
user-provided image, with user-requested features.
Attributes:
image:
The image to be processed.
features:
Requested features.
image_context:
Additional context that may accompany the image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.AnnotateImageRequest)
))
_sym_db.RegisterMessage(AnnotateImageRequest)
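# Hand-written illustrative sketch (not protoc output): a single-image request
# combining an Image, the requested Features, and an optional ImageContext.
# The GCS URI is a placeholder; LABEL_DETECTION with max_results=10 is an
# example choice.
def _example_annotate_image_request():
    image = Image(source=ImageSource(gcs_image_uri='gs://my-bucket/photo.jpg'))
    feature = Feature(type=Feature.LABEL_DETECTION, max_results=10)
    return AnnotateImageRequest(image=image, features=[feature])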
AnnotateImageResponse = _reflection.GeneratedProtocolMessageType('AnnotateImageResponse', (_message.Message,), dict(
DESCRIPTOR = _ANNOTATEIMAGERESPONSE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Response to an image annotation request.
Attributes:
face_annotations:
If present, face detection has completed successfully.
landmark_annotations:
If present, landmark detection has completed successfully.
logo_annotations:
If present, logo detection has completed successfully.
label_annotations:
If present, label detection has completed successfully.
text_annotations:
If present, text (OCR) detection has completed successfully.
full_text_annotation:
If present, text (OCR) detection or document (OCR) text
detection has completed successfully. This annotation provides
the structural hierarchy for the OCR detected text.
safe_search_annotation:
If present, safe-search annotation has completed successfully.
image_properties_annotation:
If present, image properties were extracted successfully.
crop_hints_annotation:
If present, crop hints have completed successfully.
web_detection:
If present, web detection has completed successfully.
error:
If set, represents the error message for the operation. Note
that filled-in image annotations are guaranteed to be correct,
even when ``error`` is set.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.AnnotateImageResponse)
))
_sym_db.RegisterMessage(AnnotateImageResponse)
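# Hand-written illustrative sketch (not protoc output): per the docstring
# above, annotations that are present remain valid even when ``error`` is
# set, so inspect both sides of the response.
def _example_read_response(response):
    if response.HasField('error'):
        print('partial failure:', response.error.message)
    for label in response.label_annotations:
        print(label.description, label.score)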
BatchAnnotateImagesRequest = _reflection.GeneratedProtocolMessageType('BatchAnnotateImagesRequest', (_message.Message,), dict(
DESCRIPTOR = _BATCHANNOTATEIMAGESREQUEST,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Multiple image annotation requests are batched into a single service
call.
Attributes:
requests:
Individual image annotation requests for this batch.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest)
))
_sym_db.RegisterMessage(BatchAnnotateImagesRequest)
BatchAnnotateImagesResponse = _reflection.GeneratedProtocolMessageType('BatchAnnotateImagesResponse', (_message.Message,), dict(
DESCRIPTOR = _BATCHANNOTATEIMAGESRESPONSE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Response to a batch image annotation request.
Attributes:
responses:
Individual responses to image annotation requests within the
batch.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse)
))
_sym_db.RegisterMessage(BatchAnnotateImagesResponse)
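# Hand-written illustrative sketch (not protoc output): several per-image
# requests travel in one batch call; responses[i] in the resulting
# BatchAnnotateImagesResponse corresponds to requests[i].
def _example_batch_request(single_requests):
    return BatchAnnotateImagesRequest(requests=single_requests)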
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n!com.google.cloud.vision.v1p1beta1B\023ImageAnnotatorProtoP\001ZCgoogle.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision\370\001\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class ImageAnnotatorStub(object):
"""Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.BatchAnnotateImages = channel.unary_unary(
'/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages',
request_serializer=BatchAnnotateImagesRequest.SerializeToString,
response_deserializer=BatchAnnotateImagesResponse.FromString,
)
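# Hand-written illustrative sketch (not protoc output): invoking the unary
# BatchAnnotateImages RPC through the stub above. The address and the use of
# an insecure channel are placeholders; the real service needs an
# authenticated secure channel.
def _example_call_batch_annotate(address='localhost:50051'):
    channel = grpc.insecure_channel(address)
    stub = ImageAnnotatorStub(channel)
    request = BatchAnnotateImagesRequest()  # populate request.requests first
    return stub.BatchAnnotateImages(request)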
class ImageAnnotatorServicer(object):
"""Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
def BatchAnnotateImages(self, request, context):
"""Run image detection and annotation for a batch of images.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ImageAnnotatorServicer_to_server(servicer, server):
rpc_method_handlers = {
'BatchAnnotateImages': grpc.unary_unary_rpc_method_handler(
servicer.BatchAnnotateImages,
request_deserializer=BatchAnnotateImagesRequest.FromString,
response_serializer=BatchAnnotateImagesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.cloud.vision.v1p1beta1.ImageAnnotator', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
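# Hand-written illustrative sketch (not protoc output): wiring a concrete
# servicer subclass into a grpc server. The port and worker count are
# arbitrary example values.
def _example_serve(servicer, port=50051):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_ImageAnnotatorServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    return server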
class BetaImageAnnotatorServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
def BatchAnnotateImages(self, request, context):
"""Run image detection and annotation for a batch of images.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaImageAnnotatorStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Run image detection and annotation for a batch of images.
"""
raise NotImplementedError()
BatchAnnotateImages.future = None
def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString,
}
response_serializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString,
}
method_implementations = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString,
}
response_deserializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString,
}
cardinalities = {
'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1p1beta1.ImageAnnotator', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| 42.973206
| 9,043
| 0.743392
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.vision_v1p1beta1.proto import geometry_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2
from google.cloud.vision_v1p1beta1.proto import text_annotation_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2
from google.cloud.vision_v1p1beta1.proto import web_detection_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from google.type import color_pb2 as google_dot_type_dot_color__pb2
from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/vision_v1p1beta1/proto/image_annotator.proto',
package='google.cloud.vision.v1p1beta1',
syntax='proto3',
serialized_pb=_b('\n9google/cloud/vision_v1p1beta1/proto/image_annotator.proto\x12\x1dgoogle.cloud.vision.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x32google/cloud/vision_v1p1beta1/proto/geometry.proto\x1a\x39google/cloud/vision_v1p1beta1/proto/text_annotation.proto\x1a\x37google/cloud/vision_v1p1beta1/proto/web_detection.proto\x1a\x17google/rpc/status.proto\x1a\x17google/type/color.proto\x1a\x18google/type/latlng.proto\"\xe1\x02\n\x07\x46\x65\x61ture\x12\x39\n\x04type\x18\x01 \x01(\x0e\x32+.google.cloud.vision.v1p1beta1.Feature.Type\x12\x13\n\x0bmax_results\x18\x02 \x01(\x05\x12\r\n\x05model\x18\x03 \x01(\t\"\xf6\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x01\x12\x16\n\x12LANDMARK_DETECTION\x10\x02\x12\x12\n\x0eLOGO_DETECTION\x10\x03\x12\x13\n\x0fLABEL_DETECTION\x10\x04\x12\x12\n\x0eTEXT_DETECTION\x10\x05\x12\x1b\n\x17\x44OCUMENT_TEXT_DETECTION\x10\x0b\x12\x19\n\x15SAFE_SEARCH_DETECTION\x10\x06\x12\x14\n\x10IMAGE_PROPERTIES\x10\x07\x12\x0e\n\nCROP_HINTS\x10\t\x12\x11\n\rWEB_DETECTION\x10\n\"7\n\x0bImageSource\x12\x15\n\rgcs_image_uri\x18\x01 \x01(\t\x12\x11\n\timage_uri\x18\x02 \x01(\t\"T\n\x05Image\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\x0c\x12:\n\x06source\x18\x02 \x01(\x0b\x32*.google.cloud.vision.v1p1beta1.ImageSource\"\x9b\x0e\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x42\n\rbounding_poly\x18\x01 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12\x45\n\x10\x66\x64_bounding_poly\x18\x02 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12I\n\tlandmarks\x18\x03 \x03(\x0b\x32\x36.google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark\x12\x12\n\nroll_angle\x18\x04 \x01(\x02\x12\x11\n\tpan_angle\x18\x05 \x01(\x02\x12\x12\n\ntilt_angle\x18\x06 \x01(\x02\x12\x1c\n\x14\x64\x65tection_confidence\x18\x07 \x01(\x02\x12\x1e\n\x16landmarking_confidence\x18\x08 \x01(\x02\x12\x41\n\x0ejoy_likelihood\x18\t \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x44\n\x11sorrow_likelihood\x18\n \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x43\n\x10\x61nger_likelihood\x18\x0b \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x46\n\x13surprise_likelihood\x18\x0c \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12K\n\x18under_exposed_likelihood\x18\r \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x45\n\x12\x62lurred_likelihood\x18\x0e \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x46\n\x13headwear_likelihood\x18\x0f \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x1a\xc7\x07\n\x08Landmark\x12I\n\x04type\x18\x03 \x01(\x0e\x32;.google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.Type\x12\x39\n\x08position\x18\x04 '
  '\x01(\x0b\x32\'.google.cloud.vision.v1p1beta1.Position\"\xb4\x06\n\x04Type\x12\x14\n\x10UNKNOWN_LANDMARK\x10\x00\x12\x0c\n\x08LEFT_EYE\x10\x01\x12\r\n\tRIGHT_EYE\x10\x02\x12\x18\n\x14LEFT_OF_LEFT_EYEBROW\x10\x03\x12\x19\n\x15RIGHT_OF_LEFT_EYEBROW\x10\x04\x12\x19\n\x15LEFT_OF_RIGHT_EYEBROW\x10\x05\x12\x1a\n\x16RIGHT_OF_RIGHT_EYEBROW\x10\x06\x12\x19\n\x15MIDPOINT_BETWEEN_EYES\x10\x07\x12\x0c\n\x08NOSE_TIP\x10\x08\x12\r\n\tUPPER_LIP\x10\t\x12\r\n\tLOWER_LIP\x10\n\x12\x0e\n\nMOUTH_LEFT\x10\x0b\x12\x0f\n\x0bMOUTH_RIGHT\x10\x0c\x12\x10\n\x0cMOUTH_CENTER\x10\r\x12\x15\n\x11NOSE_BOTTOM_RIGHT\x10\x0e\x12\x14\n\x10NOSE_BOTTOM_LEFT\x10\x0f\x12\x16\n\x12NOSE_BOTTOM_CENTER\x10\x10\x12\x19\n\x15LEFT_EYE_TOP_BOUNDARY\x10\x11\x12\x19\n\x15LEFT_EYE_RIGHT_CORNER\x10\x12\x12\x1c\n\x18LEFT_EYE_BOTTOM_BOUNDARY\x10\x13\x12\x18\n\x14LEFT_EYE_LEFT_CORNER\x10\x14\x12\x1a\n\x16RIGHT_EYE_TOP_BOUNDARY\x10\x15\x12\x1a\n\x16RIGHT_EYE_RIGHT_CORNER\x10\x16\x12\x1d\n\x19RIGHT_EYE_BOTTOM_BOUNDARY\x10\x17\x12\x19\n\x15RIGHT_EYE_LEFT_CORNER\x10\x18\x12\x1f\n\x1bLEFT_EYEBROW_UPPER_MIDPOINT\x10\x19\x12 \n\x1cRIGHT_EYEBROW_UPPER_MIDPOINT\x10\x1a\x12\x14\n\x10LEFT_EAR_TRAGION\x10\x1b\x12\x15\n\x11RIGHT_EAR_TRAGION\x10\x1c\x12\x12\n\x0eLEFT_EYE_PUPIL\x10\x1d\x12\x13\n\x0fRIGHT_EYE_PUPIL\x10\x1e\x12\x15\n\x11\x46OREHEAD_GLABELLA\x10\x1f\x12\x11\n\rCHIN_GNATHION\x10 \x12\x14\n\x10\x43HIN_LEFT_GONION\x10!\x12\x15\n\x11\x43HIN_RIGHT_GONION\x10\"\"4\n\x0cLocationInfo\x12$\n\x07lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\"=\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x14\n\x0cuint64_value\x18\x03 \x01(\x04\"\xbc\x02\n\x10\x45ntityAnnotation\x12\x0b\n\x03mid\x18\x01 \x01(\t\x12\x0e\n\x06locale\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\r\n\x05score\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\x12\x12\n\ntopicality\x18\x06 \x01(\x02\x12\x42\n\rbounding_poly\x18\x07 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12>\n\tlocations\x18\x08 \x03(\x0b\x32+.google.cloud.vision.v1p1beta1.LocationInfo\x12;\n\nproperties\x18\t \x03(\x0b\x32\'.google.cloud.vision.v1p1beta1.Property\"\xbc\x02\n\x14SafeSearchAnnotation\x12\x38\n\x05\x61\x64ult\x18\x01 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x38\n\x05spoof\x18\x02 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12:\n\x07medical\x18\x03 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12;\n\x08violence\x18\x04 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x37\n\x04racy\x18\t \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\"a\n\x0bLatLongRect\x12(\n\x0bmin_lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\x12(\n\x0bmax_lat_lng\x18\x02 \x01(\x0b\x32\x13.google.type.LatLng\"U\n\tColorInfo\x12!\n\x05\x63olor\x18\x01 \x01(\x0b\x32\x12.google.type.Color\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x16\n\x0epixel_fraction\x18\x03 \x01(\x02\"T\n\x18\x44ominantColorsAnnotation\x12\x38\n\x06\x63olors\x18\x01 \x03(\x0b\x32(.google.cloud.vision.v1p1beta1.ColorInfo\"c\n\x0fImageProperties\x12P\n\x0f\x64ominant_colors\x18\x01 \x01(\x0b\x32\x37.google.cloud.vision.v1p1beta1.DominantColorsAnnotation\"\x7f\n\x08\x43ropHint\x12\x42\n\rbounding_poly\x18\x01 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x1b\n\x13importance_fraction\x18\x03 \x01(\x02\"R\n\x13\x43ropHintsAnnotation\x12;\n\ncrop_hints\x18\x01 '
  '\x03(\x0b\x32\'.google.cloud.vision.v1p1beta1.CropHint\"(\n\x0f\x43ropHintsParams\x12\x15\n\raspect_ratios\x18\x01 \x03(\x02\"1\n\x12WebDetectionParams\x12\x1b\n\x13include_geo_results\x18\x02 \x01(\x08\"\x85\x02\n\x0cImageContext\x12\x41\n\rlat_long_rect\x18\x01 \x01(\x0b\x32*.google.cloud.vision.v1p1beta1.LatLongRect\x12\x16\n\x0elanguage_hints\x18\x02 \x03(\t\x12I\n\x11\x63rop_hints_params\x18\x04 \x01(\x0b\x32..google.cloud.vision.v1p1beta1.CropHintsParams\x12O\n\x14web_detection_params\x18\x06 \x01(\x0b\x32\x31.google.cloud.vision.v1p1beta1.WebDetectionParams\"\xc9\x01\n\x14\x41nnotateImageRequest\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32$.google.cloud.vision.v1p1beta1.Image\x12\x38\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0b\x32&.google.cloud.vision.v1p1beta1.Feature\x12\x42\n\rimage_context\x18\x03 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.ImageContext\"\xc2\x06\n\x15\x41nnotateImageResponse\x12G\n\x10\x66\x61\x63\x65_annotations\x18\x01 \x03(\x0b\x32-.google.cloud.vision.v1p1beta1.FaceAnnotation\x12M\n\x14landmark_annotations\x18\x02 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12I\n\x10logo_annotations\x18\x03 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12J\n\x11label_annotations\x18\x04 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12I\n\x10text_annotations\x18\x05 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12K\n\x14\x66ull_text_annotation\x18\x0c \x01(\x0b\x32-.google.cloud.vision.v1p1beta1.TextAnnotation\x12S\n\x16safe_search_annotation\x18\x06 \x01(\x0b\x32\x33.google.cloud.vision.v1p1beta1.SafeSearchAnnotation\x12S\n\x1bimage_properties_annotation\x18\x08 \x01(\x0b\x32..google.cloud.vision.v1p1beta1.ImageProperties\x12Q\n\x15\x63rop_hints_annotation\x18\x0b \x01(\x0b\x32\x32.google.cloud.vision.v1p1beta1.CropHintsAnnotation\x12\x42\n\rweb_detection\x18\r \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.WebDetection\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status\"c\n\x1a\x42\x61tchAnnotateImagesRequest\x12\x45\n\x08requests\x18\x01 \x03(\x0b\x32\x33.google.cloud.vision.v1p1beta1.AnnotateImageRequest\"f\n\x1b\x42\x61tchAnnotateImagesResponse\x12G\n\tresponses\x18\x01 \x03(\x0b\x32\x34.google.cloud.vision.v1p1beta1.AnnotateImageResponse*e\n\nLikelihood\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc6\x01\n\x0eImageAnnotator\x12\xb3\x01\n\x13\x42\x61tchAnnotateImages\x12\x39.google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest\x1a:.google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/v1p1beta1/images:annotate:\x01*B\x82\x01\n!com.google.cloud.vision.v1p1beta1B\x13ImageAnnotatorProtoP\x01ZCgoogle.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision\xf8\x01\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,google_dot_type_dot_color__pb2.DESCRIPTOR,google_dot_type_dot_latlng__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LIKELIHOOD = _descriptor.EnumDescriptor(
name='Likelihood',
full_name='google.cloud.vision.v1p1beta1.Likelihood',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERY_UNLIKELY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNLIKELY', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POSSIBLE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LIKELY', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERY_LIKELY', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5631,
serialized_end=5732,
)
_sym_db.RegisterEnumDescriptor(_LIKELIHOOD)
Likelihood = enum_type_wrapper.EnumTypeWrapper(_LIKELIHOOD)
UNKNOWN = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
POSSIBLE = 3
LIKELY = 4
VERY_LIKELY = 5
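# Hand-written illustrative sketch (not protoc output): the EnumTypeWrapper
# built above supports lookups in both directions between names and numbers.
def _example_likelihood_lookup():
    assert Likelihood.Value('POSSIBLE') == POSSIBLE
    assert Likelihood.Name(VERY_LIKELY) == 'VERY_LIKELY'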
_FEATURE_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='google.cloud.vision.v1p1beta1.Feature.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FACE_DETECTION', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LANDMARK_DETECTION', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOGO_DETECTION', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_DETECTION', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEXT_DETECTION', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DOCUMENT_TEXT_DETECTION', index=6, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SAFE_SEARCH_DETECTION', index=7, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMAGE_PROPERTIES', index=8, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CROP_HINTS', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEB_DETECTION', index=10, number=10,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=474,
serialized_end=720,
)
_sym_db.RegisterEnumDescriptor(_FEATURE_TYPE)
_FACEANNOTATION_LANDMARK_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_LANDMARK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_OF_LEFT_EYEBROW', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_OF_LEFT_EYEBROW', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_OF_RIGHT_EYEBROW', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_OF_RIGHT_EYEBROW', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MIDPOINT_BETWEEN_EYES', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_TIP', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPPER_LIP', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOWER_LIP', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_LEFT', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_RIGHT', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_CENTER', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_RIGHT', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_LEFT', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_CENTER', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_TOP_BOUNDARY', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_RIGHT_CORNER', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_BOTTOM_BOUNDARY', index=19, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_LEFT_CORNER', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_TOP_BOUNDARY', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_RIGHT_CORNER', index=22, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_BOTTOM_BOUNDARY', index=23, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_LEFT_CORNER', index=24, number=24,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYEBROW_UPPER_MIDPOINT', index=25, number=25,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYEBROW_UPPER_MIDPOINT', index=26, number=26,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EAR_TRAGION', index=27, number=27,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EAR_TRAGION', index=28, number=28,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_PUPIL', index=29, number=29,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_PUPIL', index=30, number=30,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FOREHEAD_GLABELLA', index=31, number=31,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_GNATHION', index=32, number=32,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_LEFT_GONION', index=33, number=33,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_RIGHT_GONION', index=34, number=34,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1865,
serialized_end=2685,
)
_sym_db.RegisterEnumDescriptor(_FACEANNOTATION_LANDMARK_TYPE)
_FEATURE = _descriptor.Descriptor(
name='Feature',
full_name='google.cloud.vision.v1p1beta1.Feature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='google.cloud.vision.v1p1beta1.Feature.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_results', full_name='google.cloud.vision.v1p1beta1.Feature.max_results', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model', full_name='google.cloud.vision.v1p1beta1.Feature.model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FEATURE_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=367,
serialized_end=720,
)
_IMAGESOURCE = _descriptor.Descriptor(
name='ImageSource',
full_name='google.cloud.vision.v1p1beta1.ImageSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='gcs_image_uri', full_name='google.cloud.vision.v1p1beta1.ImageSource.gcs_image_uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_uri', full_name='google.cloud.vision.v1p1beta1.ImageSource.image_uri', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=722,
serialized_end=777,
)
_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='google.cloud.vision.v1p1beta1.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='content', full_name='google.cloud.vision.v1p1beta1.Image.content', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source', full_name='google.cloud.vision.v1p1beta1.Image.source', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=779,
serialized_end=863,
)
_FACEANNOTATION_LANDMARK = _descriptor.Descriptor(
name='Landmark',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.type', index=0,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.position', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FACEANNOTATION_LANDMARK_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1718,
serialized_end=2685,
)
_FACEANNOTATION = _descriptor.Descriptor(
name='FaceAnnotation',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.bounding_poly', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fd_bounding_poly', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.fd_bounding_poly', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmarks', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.landmarks', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='roll_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.roll_angle', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pan_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.pan_angle', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tilt_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.tilt_angle', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='detection_confidence', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.detection_confidence', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmarking_confidence', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.landmarking_confidence', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='joy_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.joy_likelihood', index=8,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sorrow_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.sorrow_likelihood', index=9,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='anger_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.anger_likelihood', index=10,
number=11, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='surprise_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.surprise_likelihood', index=11,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='under_exposed_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.under_exposed_likelihood', index=12,
number=13, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blurred_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.blurred_likelihood', index=13,
number=14, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headwear_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.headwear_likelihood', index=14,
number=15, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_FACEANNOTATION_LANDMARK, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=866,
serialized_end=2685,
)
_LOCATIONINFO = _descriptor.Descriptor(
name='LocationInfo',
full_name='google.cloud.vision.v1p1beta1.LocationInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lat_lng', full_name='google.cloud.vision.v1p1beta1.LocationInfo.lat_lng', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2687,
serialized_end=2739,
)
_PROPERTY = _descriptor.Descriptor(
name='Property',
full_name='google.cloud.vision.v1p1beta1.Property',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.cloud.vision.v1p1beta1.Property.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.cloud.vision.v1p1beta1.Property.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uint64_value', full_name='google.cloud.vision.v1p1beta1.Property.uint64_value', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2741,
serialized_end=2802,
)
_ENTITYANNOTATION = _descriptor.Descriptor(
name='EntityAnnotation',
full_name='google.cloud.vision.v1p1beta1.EntityAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mid', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.mid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.locale', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.score', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='confidence', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.confidence', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='topicality', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.topicality', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.bounding_poly', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locations', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.locations', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='properties', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.properties', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2805,
serialized_end=3121,
)
_SAFESEARCHANNOTATION = _descriptor.Descriptor(
name='SafeSearchAnnotation',
full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='adult', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.adult', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='spoof', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.spoof', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='medical', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.medical', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='violence', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.violence', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='racy', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.racy', index=4,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3124,
serialized_end=3440,
)
_LATLONGRECT = _descriptor.Descriptor(
name='LatLongRect',
full_name='google.cloud.vision.v1p1beta1.LatLongRect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_lat_lng', full_name='google.cloud.vision.v1p1beta1.LatLongRect.min_lat_lng', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_lat_lng', full_name='google.cloud.vision.v1p1beta1.LatLongRect.max_lat_lng', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3442,
serialized_end=3539,
)
_COLORINFO = _descriptor.Descriptor(
name='ColorInfo',
full_name='google.cloud.vision.v1p1beta1.ColorInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='color', full_name='google.cloud.vision.v1p1beta1.ColorInfo.color', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='google.cloud.vision.v1p1beta1.ColorInfo.score', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pixel_fraction', full_name='google.cloud.vision.v1p1beta1.ColorInfo.pixel_fraction', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3541,
serialized_end=3626,
)
_DOMINANTCOLORSANNOTATION = _descriptor.Descriptor(
name='DominantColorsAnnotation',
full_name='google.cloud.vision.v1p1beta1.DominantColorsAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='colors', full_name='google.cloud.vision.v1p1beta1.DominantColorsAnnotation.colors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3628,
serialized_end=3712,
)
_IMAGEPROPERTIES = _descriptor.Descriptor(
name='ImageProperties',
full_name='google.cloud.vision.v1p1beta1.ImageProperties',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dominant_colors', full_name='google.cloud.vision.v1p1beta1.ImageProperties.dominant_colors', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3714,
serialized_end=3813,
)
_CROPHINT = _descriptor.Descriptor(
name='CropHint',
full_name='google.cloud.vision.v1p1beta1.CropHint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.CropHint.bounding_poly', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='confidence', full_name='google.cloud.vision.v1p1beta1.CropHint.confidence', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='importance_fraction', full_name='google.cloud.vision.v1p1beta1.CropHint.importance_fraction', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3815,
serialized_end=3942,
)
_CROPHINTSANNOTATION = _descriptor.Descriptor(
name='CropHintsAnnotation',
full_name='google.cloud.vision.v1p1beta1.CropHintsAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='crop_hints', full_name='google.cloud.vision.v1p1beta1.CropHintsAnnotation.crop_hints', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3944,
serialized_end=4026,
)
_CROPHINTSPARAMS = _descriptor.Descriptor(
name='CropHintsParams',
full_name='google.cloud.vision.v1p1beta1.CropHintsParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aspect_ratios', full_name='google.cloud.vision.v1p1beta1.CropHintsParams.aspect_ratios', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4028,
serialized_end=4068,
)
_WEBDETECTIONPARAMS = _descriptor.Descriptor(
name='WebDetectionParams',
full_name='google.cloud.vision.v1p1beta1.WebDetectionParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='include_geo_results', full_name='google.cloud.vision.v1p1beta1.WebDetectionParams.include_geo_results', index=0,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4070,
serialized_end=4119,
)
_IMAGECONTEXT = _descriptor.Descriptor(
name='ImageContext',
full_name='google.cloud.vision.v1p1beta1.ImageContext',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lat_long_rect', full_name='google.cloud.vision.v1p1beta1.ImageContext.lat_long_rect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='language_hints', full_name='google.cloud.vision.v1p1beta1.ImageContext.language_hints', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_hints_params', full_name='google.cloud.vision.v1p1beta1.ImageContext.crop_hints_params', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='web_detection_params', full_name='google.cloud.vision.v1p1beta1.ImageContext.web_detection_params', index=3,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4122,
serialized_end=4383,
)
_ANNOTATEIMAGEREQUEST = _descriptor.Descriptor(
name='AnnotateImageRequest',
full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.image', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='features', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.features', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_context', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.image_context', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4386,
serialized_end=4587,
)
_ANNOTATEIMAGERESPONSE = _descriptor.Descriptor(
name='AnnotateImageResponse',
full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='face_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.face_annotations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmark_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.landmark_annotations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='logo_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.logo_annotations', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.label_annotations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.text_annotations', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='full_text_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.full_text_annotation', index=5,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='safe_search_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.safe_search_annotation', index=6,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_properties_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.image_properties_annotation', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_hints_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.crop_hints_annotation', index=8,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='web_detection', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.web_detection', index=9,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.error', index=10,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4590,
serialized_end=5424,
)
_BATCHANNOTATEIMAGESREQUEST = _descriptor.Descriptor(
name='BatchAnnotateImagesRequest',
full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='requests', full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest.requests', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5426,
serialized_end=5525,
)
_BATCHANNOTATEIMAGESRESPONSE = _descriptor.Descriptor(
name='BatchAnnotateImagesResponse',
full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='responses', full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse.responses', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5527,
serialized_end=5629,
)
_FEATURE.fields_by_name['type'].enum_type = _FEATURE_TYPE
_FEATURE_TYPE.containing_type = _FEATURE
_IMAGE.fields_by_name['source'].message_type = _IMAGESOURCE
_FACEANNOTATION_LANDMARK.fields_by_name['type'].enum_type = _FACEANNOTATION_LANDMARK_TYPE
_FACEANNOTATION_LANDMARK.fields_by_name['position'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._POSITION
_FACEANNOTATION_LANDMARK.containing_type = _FACEANNOTATION
_FACEANNOTATION_LANDMARK_TYPE.containing_type = _FACEANNOTATION_LANDMARK
_FACEANNOTATION.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_FACEANNOTATION.fields_by_name['fd_bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_FACEANNOTATION.fields_by_name['landmarks'].message_type = _FACEANNOTATION_LANDMARK
_FACEANNOTATION.fields_by_name['joy_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['sorrow_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['anger_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['surprise_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['under_exposed_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['blurred_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['headwear_likelihood'].enum_type = _LIKELIHOOD
_LOCATIONINFO.fields_by_name['lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_ENTITYANNOTATION.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_ENTITYANNOTATION.fields_by_name['locations'].message_type = _LOCATIONINFO
_ENTITYANNOTATION.fields_by_name['properties'].message_type = _PROPERTY
_SAFESEARCHANNOTATION.fields_by_name['adult'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['spoof'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['medical'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['violence'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['racy'].enum_type = _LIKELIHOOD
_LATLONGRECT.fields_by_name['min_lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_LATLONGRECT.fields_by_name['max_lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_COLORINFO.fields_by_name['color'].message_type = google_dot_type_dot_color__pb2._COLOR
_DOMINANTCOLORSANNOTATION.fields_by_name['colors'].message_type = _COLORINFO
_IMAGEPROPERTIES.fields_by_name['dominant_colors'].message_type = _DOMINANTCOLORSANNOTATION
_CROPHINT.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_CROPHINTSANNOTATION.fields_by_name['crop_hints'].message_type = _CROPHINT
_IMAGECONTEXT.fields_by_name['lat_long_rect'].message_type = _LATLONGRECT
_IMAGECONTEXT.fields_by_name['crop_hints_params'].message_type = _CROPHINTSPARAMS
_IMAGECONTEXT.fields_by_name['web_detection_params'].message_type = _WEBDETECTIONPARAMS
_ANNOTATEIMAGEREQUEST.fields_by_name['image'].message_type = _IMAGE
_ANNOTATEIMAGEREQUEST.fields_by_name['features'].message_type = _FEATURE
_ANNOTATEIMAGEREQUEST.fields_by_name['image_context'].message_type = _IMAGECONTEXT
_ANNOTATEIMAGERESPONSE.fields_by_name['face_annotations'].message_type = _FACEANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['landmark_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['logo_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['label_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['text_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['full_text_annotation'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2._TEXTANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['safe_search_annotation'].message_type = _SAFESEARCHANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['image_properties_annotation'].message_type = _IMAGEPROPERTIES
_ANNOTATEIMAGERESPONSE.fields_by_name['crop_hints_annotation'].message_type = _CROPHINTSANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['web_detection'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2._WEBDETECTION
_ANNOTATEIMAGERESPONSE.fields_by_name['error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_BATCHANNOTATEIMAGESREQUEST.fields_by_name['requests'].message_type = _ANNOTATEIMAGEREQUEST
_BATCHANNOTATEIMAGESRESPONSE.fields_by_name['responses'].message_type = _ANNOTATEIMAGERESPONSE
DESCRIPTOR.message_types_by_name['Feature'] = _FEATURE
DESCRIPTOR.message_types_by_name['ImageSource'] = _IMAGESOURCE
DESCRIPTOR.message_types_by_name['Image'] = _IMAGE
DESCRIPTOR.message_types_by_name['FaceAnnotation'] = _FACEANNOTATION
DESCRIPTOR.message_types_by_name['LocationInfo'] = _LOCATIONINFO
DESCRIPTOR.message_types_by_name['Property'] = _PROPERTY
DESCRIPTOR.message_types_by_name['EntityAnnotation'] = _ENTITYANNOTATION
DESCRIPTOR.message_types_by_name['SafeSearchAnnotation'] = _SAFESEARCHANNOTATION
DESCRIPTOR.message_types_by_name['LatLongRect'] = _LATLONGRECT
DESCRIPTOR.message_types_by_name['ColorInfo'] = _COLORINFO
DESCRIPTOR.message_types_by_name['DominantColorsAnnotation'] = _DOMINANTCOLORSANNOTATION
DESCRIPTOR.message_types_by_name['ImageProperties'] = _IMAGEPROPERTIES
DESCRIPTOR.message_types_by_name['CropHint'] = _CROPHINT
DESCRIPTOR.message_types_by_name['CropHintsAnnotation'] = _CROPHINTSANNOTATION
DESCRIPTOR.message_types_by_name['CropHintsParams'] = _CROPHINTSPARAMS
DESCRIPTOR.message_types_by_name['WebDetectionParams'] = _WEBDETECTIONPARAMS
DESCRIPTOR.message_types_by_name['ImageContext'] = _IMAGECONTEXT
DESCRIPTOR.message_types_by_name['AnnotateImageRequest'] = _ANNOTATEIMAGEREQUEST
DESCRIPTOR.message_types_by_name['AnnotateImageResponse'] = _ANNOTATEIMAGERESPONSE
DESCRIPTOR.message_types_by_name['BatchAnnotateImagesRequest'] = _BATCHANNOTATEIMAGESREQUEST
DESCRIPTOR.message_types_by_name['BatchAnnotateImagesResponse'] = _BATCHANNOTATEIMAGESRESPONSE
DESCRIPTOR.enum_types_by_name['Likelihood'] = _LIKELIHOOD
Feature = _reflection.GeneratedProtocolMessageType('Feature', (_message.Message,), dict(
DESCRIPTOR = _FEATURE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Users describe the type of Google Cloud Vision API tasks to perform over
images by using *Feature*\ s. Each Feature indicates a type of image
detection task to perform. Features encode the Cloud Vision API vertical
to operate on and the number of top-scoring results to return.
Attributes:
type:
The feature type.
max_results:
Maximum number of results of this type.
model:
Model to use for the feature. Supported values:
"builtin/stable" (the default if unset) and "builtin/latest".
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Feature)
))
_sym_db.RegisterMessage(Feature)
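# --- Illustrative sketch (not part of the generated code): per the
# docstring above, a Feature pairs one detection vertical with a result
# cap. Assumes the standard protobuf-python enum scoping, under which the
# nested ``Type`` enum is reachable through the message class.
if __name__ == '__main__':
    _example_feature = Feature(type=Feature.Type.Value('LABEL_DETECTION'), max_results=5)
    print(_example_feature)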
ImageSource = _reflection.GeneratedProtocolMessageType('ImageSource', (_message.Message,), dict(
DESCRIPTOR = _IMAGESOURCE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """External image source (Google Cloud Storage image location).
Attributes:
gcs_image_uri:
NOTE: For new code ``image_uri`` below is preferred. Google
Cloud Storage image URI, which must be in the following form:
``gs://bucket_name/object_name`` (for details, see `Google
Cloud Storage Request URIs
<https://cloud.google.com/storage/docs/reference-uris>`__).
NOTE: Cloud Storage object versioning is not supported.
image_uri:
Image URI which supports: 1) Google Cloud Storage image URI,
which must be in the following form:
``gs://bucket_name/object_name`` (for details, see `Google
Cloud Storage Request URIs
<https://cloud.google.com/storage/docs/reference-uris>`__).
NOTE: Cloud Storage object versioning is not supported. 2)
Publicly accessible image HTTP/HTTPS URL. This is preferred
over the legacy ``gcs_image_uri`` above. When both
``gcs_image_uri`` and ``image_uri`` are specified,
``image_uri`` takes precedence.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ImageSource)
))
_sym_db.RegisterMessage(ImageSource)
Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), dict(
DESCRIPTOR = _IMAGE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Client image to perform Google Cloud Vision API tasks over.
Attributes:
content:
Image content, represented as a stream of bytes. Note: as with
all ``bytes`` fields, protobuffers use a pure binary
representation, whereas JSON representations use base64.
source:
Google Cloud Storage image location. If both ``content`` and
``source`` are provided for an image, ``content`` takes
precedence and is used to perform the image annotation
request.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Image)
))
_sym_db.RegisterMessage(Image)
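# --- Illustrative sketch (not part of the generated code): the
# docstrings above spell out the precedence rules -- ``image_uri`` wins
# over ``gcs_image_uri``, and inline ``content`` wins over ``source``.
# The bucket/object URI below is a hypothetical placeholder.
if __name__ == '__main__':
    _example_image = Image(source=ImageSource(image_uri='gs://bucket_name/object_name'))
    print(_example_image.source.image_uri)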
FaceAnnotation = _reflection.GeneratedProtocolMessageType('FaceAnnotation', (_message.Message,), dict(
Landmark = _reflection.GeneratedProtocolMessageType('Landmark', (_message.Message,), dict(
DESCRIPTOR = _FACEANNOTATION_LANDMARK,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A face-specific landmark (for example, a face feature).
Attributes:
type:
Face landmark type.
position:
Face landmark position.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark)
))
,
DESCRIPTOR = _FACEANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A face annotation object contains the results of face detection.
Attributes:
bounding_poly:
The bounding polygon around the face. The coordinates of the
bounding box are in the original image's scale, as returned in
``ImageParams``. The bounding box is computed to "frame" the
face in accordance with human expectations. It is based on the
landmarker results. Note that one or more x and/or y
coordinates may not be generated in the ``BoundingPoly`` (the
polygon will be unbounded) if only a partial face appears in
the image to be annotated.
fd_bounding_poly:
The ``fd_bounding_poly`` bounding polygon is tighter than the
``boundingPoly``, and encloses only the skin part of the face.
Typically, it is used to eliminate the face from any image
analysis that detects the "amount of skin" visible in an
image. It is not based on the landmarker results, only on the
initial face detection, hence the fd (face detection) prefix.
landmarks:
Detected face landmarks.
roll_angle:
Roll angle, which indicates the amount of clockwise/anti-
clockwise rotation of the face relative to the image vertical
about the axis perpendicular to the face. Range [-180,180].
pan_angle:
Yaw angle, which indicates the leftward/rightward angle that
the face is pointing relative to the vertical plane
perpendicular to the image. Range [-180,180].
tilt_angle:
Pitch angle, which indicates the upwards/downwards angle that
the face is pointing relative to the image's horizontal plane.
Range [-180,180].
detection_confidence:
Detection confidence. Range [0, 1].
landmarking_confidence:
Face landmarking confidence. Range [0, 1].
joy_likelihood:
Joy likelihood.
sorrow_likelihood:
Sorrow likelihood.
anger_likelihood:
Anger likelihood.
surprise_likelihood:
Surprise likelihood.
under_exposed_likelihood:
Under-exposed likelihood.
blurred_likelihood:
Blurred likelihood.
headwear_likelihood:
Headwear likelihood.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.FaceAnnotation)
))
_sym_db.RegisterMessage(FaceAnnotation)
_sym_db.RegisterMessage(FaceAnnotation.Landmark)
LocationInfo = _reflection.GeneratedProtocolMessageType('LocationInfo', (_message.Message,), dict(
DESCRIPTOR = _LOCATIONINFO,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Detected entity location information.
Attributes:
lat_lng:
lat/long location coordinates.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.LocationInfo)
))
_sym_db.RegisterMessage(LocationInfo)
Property = _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), dict(
DESCRIPTOR = _PROPERTY,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A ``Property`` consists of a user-supplied name/value pair.
Attributes:
name:
Name of the property.
value:
Value of the property.
uint64_value:
Value of numeric properties.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Property)
))
_sym_db.RegisterMessage(Property)
EntityAnnotation = _reflection.GeneratedProtocolMessageType('EntityAnnotation', (_message.Message,), dict(
DESCRIPTOR = _ENTITYANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of detected entity features.
Attributes:
mid:
Opaque entity ID. Some IDs may be available in `Google
Knowledge Graph Search API
<https://developers.google.com/knowledge-graph/>`__.
locale:
The language code for the locale in which the entity textual
``description`` is expressed.
description:
Entity textual description, expressed in its ``locale``
language.
score:
Overall score of the result. Range [0, 1].
confidence:
The accuracy of the entity detection in an image. For example,
for an image in which the "Eiffel Tower" entity is detected,
this field represents the confidence that there is a tower in
the query image. Range [0, 1].
topicality:
The relevancy of the ICA (Image Content Annotation) label to
the image. For example, the relevancy of "tower" is likely
higher to an image containing the detected "Eiffel Tower" than
to an image containing a detected distant towering building,
even though the confidence that there is a tower in each image
may be the same. Range [0, 1].
bounding_poly:
Image region to which this entity belongs. Not produced for
``LABEL_DETECTION`` features.
locations:
The location information for the detected entity. Multiple
``LocationInfo`` elements can be present because one location
may indicate the location of the scene in the image, and
another location may indicate the location of the place where
the image was taken. Location information is usually present
for landmarks.
properties:
Some entities may have optional user-supplied ``Property``
          (name/value) fields, such as a score or string that qualifies the
entity.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.EntityAnnotation)
))
_sym_db.RegisterMessage(EntityAnnotation)
SafeSearchAnnotation = _reflection.GeneratedProtocolMessageType('SafeSearchAnnotation', (_message.Message,), dict(
DESCRIPTOR = _SAFESEARCHANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of features pertaining to the image, computed by computer vision
methods over safe-search verticals (for example, adult, spoof, medical,
violence).
Attributes:
adult:
Represents the adult content likelihood for the image. Adult
content may contain elements such as nudity, pornographic
images or cartoons, or sexual activities.
spoof:
          Spoof likelihood. The likelihood that a modification was made
to the image's canonical version to make it appear funny or
offensive.
medical:
Likelihood that this is a medical image.
violence:
Likelihood that this image contains violent content.
racy:
Likelihood that the request image contains racy content. Racy
content may include (but is not limited to) skimpy or sheer
clothing, strategically covered nudity, lewd or provocative
poses, or close-ups of sensitive body areas.
""",
))
_sym_db.RegisterMessage(SafeSearchAnnotation)
LatLongRect = _reflection.GeneratedProtocolMessageType('LatLongRect', (_message.Message,), dict(
DESCRIPTOR = _LATLONGRECT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Rectangle determined by min and max ``LatLng`` pairs.
Attributes:
min_lat_lng:
Min lat/long pair.
max_lat_lng:
Max lat/long pair.
""",
))
_sym_db.RegisterMessage(LatLongRect)
ColorInfo = _reflection.GeneratedProtocolMessageType('ColorInfo', (_message.Message,), dict(
DESCRIPTOR = _COLORINFO,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Color information consists of RGB channels, score, and the fraction of
the image that the color occupies in the image.
Attributes:
color:
RGB components of the color.
score:
Image-specific score for this color. Value in range [0, 1].
pixel_fraction:
The fraction of pixels the color occupies in the image. Value
in range [0, 1].
""",
))
_sym_db.RegisterMessage(ColorInfo)
DominantColorsAnnotation = _reflection.GeneratedProtocolMessageType('DominantColorsAnnotation', (_message.Message,), dict(
DESCRIPTOR = _DOMINANTCOLORSANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of dominant colors and their corresponding scores.
Attributes:
colors:
RGB color values with their score and pixel fraction.
""",
))
_sym_db.RegisterMessage(DominantColorsAnnotation)
ImageProperties = _reflection.GeneratedProtocolMessageType('ImageProperties', (_message.Message,), dict(
DESCRIPTOR = _IMAGEPROPERTIES,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Stores image properties, such as dominant colors.
Attributes:
dominant_colors:
If present, dominant colors completed successfully.
""",
))
_sym_db.RegisterMessage(ImageProperties)
CropHint = _reflection.GeneratedProtocolMessageType('CropHint', (_message.Message,), dict(
DESCRIPTOR = _CROPHINT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Single crop hint that is used to generate a new crop when serving an
image.
Attributes:
bounding_poly:
The bounding polygon for the crop region. The coordinates of
the bounding box are in the original image's scale, as
returned in ``ImageParams``.
confidence:
Confidence of this being a salient region. Range [0, 1].
importance_fraction:
Fraction of importance of this salient region with respect to
the original image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHint)
))
_sym_db.RegisterMessage(CropHint)
CropHintsAnnotation = _reflection.GeneratedProtocolMessageType('CropHintsAnnotation', (_message.Message,), dict(
DESCRIPTOR = _CROPHINTSANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of crop hints that are used to generate new crops when serving
images.
Attributes:
crop_hints:
Crop hint results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHintsAnnotation)
))
_sym_db.RegisterMessage(CropHintsAnnotation)
CropHintsParams = _reflection.GeneratedProtocolMessageType('CropHintsParams', (_message.Message,), dict(
DESCRIPTOR = _CROPHINTSPARAMS,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Parameters for crop hints annotation request.
Attributes:
aspect_ratios:
Aspect ratios in floats, representing the ratio of the width
to the height of the image. For example, if the desired aspect
ratio is 4/3, the corresponding float value should be 1.33333.
If not specified, the best possible crop is returned. The
number of provided aspect ratios is limited to a maximum of
16; any aspect ratios provided after the 16th are ignored.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHintsParams)
))
_sym_db.RegisterMessage(CropHintsParams)
WebDetectionParams = _reflection.GeneratedProtocolMessageType('WebDetectionParams', (_message.Message,), dict(
DESCRIPTOR = _WEBDETECTIONPARAMS,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Parameters for web detection request.
Attributes:
include_geo_results:
Whether to include results derived from the geo information in
the image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.WebDetectionParams)
))
_sym_db.RegisterMessage(WebDetectionParams)
ImageContext = _reflection.GeneratedProtocolMessageType('ImageContext', (_message.Message,), dict(
DESCRIPTOR = _IMAGECONTEXT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Image context and/or feature-specific parameters.
Attributes:
lat_long_rect:
lat/long rectangle that specifies the location of the image.
language_hints:
List of languages to use for TEXT\_DETECTION. In most cases,
an empty value yields the best results since it enables
automatic language detection. For languages based on the Latin
alphabet, setting ``language_hints`` is not needed. In rare
cases, when the language of the text in the image is known,
setting a hint will help get better results (although it will
be a significant hindrance if the hint is wrong). Text
detection returns an error if one or more of the specified
languages is not one of the `supported languages
</vision/docs/languages>`__.
crop_hints_params:
Parameters for crop hints annotation request.
web_detection_params:
Parameters for web detection.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ImageContext)
))
_sym_db.RegisterMessage(ImageContext)
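# --- Illustrative sketch (not part of the generated code): an
# ImageContext combining an OCR language hint with the crop-hints aspect
# ratio described above (4:3 expressed as the float 1.33333).
if __name__ == '__main__':
    _example_context = ImageContext(
        language_hints=['en'],
        crop_hints_params=CropHintsParams(aspect_ratios=[1.33333]),
    )
    print(_example_context)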
AnnotateImageRequest = _reflection.GeneratedProtocolMessageType('AnnotateImageRequest', (_message.Message,), dict(
DESCRIPTOR = _ANNOTATEIMAGEREQUEST,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Request for performing Google Cloud Vision API tasks over a
user-provided image, with user-requested features.
Attributes:
image:
The image to be processed.
features:
Requested features.
image_context:
Additional context that may accompany the image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.AnnotateImageRequest)
))
_sym_db.RegisterMessage(AnnotateImageRequest)
AnnotateImageResponse = _reflection.GeneratedProtocolMessageType('AnnotateImageResponse', (_message.Message,), dict(
DESCRIPTOR = _ANNOTATEIMAGERESPONSE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Response to an image annotation request.
Attributes:
face_annotations:
If present, face detection has completed successfully.
landmark_annotations:
If present, landmark detection has completed successfully.
logo_annotations:
If present, logo detection has completed successfully.
label_annotations:
If present, label detection has completed successfully.
text_annotations:
If present, text (OCR) detection has completed successfully.
full_text_annotation:
If present, text (OCR) detection or document (OCR) text
detection has completed successfully. This annotation provides
the structural hierarchy for the OCR detected text.
safe_search_annotation:
If present, safe-search annotation has completed successfully.
image_properties_annotation:
If present, image properties were extracted successfully.
crop_hints_annotation:
If present, crop hints have completed successfully.
web_detection:
If present, web detection has completed successfully.
error:
If set, represents the error message for the operation. Note
that filled-in image annotations are guaranteed to be correct,
even when ``error`` is set.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.AnnotateImageResponse)
))
_sym_db.RegisterMessage(AnnotateImageResponse)
BatchAnnotateImagesRequest = _reflection.GeneratedProtocolMessageType('BatchAnnotateImagesRequest', (_message.Message,), dict(
DESCRIPTOR = _BATCHANNOTATEIMAGESREQUEST,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Multiple image annotation requests are batched into a single service
call.
Attributes:
requests:
Individual image annotation requests for this batch.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest)
))
_sym_db.RegisterMessage(BatchAnnotateImagesRequest)
BatchAnnotateImagesResponse = _reflection.GeneratedProtocolMessageType('BatchAnnotateImagesResponse', (_message.Message,), dict(
DESCRIPTOR = _BATCHANNOTATEIMAGESRESPONSE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Response to a batch image annotation request.
Attributes:
responses:
Individual responses to image annotation requests within the
batch.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse)
))
_sym_db.RegisterMessage(BatchAnnotateImagesResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n!com.google.cloud.vision.v1p1beta1B\023ImageAnnotatorProtoP\001ZCgoogle.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision\370\001\001'))
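# --- Illustrative sketch (not part of the generated code): composing a
# batch request from the message classes registered above. All field
# values are hypothetical placeholders.
if __name__ == '__main__':
    _example_request = BatchAnnotateImagesRequest(requests=[
        AnnotateImageRequest(
            image=Image(source=ImageSource(image_uri='gs://bucket_name/object_name')),
            features=[Feature(type=Feature.Type.Value('LABEL_DETECTION'), max_results=3)],
        ),
    ])
    print(_example_request)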
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class ImageAnnotatorStub(object):
def __init__(self, channel):
self.BatchAnnotateImages = channel.unary_unary(
'/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages',
request_serializer=BatchAnnotateImagesRequest.SerializeToString,
response_deserializer=BatchAnnotateImagesResponse.FromString,
)
class ImageAnnotatorServicer(object):
def BatchAnnotateImages(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ImageAnnotatorServicer_to_server(servicer, server):
rpc_method_handlers = {
'BatchAnnotateImages': grpc.unary_unary_rpc_method_handler(
servicer.BatchAnnotateImages,
request_deserializer=BatchAnnotateImagesRequest.FromString,
response_serializer=BatchAnnotateImagesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.cloud.vision.v1p1beta1.ImageAnnotator', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaImageAnnotatorServicer(object):
def BatchAnnotateImages(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaImageAnnotatorStub(object):
def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
BatchAnnotateImages.future = None
def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
request_deserializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString,
}
response_serializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString,
}
method_implementations = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
request_serializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString,
}
response_deserializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString,
}
cardinalities = {
'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1p1beta1.ImageAnnotator', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
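# --- Illustrative sketch (not part of the generated code): how the stub
# defined above would typically be wired to a channel. The endpoint is a
# hypothetical placeholder and credential handling is omitted; the RPC
# call itself is left commented out.
if __name__ == '__main__':
    try:
        import grpc as _grpc
        _channel = _grpc.insecure_channel('localhost:50051')  # hypothetical endpoint
        _stub = ImageAnnotatorStub(_channel)
        # _response = _stub.BatchAnnotateImages(_example_request)  # performs the unary RPC
    except (ImportError, NameError):
        pass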
| true
| true
|
7903b7b89b3b30b8208076289a71ea4edac78442
| 1,316
|
py
|
Python
|
setup.py
|
nprapps/copydoc
|
e1ab09b287beb0439748c319cf165cbc06c66624
|
[
"MIT"
] | 13
|
2016-03-31T20:22:24.000Z
|
2021-11-08T10:26:02.000Z
|
setup.py
|
nprapps/copydoc
|
e1ab09b287beb0439748c319cf165cbc06c66624
|
[
"MIT"
] | 12
|
2016-04-04T21:36:37.000Z
|
2018-06-11T21:46:42.000Z
|
setup.py
|
nprapps/copydoc
|
e1ab09b287beb0439748c319cf165cbc06c66624
|
[
"MIT"
] | 5
|
2016-11-25T21:19:50.000Z
|
2021-08-10T20:06:19.000Z
|
import os.path
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name='copydoc',
version='1.0.9',
author='NPR Visuals',
author_email='nprapps@npr.org',
url='https://github.com/nprapps/copydoc/',
description='Parse Google docs for use in content management',
long_description=read('README.rst'),
py_modules=('copydoc',),
license="MIT License",
keywords='google gdocs',
install_requires=[
'beautifulsoup4==4.4.1'
],
extras_require={
'dev': [
'Sphinx==1.5.6',
'nose2==0.5.0',
'tox==2.3.1',
'flake8==3.5.0'
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
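# --- Usage note (illustrative, not part of setup.py): the dev extras
# declared above would typically be installed with
#
#     pip install -e ".[dev]"
#
# after which the single declared module ships as ``import copydoc``.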
| 27.416667
| 73
| 0.584347
|
import os.path
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name='copydoc',
version='1.0.9',
author='NPR Visuals',
author_email='nprapps@npr.org',
url='https://github.com/nprapps/copydoc/',
description='Parse Google docs for use in content management',
long_description=read('README.rst'),
py_modules=('copydoc',),
license="MIT License",
keywords='google gdocs',
install_requires=[
'beautifulsoup4==4.4.1'
],
extras_require={
'dev': [
'Sphinx==1.5.6',
'nose2==0.5.0',
'tox==2.3.1',
'flake8==3.5.0'
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
| true
| true
|
7903b7f25ce3bde17056fc18aecf3c4a13848ab1
| 2,370
|
py
|
Python
|
src/aioros/param_manager.py
|
mgrrx/aioros
|
9bd750020d0d5fb466891346f61b6f083cbb8f05
|
[
"Apache-2.0"
] | 8
|
2020-08-27T17:16:59.000Z
|
2022-02-02T13:39:41.000Z
|
src/aioros/param_manager.py
|
mgrrx/aioros
|
9bd750020d0d5fb466891346f61b6f083cbb8f05
|
[
"Apache-2.0"
] | 3
|
2022-02-09T19:18:12.000Z
|
2022-03-08T21:12:00.000Z
|
src/aioros/param_manager.py
|
mgrrx/aioros
|
9bd750020d0d5fb466891346f61b6f083cbb8f05
|
[
"Apache-2.0"
] | null | null | null |
from asyncio import AbstractEventLoop
from asyncio import iscoroutinefunction
from collections import defaultdict
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import NamedTuple
from typing import Set
from typing import Tuple
from .api.master_api_client import MasterApiClient
CallbackFunc = Callable[[str, Any], None]
class Callback(NamedTuple):
callback: CallbackFunc
class ParamManager:
def __init__(
self,
master_api_client: MasterApiClient,
loop: AbstractEventLoop
) -> None:
self._master_api_client = master_api_client
self._loop = loop
self._callbacks: DefaultDict[str, Set[Callback]] = defaultdict(set)
self._cache: Dict[str, Any] = {}
async def subscribe_param(
self,
key: str,
callback: CallbackFunc
) -> Tuple[Any, Callback]:
if key not in self._callbacks:
param_value = await self._master_api_client.subscribe_param(key)
self._cache[key] = param_value
else:
param_value = self._cache[key]
cb = Callback(callback)
self._callbacks[key].add(cb)
return param_value, cb
async def unsubscribe_callback(
self,
callback: Callback
) -> bool:
for key, callbacks in self._callbacks.items():
if callback in callbacks:
callbacks.discard(callback)
break
else:
return False
if not callbacks:
            await self._master_api_client.unsubscribe_param(key)
self._cache.pop(key)
self._callbacks.pop(key)
return True
def update(self, key: str, value: Any) -> bool:
self._cache[key] = value
callbacks = set()
namespace = '/'
for ns in key.split('/'):
if not ns:
continue
namespace += ns
callbacks |= set(self._callbacks.get(namespace, set()))
namespace += '/'
if not callbacks:
return False
for callback in callbacks:
if iscoroutinefunction(callback.callback):
self._loop.create_task(callback.callback(key, value))
else:
self._loop.call_soon(callback.callback, key, value)
return True
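# --- Illustrative sketch (not part of the module): ``update`` walks every
# namespace prefix of a key, so a callback registered for '/robot' also
# fires for '/robot/arm/speed'. The stub below stands in for a real
# MasterApiClient and is purely hypothetical.
if __name__ == '__main__':
    import asyncio
    class _StubMasterApiClient:
        async def subscribe_param(self, key):
            return None  # pretend the parameter is currently unset
    loop = asyncio.new_event_loop()
    async def _demo():
        manager = ParamManager(_StubMasterApiClient(), loop)
        await manager.subscribe_param('/robot', lambda k, v: print('changed:', k, v))
        manager.update('/robot/arm/speed', 1.5)  # dispatched via the '/robot' prefix
        await asyncio.sleep(0)  # give call_soon callbacks a chance to run
    loop.run_until_complete(_demo())
    loop.close()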
| 27.55814
| 76
| 0.613924
|
from asyncio import AbstractEventLoop
from asyncio import iscoroutinefunction
from collections import defaultdict
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import NamedTuple
from typing import Set
from typing import Tuple
from .api.master_api_client import MasterApiClient
CallbackFunc = Callable[[str, Any], None]
class Callback(NamedTuple):
callback: CallbackFunc
class ParamManager:
def __init__(
self,
master_api_client: MasterApiClient,
loop: AbstractEventLoop
) -> None:
self._master_api_client = master_api_client
self._loop = loop
self._callbacks: DefaultDict[str, Set[Callback]] = defaultdict(set)
self._cache: Dict[str, Any] = {}
async def subscribe_param(
self,
key: str,
callback: CallbackFunc
) -> Tuple[Any, Callback]:
if key not in self._callbacks:
param_value = await self._master_api_client.subscribe_param(key)
self._cache[key] = param_value
else:
param_value = self._cache[key]
cb = Callback(callback)
self._callbacks[key].add(cb)
return param_value, cb
async def unsubscribe_callback(
self,
callback: Callback
) -> bool:
for key, callbacks in self._callbacks.items():
if callback in callbacks:
callbacks.discard(callback)
break
else:
return False
if not callbacks:
            await self._master_api_client.unsubscribe_param(key)
self._cache.pop(key)
self._callbacks.pop(key)
return True
def update(self, key: str, value: Any) -> bool:
self._cache[key] = value
callbacks = set()
namespace = '/'
for ns in key.split('/'):
if not ns:
continue
namespace += ns
callbacks |= set(self._callbacks.get(namespace, set()))
namespace += '/'
if not callbacks:
return False
for callback in callbacks:
if iscoroutinefunction(callback.callback):
self._loop.create_task(callback.callback(key, value))
else:
self._loop.call_soon(callback.callback, key, value)
return True
| true
| true
|
7903bac5b20dfc446bbb6cdb13db604f3fb24884
| 7,024
|
py
|
Python
|
third-party/language/generator.py
|
mousedoc/Prism
|
d7cb2e541894a9a190606e18661b79514aef6d33
|
[
"CC0-1.0"
] | null | null | null |
third-party/language/generator.py
|
mousedoc/Prism
|
d7cb2e541894a9a190606e18661b79514aef6d33
|
[
"CC0-1.0"
] | 1
|
2018-05-19T06:52:45.000Z
|
2018-05-19T06:52:45.000Z
|
third-party/language/generator.py
|
mousedoc/Prism
|
d7cb2e541894a9a190606e18661b79514aef6d33
|
[
"CC0-1.0"
] | null | null | null |
import xlrd
import os
import sys
import copy
import json
import codecs
from collections import OrderedDict
# Constant Values
PARENT_NAME_ROW = 0
PARENT_NAME_COL = 0
COLUMN_NAMES_ROW = 1
DATA_STARTING_ROW = 2
ROOT_NAME = '*root'
ID_COLUMN_NAME = 'id'
PARENT_COLUMN_NAME = '*parent'
IGNORE_WILDCARD = '_'
REQUIRE_VERSION = (3, 5)
EXCEL_PATH = './excel/'
JSON_PATH = '../../asset/json/'
# Class
class TypeUtility:
# xlrd is giving number as float
@staticmethod
def check_integer(value):
return type(value) == float and int(value) == value
# xlrd is giving boolean as integer
@staticmethod
def check_boolean(value):
return type(value) == int
@staticmethod
def convert_value(value):
if TypeUtility.check_integer(value):
return int(value)
elif TypeUtility.check_boolean(value):
return bool(value)
else:
return value
class Table:
def __init__(self, sheet):
self.init_name(sheet)
self.init_parent_name(sheet)
self.init_metadata(sheet)
self.init_descriptors(sheet)
self.init_id_index_map()
def init_name(self, sheet):
self.name = sheet.name
def init_parent_name(self, sheet):
row = sheet.row_values(PARENT_NAME_ROW)
self.parent_name = row[PARENT_NAME_COL]
if type(self.parent_name) is not str:
            raise Exception('[' + self.name + ']' + 'Parent name is not string')
self.is_root = self.parent_name == ROOT_NAME
def init_metadata(self, sheet):
row = sheet.row_values(COLUMN_NAMES_ROW)
self.is_parent = False
self.is_child = False
self.column_names = []
for value in row:
if type(value) is not str:
                raise Exception('[' + self.name + ']' + 'Column name is not string')
if value == ID_COLUMN_NAME:
self.is_parent = True
if value == PARENT_COLUMN_NAME:
self.is_child = True
self.column_names.append(value)
if self.is_root and self.is_child:
            raise Exception('[' + self.name + ']' + 'Root table must not have a "' + PARENT_COLUMN_NAME + '" column')
if not self.is_root and not self.is_child:
            raise Exception('[' + self.name + ']' + 'Child table must have a "' + PARENT_COLUMN_NAME + '" column')
def init_descriptors(self, sheet):
self.descriptors = []
id_table = []
for i in range(DATA_STARTING_ROW, sheet.nrows):
#add metadata row count
rowcount = i + 1
col = sheet.row_values(i)
desc = self.get_descriptor(col)
if self.is_parent:
id = desc[ID_COLUMN_NAME]
if not id:
                    raise Exception('[' + self.name + ']' + 'Descriptor id must have a value - row : ' + str(i + 1))
if id in id_table:
                    raise Exception('[' + self.name + ']' + 'Descriptor id is duplicated - row : ' + str(i + 1))
id_table.append(id)
self.descriptors.append(desc)
def get_descriptor(self, col):
descriptor = OrderedDict()
for i in range(0, len(col)):
key = self.column_names[i]
if key[0] == IGNORE_WILDCARD:
continue
descriptor[key] = TypeUtility.convert_value(col[i])
return descriptor
def init_id_index_map(self):
if not self.is_parent:
return
self.id_index_map = {}
for descriptor in self.descriptors:
id = descriptor[ID_COLUMN_NAME]
self.id_index_map[id] = self.descriptors.index(descriptor)
def merge_child_table(self, table):
self.add_child_descriptor_list(table.name)
for descriptor in table.descriptors:
parent_id = descriptor[PARENT_COLUMN_NAME]
parent_idx = self.id_index_map[parent_id]
parent_descriptor = self.descriptors[parent_idx]
parent_descriptor[table.name].append(descriptor)
def add_child_descriptor_list(self, name):
for descriptor in self.descriptors:
descriptor[name] = []
def remove_parent_column(self):
for descriptor in self.descriptors:
del descriptor[PARENT_COLUMN_NAME]
def save_to_json(self, pretty_print, export_path):
if pretty_print:
string = json.dumps(self.descriptors, ensure_ascii=False, indent=4)
else:
string = json.dumps(self.descriptors, ensure_ascii=False)
with codecs.open(export_path + self.name + '.json', 'w', 'utf-8') as f:
f.write(string)
class Converter:
def __init__(self, pretty_print, export_path):
self.pretty_print = pretty_print
self.export_path = export_path
def convert(self, filename):
print(filename + ' convert starting...')
sheets = Converter.get_sheets(filename)
root_table, tables = Converter.get_tables(sheets)
Converter.post_process(tables)
root_table.save_to_json(self.pretty_print, self.export_path)
print(filename + ' convert is Done\n')
@staticmethod
def get_sheets(filename):
path = os.path.abspath(filename)
workbook = xlrd.open_workbook(path)
return workbook.sheets()
@staticmethod
def get_tables(sheets):
tables = {}
root_tables = []
for sheet in sheets:
if sheet.name[0] == IGNORE_WILDCARD:
continue
table = Table(sheet)
tables[table.name] = table
if table.is_root:
root_tables.append(table)
if len(root_tables) == 1:
return root_tables[0], tables
else:
            raise Exception('There must be exactly one root table')
@staticmethod
def post_process(tables):
for name, table in tables.items():
if table.is_root:
continue
parent_table = tables[table.parent_name]
if not parent_table.is_parent:
                raise Exception('Parent table must have an id column')
parent_table.merge_child_table(table)
table.remove_parent_column()
# Script
current_version = sys.version_info
if current_version < REQUIRE_VERSION:
    raise Exception('[error] You need Python 3.5 or later')
json_path = sys.argv[1] if len(sys.argv) > 1 else './'
converter = Converter(True, JSON_PATH + json_path)
for path, dirs, files in os.walk(EXCEL_PATH):
for file in files:
        if file[0] == "~":
continue
if os.path.splitext(file)[1].lower() == '.xlsx':
converter.convert(EXCEL_PATH + file)
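# --- Illustrative sketch (not part of the original script): xlrd hands
# numbers back as float and booleans as int, which is exactly what
# TypeUtility normalizes before descriptors are dumped to JSON.
if __name__ == '__main__':
    assert TypeUtility.convert_value(3.0) == 3        # integral float -> int
    assert TypeUtility.convert_value(3.5) == 3.5      # genuine float is kept
    assert TypeUtility.convert_value(1) is True       # xlrd boolean (int) -> bool
    assert TypeUtility.convert_value('abc') == 'abc'  # strings pass through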
| 30.017094
| 117
| 0.589408
|
import xlrd
import os
import sys
import copy
import json
import codecs
from collections import OrderedDict
PARENT_NAME_ROW = 0
PARENT_NAME_COL = 0
COLUMN_NAMES_ROW = 1
DATA_STARTING_ROW = 2
ROOT_NAME = '*root'
ID_COLUMN_NAME = 'id'
PARENT_COLUMN_NAME = '*parent'
IGNORE_WILDCARD = '_'
REQUIRE_VERSION = (3, 5)
EXCEL_PATH = './excel/'
JSON_PATH = '../../asset/json/'
class TypeUtility:
@staticmethod
def check_integer(value):
return type(value) == float and int(value) == value
@staticmethod
def check_boolean(value):
return type(value) == int
@staticmethod
def convert_value(value):
if TypeUtility.check_integer(value):
return int(value)
elif TypeUtility.check_boolean(value):
return bool(value)
else:
return value
class Table:
def __init__(self, sheet):
self.init_name(sheet)
self.init_parent_name(sheet)
self.init_metadata(sheet)
self.init_descriptors(sheet)
self.init_id_index_map()
def init_name(self, sheet):
self.name = sheet.name
def init_parent_name(self, sheet):
row = sheet.row_values(PARENT_NAME_ROW)
self.parent_name = row[PARENT_NAME_COL]
if type(self.parent_name) is not str:
            raise Exception('[' + self.name + ']' + 'Parent name is not string')
self.is_root = self.parent_name == ROOT_NAME
def init_metadata(self, sheet):
row = sheet.row_values(COLUMN_NAMES_ROW)
self.is_parent = False
self.is_child = False
self.column_names = []
for value in row:
if type(value) is not str:
                raise Exception('[' + self.name + ']' + 'Column name is not string')
if value == ID_COLUMN_NAME:
self.is_parent = True
if value == PARENT_COLUMN_NAME:
self.is_child = True
self.column_names.append(value)
if self.is_root and self.is_child:
            raise Exception('[' + self.name + ']' + 'Root table must not have a "' + PARENT_COLUMN_NAME + '" column')
if not self.is_root and not self.is_child:
            raise Exception('[' + self.name + ']' + 'Child table must have a "' + PARENT_COLUMN_NAME + '" column')
def init_descriptors(self, sheet):
self.descriptors = []
id_table = []
for i in range(DATA_STARTING_ROW, sheet.nrows):
rowcount = i + 1
col = sheet.row_values(i)
desc = self.get_descriptor(col)
if self.is_parent:
id = desc[ID_COLUMN_NAME]
if not id:
                    raise Exception('[' + self.name + ']' + 'Descriptor id must have a value - row : ' + str(i + 1))
if id in id_table:
                    raise Exception('[' + self.name + ']' + 'Descriptor id is duplicated - row : ' + str(i + 1))
id_table.append(id)
self.descriptors.append(desc)
def get_descriptor(self, col):
descriptor = OrderedDict()
for i in range(0, len(col)):
key = self.column_names[i]
if key[0] == IGNORE_WILDCARD:
continue
descriptor[key] = TypeUtility.convert_value(col[i])
return descriptor
def init_id_index_map(self):
if not self.is_parent:
return
self.id_index_map = {}
for descriptor in self.descriptors:
id = descriptor[ID_COLUMN_NAME]
self.id_index_map[id] = self.descriptors.index(descriptor)
def merge_child_table(self, table):
self.add_child_descriptor_list(table.name)
for descriptor in table.descriptors:
parent_id = descriptor[PARENT_COLUMN_NAME]
parent_idx = self.id_index_map[parent_id]
parent_descriptor = self.descriptors[parent_idx]
parent_descriptor[table.name].append(descriptor)
def add_child_descriptor_list(self, name):
for descriptor in self.descriptors:
descriptor[name] = []
def remove_parent_column(self):
for descriptor in self.descriptors:
del descriptor[PARENT_COLUMN_NAME]
def save_to_json(self, pretty_print, export_path):
if pretty_print:
string = json.dumps(self.descriptors, ensure_ascii=False, indent=4)
else:
string = json.dumps(self.descriptors, ensure_ascii=False)
with codecs.open(export_path + self.name + '.json', 'w', 'utf-8') as f:
f.write(string)
class Converter:
def __init__(self, pretty_print, export_path):
self.pretty_print = pretty_print
self.export_path = export_path
def convert(self, filename):
print(filename + ' convert starting...')
sheets = Converter.get_sheets(filename)
root_table, tables = Converter.get_tables(sheets)
Converter.post_process(tables)
root_table.save_to_json(self.pretty_print, self.export_path)
print(filename + ' convert is Done\n')
@staticmethod
def get_sheets(filename):
path = os.path.abspath(filename)
workbook = xlrd.open_workbook(path)
return workbook.sheets()
@staticmethod
def get_tables(sheets):
tables = {}
root_tables = []
for sheet in sheets:
if sheet.name[0] == IGNORE_WILDCARD:
continue
table = Table(sheet)
tables[table.name] = table
if table.is_root:
root_tables.append(table)
if len(root_tables) == 1:
return root_tables[0], tables
else:
            raise Exception('There must be exactly one root table')
@staticmethod
def post_process(tables):
for name, table in tables.items():
if table.is_root:
continue
parent_table = tables[table.parent_name]
if not parent_table.is_parent:
                raise Exception('Parent table must have an id column')
parent_table.merge_child_table(table)
table.remove_parent_column()
current_version = sys.version_info
if current_version < REQUIRE_VERSION:
    raise Exception('[error] You need Python 3.5 or later')
json_path = sys.argv[1] if len(sys.argv) > 1 else './'
converter = Converter(True, JSON_PATH + json_path)
for path, dirs, files in os.walk(EXCEL_PATH):
for file in files:
        if file[0] == "~":
continue
if os.path.splitext(file)[1].lower() == '.xlsx':
converter.convert(EXCEL_PATH + file)
| true
| true
|
7903bb00451b5531a51de1222aa17b9837016a32
| 19,453
|
py
|
Python
|
test/functional/test_framework/test_framework.py
|
YayatEl/ideacoin
|
a85e2f217e2ae04f2f12d80d709e4bc689a0103c
|
[
"MIT"
] | 1
|
2017-08-21T09:30:30.000Z
|
2017-08-21T09:30:30.000Z
|
test/functional/test_framework/test_framework.py
|
YayatEl/ideacoin
|
a85e2f217e2ae04f2f12d80d709e4bc689a0103c
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_framework.py
|
YayatEl/ideacoin
|
a85e2f217e2ae04f2f12d80d709e4bc689a0103c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
initialize_datadir,
log_filename,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
BITCOIND_PROC_WAIT_TIMEOUT = 60
class BitcoinTestFramework(object):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the following methods:
- __init__()
- add_options()
- setup_chain()
- setup_network()
- run_test()
The main() method should not be overridden.
This class also contains various public and private helper methods."""
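    # A minimal subclass sketch (illustrative only; ``ExampleTest`` is a
    # hypothetical name, shown in comment form so this module is unchanged):
    #
    #     class ExampleTest(BitcoinTestFramework):
    #         def __init__(self):
    #             super().__init__()
    #             self.num_nodes = 2
    #             self.setup_clean_chain = True
    #         def run_test(self):
    #             self.nodes[0].generate(10)
    #             self.sync_all()
    #
    #     if __name__ == '__main__':
    #         ExampleTest().main()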
# Methods to override in subclass test scripts.
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def setup_network(self):
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
raise NotImplementedError
# Main function. This should not be overridden by the subclass test scripts.
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
# Set up temp directory and start logging
if self.options.tmpdir:
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
self.log.info("Note: bitcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
"""Start a bitcoind and return RPC connection to it"""
if extra_args is None:
extra_args = []
if binary is None:
binary = os.getenv("BITCOIND", "bitcoind")
node = TestNode(i, dirname, extra_args, rpchost, timewait, binary, stderr, self.mocktime, coverage_dir=self.options.coveragedir)
node.start()
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
return node
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Start multiple bitcoinds, return RPC connections to them"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
nodes = []
try:
for i in range(num_nodes):
nodes.append(TestNode(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir))
nodes[i].start()
for node in nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
return nodes
def stop_node(self, i):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node()
while not self.nodes[i].is_node_stopped():
time.sleep(0.1)
def stop_nodes(self):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
while not node.is_node_stopped():
time.sleep(0.1)
def assert_start_raises_init_error(self, i, dirname, extra_args=None, expected_msg=None):
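        """Attempt to start the node and assert that bitcoind exits with an initialization error, optionally checking stderr for expected_msg."""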
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, dirname, extra_args, stderr=log_stderr)
self.stop_node(i)
except Exception as e:
                assert 'bitcoind exited' in str(e) # node must have shut down
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
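        """Wait up to timeout seconds for the node's process to terminate."""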
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs messages at INFO level and higher, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was parsed as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(cachedir, i)
args = [os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.nodes[i].start()
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
            # Create a 200-block-long chain; each of the first 4 nodes
            # gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node" + str(i))
to_dir = os.path.join(test_dir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self, test_dir, num_nodes):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some bitcoind binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(
self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
| 41.389362
| 310
| 0.611114
|
from collections import deque
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
initialize_datadir,
log_filename,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
BITCOIND_PROC_WAIT_TIMEOUT = 60
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def setup_network(self):
self.setup_nodes()
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
def run_test(self):
raise NotImplementedError
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
# Set up temp directory and start logging
if self.options.tmpdir:
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
self.log.info("Note: bitcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Public helper methods. These can be accessed by the subclass test scripts.
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
if extra_args is None:
extra_args = []
if binary is None:
binary = os.getenv("BITCOIND", "bitcoind")
node = TestNode(i, dirname, extra_args, rpchost, timewait, binary, stderr, self.mocktime, coverage_dir=self.options.coveragedir)
node.start()
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
return node
def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
nodes = []
try:
for i in range(num_nodes):
nodes.append(TestNode(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir))
nodes[i].start()
for node in nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
return nodes
def stop_node(self, i):
self.nodes[i].stop_node()
while not self.nodes[i].is_node_stopped():
time.sleep(0.1)
def stop_nodes(self):
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
while not node.is_node_stopped():
time.sleep(0.1)
def assert_start_raises_init_error(self, i, dirname, extra_args=None, expected_msg=None):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, dirname, extra_args, stderr=log_stderr)
self.stop_node(i)
except Exception as e:
                assert 'bitcoind exited' in str(e) # node must have shut down
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs messages at INFO level and higher, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was parsed as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, test_dir, num_nodes, cachedir):
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(cachedir, "node" + str(i)))
for i in range(MAX_NODES):
datadir = initialize_datadir(cachedir, i)
args = [os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.nodes[i].start()
for node in self.nodes:
node.wait_for_rpc_connection()
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
sync_blocks(self.nodes)
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node" + str(i))
to_dir = os.path.join(test_dir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i)
def _initialize_chain_clean(self, test_dir, num_nodes):
for i in range(num_nodes):
initialize_datadir(test_dir, i)
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.nodes = self.start_nodes(
self.num_nodes, self.options.tmpdir, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
class SkipTest(Exception):
def __init__(self, message):
self.message = message
| true
| true
|
7903bb9042f99d0b5e4cb0907b46c9f0cc7a98e2
| 5,746
|
py
|
Python
|
gridworld_hallways/make_grid_mdp.py
|
andrewmw94/gandalf_2020_experiments
|
bc671d5c33f16f3388b661623a8663835e62d74c
|
[
"MIT"
] | null | null | null |
gridworld_hallways/make_grid_mdp.py
|
andrewmw94/gandalf_2020_experiments
|
bc671d5c33f16f3388b661623a8663835e62d74c
|
[
"MIT"
] | null | null | null |
gridworld_hallways/make_grid_mdp.py
|
andrewmw94/gandalf_2020_experiments
|
bc671d5c33f16f3388b661623a8663835e62d74c
|
[
"MIT"
] | null | null | null |
# MIT License
# Copyright (c) 2020 Andrew Wells
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
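# Generates a PRISM MDP model of a 10x10 gridworld with hallway obstacles.
# When print_LTL_game is True, an extra absorbing "done" state is added.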
print_LTL_game = True
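# Blocked (row, col) cells: the walls that form the grid's hallways.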
obstacle_cells = [[3,0], [3,1], [3,3], [3,4], [3,5], [3,6], [3,7], [3,8], [5,2], [5,3], [5,4], [5,5], [5,6], [5,7], [5,8], [5,9], [7,0], [7,1], [7,3], [7,4], [7,5], [7,6], [7,7], [7,8]]
num_rows = 10
num_cols = 10
probN = 0.69  # probability of moving forward, in the commanded direction
probE = 0.1   # drift to the right of the commanded direction
probW = 0.1   # drift to the left of the commanded direction
probB = 0.01  # slip backward
probS = 0.1   # stay in place (also absorbs the mass of blocked moves)
def rc2i_short(row, col):
    # Map (row, col) to a linear cell index; -1 if out of bounds.
    if row < num_rows and row >= 0 and col < num_cols and col >= 0:
        return row * num_rows + col
    return -1
def rc2i(row, col):
    # Like rc2i_short, but also returns -1 for obstacle cells.
    cell = -1
    if row < num_rows and row >= 0 and col < num_cols and col >= 0:
        cell = row * num_rows + col
    for c in obstacle_cells:
        if cell == rc2i_short(c[0], c[1]):
            return -1
    return cell
def printNorth(row, col):
    # PRISM command for moving north from (row, col). Probability mass of
    # moves blocked by a wall or the grid edge is folded into staying put.
    extraProb = 0
    line = "[] x={} -> ".format(rc2i(row, col))
    if rc2i(row-1, col) == -1:
        extraProb += probN
    else:
        line += " {}:(x'={}) +".format(probN, rc2i(row-1, col))
    if rc2i(row+1, col) == -1:
        extraProb += probB
    else:
        line += " {}:(x'={}) +".format(probB, rc2i(row+1, col))
    if rc2i(row, col+1) == -1:
        extraProb += probE
    else:
        line += " {}:(x'={}) +".format(probE, rc2i(row, col+1))
    if rc2i(row, col-1) == -1:
        extraProb += probW
    else:
        line += " {}:(x'={}) +".format(probW, rc2i(row, col-1))
    print(line + " {}:(x'={});".format(probS+extraProb, rc2i(row, col)))
def printSouth(row, col):
    # As printNorth, but the commanded direction is south.
    extraProb = 0
    line = "[] x={} -> ".format(rc2i(row, col))
    if rc2i(row-1, col) == -1:
        extraProb += probB
    else:
        line += " {}:(x'={}) +".format(probB, rc2i(row-1, col))
    if rc2i(row+1, col) == -1:
        extraProb += probN
    else:
        line += " {}:(x'={}) +".format(probN, rc2i(row+1, col))
    if rc2i(row, col+1) == -1:
        extraProb += probW
    else:
        line += " {}:(x'={}) +".format(probW, rc2i(row, col+1))
    if rc2i(row, col-1) == -1:
        extraProb += probE
    else:
        line += " {}:(x'={}) +".format(probE, rc2i(row, col-1))
    print(line + " {}:(x'={});".format(probS+extraProb, rc2i(row, col)))
def printEast(row, col):
    # As printNorth, but the commanded direction is east.
    extraProb = 0
    line = "[] x={} -> ".format(rc2i(row, col))
    if rc2i(row-1, col) == -1:
        extraProb += probW
    else:
        line += " {}:(x'={}) +".format(probW, rc2i(row-1, col))
    if rc2i(row+1, col) == -1:
        extraProb += probE
    else:
        line += " {}:(x'={}) +".format(probE, rc2i(row+1, col))
    if rc2i(row, col+1) == -1:
        extraProb += probN
    else:
        line += " {}:(x'={}) +".format(probN, rc2i(row, col+1))
    if rc2i(row, col-1) == -1:
        extraProb += probB
    else:
        line += " {}:(x'={}) +".format(probB, rc2i(row, col-1))
    print(line + " {}:(x'={});".format(probS+extraProb, rc2i(row, col)))
def printWest(row, col):
    # As printNorth, but the commanded direction is west.
    extraProb = 0
    line = "[] x={} -> ".format(rc2i(row, col))
    if rc2i(row-1, col) == -1:
        extraProb += probE
    else:
        line += " {}:(x'={}) +".format(probE, rc2i(row-1, col))
    if rc2i(row+1, col) == -1:
        extraProb += probW
    else:
        line += " {}:(x'={}) +".format(probW, rc2i(row+1, col))
    if rc2i(row, col+1) == -1:
        extraProb += probB
    else:
        line += " {}:(x'={}) +".format(probB, rc2i(row, col+1))
    if rc2i(row, col-1) == -1:
        extraProb += probN
    else:
        line += " {}:(x'={}) +".format(probN, rc2i(row, col-1))
    print(line + " {}:(x'={});".format(probS+extraProb, rc2i(row, col)))
print("mdp")
print("")
print("module M1")
print("")
if print_LTL_game:
print(" x : [0..{}] init 0;".format(num_rows*num_cols))
else:
print(" x : [0..{}] init 0;".format(num_rows*num_cols-1))
# Print the four direction commands for every grid cell.
for i in range (num_rows):
for j in range (num_cols):
##Moving north
printNorth(i,j)
printSouth(i,j)
printEast(i,j)
printWest(i,j)
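# For the LTL game, add transitions from every cell into an absorbing "done" state.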
if print_LTL_game:
print("")
for i in range (num_rows*num_cols):
print("[] x={} -> 1:(x'={});".format(i, num_rows*num_cols))
print("[] x={} -> 1:(x'={});".format(num_rows*num_cols, num_rows*num_cols))
print("")
print("endmodule")
print("")
print("// labels")
print("label \"initial\" = (x=0);")
print("label \"loca\" = (x=26);")
print("label \"locb\" = (x=85);")
print("label \"locc\" = (x=16);")
print("label \"locd\" = (x=7);")
print("label \"loce\" = (x=45);")
print("label \"locf\" = (x=91);")
print("label \"locg\" = (x=41);")
print("label \"loch\" = (x=67);")
print("label \"loci\" = (x=20);")
print("label \"zbad\" = (x=2);")
print("label \"done\" = (x={});".format(num_rows*num_cols))
| 32.647727
| 185
| 0.540724
|
print_LTL_game = True
obstacle_cells = [[3,0], [3,1], [3,3], [3,4], [3,5], [3,6], [3,7], [3,8], [5,2], [5,3], [5,4], [5,5], [5,6], [5,7], [5,8], [5,9], [7,0], [7,1], [7,3], [7,4], [7,5], [7,6], [7,7], [7,8]]
num_rows = 10
num_cols = 10
probN = 0.69
probE = 0.1
probW = 0.1
probB = 0.01
probS = 0.1
def rc2i_short(row, col):
if row < num_rows and row >= 0 and col < num_cols and col >= 0:
return row * num_rows + col
return -1
def rc2i(row, col):
cell = -1
if row < num_rows and row >= 0 and col < num_cols and col >= 0:
cell = row * num_rows + col
for c in obstacle_cells:
if cell == rc2i_short(c[0], c[1]):
return -1
return cell
def printNorth(row, col):
    extraProb = 0
    line = "[] x={} -> ".format(rc2i(row, col))
    if rc2i(row-1, col) == -1:
        extraProb += probN
    else:
        line += " {}:(x'={}) +".format(probN, rc2i(row-1, col))
    if rc2i(row+1, col) == -1:
        extraProb += probB
    else:
        line += " {}:(x'={}) +".format(probB, rc2i(row+1, col))
    if rc2i(row, col+1) == -1:
        extraProb += probE
    else:
        line += " {}:(x'={}) +".format(probE, rc2i(row, col+1))
    if rc2i(row, col-1) == -1:
        extraProb += probW
    else:
        line += " {}:(x'={}) +".format(probW, rc2i(row, col-1))
    print(line + " {}:(x'={});".format(probS+extraProb, rc2i(row, col)))
def printSouth(row, col):
    extraProb = 0
    line = "[] x={} -> ".format(rc2i(row, col))
    if rc2i(row-1, col) == -1:
        extraProb += probB
    else:
        line += " {}:(x'={}) +".format(probB, rc2i(row-1, col))
    if rc2i(row+1, col) == -1:
        extraProb += probN
    else:
        line += " {}:(x'={}) +".format(probN, rc2i(row+1, col))
    if rc2i(row, col+1) == -1:
        extraProb += probW
    else:
        line += " {}:(x'={}) +".format(probW, rc2i(row, col+1))
    if rc2i(row, col-1) == -1:
        extraProb += probE
    else:
        line += " {}:(x'={}) +".format(probE, rc2i(row, col-1))
    print(line + " {}:(x'={});".format(probS+extraProb, rc2i(row, col)))
def printEast(row, col):
    extraProb = 0
    line = "[] x={} -> ".format(rc2i(row, col))
    if rc2i(row-1, col) == -1:
        extraProb += probW
    else:
        line += " {}:(x'={}) +".format(probW, rc2i(row-1, col))
    if rc2i(row+1, col) == -1:
        extraProb += probE
    else:
        line += " {}:(x'={}) +".format(probE, rc2i(row+1, col))
    if rc2i(row, col+1) == -1:
        extraProb += probN
    else:
        line += " {}:(x'={}) +".format(probN, rc2i(row, col+1))
    if rc2i(row, col-1) == -1:
        extraProb += probB
    else:
        line += " {}:(x'={}) +".format(probB, rc2i(row, col-1))
    print(line + " {}:(x'={});".format(probS+extraProb, rc2i(row, col)))
def printWest(row, col):
    extraProb = 0
    line = "[] x={} -> ".format(rc2i(row, col))
    if rc2i(row-1, col) == -1:
        extraProb += probE
    else:
        line += " {}:(x'={}) +".format(probE, rc2i(row-1, col))
    if rc2i(row+1, col) == -1:
        extraProb += probW
    else:
        line += " {}:(x'={}) +".format(probW, rc2i(row+1, col))
    if rc2i(row, col+1) == -1:
        extraProb += probB
    else:
        line += " {}:(x'={}) +".format(probB, rc2i(row, col+1))
    if rc2i(row, col-1) == -1:
        extraProb += probN
    else:
        line += " {}:(x'={}) +".format(probN, rc2i(row, col-1))
    print(line + " {}:(x'={});".format(probS+extraProb, rc2i(row, col)))
print("mdp")
print("")
print("module M1")
print("")
if print_LTL_game:
print(" x : [0..{}] init 0;".format(num_rows*num_cols))
else:
print(" x : [0..{}] init 0;".format(num_rows*num_cols-1))
for i in range (num_rows):
for j in range (num_cols):
        printNorth(i,j)
printSouth(i,j)
printEast(i,j)
printWest(i,j)
if print_LTL_game:
print("")
for i in range (num_rows*num_cols):
print("[] x={} -> 1:(x'={});".format(i, num_rows*num_cols))
print("[] x={} -> 1:(x'={});".format(num_rows*num_cols, num_rows*num_cols))
print("")
print("endmodule")
print("")
print("// labels")
print("label \"initial\" = (x=0);")
print("label \"loca\" = (x=26);")
print("label \"locb\" = (x=85);")
print("label \"locc\" = (x=16);")
print("label \"locd\" = (x=7);")
print("label \"loce\" = (x=45);")
print("label \"locf\" = (x=91);")
print("label \"locg\" = (x=41);")
print("label \"loch\" = (x=67);")
print("label \"loci\" = (x=20);")
print("label \"zbad\" = (x=2);")
print("label \"done\" = (x={});".format(num_rows*num_cols))
| true
| true
|
7903bc29f8b2ffc3d00958b1e270a12c5e6daea1
| 1,951
|
py
|
Python
|
examples.py
|
Reveal-Energy-Services/orchid-python-api
|
21ed6058009f6b8793050a934238d2858a7fa0c9
|
[
"Apache-2.0"
] | null | null | null |
examples.py
|
Reveal-Energy-Services/orchid-python-api
|
21ed6058009f6b8793050a934238d2858a7fa0c9
|
[
"Apache-2.0"
] | 28
|
2020-08-14T14:08:43.000Z
|
2022-02-07T14:11:38.000Z
|
examples.py
|
Reveal-Energy-Services/orchid-python-api
|
21ed6058009f6b8793050a934238d2858a7fa0c9
|
[
"Apache-2.0"
] | 1
|
2021-12-01T21:20:07.000Z
|
2021-12-01T21:20:07.000Z
|
#
# This file is part of Orchid and related technologies.
#
# Copyright (c) 2017-2021 Reveal Energy Services. All Rights Reserved.
#
# LEGAL NOTICE:
# Orchid contains trade secrets and otherwise confidential information
# owned by Reveal Energy Services. Access to and use of this information is
# strictly limited and controlled by the Company. This file may not be copied,
# distributed, or otherwise disclosed outside of the Company's facilities
# except under appropriate precautions to maintain the confidentiality hereof,
# and may not be used in any way not expressly authorized by the Company.
#
import pathlib
def _stem_names():
"""Returns the sequence of example stem names."""
example_stems = ['completion_analysis', 'plot_time_series', 'plot_trajectories',
'plot_treatment', 'search_data_frames', 'volume_2_first_response']
return example_stems
def notebook_names():
"""Returns the sequence of example notebook names."""
result = [str(pathlib.Path(s).with_suffix('.ipynb')) for s in _stem_names()]
return result
def ordered_script_names():
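    """Returns the sequence of example script names in a fixed, explicit order."""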
script_name_pairs = [
('plot_trajectories.py', 0),
('plot_treatment.py', 1),
('plot_time_series.py', 2),
('completion_analysis.py', 3),
('volume_2_first_response.py', 4),
('search_data_frames.py', 5),
]
ordered_pairs = sorted(script_name_pairs, key=lambda op: op[1])
ordered_names = [op[0] for op in ordered_pairs]
difference = set(script_names()).difference(set(ordered_names))
    assert len(difference) == 0, f'Ordered set {ordered_names}' \
                                 f' differs from set {script_names()}' \
                                 f' by {difference}.'
return ordered_names
def script_names():
"""Returns the sequence of example script names."""
result = [str(pathlib.Path(s).with_suffix('.py')) for s in _stem_names()]
return result
| 36.811321
| 87
| 0.678626
|
# except under appropriate precautions to maintain the confidentiality hereof,
# and may not be used in any way not expressly authorized by the Company.
#
import pathlib
def _stem_names():
example_stems = ['completion_analysis', 'plot_time_series', 'plot_trajectories',
'plot_treatment', 'search_data_frames', 'volume_2_first_response']
return example_stems
def notebook_names():
result = [str(pathlib.Path(s).with_suffix('.ipynb')) for s in _stem_names()]
return result
def ordered_script_names():
script_name_pairs = [
('plot_trajectories.py', 0),
('plot_treatment.py', 1),
('plot_time_series.py', 2),
('completion_analysis.py', 3),
('volume_2_first_response.py', 4),
('search_data_frames.py', 5),
]
ordered_pairs = sorted(script_name_pairs, key=lambda op: op[1])
ordered_names = [op[0] for op in ordered_pairs]
difference = set(script_names()).difference(set(ordered_names))
    assert len(difference) == 0, f'Ordered set {ordered_names}' \
                                 f' differs from set {script_names()}' \
                                 f' by {difference}.'
return ordered_names
def script_names():
result = [str(pathlib.Path(s).with_suffix('.py')) for s in _stem_names()]
return result
| true
| true
|
7903bccfa7e81d8f324f520fdfa7701cddc8a79b
| 479
|
py
|
Python
|
nlu/components/embeddings/sentence_bert/BertSentenceEmbedding.py
|
milyiyo/nlu
|
d209ed11c6a84639c268f08435552248391c5573
|
[
"Apache-2.0"
] | 480
|
2020-08-24T02:36:40.000Z
|
2022-03-30T08:09:43.000Z
|
nlu/components/embeddings/sentence_bert/BertSentenceEmbedding.py
|
milyiyo/nlu
|
d209ed11c6a84639c268f08435552248391c5573
|
[
"Apache-2.0"
] | 28
|
2020-09-26T18:55:43.000Z
|
2022-03-26T01:05:45.000Z
|
nlu/components/embeddings/sentence_bert/BertSentenceEmbedding.py
|
milyiyo/nlu
|
d209ed11c6a84639c268f08435552248391c5573
|
[
"Apache-2.0"
] | 76
|
2020-09-25T22:55:12.000Z
|
2022-03-17T20:25:52.000Z
|
from sparknlp.annotator import *
class BertSentence:
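    """Convenience factory for Spark NLP BertSentenceEmbeddings annotators."""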
@staticmethod
def get_default_model():
return BertSentenceEmbeddings.pretrained() \
.setInputCols("sentence") \
.setOutputCol("sentence_embeddings")
@staticmethod
def get_pretrained_model(name, language, bucket=None):
        return BertSentenceEmbeddings.pretrained(name, language, bucket) \
.setInputCols('sentence') \
.setOutputCol("sentence_embeddings")
| 25.210526
| 72
| 0.686848
|
from sparknlp.annotator import *
class BertSentence:
@staticmethod
def get_default_model():
return BertSentenceEmbeddings.pretrained() \
.setInputCols("sentence") \
.setOutputCol("sentence_embeddings")
@staticmethod
def get_pretrained_model(name, language, bucket=None):
        return BertSentenceEmbeddings.pretrained(name, language, bucket) \
.setInputCols('sentence') \
.setOutputCol("sentence_embeddings")
| true
| true
|
7903bed3fb4151a3c8f3f57d8f2eb3e36e26bf1e
| 13,834
|
py
|
Python
|
tests/h/activity/bucketing_test.py
|
y3g0r/h
|
a057144956fe25e669aeba5d0f0eb38f9dc09566
|
[
"BSD-2-Clause"
] | null | null | null |
tests/h/activity/bucketing_test.py
|
y3g0r/h
|
a057144956fe25e669aeba5d0f0eb38f9dc09566
|
[
"BSD-2-Clause"
] | null | null | null |
tests/h/activity/bucketing_test.py
|
y3g0r/h
|
a057144956fe25e669aeba5d0f0eb38f9dc09566
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from unittest.mock import Mock
import pytest
from h.activity import bucketing
from tests.common import factories
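# Fixed reference time; the utcnow fixture patches bucketing to return it.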
UTCNOW = datetime.datetime(year=1970, month=2, day=21, hour=19, minute=30)
FIVE_MINS_AGO = UTCNOW - datetime.timedelta(minutes=5)
YESTERDAY = UTCNOW - datetime.timedelta(days=1)
THIRD_MARCH_1968 = datetime.datetime(year=1968, month=3, day=3)
FIFTH_NOVEMBER_1969 = datetime.datetime(year=1969, month=11, day=5)
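# Equality helper: compares equal to any timeframe with the same label and document buckets.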
class timeframe_with: # noqa: N801
def __init__(self, label, document_buckets):
self.label = label
self.document_buckets = document_buckets
def __eq__(self, timeframe):
return (
self.label == timeframe.label
and self.document_buckets == timeframe.document_buckets
)
def __repr__(self):
return '{class_} "{label}" with {n} document buckets'.format(
class_=self.__class__, label=self.label, n=len(self.document_buckets)
)
@pytest.mark.usefixtures("factories")
class TestDocumentBucket:
def test_init_sets_the_document_title(self, db_session, document):
title_meta = factories.DocumentMeta(
type="title", value=["The Document Title"], document=document
)
document.title = "The Document Title"
db_session.add(title_meta)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.title == "The Document Title"
def test_init_uses_the_document_web_uri(self, db_session, document):
document.web_uri = "http://example.com"
bucket = bucketing.DocumentBucket(document)
assert bucket.uri == "http://example.com"
def test_init_sets_None_uri_when_no_http_or_https_can_be_found(
self, db_session, document
):
document.web_uri = None
bucket = bucketing.DocumentBucket(document)
assert bucket.uri is None
def test_init_sets_the_domain_from_the_extracted_uri(self, db_session, document):
document.web_uri = "https://www.example.com/foobar.html"
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "www.example.com"
def test_init_sets_domain_to_local_file_when_no_uri_is_set(
self, db_session, document
):
docuri_pdf = factories.DocumentURI(
uri="urn:x-pdf:fingerprint", document=document
)
db_session.add(docuri_pdf)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "Local file"
def test_annotations_count_returns_count_of_annotations(self, db_session, document):
bucket = bucketing.DocumentBucket(document)
for _ in range(7):
annotation = factories.Annotation()
bucket.append(annotation)
assert bucket.annotations_count == 7
def test_append_appends_the_annotation(self, document):
bucket = bucketing.DocumentBucket(document)
annotations = []
for _ in range(7):
annotation = factories.Annotation()
annotations.append(annotation)
bucket.append(annotation)
assert bucket.annotations == annotations
def test_append_adds_unique_annotation_tag_to_bucket(self, document):
ann_1 = factories.Annotation(tags=["foo", "bar"])
ann_2 = factories.Annotation(tags=["foo", "baz"])
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
assert bucket.tags == {"foo", "bar", "baz"}
def test_append_adds_unique_annotation_user_to_bucket(self, document):
ann_1 = factories.Annotation(userid="luke")
ann_2 = factories.Annotation(userid="alice")
ann_3 = factories.Annotation(userid="luke")
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
bucket.append(ann_3)
assert bucket.users == {"luke", "alice"}
def test_eq(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
for _ in range(5):
annotation = factories.Annotation()
bucket_1.append(annotation)
bucket_2.append(annotation)
assert bucket_1 == bucket_2
def test_eq_annotations_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.annotations = [1, 2, 3]
bucket_2.annotations = [2, 3, 4]
assert not bucket_1 == bucket_2
def test_eq_tags_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.tags.update(["foo", "bar"])
bucket_2.tags.update(["foo", "baz"])
assert not bucket_1 == bucket_2
def test_eq_users_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.users.update(["alice", "luke"])
bucket_2.users.update(["luke", "paula"])
assert not bucket_1 == bucket_2
def test_eq_uri_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.uri = "http://example.com"
bucket_2.uri = "http://example.org"
assert not bucket_1 == bucket_2
def test_eq_domain_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.domain = "example.com"
bucket_2.domain = "example.org"
assert not bucket_1 == bucket_2
def test_eq_title_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.title = "First Title"
bucket_2.title = "Second Title"
assert not bucket_1 == bucket_2
def test_incontext_link_returns_link_to_first_annotation(self, document, patch):
incontext_link = patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
ann = factories.Annotation()
bucket.append(ann)
request = Mock()
assert bucket.incontext_link(request) == incontext_link.return_value
def test_incontext_link_returns_none_if_bucket_empty(self, document, patch):
patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
request = Mock()
assert bucket.incontext_link(request) is None
@pytest.fixture
def document(self, db_session):
document = factories.Document()
db_session.add(document)
db_session.flush()
return document
@pytest.mark.usefixtures("factories", "utcnow")
class TestBucket:
def test_no_annotations(self):
assert bucketing.bucket([]) == []
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_one_annotation(self, annotation_datetime, timeframe_label):
annotation = factories.Annotation(
document=factories.Document(), updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation.document: bucketing.DocumentBucket(
annotation.document, [annotation]
)
},
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_multiple_annotations_of_one_document_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
results = [
factories.Annotation(
target_uri="https://example.com", updated=annotation_datetime
)
for _ in range(3)
]
timeframes = bucketing.bucket(results)
document = results[0].document
assert timeframes == [
timeframe_with(
timeframe_label, {document: bucketing.DocumentBucket(document, results)}
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(YESTERDAY, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_annotations_of_multiple_documents_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
annotation_1 = factories.Annotation(
target_uri="http://example1.com", updated=annotation_datetime
)
annotation_2 = factories.Annotation(
target_uri="http://example2.com", updated=annotation_datetime
)
annotation_3 = factories.Annotation(
target_uri="http://example3.com", updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation_1, annotation_2, annotation_3])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation_1.document: bucketing.DocumentBucket(
annotation_1.document, [annotation_1]
),
annotation_2.document: bucketing.DocumentBucket(
annotation_2.document, [annotation_2]
),
annotation_3.document: bucketing.DocumentBucket(
annotation_3.document, [annotation_3]
),
},
)
]
def test_annotations_of_the_same_document_in_different_timeframes(self):
results = [
factories.Annotation(),
factories.Annotation(updated=FIFTH_NOVEMBER_1969),
factories.Annotation(updated=THIRD_MARCH_1968),
]
document = factories.Document()
for annotation in results:
annotation.document = document
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(document, [results[2]])
assert timeframes == [
timeframe_with("Last 7 days", {document: expected_bucket_1}),
timeframe_with("Nov 1969", {document: expected_bucket_2}),
timeframe_with("Mar 1968", {document: expected_bucket_3}),
]
def test_recent_and_older_annotations_together(self):
results = [
factories.Annotation(target_uri="http://example1.com"),
factories.Annotation(target_uri="http://example2.com"),
factories.Annotation(target_uri="http://example3.com"),
factories.Annotation(
target_uri="http://example4.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example5.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example6.com", updated=THIRD_MARCH_1968
),
]
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(results[0].document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(results[1].document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(results[2].document, [results[2]])
expected_bucket_4 = bucketing.DocumentBucket(results[3].document, [results[3]])
expected_bucket_5 = bucketing.DocumentBucket(results[4].document, [results[4]])
expected_bucket_6 = bucketing.DocumentBucket(results[5].document, [results[5]])
assert timeframes == [
timeframe_with(
"Last 7 days",
{
results[0].document: expected_bucket_1,
results[1].document: expected_bucket_2,
results[2].document: expected_bucket_3,
},
),
timeframe_with(
"Mar 1968",
{
results[3].document: expected_bucket_4,
results[4].document: expected_bucket_5,
results[5].document: expected_bucket_6,
},
),
]
def test_annotations_from_different_days_in_same_month(self):
"""
        Test bucketing multiple annotations from different days of the same month.
Annotations from different days of the same month should go into one
bucket.
"""
one_month_ago = UTCNOW - datetime.timedelta(days=30)
annotations = [
factories.Annotation(
target_uri="http://example.com", updated=one_month_ago
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=1),
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=2),
),
]
timeframes = bucketing.bucket(annotations)
expected_bucket = bucketing.DocumentBucket(annotations[0].document)
expected_bucket.update(annotations)
assert timeframes == [
timeframe_with("Jan 1970", {annotations[0].document: expected_bucket})
]
@pytest.fixture
def utcnow(self, patch):
utcnow = patch("h.activity.bucketing.utcnow")
utcnow.return_value = UTCNOW
return utcnow
| 34.671679
| 88
| 0.630331
|
import datetime
from unittest.mock import Mock
import pytest
from h.activity import bucketing
from tests.common import factories
UTCNOW = datetime.datetime(year=1970, month=2, day=21, hour=19, minute=30)
FIVE_MINS_AGO = UTCNOW - datetime.timedelta(minutes=5)
YESTERDAY = UTCNOW - datetime.timedelta(days=1)
THIRD_MARCH_1968 = datetime.datetime(year=1968, month=3, day=3)
FIFTH_NOVEMBER_1969 = datetime.datetime(year=1969, month=11, day=5)
class timeframe_with:
def __init__(self, label, document_buckets):
self.label = label
self.document_buckets = document_buckets
def __eq__(self, timeframe):
return (
self.label == timeframe.label
and self.document_buckets == timeframe.document_buckets
)
def __repr__(self):
return '{class_} "{label}" with {n} document buckets'.format(
class_=self.__class__, label=self.label, n=len(self.document_buckets)
)
@pytest.mark.usefixtures("factories")
class TestDocumentBucket:
def test_init_sets_the_document_title(self, db_session, document):
title_meta = factories.DocumentMeta(
type="title", value=["The Document Title"], document=document
)
document.title = "The Document Title"
db_session.add(title_meta)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.title == "The Document Title"
def test_init_uses_the_document_web_uri(self, db_session, document):
document.web_uri = "http://example.com"
bucket = bucketing.DocumentBucket(document)
assert bucket.uri == "http://example.com"
def test_init_sets_None_uri_when_no_http_or_https_can_be_found(
self, db_session, document
):
document.web_uri = None
bucket = bucketing.DocumentBucket(document)
assert bucket.uri is None
def test_init_sets_the_domain_from_the_extracted_uri(self, db_session, document):
document.web_uri = "https://www.example.com/foobar.html"
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "www.example.com"
def test_init_sets_domain_to_local_file_when_no_uri_is_set(
self, db_session, document
):
docuri_pdf = factories.DocumentURI(
uri="urn:x-pdf:fingerprint", document=document
)
db_session.add(docuri_pdf)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "Local file"
def test_annotations_count_returns_count_of_annotations(self, db_session, document):
bucket = bucketing.DocumentBucket(document)
for _ in range(7):
annotation = factories.Annotation()
bucket.append(annotation)
assert bucket.annotations_count == 7
def test_append_appends_the_annotation(self, document):
bucket = bucketing.DocumentBucket(document)
annotations = []
for _ in range(7):
annotation = factories.Annotation()
annotations.append(annotation)
bucket.append(annotation)
assert bucket.annotations == annotations
def test_append_adds_unique_annotation_tag_to_bucket(self, document):
ann_1 = factories.Annotation(tags=["foo", "bar"])
ann_2 = factories.Annotation(tags=["foo", "baz"])
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
assert bucket.tags == {"foo", "bar", "baz"}
def test_append_adds_unique_annotation_user_to_bucket(self, document):
ann_1 = factories.Annotation(userid="luke")
ann_2 = factories.Annotation(userid="alice")
ann_3 = factories.Annotation(userid="luke")
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
bucket.append(ann_3)
assert bucket.users == {"luke", "alice"}
def test_eq(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
for _ in range(5):
annotation = factories.Annotation()
bucket_1.append(annotation)
bucket_2.append(annotation)
assert bucket_1 == bucket_2
def test_eq_annotations_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.annotations = [1, 2, 3]
bucket_2.annotations = [2, 3, 4]
assert not bucket_1 == bucket_2
def test_eq_tags_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.tags.update(["foo", "bar"])
bucket_2.tags.update(["foo", "baz"])
assert not bucket_1 == bucket_2
def test_eq_users_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.users.update(["alice", "luke"])
bucket_2.users.update(["luke", "paula"])
assert not bucket_1 == bucket_2
def test_eq_uri_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.uri = "http://example.com"
bucket_2.uri = "http://example.org"
assert not bucket_1 == bucket_2
def test_eq_domain_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.domain = "example.com"
bucket_2.domain = "example.org"
assert not bucket_1 == bucket_2
def test_eq_title_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.title = "First Title"
bucket_2.title = "Second Title"
assert not bucket_1 == bucket_2
def test_incontext_link_returns_link_to_first_annotation(self, document, patch):
incontext_link = patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
ann = factories.Annotation()
bucket.append(ann)
request = Mock()
assert bucket.incontext_link(request) == incontext_link.return_value
def test_incontext_link_returns_none_if_bucket_empty(self, document, patch):
patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
request = Mock()
assert bucket.incontext_link(request) is None
@pytest.fixture
def document(self, db_session):
document = factories.Document()
db_session.add(document)
db_session.flush()
return document
@pytest.mark.usefixtures("factories", "utcnow")
class TestBucket:
def test_no_annotations(self):
assert bucketing.bucket([]) == []
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_one_annotation(self, annotation_datetime, timeframe_label):
annotation = factories.Annotation(
document=factories.Document(), updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation.document: bucketing.DocumentBucket(
annotation.document, [annotation]
)
},
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_multiple_annotations_of_one_document_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
results = [
factories.Annotation(
target_uri="https://example.com", updated=annotation_datetime
)
for _ in range(3)
]
timeframes = bucketing.bucket(results)
document = results[0].document
assert timeframes == [
timeframe_with(
timeframe_label, {document: bucketing.DocumentBucket(document, results)}
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(YESTERDAY, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_annotations_of_multiple_documents_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
annotation_1 = factories.Annotation(
target_uri="http://example1.com", updated=annotation_datetime
)
annotation_2 = factories.Annotation(
target_uri="http://example2.com", updated=annotation_datetime
)
annotation_3 = factories.Annotation(
target_uri="http://example3.com", updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation_1, annotation_2, annotation_3])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation_1.document: bucketing.DocumentBucket(
annotation_1.document, [annotation_1]
),
annotation_2.document: bucketing.DocumentBucket(
annotation_2.document, [annotation_2]
),
annotation_3.document: bucketing.DocumentBucket(
annotation_3.document, [annotation_3]
),
},
)
]
def test_annotations_of_the_same_document_in_different_timeframes(self):
results = [
factories.Annotation(),
factories.Annotation(updated=FIFTH_NOVEMBER_1969),
factories.Annotation(updated=THIRD_MARCH_1968),
]
document = factories.Document()
for annotation in results:
annotation.document = document
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(document, [results[2]])
assert timeframes == [
timeframe_with("Last 7 days", {document: expected_bucket_1}),
timeframe_with("Nov 1969", {document: expected_bucket_2}),
timeframe_with("Mar 1968", {document: expected_bucket_3}),
]
def test_recent_and_older_annotations_together(self):
results = [
factories.Annotation(target_uri="http://example1.com"),
factories.Annotation(target_uri="http://example2.com"),
factories.Annotation(target_uri="http://example3.com"),
factories.Annotation(
target_uri="http://example4.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example5.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example6.com", updated=THIRD_MARCH_1968
),
]
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(results[0].document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(results[1].document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(results[2].document, [results[2]])
expected_bucket_4 = bucketing.DocumentBucket(results[3].document, [results[3]])
expected_bucket_5 = bucketing.DocumentBucket(results[4].document, [results[4]])
expected_bucket_6 = bucketing.DocumentBucket(results[5].document, [results[5]])
assert timeframes == [
timeframe_with(
"Last 7 days",
{
results[0].document: expected_bucket_1,
results[1].document: expected_bucket_2,
results[2].document: expected_bucket_3,
},
),
timeframe_with(
"Mar 1968",
{
results[3].document: expected_bucket_4,
results[4].document: expected_bucket_5,
results[5].document: expected_bucket_6,
},
),
]
def test_annotations_from_different_days_in_same_month(self):
one_month_ago = UTCNOW - datetime.timedelta(days=30)
annotations = [
factories.Annotation(
target_uri="http://example.com", updated=one_month_ago
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=1),
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=2),
),
]
timeframes = bucketing.bucket(annotations)
expected_bucket = bucketing.DocumentBucket(annotations[0].document)
expected_bucket.update(annotations)
assert timeframes == [
timeframe_with("Jan 1970", {annotations[0].document: expected_bucket})
]
@pytest.fixture
def utcnow(self, patch):
utcnow = patch("h.activity.bucketing.utcnow")
utcnow.return_value = UTCNOW
return utcnow
| true
| true
|
7903bfa7c4c3f4d03ee77c8660e180b2e796228e
| 1,136
|
py
|
Python
|
CS-383_Cloud-Computing_2020-Spring/association-rule-mining/attempt2.py
|
CraftingGamerTom/wsu-computer-science
|
aa40fc95a84ac95535284048f6f572def1375f7d
|
[
"MIT"
] | null | null | null |
CS-383_Cloud-Computing_2020-Spring/association-rule-mining/attempt2.py
|
CraftingGamerTom/wsu-computer-science
|
aa40fc95a84ac95535284048f6f572def1375f7d
|
[
"MIT"
] | null | null | null |
CS-383_Cloud-Computing_2020-Spring/association-rule-mining/attempt2.py
|
CraftingGamerTom/wsu-computer-science
|
aa40fc95a84ac95535284048f6f572def1375f7d
|
[
"MIT"
] | null | null | null |
# ----------------------------------------------------------------
# ---------- ASSOCIATION RULE MINING : NOTABLE ATTEMPT 2 ----------
# ----------------------------------------------------------------
# ------------------ DAILY DATASET --------------------
association_rules = apriori(dailyRankedCrimes.values, min_support=0.02, min_confidence=0.95, min_lift=3, min_length=4, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
# 17
# ------------------ YEARLY DATASET --------------------
association_rules = apriori(yearlyRankedCrimes.values, min_support=0.02, min_confidence=0.95, min_lift=3, min_length=4, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
# 2
# Not Many Rules, playing with the settings:
association_rules = apriori(yearlyRankedCrimes.values, min_support=0.0045, min_confidence=0.95, min_lift=1, min_length=2, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
# 41
# This is better
# I printed the Rules using the common commands (found in common-commands.py)
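# A minimal sketch of one way to print the rules, assuming the `apyori`
# library (whose results are RelationRecord tuples with `items`, `support`
# and `ordered_statistics`); the actual commands in common-commands.py may differ.
for record in association_results:
    rule = " -> ".join(str(item) for item in record.items)
    stat = record.ordered_statistics[0]
    print("Rule: " + rule)
    print("Support: %.4f" % record.support)
    print("Confidence: %.4f, Lift: %.4f" % (stat.confidence, stat.lift))
    print("=" * 40)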
| 37.866667
| 142
| 0.634683
|
association_rules = apriori(dailyRankedCrimes.values, min_support=0.02, min_confidence=0.95, min_lift=3, min_length=4, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
association_rules = apriori(yearlyRankedCrimes.values, min_support=0.02, min_confidence=0.95, min_lift=3, min_length=4, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
association_rules = apriori(yearlyRankedCrimes.values, min_support=0.0045, min_confidence=0.95, min_lift=1, min_length=2, use_colnames = True)
association_results = list(association_rules)
print(len(association_results))
| true
| true
|
7903c03f0f3f65b772e643c060f92266b40db3fb
| 3,090
|
py
|
Python
|
app/recipe_app/tests/test_ingredients_api.py
|
oyekanmiayo/recipe-app-api
|
cc7cab599e8fab164acbb9958784b2cce4aced09
|
[
"MIT"
] | null | null | null |
app/recipe_app/tests/test_ingredients_api.py
|
oyekanmiayo/recipe-app-api
|
cc7cab599e8fab164acbb9958784b2cce4aced09
|
[
"MIT"
] | null | null | null |
app/recipe_app/tests/test_ingredients_api.py
|
oyekanmiayo/recipe-app-api
|
cc7cab599e8fab164acbb9958784b2cce4aced09
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe_app.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe_app:ingredient-list')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicIngredientsAPITests(TestCase):
"""Test endpoints that don't require authentication."""
def setUp(self):
self.client = APIClient()
def test_login_required_to_view_ingredients(self):
"""Test that authentication is needed to view the ingredients."""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsAPITests(TestCase):
"""Test endpoints that require authentication."""
def setUp(self):
self.client = APIClient()
self.user = create_user(
fname='Test',
lname='User',
email='test@gmail.com',
password='testpass'
)
self.client.force_authenticate(user=self.user)
def test_retrieve_ingredients_is_successful(self):
"""Test retrieve ingredients"""
Ingredient.objects.create(user=self.user, name='Carrot')
Ingredient.objects.create(user=self.user, name='Lemon')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_retrieved_ingredients_limited_to_user(self):
"""Tests that only the user's ingredients are retrieved"""
user2 = create_user(
fname='Test2',
lname='User2',
email='test2@gmail.com',
password='test2pass'
)
Ingredient.objects.create(user=user2, name='Carrot')
ingredient = Ingredient.objects.create(user=self.user, name='Lemon')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_is_successful(self):
"""Test that creating a new ingredient is successful."""
payload = {
'name': 'Lemon'
}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_with_invalid_details_invalid(self):
"""Test that ingredients is not created with invalid details"""
payload = {
'name': ''
}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| 29.428571
| 76
| 0.667961
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe_app.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe_app:ingredient-list')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicIngredientsAPITests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required_to_view_ingredients(self):
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsAPITests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = create_user(
fname='Test',
lname='User',
email='test@gmail.com',
password='testpass'
)
self.client.force_authenticate(user=self.user)
def test_retrieve_ingredients_is_successful(self):
Ingredient.objects.create(user=self.user, name='Carrot')
Ingredient.objects.create(user=self.user, name='Lemon')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_retrieved_ingredients_limited_to_user(self):
user2 = create_user(
fname='Test2',
lname='User2',
email='test2@gmail.com',
password='test2pass'
)
Ingredient.objects.create(user=user2, name='Carrot')
ingredient = Ingredient.objects.create(user=self.user, name='Lemon')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_is_successful(self):
payload = {
'name': 'Lemon'
}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_with_invalid_details_invalid(self):
payload = {
'name': ''
}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| true
| true
|
7903c0da71807c70134ec55f471896dec576ea31
| 6,346
|
py
|
Python
|
users-api/routes.py
|
pwegrzyn/wegpat-gmail.com
|
3fce9d0bc32c1d6be94cd664eb13a69255975fd0
|
[
"MIT"
] | null | null | null |
users-api/routes.py
|
pwegrzyn/wegpat-gmail.com
|
3fce9d0bc32c1d6be94cd664eb13a69255975fd0
|
[
"MIT"
] | 5
|
2021-09-02T12:22:08.000Z
|
2022-03-02T09:15:20.000Z
|
users-api/routes.py
|
pwegrzyn/wegpat-gmail.com
|
3fce9d0bc32c1d6be94cd664eb13a69255975fd0
|
[
"MIT"
] | null | null | null |
from flask import jsonify, request
from flask_restx import Resource, reqparse, fields, marshal_with
import requests
import redis
import os
import logging
import time
import datetime
import json
from app import api, db
from models import User
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
user_fields = {
"id": fields.Integer,
"uuid": fields.Integer,
"status": fields.String
}
@api.route("/users")
class Users(Resource):
users_post_reqparser = reqparse.RequestParser()
users_post_reqparser.add_argument(
"uuid",
type=int,
location="json",
required=True,
help="Please provide the UUID -",
)
@api.expect(users_post_reqparser)
@marshal_with(user_fields)
def post(self):
args = self.users_post_reqparser.parse_args()
new_user = User(uuid=args["uuid"])
db.session.add(new_user)
db.session.flush()
db.session.commit()
return new_user, 201
@marshal_with(user_fields)
def get(self):
# TODO: some authorization would be nice
return User.query.all(), 200
@api.route("/usersByUUID/<int:uuid>")
class UserByUUID(Resource):
@marshal_with(user_fields)
def get(self, uuid):
user = User.query.filter_by(uuid=uuid).first()
if user is None:
            # we should really return 404 here and not do POST magic
            # in a GET request, but this makes some things much easier...
user = User(uuid=uuid)
db.session.add(user)
db.session.flush()
db.session.commit()
return user, 200
@api.route("/users/<int:id>")
class SingleUser(Resource):
user_put_reqparser = reqparse.RequestParser()
user_put_reqparser.add_argument(
"status",
type=str,
location="json",
required=True,
help="Please provide the status value (healty, covid_positive, covid_negative) -",
)
@marshal_with(user_fields)
def get(self, id):
found_user = User.query.filter_by(uuid=id).first()
if found_user is None:
api.abort(404, "User does not exist.")
return found_user, 200
@marshal_with(user_fields)
def put(self, id):
user = User.query.filter_by(uuid=id).first()
if user is None:
api.abort(404, "User does not exist.")
args = self.user_put_reqparser.parse_args()
user.status = args["status"]
db.session.commit()
if args["status"] == "covid_positive":
self._submit_filtering_jobs(user.uuid)
return user, 200
def delete(self, id):
user = User.query.filter_by(uuid=id).first()
if user is None:
api.abort(404, "User does not exist.")
db.session.delete(user)
db.session.commit()
return {"msg": "ok"}, 200
@staticmethod
def _chunks(l, n):
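        """Split l into successive chunks of at most n items, e.g.
        _chunks([1, 2, 3, 4, 5], 2) yields [1, 2], [3, 4], [5]."""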
n = max(1, n)
return (l[i : i + n] for i in range(0, len(l), n))
def _submit_filtering_jobs(self, uuid):
"""
Here we create the task and put it on the job queue.
"""
# Some optimization: we make a request to the Location API
        # to get all the geohash prefixes for all locations the diagnosed patient
# has visited in the last two weeks
two_weeks_ago = datetime.date.today() - datetime.timedelta(14)
params = {
"from": int(two_weeks_ago.strftime("%s")),
"to": int(time.time()),
"unit": "seconds",
}
# TODO: Do not hardcode URIs or ports, use env vars instead
# TODO: Do not assume that the period is always 2 weeks long, make it parametrized
location_api_resp = requests.get(
f"http://location-api:5000/geohashRegionsForUser/{uuid}", params=params
)
if location_api_resp.status_code != 200:
logger.warning(location_api_resp)
api.abort(
500, "There was a problem when requesting data from the Location API"
)
visited_regions_geohash_prefixes = location_api_resp.json()
logger.info(f"Visited Regions for diagonzed patient: {str(visited_regions_geohash_prefixes)}")
location_api_resp_users = requests.get("http://location-api:5000/users")
if location_api_resp_users.status_code != 200:
logger.warning(location_api_resp_users)
api.abort(
500, "There was a problem when requesting data from the Location API"
)
all_influx_users = list(set(location_api_resp_users.json()) - {str(uuid)})
logger.info(f"All Influx users without diagnozed patient: {str(all_influx_users)}")
        # So, we should split the whole job into roughly N*k jobs, where N is the
# number of workers listening on the queue, so that each worker will get roughly
# k tasks to execute (so we can achieve nice load balancing).
# Let's assume for simplicity now that we have always 3 workers and k = 1.
n_workers = 3
task_size = len(all_influx_users) // n_workers
all_influx_users_partitioned = SingleUser._chunks(all_influx_users, task_size)
        # Create the tasks and put them onto the Redis queue
redis_instance = redis.Redis(
host=os.getenv("REDIS_HOST", "queue"),
port=os.getenv("REDIS_PORT", 6379),
db=os.getenv("REDIS_DB_ID", 0),
)
redis_namespace = os.getenv("REDIS_NAMESPACE", "worker")
redis_collection = os.getenv("REDIS_COLLECTION", "jobs")
logger.info(f"Connected with Redis ({redis_namespace}:{redis_collection})")
for idx, users_batch in enumerate(all_influx_users_partitioned):
job = {
"type": "scan_users_locations",
"args": {
"user_id_range": users_batch,
"diagnozed_uuid": uuid,
"diagnozed_visited_regions": visited_regions_geohash_prefixes,
},
}
redis_instance.rpush(
f"{redis_namespace}:{redis_collection}", json.dumps(job)
)
logger.info(
f"Successfully pushed job #{idx} to the Job Queue:\n{json.dumps(job)}"
)
logger.info("Finished pushing jobs to the Queue.")
| 34.302703
| 102
| 0.615663
|
from flask import jsonify, request
from flask_restx import Resource, reqparse, fields, marshal_with
import requests
import redis
import os
import logging
import time
import datetime
import json
from app import api, db
from models import User
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
user_fields = {
"id": fields.Integer,
"uuid": fields.Integer,
"status": fields.String
}
@api.route("/users")
class Users(Resource):
users_post_reqparser = reqparse.RequestParser()
users_post_reqparser.add_argument(
"uuid",
type=int,
location="json",
required=True,
help="Please provide the UUID -",
)
@api.expect(users_post_reqparser)
@marshal_with(user_fields)
def post(self):
args = self.users_post_reqparser.parse_args()
new_user = User(uuid=args["uuid"])
db.session.add(new_user)
db.session.flush()
db.session.commit()
return new_user, 201
@marshal_with(user_fields)
def get(self):
return User.query.all(), 200
@api.route("/usersByUUID/<int:uuid>")
class UserByUUID(Resource):
@marshal_with(user_fields)
def get(self, uuid):
user = User.query.filter_by(uuid=uuid).first()
if user is None:
            # in a GET request, but this makes some things much easier...
user = User(uuid=uuid)
db.session.add(user)
db.session.flush()
db.session.commit()
return user, 200
@api.route("/users/<int:id>")
class SingleUser(Resource):
user_put_reqparser = reqparse.RequestParser()
user_put_reqparser.add_argument(
"status",
type=str,
location="json",
required=True,
help="Please provide the status value (healty, covid_positive, covid_negative) -",
)
@marshal_with(user_fields)
def get(self, id):
found_user = User.query.filter_by(uuid=id).first()
if found_user is None:
api.abort(404, "User does not exist.")
return found_user, 200
@marshal_with(user_fields)
def put(self, id):
user = User.query.filter_by(uuid=id).first()
if user is None:
api.abort(404, "User does not exist.")
args = self.user_put_reqparser.parse_args()
user.status = args["status"]
db.session.commit()
if args["status"] == "covid_positive":
self._submit_filtering_jobs(user.uuid)
return user, 200
def delete(self, id):
user = User.query.filter_by(uuid=id).first()
if user is None:
api.abort(404, "User does not exist.")
db.session.delete(user)
db.session.commit()
return {"msg": "ok"}, 200
@staticmethod
def _chunks(l, n):
n = max(1, n)
return (l[i : i + n] for i in range(0, len(l), n))
def _submit_filtering_jobs(self, uuid):
# Some optimization: we make a request to the Location API
        # to get all the geohash prefixes for all locations the diagnosed patient
# has visited in the last two weeks
two_weeks_ago = datetime.date.today() - datetime.timedelta(14)
params = {
"from": int(two_weeks_ago.strftime("%s")),
"to": int(time.time()),
"unit": "seconds",
}
# TODO: Do not hardcode URIs or ports, use env vars instead
# TODO: Do not assume that the period is always 2 weeks long, make it parametrized
location_api_resp = requests.get(
f"http://location-api:5000/geohashRegionsForUser/{uuid}", params=params
)
if location_api_resp.status_code != 200:
logger.warning(location_api_resp)
api.abort(
500, "There was a problem when requesting data from the Location API"
)
visited_regions_geohash_prefixes = location_api_resp.json()
logger.info(f"Visited Regions for diagonzed patient: {str(visited_regions_geohash_prefixes)}")
location_api_resp_users = requests.get("http://location-api:5000/users")
if location_api_resp_users.status_code != 200:
logger.warning(location_api_resp_users)
api.abort(
500, "There was a problem when requesting data from the Location API"
)
all_influx_users = list(set(location_api_resp_users.json()) - {str(uuid)})
logger.info(f"All Influx users without diagnozed patient: {str(all_influx_users)}")
        # So, we should split the whole job into roughly N*k jobs, where N is the
# number of workers listening on the queue, so that each worker will get roughly
# k tasks to execute (so we can achieve nice load balancing).
# Let's assume for simplicity now that we have always 3 workers and k = 1.
n_workers = 3
task_size = len(all_influx_users) // n_workers
all_influx_users_partitioned = SingleUser._chunks(all_influx_users, task_size)
redis_instance = redis.Redis(
host=os.getenv("REDIS_HOST", "queue"),
port=os.getenv("REDIS_PORT", 6379),
db=os.getenv("REDIS_DB_ID", 0),
)
redis_namespace = os.getenv("REDIS_NAMESPACE", "worker")
redis_collection = os.getenv("REDIS_COLLECTION", "jobs")
logger.info(f"Connected with Redis ({redis_namespace}:{redis_collection})")
for idx, users_batch in enumerate(all_influx_users_partitioned):
job = {
"type": "scan_users_locations",
"args": {
"user_id_range": users_batch,
"diagnozed_uuid": uuid,
"diagnozed_visited_regions": visited_regions_geohash_prefixes,
},
}
redis_instance.rpush(
f"{redis_namespace}:{redis_collection}", json.dumps(job)
)
logger.info(
f"Successfully pushed job #{idx} to the Job Queue:\n{json.dumps(job)}"
)
logger.info("Finished pushing jobs to the Queue.")
| true
| true
|
7903c1c3dfad8328cf22af691edd0e774639b5f7
| 4,860
|
py
|
Python
|
pypy/objspace/std/test/test_iterobject.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
pypy/objspace/std/test/test_iterobject.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
pypy/objspace/std/test/test_iterobject.py
|
m4sterchain/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
[
"Apache-2.0",
"OpenSSL"
] | 30
|
2018-08-20T03:16:34.000Z
|
2022-01-12T17:39:22.000Z
|
from pypy.objspace.std.iterobject import W_SeqIterObject
from pypy.interpreter.error import OperationError
class TestW_IterObject:
def body3(self, w_iter):
w = self.space.wrap
assert self.space.eq_w(self.space.next(w_iter), w(5))
assert self.space.eq_w(self.space.next(w_iter), w(3))
assert self.space.eq_w(self.space.next(w_iter), w(99))
self.body0(w_iter)
def body0(self, w_iter):
raises(OperationError, self.space.next, w_iter)
raises(OperationError, self.space.next, w_iter)
def test_iter(self):
w = self.space.wrap
w_tuple = self.space.newtuple([w(5), w(3), w(99)])
w_iter = W_SeqIterObject(w_tuple)
self.body3(w_iter)
def test_iter_builtin(self):
w = self.space.wrap
w_tuple = self.space.newtuple([w(5), w(3), w(99)])
w_iter = self.space.iter(w_tuple)
self.body3(w_iter)
def test_emptyiter(self):
w_list = self.space.newlist([])
w_iter = W_SeqIterObject(w_list)
self.body0(w_iter)
def test_emptyiter_builtin(self):
w_list = self.space.newlist([])
w_iter = self.space.iter(w_list)
self.body0(w_iter)
class AppTestW_IterObjectApp:
def test_user_iter(self):
class C(object):
def next(self):
raise StopIteration
def __iter__(self):
return self
assert list(C()) == []
def test_iter_getitem(self):
class C(object):
def __getitem__(self, i):
return range(2)[i]
assert list(C()) == range(2)
def test_iter_fail_noseq(self):
class C(object):
pass
raises(TypeError,
iter,
C())
class AppTest_IterObject(object):
def test_no_len_on_list_iter(self):
iterable = [1,2,3,4]
raises(TypeError, len, iter(iterable))
def test_no_len_on_tuple_iter(self):
iterable = (1,2,3,4)
raises(TypeError, len, iter(iterable))
def test_no_len_on_deque_iter(self):
from _collections import deque
iterable = deque([1,2,3,4])
raises(TypeError, len, iter(iterable))
def test_no_len_on_reversed(self):
it = reversed("foobar")
raises(TypeError, len, it)
def test_no_len_on_reversed_seqiter(self):
# this one fails on CPython. See http://bugs.python.org/issue3689
it = reversed([5,6,7])
raises(TypeError, len, it)
def test_no_len_on_UserList_iter_reversed(self):
import sys, _abcoll
sys.modules['collections'] = _abcoll
from UserList import UserList
iterable = UserList([1,2,3,4])
raises(TypeError, len, iter(iterable))
raises(TypeError, len, reversed(iterable))
del sys.modules['collections']
def test_reversed_frees_empty(self):
import gc
for typ in list, unicode:
free = [False]
class U(typ):
def __del__(self):
free[0] = True
r = reversed(U())
raises(StopIteration, next, r)
gc.collect(); gc.collect(); gc.collect()
assert free[0]
def test_reversed_mutation(self):
n = 10
d = range(n)
it = reversed(d)
next(it)
next(it)
assert it.__length_hint__() == n-2
d.append(n)
assert it.__length_hint__() == n-2
d[1:] = []
assert it.__length_hint__() == 0
assert list(it) == []
d.extend(xrange(20))
assert it.__length_hint__() == 0
def test_no_len_on_set_iter(self):
iterable = set([1,2,3,4])
raises(TypeError, len, iter(iterable))
def test_no_len_on_xrange(self):
iterable = xrange(10)
raises(TypeError, len, iter(iterable))
def test_contains(self):
logger = []
class Foo(object):
def __init__(self, value, name=None):
self.value = value
self.name = name or value
def __repr__(self):
return '<Foo %s>' % self.name
def __eq__(self, other):
logger.append((self, other))
return self.value == other.value
foo1, foo2, foo3 = Foo(1), Foo(2), Foo(3)
foo42 = Foo(42)
foo_list = [foo1, foo2, foo3]
foo42 in (x for x in foo_list)
logger_copy = logger[:] # prevent re-evaluation during pytest error print
assert logger_copy == [(foo42, foo1), (foo42, foo2), (foo42, foo3)]
del logger[:]
foo2_bis = Foo(2, '2 bis')
foo2_bis in (x for x in foo_list)
logger_copy = logger[:] # prevent re-evaluation during pytest error print
assert logger_copy == [(foo2_bis, foo1), (foo2_bis, foo2)]
| 31.153846
| 82
| 0.571193
|
from pypy.objspace.std.iterobject import W_SeqIterObject
from pypy.interpreter.error import OperationError
class TestW_IterObject:
def body3(self, w_iter):
w = self.space.wrap
assert self.space.eq_w(self.space.next(w_iter), w(5))
assert self.space.eq_w(self.space.next(w_iter), w(3))
assert self.space.eq_w(self.space.next(w_iter), w(99))
self.body0(w_iter)
def body0(self, w_iter):
raises(OperationError, self.space.next, w_iter)
raises(OperationError, self.space.next, w_iter)
def test_iter(self):
w = self.space.wrap
w_tuple = self.space.newtuple([w(5), w(3), w(99)])
w_iter = W_SeqIterObject(w_tuple)
self.body3(w_iter)
def test_iter_builtin(self):
w = self.space.wrap
w_tuple = self.space.newtuple([w(5), w(3), w(99)])
w_iter = self.space.iter(w_tuple)
self.body3(w_iter)
def test_emptyiter(self):
w_list = self.space.newlist([])
w_iter = W_SeqIterObject(w_list)
self.body0(w_iter)
def test_emptyiter_builtin(self):
w_list = self.space.newlist([])
w_iter = self.space.iter(w_list)
self.body0(w_iter)
class AppTestW_IterObjectApp:
def test_user_iter(self):
class C(object):
def next(self):
raise StopIteration
def __iter__(self):
return self
assert list(C()) == []
def test_iter_getitem(self):
class C(object):
def __getitem__(self, i):
return range(2)[i]
assert list(C()) == range(2)
def test_iter_fail_noseq(self):
class C(object):
pass
raises(TypeError,
iter,
C())
class AppTest_IterObject(object):
def test_no_len_on_list_iter(self):
iterable = [1,2,3,4]
raises(TypeError, len, iter(iterable))
def test_no_len_on_tuple_iter(self):
iterable = (1,2,3,4)
raises(TypeError, len, iter(iterable))
def test_no_len_on_deque_iter(self):
from _collections import deque
iterable = deque([1,2,3,4])
raises(TypeError, len, iter(iterable))
def test_no_len_on_reversed(self):
it = reversed("foobar")
raises(TypeError, len, it)
def test_no_len_on_reversed_seqiter(self):
it = reversed([5,6,7])
raises(TypeError, len, it)
def test_no_len_on_UserList_iter_reversed(self):
import sys, _abcoll
sys.modules['collections'] = _abcoll
from UserList import UserList
iterable = UserList([1,2,3,4])
raises(TypeError, len, iter(iterable))
raises(TypeError, len, reversed(iterable))
del sys.modules['collections']
def test_reversed_frees_empty(self):
import gc
for typ in list, unicode:
free = [False]
class U(typ):
def __del__(self):
free[0] = True
r = reversed(U())
raises(StopIteration, next, r)
gc.collect(); gc.collect(); gc.collect()
assert free[0]
def test_reversed_mutation(self):
n = 10
d = range(n)
it = reversed(d)
next(it)
next(it)
assert it.__length_hint__() == n-2
d.append(n)
assert it.__length_hint__() == n-2
d[1:] = []
assert it.__length_hint__() == 0
assert list(it) == []
d.extend(xrange(20))
assert it.__length_hint__() == 0
def test_no_len_on_set_iter(self):
iterable = set([1,2,3,4])
raises(TypeError, len, iter(iterable))
def test_no_len_on_xrange(self):
iterable = xrange(10)
raises(TypeError, len, iter(iterable))
def test_contains(self):
logger = []
class Foo(object):
def __init__(self, value, name=None):
self.value = value
self.name = name or value
def __repr__(self):
return '<Foo %s>' % self.name
def __eq__(self, other):
logger.append((self, other))
return self.value == other.value
foo1, foo2, foo3 = Foo(1), Foo(2), Foo(3)
foo42 = Foo(42)
foo_list = [foo1, foo2, foo3]
foo42 in (x for x in foo_list)
logger_copy = logger[:]
assert logger_copy == [(foo42, foo1), (foo42, foo2), (foo42, foo3)]
del logger[:]
foo2_bis = Foo(2, '2 bis')
foo2_bis in (x for x in foo_list)
logger_copy = logger[:]
assert logger_copy == [(foo2_bis, foo1), (foo2_bis, foo2)]
| true
| true
|
7903c21a6228db7f3d68950da6c9441484d5fd1f
| 1,109
|
py
|
Python
|
python/NanoVG/__init__.py
|
mariotaku/nanovg
|
57ebea95f90a98ad72e6cf188785c0c4f857933c
|
[
"Zlib"
] | 28
|
2021-05-06T03:21:57.000Z
|
2022-03-31T18:28:52.000Z
|
python/NanoVG/__init__.py
|
mariotaku/nanovg
|
57ebea95f90a98ad72e6cf188785c0c4f857933c
|
[
"Zlib"
] | null | null | null |
python/NanoVG/__init__.py
|
mariotaku/nanovg
|
57ebea95f90a98ad72e6cf188785c0c4f857933c
|
[
"Zlib"
] | 6
|
2021-08-29T04:18:09.000Z
|
2022-02-10T13:52:21.000Z
|
from NanoVG.defs import *
from NanoVG.library import *
from NanoVG.api import *
__author__ = 'vaiorabbit'
__version__ = '1.1.0'
__license__ = 'zlib'
# Python-NanoVG : A Python bindings of NanoVG
# Copyright (c) 2017-2018 vaiorabbit
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
| 36.966667
| 80
| 0.744815
|
from NanoVG.defs import *
from NanoVG.library import *
from NanoVG.api import *
__author__ = 'vaiorabbit'
__version__ = '1.1.0'
__license__ = 'zlib'
| true
| true
|
7903c499261b86a4d0af4d39596ae363f1bf4d6c
| 6,942
|
py
|
Python
|
MxShop/extra_apps/DjangoUeditor/widgets.py
|
youshuad/django-vue-shop
|
dbede2301b10cb95ef30d0bbbbd594b240071fc1
|
[
"MIT"
] | 66
|
2019-05-13T11:45:14.000Z
|
2020-11-02T11:58:52.000Z
|
MxShop/extra_apps/DjangoUeditor/widgets.py
|
youshuad/django-vue-shop
|
dbede2301b10cb95ef30d0bbbbd594b240071fc1
|
[
"MIT"
] | 11
|
2020-12-21T05:21:33.000Z
|
2021-08-29T07:44:23.000Z
|
DjangoUeditor/widgets.py
|
jeeyshe/site
|
f136050635cac9cc0174387ea60249f5e26e45a3
|
[
"MIT"
] | 20
|
2019-12-30T06:23:17.000Z
|
2020-10-06T01:48:58.000Z
|
# coding:utf-8
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminTextareaWidget
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.http import urlencode
from . import settings as USettings
from .commands import *
from django.utils.six import string_types
# Normalize the given output path; the standard input format is "abc",
# without leading or trailing path separators.
# If the path argument is a function it is called; otherwise it may contain
# datetime format codes, used to generate renamed files such as file20121208.bmp.
def calc_path(OutputPath, instance=None):
if callable(OutputPath):
try:
OutputPath = OutputPath(instance)
except:
OutputPath = ""
else:
try:
import datetime
OutputPath = datetime.datetime.now().strftime(OutputPath)
except:
pass
return OutputPath
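# Illustrative examples (behavior as implemented above):
#   calc_path("upload/%Y%m%d")             -> e.g. "upload/20211208"
#   calc_path(lambda inst: "fixed/dir")    -> "fixed/dir"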
# width=600, height=300, toolbars="full", imagePath="", filePath="", upload_settings={},
# settings={},command=None,event_handler=None
class UEditorWidget(forms.Textarea):
def __init__(self, attrs=None):
params = attrs.copy()
width = params.pop("width")
height = params.pop("height")
toolbars = params.pop("toolbars", "full")
imagePath = params.pop("imagePath", "")
filePath = params.pop("filePath", "")
upload_settings = params.pop("upload_settings", {})
settings = params.pop("settings", {})
command = params.pop("command", None)
event_handler = params.pop("event_handler", None)
        # extension commands
self.command = command
self.event_handler = event_handler
        # upload paths
self.upload_settings = upload_settings.copy()
self.upload_settings.update({
"imagePathFormat": imagePath,
"filePathFormat": filePath
})
        # keep a copy of the original upload settings
self._upload_settings = self.upload_settings.copy()
self.recalc_path(None)
self.ueditor_settings = {
'toolbars': toolbars,
'initialFrameWidth': width,
'initialFrameHeight': height
}
        # Handle the toolbar settings below: translate mode names such as
        # "normal" and "mini" into toolbar configuration values
if toolbars == "full":
del self.ueditor_settings['toolbars']
elif isinstance(toolbars, string_types) and toolbars in USettings.TOOLBARS_SETTINGS:
self.ueditor_settings[
"toolbars"] = USettings.TOOLBARS_SETTINGS[toolbars]
else:
self.ueditor_settings["toolbars"] = toolbars
# raise ValueError('toolbars should be a string defined in DjangoUeditor.settings.TOOLBARS_SETTINGS, options are full(default), besttome, mini and normal!')
self.ueditor_settings.update(settings)
super(UEditorWidget, self).__init__(attrs)
def recalc_path(self, model_inst):
"""计算上传路径,允许是function"""
try:
uSettings = self.upload_settings
if 'filePathFormat' in self._upload_settings:
uSettings['filePathFormat'] = calc_path(
self._upload_settings['filePathFormat'], model_inst)
if 'imagePathFormat' in self._upload_settings:
uSettings['imagePathFormat'] = calc_path(
self._upload_settings['imagePathFormat'], model_inst)
if 'scrawlPathFormat' in self._upload_settings:
uSettings['scrawlPathFormat'] = calc_path(
self._upload_settings['scrawlPathFormat'], model_inst)
if 'videoPathFormat' in self._upload_settings:
uSettings['videoPathFormat'] = calc_path(
                    self._upload_settings['videoPathFormat'], model_inst)
if 'snapscreenPathFormat' in self._upload_settings:
uSettings['snapscreenPathFormat'] = calc_path(
self._upload_settings['snapscreenPathFormat'], model_inst)
if 'catcherPathFormat' in self._upload_settings:
uSettings['catcherPathFormat'] = calc_path(
self._upload_settings['catcherPathFormat'], model_inst)
if 'imageManagerListPath' in self._upload_settings:
uSettings['imageManagerListPath'] = calc_path(
self._upload_settings['imageManagerListPath'], model_inst)
if 'fileManagerListPath' in self._upload_settings:
uSettings['fileManagerListPath'] = calc_path(
self._upload_settings['fileManagerListPath'], model_inst)
            # Set defaults: when the scrawl, snapscreen, catcher and image
            # manager paths are not specified, they all default to imagePath
if uSettings['imagePathFormat'] != "":
default_path = uSettings['imagePathFormat']
uSettings['scrawlPathFormat'] = uSettings.get(
'scrawlPathFormat', default_path)
uSettings['videoPathFormat'] = uSettings.get(
'videoPathFormat', default_path)
uSettings['snapscreenPathFormat'] = uSettings.get(
'snapscreenPathFormat', default_path)
uSettings['catcherPathFormat'] = uSettings.get(
'catcherPathFormat', default_path)
uSettings['imageManagerListPath'] = uSettings.get(
'imageManagerListPath', default_path)
if uSettings['filePathFormat'] != "":
uSettings['fileManagerListPath'] = uSettings.get(
'fileManagerListPath', uSettings['filePathFormat'])
except:
pass
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ''
        # parameters passed to the template
editor_id = "id_%s" % name.replace("-", "_")
uSettings = {
"name": name,
"id": editor_id,
"value": value
}
        if self.command:
cmdjs = ""
if isinstance(self.command, list):
for cmd in self.command:
cmdjs = cmdjs + cmd.render(editor_id)
else:
cmdjs = self.command.render(editor_id)
uSettings["commands"] = cmdjs
uSettings["settings"] = self.ueditor_settings.copy()
uSettings["settings"].update({
"serverUrl": "/ueditor/controller/?%s" % urlencode(self._upload_settings)
})
        # set up event listeners
if self.event_handler:
uSettings["bindEvents"] = self.event_handler.render(editor_id)
context = {
'UEditor': uSettings,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MEDIA_URL': settings.MEDIA_URL,
'MEDIA_ROOT': settings.MEDIA_ROOT
}
return mark_safe(render_to_string('ueditor.html', context))
class Media:
js = ("ueditor/ueditor.config.js",
"ueditor/ueditor.all.min.js")
class AdminUEditorWidget(AdminTextareaWidget, UEditorWidget):
def __init__(self, **kwargs):
super(AdminUEditorWidget, self).__init__(**kwargs)
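# A minimal usage sketch (illustrative, not part of the original module);
# the form class, field name and upload paths below are assumptions.
class ExampleArticleForm(forms.Form):
    content = forms.CharField(widget=UEditorWidget(attrs={
        "width": 600,
        "height": 300,
        "toolbars": "mini",
        "imagePath": "article/img/%Y%m%d/",
        "filePath": "article/file/%Y%m%d/",
    }))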
| 39.220339
| 168
| 0.610631
|
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminTextareaWidget
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.http import urlencode
from . import settings as USettings
from .commands import *
from django.utils.six import string_types
def calc_path(OutputPath, instance=None):
if callable(OutputPath):
try:
OutputPath = OutputPath(instance)
except:
OutputPath = ""
else:
try:
import datetime
OutputPath = datetime.datetime.now().strftime(OutputPath)
except:
pass
return OutputPath
class UEditorWidget(forms.Textarea):
def __init__(self, attrs=None):
params = attrs.copy()
width = params.pop("width")
height = params.pop("height")
toolbars = params.pop("toolbars", "full")
imagePath = params.pop("imagePath", "")
filePath = params.pop("filePath", "")
upload_settings = params.pop("upload_settings", {})
settings = params.pop("settings", {})
command = params.pop("command", None)
event_handler = params.pop("event_handler", None)
self.command = command
self.event_handler = event_handler
self.upload_settings = upload_settings.copy()
self.upload_settings.update({
"imagePathFormat": imagePath,
"filePathFormat": filePath
})
self._upload_settings = self.upload_settings.copy()
self.recalc_path(None)
self.ueditor_settings = {
'toolbars': toolbars,
'initialFrameWidth': width,
'initialFrameHeight': height
}
if toolbars == "full":
del self.ueditor_settings['toolbars']
elif isinstance(toolbars, string_types) and toolbars in USettings.TOOLBARS_SETTINGS:
self.ueditor_settings[
"toolbars"] = USettings.TOOLBARS_SETTINGS[toolbars]
else:
self.ueditor_settings["toolbars"] = toolbars
self.ueditor_settings.update(settings)
super(UEditorWidget, self).__init__(attrs)
def recalc_path(self, model_inst):
try:
uSettings = self.upload_settings
if 'filePathFormat' in self._upload_settings:
uSettings['filePathFormat'] = calc_path(
self._upload_settings['filePathFormat'], model_inst)
if 'imagePathFormat' in self._upload_settings:
uSettings['imagePathFormat'] = calc_path(
self._upload_settings['imagePathFormat'], model_inst)
if 'scrawlPathFormat' in self._upload_settings:
uSettings['scrawlPathFormat'] = calc_path(
self._upload_settings['scrawlPathFormat'], model_inst)
if 'videoPathFormat' in self._upload_settings:
uSettings['videoPathFormat'] = calc_path(
                    self._upload_settings['videoPathFormat'], model_inst)
if 'snapscreenPathFormat' in self._upload_settings:
uSettings['snapscreenPathFormat'] = calc_path(
self._upload_settings['snapscreenPathFormat'], model_inst)
if 'catcherPathFormat' in self._upload_settings:
uSettings['catcherPathFormat'] = calc_path(
self._upload_settings['catcherPathFormat'], model_inst)
if 'imageManagerListPath' in self._upload_settings:
uSettings['imageManagerListPath'] = calc_path(
self._upload_settings['imageManagerListPath'], model_inst)
if 'fileManagerListPath' in self._upload_settings:
uSettings['fileManagerListPath'] = calc_path(
self._upload_settings['fileManagerListPath'], model_inst)
if uSettings['imagePathFormat'] != "":
default_path = uSettings['imagePathFormat']
uSettings['scrawlPathFormat'] = uSettings.get(
'scrawlPathFormat', default_path)
uSettings['videoPathFormat'] = uSettings.get(
'videoPathFormat', default_path)
uSettings['snapscreenPathFormat'] = uSettings.get(
'snapscreenPathFormat', default_path)
uSettings['catcherPathFormat'] = uSettings.get(
'catcherPathFormat', default_path)
uSettings['imageManagerListPath'] = uSettings.get(
'imageManagerListPath', default_path)
if uSettings['filePathFormat'] != "":
uSettings['fileManagerListPath'] = uSettings.get(
'fileManagerListPath', uSettings['filePathFormat'])
except:
pass
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ''
editor_id = "id_%s" % name.replace("-", "_")
uSettings = {
"name": name,
"id": editor_id,
"value": value
}
        if self.command:
cmdjs = ""
if isinstance(self.command, list):
for cmd in self.command:
cmdjs = cmdjs + cmd.render(editor_id)
else:
cmdjs = self.command.render(editor_id)
uSettings["commands"] = cmdjs
uSettings["settings"] = self.ueditor_settings.copy()
uSettings["settings"].update({
"serverUrl": "/ueditor/controller/?%s" % urlencode(self._upload_settings)
})
if self.event_handler:
uSettings["bindEvents"] = self.event_handler.render(editor_id)
context = {
'UEditor': uSettings,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MEDIA_URL': settings.MEDIA_URL,
'MEDIA_ROOT': settings.MEDIA_ROOT
}
return mark_safe(render_to_string('ueditor.html', context))
class Media:
js = ("ueditor/ueditor.config.js",
"ueditor/ueditor.all.min.js")
class AdminUEditorWidget(AdminTextareaWidget, UEditorWidget):
def __init__(self, **kwargs):
super(AdminUEditorWidget, self).__init__(**kwargs)
| true
| true
|
7903c61fd5f344cf014597410b78efd5e28345c5
| 516
|
py
|
Python
|
FirstChild/src/states/ground_save.py
|
KrystopherWeeton/RLBot
|
a77d408208f17f1b3678e8b92b8525e460e80dfa
|
[
"MIT"
] | null | null | null |
FirstChild/src/states/ground_save.py
|
KrystopherWeeton/RLBot
|
a77d408208f17f1b3678e8b92b8525e460e80dfa
|
[
"MIT"
] | 2
|
2021-06-08T22:07:03.000Z
|
2021-09-08T02:22:44.000Z
|
FirstChild/src/states/ground_save.py
|
KrystopherWeeton/RLBot
|
a77d408208f17f1b3678e8b92b8525e460e80dfa
|
[
"MIT"
] | null | null | null |
from rlbot.utils.structures.game_data_struct import Physics, GameTickPacket, PlayerInfo
from rlbot.agents.base_agent import SimpleControllerState, BaseAgent
from states.state import State
from util.packet import ParsedPacket
class GroundSave(State):
def score(self, parsed_packet: ParsedPacket, packet: GameTickPacket, agent: BaseAgent) -> float:
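        # stub: scoring for this state is not implemented yet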
return None
def get_output(self, parsed_packet: ParsedPacket, packet: GameTickPacket, agent: BaseAgent) -> SimpleControllerState:
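        # stub: control output for this state is not implemented yet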
return None
| 39.692308
| 121
| 0.792636
|
from rlbot.utils.structures.game_data_struct import Physics, GameTickPacket, PlayerInfo
from rlbot.agents.base_agent import SimpleControllerState, BaseAgent
from states.state import State
from util.packet import ParsedPacket
class GroundSave(State):
def score(self, parsed_packet: ParsedPacket, packet: GameTickPacket, agent: BaseAgent) -> float:
return None
def get_output(self, parsed_packet: ParsedPacket, packet: GameTickPacket, agent: BaseAgent) -> SimpleControllerState:
return None
| true
| true
|
7903c69f4a242244a521410241942fab32da7f37
| 9,481
|
py
|
Python
|
libsig/FZZ_unique_ring_signature.py
|
vs-uulm/libsig_pets
|
7eda22ea87faa6f949a154f9d6fd0f3814294bbf
|
[
"MIT"
] | null | null | null |
libsig/FZZ_unique_ring_signature.py
|
vs-uulm/libsig_pets
|
7eda22ea87faa6f949a154f9d6fd0f3814294bbf
|
[
"MIT"
] | null | null | null |
libsig/FZZ_unique_ring_signature.py
|
vs-uulm/libsig_pets
|
7eda22ea87faa6f949a154f9d6fd0f3814294bbf
|
[
"MIT"
] | null | null | null |
"""
This file implements the signature scheme from "Unique Ring Signatures: A Practical
Construction" by Matthew Franklin and Haibin Zhang
"""
import sys
import math
from random import randint
import hashlib
from libsig.AbstractRingSignatureScheme import AbstractRingSignatureScheme
#from AbstractRingSignatureScheme import AbstractRingSignatureScheme
#from libsig import primes
# ----------- HELPER FUNCTIONS -----------
# function to find divisors in order to find generators
def find_divisors(x):
"""
This is the "function to find divisors in order to find generators" module.
This DocTest verifies that the module is correctly calculating all divisors
of a number x.
>>> find_divisors(10)
[1, 2, 5, 10]
>>> find_divisors(112)
[1, 2, 4, 7, 8, 14, 16, 28, 56, 112]
"""
divisors = [ i for i in range(1,x+1) if x % i == 0]
return divisors
# function to find random generator of G
def find_generator(p):
'''
    Find a random generator of the multiplicative group mod p+1, which has order p.
    The order of any element of the group divides p.
    Step 1: Calculate all divisors of p.
    Step 2: For a random element e, test whether e to the power of each divisor is 1.
    If no proper divisor yields 1 but e to the power of p does, a generator is found.
'''
# Init
# Generate element which is tested for generator characteristics.
# Saved in list to prevent checking the same element twice.
testGen = randint(1,p)
listTested = []
listTested.append(testGen)
# Step 1.
divisors = find_divisors(p)
# try for all random numbers
# Caution: this leads to a truly random generator but is not very efficient.
while len(listTested) < p-1:
# only test each possible generator once
if testGen in listTested:
# Step 2.
for div in divisors:
                # use integer modular exponentiation; math.pow returns a float
                # that loses precision for large exponents
                testPotency = pow(testGen, div, p + 1)
                if testPotency == 1 and div != divisors[-1]:
                    # element has a smaller order than the group,
                    # therefore try next element
                    break
                elif testPotency == 1 and div == divisors[-1]:
# generator is found
return testGen
# try new element
testGen = randint(1,p)
listTested.append(testGen)
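# Example (illustrative): find_generator(58) returns a generator of the
# multiplicative group mod 59; 2 is one such generator, since its order is 58.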
def list_to_string(input_list):
'''
convert a list into a concatenated string of all its elements
'''
result = ''.join(map(str,input_list))
return result
# ----------- HELPER FUNCTIONS END -----------
class UniqueRingSignature(AbstractRingSignatureScheme):
'''
    | output: pp = (lambda, q, G, H1, H2) with,
| q is prime,
| g is generator of G,
| G is multiplicative Group with prime order q,
| H1 and H2 are two Hash functions H1: {0,1}* -> G,
| (as well as H2: {0,1}* -> Zq which is the same).
'''
    # set prime q (a safe prime: q = 2*29 + 1 with 29 prime)
#q = 53
q = 59
# find random generator of G
g = find_generator(q-1)
# hash functions with desired range and the usage of secure hashes
h1 = lambda x: int(hashlib.sha256(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q)
# this way to share the information should be improved
h2 = lambda x: int(hashlib.sha512(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q)
# list of public keys
Rp = list()
@staticmethod
def keygen(verbose=False):
#print("---- KeyGen Started ---- \n")
r = randint(1,UniqueRingSignature.q)
# x = g**r % q
x = pow(UniqueRingSignature.g, r,UniqueRingSignature.q)
        # y = g**x % q
y = pow(UniqueRingSignature.g, x, UniqueRingSignature.q)
if verbose == True:
print("KeyGen Config: public key y=" + str(y) + ", private key x=" + str(x) + "\n")
print("---- KeyGen Completed ---- \n")
        # Caution! keygen should NOT return the private key, but returning it here is needed to "play" through a whole sign/verify process
return x,y
@staticmethod
def ringsign(x, pubkey, message,verbose=False):
'''
input: x is the privkey from user i,
| all public keys: pubkeys,
| the message
output: (R,m, (H(mR)^xi), c1,t1,...,cn,tn),
| R: all the pubkeys concatenated,
| cj,tj: random number within Zq
'''
# calculate R = pk1,pk2,..,pkn
R = list_to_string(pubkey)
g = UniqueRingSignature.g
q = UniqueRingSignature.q
h1 = UniqueRingSignature.h1
h2 = UniqueRingSignature.h2
# message + pubkeys concatenated
mR = message + str(R)
C = list()
T = list()
A = list()
B = list()
ri = -1
# simulation step
#
for i in pubkey:
# Step 1:
#
a = 0
b = 0
c = 0
t = 0
if pow(g,x,q) != i:
c, t = randint(1,q), randint(1,q)
a = (pow(g, t) * pow(int(i), c)) % q
b = (pow(h1(mR), t) * pow(pow(h1(mR),x),c)) % q
else:
# Step 2:
#
ri = randint(1, q)
a = pow(g, ri, q)
b = pow(h1(mR), ri, q)
                # insert placeholders; real values are filled in below
c = -1
t = -1
A.append(a)
B.append(b)
C.append(c)
T.append(t)
# for end
# Step 3:
#
cj = 0
# list count from 0
ab = ''.join('{}{}'.format(*t) for t in zip(A,B))
usernr = 0
for i in range(len(pubkey)):
if pubkey[i] != (pow(g,x,q)):
cj = (cj + C[i]) % q
else:
usernr = i
ci = h2(message + R + ab) - (cj % (q-1))
# update ci, this was initialized with -1
C[usernr] = ci
ti = ((ri - (C[usernr]*x)) % (q-1))
if ti < 0:
ti = (q-1) + ti
# update ti, this was initialized with -1
T[usernr] = ti
# Step 4:
#
# concatenate ct: c1,t1,c2,t2,...,cn,tn
ct = ','.join('{},{}'.format(*t) for t in zip(C,T))
# returning result
result = R + ","+message+","+str(pow(h1(mR),x, q))+"," + ct
if verbose == True:
print("RingSign Result: "+ result)
print("---- RingSign Completed ---- \n")
return result
@staticmethod
def verify(R, message, signature,verbose=False):
'''
Input: the public keys R
| the message
| the signature computed with ringsign
Output: whether the message was signed by R or not
'''
g = UniqueRingSignature.g
q = UniqueRingSignature.q
h1 = UniqueRingSignature.h1
h2 = UniqueRingSignature.h2
# parse the signature
parsed = signature.split(",")
tt = int(parsed[2])
cjs = list()
tjs = list()
for i in range(0,int(((len(parsed))/2)-1)):
cjs.append(int(parsed[3+2*i]))
tjs.append(int(parsed[4+2*i]))
#print(str(cjs)+" "+str(tjs) + " "+ str(tt))
# check signature
# sum of all cjs
# =?
# self.pp['h2'](message + R + gyh1)
mR = list_to_string(R)
val1 = sum(cjs) % q
# for all users in R:
# g**tj * yj ** cj , h1(m||R)**tj * tt**cj
gyh1 = ""
for i in range(len(tjs)):
if tjs[i] < 0:
tjs[i] = (q-1) + tjs[i]
if cjs[i] < 0:
cjs[i] = (q-1) + cjs[i]
gy = (pow(g,(tjs[i]),q) * (pow((R[i]),(cjs[i]),q))) % q
h = (pow(int(h1(message + mR)), int(tjs[i])) * pow(tt,int(cjs[i]))) % q
gyh1 = gyh1 + str( gy) + str( h)
val2 = str(h2(message + list_to_string(R) + gyh1))
if int(val1) == int(val2):
if verbose == True:
print("Signature is valid!\n")
print("Common Result: " + str(val1))
print("---- Validation Completed ---- \n")
return True
else:
if verbose == True:
print("Signature is not valid!\n")
print(str(val1) + " != " + str(val2))
print("---- Validation Completed ---- \n")
return False
def local_test(verbose=True):
# verbose output
print(verbose)
    # user 1 will sign and validate later,
# therefore his private key is saved for test purposes
privKey1,pubkey = UniqueRingSignature.keygen(verbose)
UniqueRingSignature.Rp.append(pubkey)
a,pubkey = UniqueRingSignature.keygen(verbose)
UniqueRingSignature.Rp.append(pubkey)
    # ringsign(privkey, pubkeys, message)
ring = UniqueRingSignature.ringsign(privKey1, UniqueRingSignature.Rp, "asdf", verbose)
if verbose:
print("Result of Signature Validation:")
# verify(pubkeys, message, signature):
UniqueRingSignature.verify(UniqueRingSignature.Rp, "asdf", ring, verbose)
if __name__ == '__main__':
# doctest start
import doctest
doctest.testmod()
if len(sys.argv) > 1:
verbose = False
if sys.argv[1] == "True":
verbose = True
# run a local test
local_test(verbose)
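# Example invocation (as implemented above): `python FZZ_unique_ring_signature.py True`
# runs the doctests and a verbose sign/verify round trip with two ring members.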
| 30.194268
| 144
| 0.528214
|
import sys
import math
from random import randint
import hashlib
from libsig.AbstractRingSignatureScheme import AbstractRingSignatureScheme
def find_divisors(x):
divisors = [ i for i in range(1,x+1) if x % i == 0]
return divisors
def find_generator(p):
testGen = randint(1,p)
listTested = []
listTested.append(testGen)
divisors = find_divisors(p)
while len(listTested) < p-1:
if testGen in listTested:
for div in divisors:
                testPotency = pow(testGen, div, p + 1)
                if testPotency == 1 and div != divisors[-1]:
                    break
                elif testPotency == 1 and div == divisors[-1]:
return testGen
testGen = randint(1,p)
listTested.append(testGen)
def list_to_string(input_list):
result = ''.join(map(str,input_list))
return result
class UniqueRingSignature(AbstractRingSignatureScheme):
q = 59
g = find_generator(q-1)
h1 = lambda x: int(hashlib.sha256(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q)
h2 = lambda x: int(hashlib.sha512(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q)
Rp = list()
@staticmethod
def keygen(verbose=False):
r = randint(1,UniqueRingSignature.q)
x = pow(UniqueRingSignature.g, r,UniqueRingSignature.q)
y = pow(UniqueRingSignature.g, x, UniqueRingSignature.q)
if verbose == True:
print("KeyGen Config: public key y=" + str(y) + ", private key x=" + str(x) + "\n")
print("---- KeyGen Completed ---- \n")
return x,y
@staticmethod
def ringsign(x, pubkey, message,verbose=False):
R = list_to_string(pubkey)
g = UniqueRingSignature.g
q = UniqueRingSignature.q
h1 = UniqueRingSignature.h1
h2 = UniqueRingSignature.h2
mR = message + str(R)
C = list()
T = list()
A = list()
B = list()
ri = -1
for i in pubkey:
a = 0
b = 0
c = 0
t = 0
if pow(g,x,q) != i:
c, t = randint(1,q), randint(1,q)
a = (pow(g, t) * pow(int(i), c)) % q
b = (pow(h1(mR), t) * pow(pow(h1(mR),x),c)) % q
else:
ri = randint(1, q)
a = pow(g, ri, q)
b = pow(h1(mR), ri, q)
c = -1
t = -1
A.append(a)
B.append(b)
C.append(c)
T.append(t)
cj = 0
ab = ''.join('{}{}'.format(*t) for t in zip(A,B))
usernr = 0
for i in range(len(pubkey)):
if pubkey[i] != (pow(g,x,q)):
cj = (cj + C[i]) % q
else:
usernr = i
ci = h2(message + R + ab) - (cj % (q-1))
C[usernr] = ci
ti = ((ri - (C[usernr]*x)) % (q-1))
if ti < 0:
ti = (q-1) + ti
T[usernr] = ti
ct = ','.join('{},{}'.format(*t) for t in zip(C,T))
result = R + ","+message+","+str(pow(h1(mR),x, q))+"," + ct
if verbose == True:
print("RingSign Result: "+ result)
print("---- RingSign Completed ---- \n")
return result
@staticmethod
def verify(R, message, signature,verbose=False):
g = UniqueRingSignature.g
q = UniqueRingSignature.q
h1 = UniqueRingSignature.h1
h2 = UniqueRingSignature.h2
parsed = signature.split(",")
tt = int(parsed[2])
cjs = list()
tjs = list()
for i in range(0,int(((len(parsed))/2)-1)):
cjs.append(int(parsed[3+2*i]))
tjs.append(int(parsed[4+2*i]))
mR = list_to_string(R)
val1 = sum(cjs) % q
gyh1 = ""
for i in range(len(tjs)):
if tjs[i] < 0:
tjs[i] = (q-1) + tjs[i]
if cjs[i] < 0:
cjs[i] = (q-1) + cjs[i]
gy = (pow(g,(tjs[i]),q) * (pow((R[i]),(cjs[i]),q))) % q
h = (pow(int(h1(message + mR)), int(tjs[i])) * pow(tt,int(cjs[i]))) % q
gyh1 = gyh1 + str( gy) + str( h)
val2 = str(h2(message + list_to_string(R) + gyh1))
if int(val1) == int(val2):
if verbose == True:
print("Signature is valid!\n")
print("Common Result: " + str(val1))
print("---- Validation Completed ---- \n")
return True
else:
if verbose == True:
print("Signature is not valid!\n")
print(str(val1) + " != " + str(val2))
print("---- Validation Completed ---- \n")
return False
def local_test(verbose=True):
print(verbose)
privKey1,pubkey = UniqueRingSignature.keygen(verbose)
UniqueRingSignature.Rp.append(pubkey)
a,pubkey = UniqueRingSignature.keygen(verbose)
UniqueRingSignature.Rp.append(pubkey)
ring = UniqueRingSignature.ringsign(privKey1, UniqueRingSignature.Rp, "asdf", verbose)
if verbose:
print("Result of Signature Validation:")
UniqueRingSignature.verify(UniqueRingSignature.Rp, "asdf", ring, verbose)
if __name__ == '__main__':
import doctest
doctest.testmod()
if len(sys.argv) > 1:
verbose = False
if sys.argv[1] == "True":
verbose = True
local_test(verbose)
| true
| true
|
7903c6c9ed2da0586724572b26fb2a47f98dcd26
| 434
|
py
|
Python
|
backend/device/test_defs.py
|
open-home-iot/hint
|
b674f83ee61d7cc653acec15b92b98618f8e23b5
|
[
"MIT"
] | null | null | null |
backend/device/test_defs.py
|
open-home-iot/hint
|
b674f83ee61d7cc653acec15b92b98618f8e23b5
|
[
"MIT"
] | 3
|
2020-12-28T23:31:47.000Z
|
2021-04-18T09:30:43.000Z
|
backend/device/test_defs.py
|
megacorpincorporated/hint
|
136700c743a647cc9bf35548a7baeaac238e3b1f
|
[
"MIT"
] | null | null | null |
HUME_UUID = "9cb37270-69f5-4dc0-9fd5-7183da5ffc19"
DEVICE_UUID_1 = "e2bf93b6-9b5d-4944-a863-611b6b6600e7"
DEVICE_UUID_2 = "e2bf93b6-9b5d-4944-a863-611b6b6600e1"
DEVICE_UUID_3 = "e2bf93b6-9b5d-4944-a863-611b6b6600e2"
BASIC_LED_CAPS = {
'uuid': DEVICE_UUID_1,
'name': 'Basic LED',
'category': 1,
'type': 1,
'states': [
{
'id': 0,
'control': [{'on': 1}, {'off': 0}]
}
]
}
| 24.111111
| 54
| 0.589862
|
HUME_UUID = "9cb37270-69f5-4dc0-9fd5-7183da5ffc19"
DEVICE_UUID_1 = "e2bf93b6-9b5d-4944-a863-611b6b6600e7"
DEVICE_UUID_2 = "e2bf93b6-9b5d-4944-a863-611b6b6600e1"
DEVICE_UUID_3 = "e2bf93b6-9b5d-4944-a863-611b6b6600e2"
BASIC_LED_CAPS = {
'uuid': DEVICE_UUID_1,
'name': 'Basic LED',
'category': 1,
'type': 1,
'states': [
{
'id': 0,
'control': [{'on': 1}, {'off': 0}]
}
]
}
| true
| true
|
7903c76982c4c65c01d45654cddbfe1feb0ed4a6
| 43,384
|
py
|
Python
|
evaluations/evaluation-python-code/python/06_licma_analysis_results/evaluation_of_licma_results.py
|
stg-tud/python-crypto-misuses-study-results
|
be38da80990b699d26dfbd52ac85d5c790f079be
|
[
"CC-BY-4.0"
] | 1
|
2021-12-29T12:58:09.000Z
|
2021-12-29T12:58:09.000Z
|
evaluations/evaluation-python-code/python/06_licma_analysis_results/evaluation_of_licma_results.py
|
stg-tud/python-crypto-misuses-study-results
|
be38da80990b699d26dfbd52ac85d5c790f079be
|
[
"CC-BY-4.0"
] | null | null | null |
evaluations/evaluation-python-code/python/06_licma_analysis_results/evaluation_of_licma_results.py
|
stg-tud/python-crypto-misuses-study-results
|
be38da80990b699d26dfbd52ac85d5c790f079be
|
[
"CC-BY-4.0"
] | 1
|
2021-09-14T13:23:30.000Z
|
2021-09-14T13:23:30.000Z
|
import csv
from collections import defaultdict
import re
line = "=================================================="
counter_lib_hit_total_wd = set()
counter_lib_hit_warning_wd = set()
counter_lib_hit_critical_wd = set()
counter_lib_rule_total_wd = defaultdict(int)
counter_lib_rule_warning_wd = defaultdict(int)
counter_lib_rule_critical_wd = defaultdict(int)
def set_counter_lib_total_wd(path, rule):
global counter_lib_hit_total_wd
global counter_lib_rule_total_wd
tmp_len = len(counter_lib_hit_total_wd)
counter_lib_hit_total_wd.add(path)
if len(counter_lib_hit_total_wd) > tmp_len:
counter_lib_rule_total_wd[rule] = counter_lib_rule_total_wd[rule] + 1
def set_counter_lib_warning_wd(path, rule):
global counter_lib_hit_warning_wd
global counter_lib_rule_warning_wd
tmp_len = len(counter_lib_hit_warning_wd)
counter_lib_hit_warning_wd.add(path)
if len(counter_lib_hit_warning_wd) > tmp_len:
counter_lib_rule_warning_wd[rule] = counter_lib_rule_warning_wd[rule] + 1
def set_counter_lib_critical_wd(path, rule):
global counter_lib_hit_critical_wd
global counter_lib_rule_critical_wd
tmp_len = len(counter_lib_hit_critical_wd)
counter_lib_hit_critical_wd.add(path)
if len(counter_lib_hit_critical_wd) > tmp_len:
counter_lib_rule_critical_wd[rule] = counter_lib_rule_critical_wd[rule] + 1
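# The three set_counter_lib_*_wd helpers above repeat one pattern: bump a
# per-rule counter only when the path is new to the matching set. A minimal
# generic sketch of that pattern (hypothetical helper, not wired in below):
def count_if_new_path(path, rule, seen_paths, rule_counter):
    # Only count the rule when this path has not been seen before.
    if path not in seen_paths:
        seen_paths.add(path)
        rule_counter[rule] = rule_counter[rule] + 1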
def evaluate_licma_log(log_file_name):
counter_processing = 0
counter_error = 0
counter_parsing_not_possible = 0
counter_maximum_recursion_depth_exceeded = 0
counter_rules = {}
with open(log_file_name) as log_file:
for line in log_file.readlines():
if "INFO | processing" in line:
counter_processing = counter_processing + 1
if "ERROR" in line:
counter_error = counter_error + 1
if "ERROR | parsing not possible" in line:
counter_parsing_not_possible = counter_parsing_not_possible + 1
elif "ERROR | maximum recursion depth exceeded" in line:
counter_maximum_recursion_depth_exceeded = counter_maximum_recursion_depth_exceeded + 1
attributes = line.split(" | ")
key = attributes[3] + " " + attributes[4]
if key in counter_rules.keys():
counter_rules[key] = counter_rules[key] + 1
else:
counter_rules[key] = 1
else:
if not "INFO" in line:
print(line)
print("Number of processed files: " + str(counter_processing))
print("Number of successfully processed files: " + str(counter_processing - counter_parsing_not_possible))
print("Number of processed files without any error: " + str(
counter_processing - counter_parsing_not_possible - counter_maximum_recursion_depth_exceeded))
print("Errors: " + str(counter_error))
print("==> Parsing not possible: " + str(counter_parsing_not_possible))
print("==> Maximum recursion depth exceeded: " + str(counter_maximum_recursion_depth_exceeded))
for key in counter_rules.keys():
print("====> " + key + ": " + str(counter_rules[key]))
def evaluate_licma_results3(result_file_name):
global line
# repositories all
number_of_all_results = 0
results_no_duplicates = set()
number_of_repositories = set()
distribution_misuses_repo_all = defaultdict(int)
number_of_warning_misuses_total = 0
distribution_misuses_repo_warnings = defaultdict(int)
number_of_critical_misuses_total = 0
distribution_misuses_repo_critical = defaultdict(int)
# repositories without libs
number_of_misuses_in_repos_wl = 0
distribution_misuses_repo_all_wl = defaultdict(int)
number_of_warning_misuses_repos_wl = 0
distribution_misuses_repo_warnings_wl = defaultdict(int)
number_of_critical_misuses_repos_wl = 0
distribution_misuses_repo_critical_wl = defaultdict(int)
# libraries
number_of_used_libs = set()
number_of_libs_nd = set()
number_of_misuses_in_libs = 0
number_of_misuses_in_libs_no_duplicates = set()
number_of_misuses_in_libs_warning = 0
number_of_misuses_in_libs_critical = 0
distribution_misuses_in_libs = defaultdict(int)
distribution_misuses_in_libs_warning = defaultdict(int)
distribution_misuses_in_libs_critical = defaultdict(int)
number_of_misuses_in_libs_no_duplicates_warning = 0
number_of_misuses_in_libs_no_duplicates_critical = 0
distribution_misuses_in_libs_no_duplicates = defaultdict(int)
distribution_misuses_in_libs_no_duplicates_warning = defaultdict(int)
distribution_misuses_in_libs_no_duplicates_critical = defaultdict(int)
# tops
top_repos = defaultdict(int)
top_repos_warning = defaultdict(int)
top_repos_critical = defaultdict(int)
top_repos_wl = defaultdict(int)
top_repos_warning_wl = defaultdict(int)
top_repos_critical_wl = defaultdict(int)
top_libs = defaultdict(int)
top_libs_warning = defaultdict(int)
top_libs_critical = defaultdict(int)
top_libs_nd = defaultdict(int)
top_libs_warning_nd = defaultdict(int)
top_libs_critical_nd = defaultdict(int)
# top rules
top_rules = defaultdict(int)
# critical lines of code
critical_lines = list()
with open(result_file_name) as result_file:
csv_reader = csv.reader(result_file, delimiter=';')
        for result in list(csv_reader)[1:]:  # skip the CSV header row
file = result[0]
rule = result[1]
hit_type = result[2]
misuse = result[3]
misuse_line = result[4]
parameter_value = result[5]
parameter_type = result[6]
parameter_line = result[7]
if "requirements_licma_analysis" in file:
is_lib = True
else:
is_lib = False
repo_name = file.split("/")[5]
lib_name = file.split("/")[7]
lib_file = "/".join(file.split("/")[7:])
lib_hit = ",".join(
[lib_file, rule, hit_type, misuse, misuse_line, parameter_value, parameter_type, parameter_line])
result_string = ",".join(
[file, rule, hit_type, misuse, misuse_line, parameter_value, parameter_type, parameter_line])
# tops
top_repos[repo_name] = top_repos[repo_name] + 1
if hit_type == "warning":
top_repos_warning[repo_name] = top_repos_warning[repo_name] + 1
if hit_type == "critical":
top_repos_critical[repo_name] = top_repos_critical[repo_name] + 1
critical_lines.append((file, rule, misuse_line, parameter_line, parameter_value))
# top rule
top_rules[rule] = top_rules[rule] + 1
            # count all results and check for duplicates (there should be none)
number_of_all_results = number_of_all_results + 1
results_no_duplicates.add(str(result_string))
# number of repositories
number_of_repositories.add(repo_name)
distribution_misuses_repo_all[rule] = distribution_misuses_repo_all[rule] + 1
if hit_type == "warning":
number_of_warning_misuses_total = number_of_warning_misuses_total + 1
distribution_misuses_repo_warnings[rule] = distribution_misuses_repo_warnings[rule] + 1
if hit_type == "critical":
number_of_critical_misuses_total = number_of_critical_misuses_total + 1
                distribution_misuses_repo_critical[rule] = distribution_misuses_repo_critical[rule] + 1
# without misuses in libs
if not is_lib:
# tops
top_repos_wl[repo_name] = top_repos_wl[repo_name] + 1
if hit_type == "warning":
top_repos_warning_wl[repo_name] = top_repos_warning_wl[repo_name] + 1
if hit_type == "critical":
top_repos_critical_wl[repo_name] = top_repos_critical_wl[repo_name] + 1
number_of_misuses_in_repos_wl = number_of_misuses_in_repos_wl + 1
distribution_misuses_repo_all_wl[rule] = distribution_misuses_repo_all_wl[rule] + 1
if hit_type == "warning":
number_of_warning_misuses_repos_wl = number_of_warning_misuses_repos_wl + 1
distribution_misuses_repo_warnings_wl[rule] = distribution_misuses_repo_warnings_wl[rule] + 1
if hit_type == "critical":
number_of_critical_misuses_repos_wl = number_of_critical_misuses_repos_wl + 1
                    distribution_misuses_repo_critical_wl[rule] = distribution_misuses_repo_critical_wl[rule] + 1
if is_lib:
# tops
top_libs[repo_name] = top_libs[repo_name] + 1
if hit_type == "warning":
top_libs_warning[repo_name] = top_libs_warning[repo_name] + 1
if hit_type == "critical":
top_libs_critical[repo_name] = top_libs_critical[repo_name] + 1
# number of used libs in repos
number_of_used_libs.add(repo_name + "," + lib_name)
# number of different libs
number_of_libs_nd.add(lib_name)
number_of_misuses_in_libs = number_of_misuses_in_libs + 1
distribution_misuses_in_libs[rule] = distribution_misuses_in_libs[rule] + 1
if hit_type == "warning":
number_of_misuses_in_libs_warning = number_of_misuses_in_libs_warning + 1
distribution_misuses_in_libs_warning[rule] = distribution_misuses_in_libs_warning[rule] + 1
if hit_type == "critical":
number_of_misuses_in_libs_critical = number_of_misuses_in_libs_critical + 1
distribution_misuses_in_libs_critical[rule] = distribution_misuses_in_libs_critical[rule] + 1
tmp = len(number_of_misuses_in_libs_no_duplicates)
number_of_misuses_in_libs_no_duplicates.add(lib_hit)
if len(number_of_misuses_in_libs_no_duplicates) > tmp:
# tops
top_libs_nd[lib_name] = top_libs_nd[lib_name] + 1
if hit_type == "warning":
top_libs_warning_nd[lib_name] = top_libs_warning_nd[lib_name] + 1
if hit_type == "critical":
top_libs_critical_nd[lib_name] = top_libs_critical_nd[lib_name] + 1
distribution_misuses_in_libs_no_duplicates[rule] = distribution_misuses_in_libs_no_duplicates[
rule] + 1
if hit_type == "warning":
number_of_misuses_in_libs_no_duplicates_warning = number_of_misuses_in_libs_no_duplicates_warning + 1
distribution_misuses_in_libs_no_duplicates_warning[rule] = \
distribution_misuses_in_libs_no_duplicates_warning[rule] + 1
if hit_type == "critical":
number_of_misuses_in_libs_no_duplicates_critical = number_of_misuses_in_libs_no_duplicates_critical + 1
distribution_misuses_in_libs_no_duplicates_critical[rule] = \
distribution_misuses_in_libs_no_duplicates_critical[rule] + 1
# general
print("General")
print(line)
print("Number of all results: " + str(number_of_all_results))
print("Number of results no duplicates: " + str(len(results_no_duplicates)))
if number_of_all_results == len(results_no_duplicates):
print("No duplicates ==> TRUE")
else:
print("No duplicates ==> FALSE")
print(line)
# repos
print("Repositories")
print(line)
print("Number of repositories with misuses: " + str(len(number_of_repositories)))
print("==> Number of found misuses in these repositories: " + str(len(results_no_duplicates)))
for key in distribution_misuses_repo_all.keys():
print("====> " + key + ": " + str(distribution_misuses_repo_all[key]))
print("==> Number of found warning misuses in these repositories: " + str(number_of_warning_misuses_total))
for key in distribution_misuses_repo_warnings.keys():
print("====> " + key + ": " + str(distribution_misuses_repo_warnings[key]))
print("==> Number of found critical misuses in these repositories: " + str(number_of_critical_misuses_total))
for key in distribution_misuses_repo_critical.keys():
print("====> " + key + ": " + str(distribution_misuses_repo_critical[key]))
# repos without libs
print("==> Number of misuses in repos without libs: " + str(number_of_misuses_in_repos_wl))
for key in distribution_misuses_repo_all_wl.keys():
print("====> " + key + ": " + str(distribution_misuses_repo_all_wl[key]))
print("==> Number of warning misuses in repos without libs: " + str(number_of_warning_misuses_repos_wl))
for key in distribution_misuses_repo_warnings_wl.keys():
print("====> " + key + ": " + str(distribution_misuses_repo_warnings_wl[key]))
print("==> Number of critical misuses in repos without libs: " + str(number_of_critical_misuses_repos_wl))
for key in distribution_misuses_repo_critical_wl.keys():
print("====> " + key + ": " + str(distribution_misuses_repo_critical_wl[key]))
print(line)
# libs
print("LIBRARIES")
print(line)
print("Number of libs in these repositories were a misuse was found: " + str(len(number_of_used_libs)))
print("==> Number of misuses in these libraries: " + str(number_of_misuses_in_libs))
for key in distribution_misuses_in_libs.keys():
print("====> " + key + ": " + str(distribution_misuses_in_libs[key]))
print("==> Number of warning misuses in these libraries: " + str(number_of_misuses_in_libs_warning))
for key in distribution_misuses_in_libs_warning.keys():
print("====> " + key + ": " + str(distribution_misuses_in_libs_warning[key]))
print("==> Number of critical misuses in these libraries: " + str(number_of_misuses_in_libs_critical))
for key in distribution_misuses_in_libs_critical.keys():
print("====> " + key + ": " + str(distribution_misuses_in_libs_critical[key]))
print("==> Unique number of these libs: " + str(len(number_of_libs_nd)))
print("====> Number of misuses in libraries no duplicates: " + str(len(number_of_misuses_in_libs_no_duplicates)))
for key in distribution_misuses_in_libs_no_duplicates.keys():
print("======> " + key + ": " + str(distribution_misuses_in_libs_no_duplicates[key]))
print("====> Number of warning misuses in libraries no duplicates: " + str(
number_of_misuses_in_libs_no_duplicates_warning))
for key in distribution_misuses_in_libs_no_duplicates_warning.keys():
print("======> " + key + ": " + str(distribution_misuses_in_libs_no_duplicates_warning[key]))
print("====> Number of critical misuses in libraries no duplicates: " + str(
number_of_misuses_in_libs_no_duplicates_critical))
for key in distribution_misuses_in_libs_no_duplicates_critical.keys():
print("======> " + key + ": " + str(distribution_misuses_in_libs_no_duplicates_critical[key]))
print(line)
# tops
print("Tops")
print(line)
print_top("Top 10 Repositories:", top_repos)
print_top("Top 10 Repositories(WARNING):", top_repos_warning)
print_top("Top 10 Repositories(CRITICAL):", top_repos_critical)
print_top("Top 10 Repositories without libs:", top_repos_wl)
print_top("Top 10 Repositories without libs(WARNING):", top_repos_warning_wl)
print_top("Top 10 Repositories without libs(CRITICAL):", top_repos_critical_wl)
print_top("Top 10 Libraries with multiple usage in repos:", top_libs)
print_top("Top 10 Libraries with multiple usage in repos(WARNING):", top_libs_warning)
print_top("Top 10 Libraries with multiple usage in repos(CRITICAL):", top_libs_critical)
print_top("Top 10 Libraries no duplicate usage in repo:", top_libs_nd)
print_top("Top 10 Libraries no duplicate usage in repo(WARNING):", top_libs_warning_nd)
print_top("Top 10 Libraries no duplicate usage in repo(CRITICAL):", top_libs_critical_nd)
print_top("Top 10 rules(ALL FOUND MISUSES):", top_rules)
print_critical_lines(critical_lines)
def evaluate_licma_results(result_file_name):
global line
# repositories all
number_of_all_results = 0
results_no_duplicates = set()
number_of_repositories = set()
number_of_warning_misuses_total = 0
number_of_critical_misuses_total = 0
# repositories without libs
number_of_misuses_in_repos_wl = 0
number_of_warning_misuses_repos_wl = 0
number_of_critical_misuses_repos_wl = 0
# libraries
number_of_used_libs = set()
number_of_libs_nd = set()
number_of_misuses_in_libs = 0
number_of_misuses_in_libs_no_duplicates = set()
number_of_misuses_in_libs_warning = 0
number_of_misuses_in_libs_critical = 0
number_of_misuses_in_libs_no_duplicates_warning = 0
number_of_misuses_in_libs_no_duplicates_critical = 0
# tops repos
top_repos_with_misuses = defaultdict(int)
top_repos_with_misuses_warning_repo = defaultdict(int)
top_repos_with_misuses_warning_lib = defaultdict(int)
top_repos_with_misuses_critical_repo = defaultdict(int)
top_repos_with_misuses_critical_lib = defaultdict(int)
# tops dependencies
top_dependencies_with_misuses = defaultdict(int)
top_dependencies_with_misuses_warning = defaultdict(int)
top_dependencies_with_misuses_critical = defaultdict(int)
# rule distribution(used Python library)
rule_distribution = defaultdict(int)
rule_distribution_warning_repo = defaultdict(int)
rule_distribution_warning_lib = defaultdict(int)
rule_distribution_critical_repo = defaultdict(int)
rule_distribution_critical_lib = defaultdict(int)
# rule per application
rule_per_application_critical = defaultdict(int)
rule_per_application_critical2 = defaultdict(int)
# Distribution for LICMA rules
licma_rule_distribution = defaultdict(int)
licma_rule_distribution_warning_repo = defaultdict(int)
licma_rule_distribution_warning_lib = defaultdict(int)
licma_rule_distribution_critical_repo = defaultdict(int)
licma_rule_distribution_critical_lib = defaultdict(int)
# critical lines of code
critical_lines = list()
with open(result_file_name) as result_file:
csv_reader = csv.reader(result_file, delimiter=';')
        for result in list(csv_reader)[1:]:  # skip the CSV header row
# get attributes
# #################################################################
file = result[0]
rule = result[1][9:]
rule_number = re.findall(r'\d+', rule)[0]
if rule_number not in ["1", "2", "3", "4", "5"]:
raise Exception("Rule number invalid: " + str(rule_number))
hit_type = result[2]
misuse = result[3]
misuse_line = result[4]
parameter_value = result[5]
parameter_type = result[6]
parameter_line = result[7]
if "requirements_licma_analysis" in file:
is_lib = True
else:
is_lib = False
repo_name = file.split("/")[5]
lib_name = file.split("/")[7]
lib_file = "/".join(file.split("/")[7:])
lib_hit = ",".join(
[lib_file, rule, hit_type, misuse, misuse_line, parameter_value, parameter_type, parameter_line])
result_string = ",".join(
[file, rule, hit_type, misuse, misuse_line, parameter_value, parameter_type, parameter_line])
# count
# #################################################################
# always count
# #################################################################
            # count all results and check for duplicates (there should be none)
number_of_all_results = number_of_all_results + 1
results_no_duplicates.add(str(result_string))
# top
top_repos_with_misuses[repo_name] = top_repos_with_misuses[repo_name] + 1
# number of repositories
number_of_repositories.add(repo_name)
if hit_type == "warning":
number_of_warning_misuses_total = number_of_warning_misuses_total + 1
if hit_type == "critical":
critical_lines.append((file, rule, misuse_line, parameter_line, parameter_value))
number_of_critical_misuses_total = number_of_critical_misuses_total + 1
                rule_per_application_critical[repo_name + " " + rule] = rule_number
rule_per_application_critical2[repo_name + " " + rule] = rule_per_application_critical2[repo_name + " " + rule] + 1
# only repos
# #################################################################
if not is_lib:
# always
number_of_misuses_in_repos_wl = number_of_misuses_in_repos_wl + 1
# rule distribution per Python library
rule_distribution[rule] = rule_distribution[rule] + 1
# rule distribution per LICMA rule
licma_rule_distribution[rule_number] = licma_rule_distribution[rule_number] + 1
if hit_type == "warning":
number_of_warning_misuses_repos_wl = number_of_warning_misuses_repos_wl + 1
top_repos_with_misuses_warning_repo[repo_name] = top_repos_with_misuses_warning_repo[repo_name] + 1
rule_distribution_warning_repo[rule] = rule_distribution_warning_repo[rule] + 1
licma_rule_distribution_warning_repo[rule_number] = licma_rule_distribution_warning_repo[rule_number] + 1
if hit_type == "critical":
number_of_critical_misuses_repos_wl = number_of_critical_misuses_repos_wl + 1
top_repos_with_misuses_critical_repo[repo_name] = top_repos_with_misuses_critical_repo[
repo_name] + 1
rule_distribution_critical_repo[rule] = rule_distribution_critical_repo[rule] + 1
licma_rule_distribution_critical_repo[rule_number] = licma_rule_distribution_critical_repo[rule_number] + 1
# only dependencies
# #################################################################
if is_lib:
# always
# #############################################################
# number of used libs in repos
number_of_used_libs.add(repo_name + "," + lib_name)
# number of different libs
number_of_libs_nd.add(lib_name)
number_of_misuses_in_libs = number_of_misuses_in_libs + 1
if hit_type == "warning":
number_of_misuses_in_libs_warning = number_of_misuses_in_libs_warning + 1
top_repos_with_misuses_warning_lib[repo_name] = top_repos_with_misuses_warning_lib[repo_name] + 1
if hit_type == "critical":
number_of_misuses_in_libs_critical = number_of_misuses_in_libs_critical + 1
top_repos_with_misuses_critical_lib[repo_name] = top_repos_with_misuses_critical_lib[repo_name] + 1
tmp = len(number_of_misuses_in_libs_no_duplicates)
number_of_misuses_in_libs_no_duplicates.add(lib_hit)
# only for new libs hits
# #############################################################
if len(number_of_misuses_in_libs_no_duplicates) > tmp:
# rule distribution per Python library
rule_distribution[rule] = rule_distribution[rule] + 1
top_dependencies_with_misuses[lib_name] = top_dependencies_with_misuses[lib_name] + 1
# rule distribution per LICMA rule
licma_rule_distribution[rule_number] = licma_rule_distribution[rule_number] + 1
if hit_type == "warning":
number_of_misuses_in_libs_no_duplicates_warning = number_of_misuses_in_libs_no_duplicates_warning + 1
rule_distribution_warning_lib[rule] = rule_distribution_warning_lib[rule] + 1
top_dependencies_with_misuses_warning[lib_name] = top_dependencies_with_misuses_warning[lib_name] + 1
licma_rule_distribution_warning_lib[rule_number] = licma_rule_distribution_warning_lib[rule_number] + 1
if hit_type == "critical":
number_of_misuses_in_libs_no_duplicates_critical = number_of_misuses_in_libs_no_duplicates_critical + 1
rule_distribution_critical_lib[rule] = rule_distribution_critical_lib[rule] + 1
top_dependencies_with_misuses_critical[lib_name] = top_dependencies_with_misuses_critical[lib_name] + 1
licma_rule_distribution_critical_lib[rule_number] = licma_rule_distribution_critical_lib[rule_number] + 1
# general
print("General")
print(line)
print("Number of all results: " + str(number_of_all_results))
print("Number of results no duplicates: " + str(len(results_no_duplicates)))
if number_of_all_results == len(results_no_duplicates):
print("No duplicates ==> TRUE")
else:
print("No duplicates ==> FALSE")
print(line)
# repos
print("Repositories")
print(line)
print("Number of repositories with misuses: " + str(len(number_of_repositories)))
print("==> Number of found misuses in these repositories: " + str(len(results_no_duplicates)))
print("==> Number of found warning misuses in these repositories: " + str(number_of_warning_misuses_total))
print("==> Number of found critical misuses in these repositories: " + str(number_of_critical_misuses_total))
# repos without libs
print("==> Number of misuses in repos without libs: " + str(number_of_misuses_in_repos_wl))
print("==> Number of warning misuses in repos without libs: " + str(number_of_warning_misuses_repos_wl))
print("==> Number of critical misuses in repos without libs: " + str(number_of_critical_misuses_repos_wl))
print(line)
# libs
print("LIBRARIES")
print(line)
print("Number of libs in these repositories were a misuse was found: " + str(len(number_of_used_libs)))
print("==> Number of misuses in these libraries: " + str(number_of_misuses_in_libs))
print("==> Number of warning misuses in these libraries: " + str(number_of_misuses_in_libs_warning))
print("==> Number of critical misuses in these libraries: " + str(number_of_misuses_in_libs_critical))
print("==> Unique number of these libs: " + str(len(number_of_libs_nd)))
print("====> Number of misuses in libraries no duplicates: " + str(len(number_of_misuses_in_libs_no_duplicates)))
print("====> Number of warning misuses in libraries no duplicates: " + str(
number_of_misuses_in_libs_no_duplicates_warning))
print("====> Number of critical misuses in libraries no duplicates: " + str(
number_of_misuses_in_libs_no_duplicates_critical))
print(line)
# tops repos
print("Tops")
print(line)
print_top("Top Repositories:", get_top_to_sorted_list(top_repos_with_misuses), top_repos_with_misuses_critical_repo,
top_repos_with_misuses_critical_lib, top_repos_with_misuses_warning_repo,
top_repos_with_misuses_warning_lib)
# print_critical_lines(critical_lines)
print("symbolic y coords + addplot coordinates for diagram:")
print(line)
top_repos_to_tex(get_top_to_sorted_list(top_repos_with_misuses)[-16:], top_repos_with_misuses_critical_repo,
top_repos_with_misuses_critical_lib, top_repos_with_misuses_warning_repo,
top_repos_with_misuses_warning_lib)
print_top("Rule distribution", get_top_to_sorted_list(rule_distribution), rule_distribution_critical_repo,
rule_distribution_critical_lib, rule_distribution_warning_repo, rule_distribution_warning_lib)
rule_distribution_to_tex(rule_distribution, rule_distribution_critical_repo, rule_distribution_critical_lib,
rule_distribution_warning_repo, rule_distribution_warning_lib)
# tops dependencies
print_top_dependencies("Top dependencies", get_top_to_sorted_list(top_dependencies_with_misuses), top_dependencies_with_misuses_critical, top_dependencies_with_misuses_warning)
print(line)
top_dependencies_to_tex(get_top_to_sorted_list(top_dependencies_with_misuses), top_dependencies_with_misuses_critical, top_dependencies_with_misuses_warning)
print(line)
print(rule_per_application_critical)
print(len(rule_per_application_critical))
print(rule_per_application_critical2)
print(len(rule_per_application_critical2))
applications_per_rule = defaultdict(int)
for key in rule_per_application_critical.keys():
applications_per_rule[rule_per_application_critical[key]] = applications_per_rule[rule_per_application_critical[key]] + 1
for key in applications_per_rule.keys():
print("Rule " + str(key) + ": " + str(applications_per_rule[key]))
get_files_of_interest("/home/ubuntu/PycharmProjects/licma/evaluations/evaluation-python-code/python/04_identify_relevant_python_code/files_of_interest.txt")
print_top("Rules", get_top_to_sorted_list(licma_rule_distribution), licma_rule_distribution_critical_repo, licma_rule_distribution_critical_lib, licma_rule_distribution_warning_repo, licma_rule_distribution_warning_lib)
top_repos_to_tex(get_licma_rule_to_sorted_list(licma_rule_distribution), licma_rule_distribution_critical_repo, licma_rule_distribution_critical_lib, licma_rule_distribution_warning_repo, licma_rule_distribution_warning_lib)
    total = 0
    for rule, number in rule_per_application_critical2.items():
        print(rule + ": " + str(number))
        total = total + number
    print(total)
    print(len(rule_per_application_critical2))
def get_files_of_interest(file_path):
number_of_repos = set()
counter = 0
with open(file_path, mode="r") as file:
        for file_line in file.readlines():
            parts = file_line.split("/")
            counter = counter + 1
            number_of_repos.add(parts[5])
print("Files of interest: " + str(counter))
print("Corresponding repositories: " + str(len(number_of_repos)))
def print_critical_lines(critical_lines):
# (file, rule, misuse_line, parameter_line, parameter_value)
print(line)
print("CRITICAL LINES OF CODE")
print(line)
critical_lines.sort()
for critical_line in critical_lines:
with open(critical_line[0], mode="r") as code_file:
lines_of_code = code_file.readlines()
if critical_line[4] == "":
value = "\"\""
else:
value = critical_line[4]
print("Rule: " + critical_line[1] + " Value: " + value)
if int(critical_line[2]) != int(critical_line[3]):
print("Assignment: " + lines_of_code[int(critical_line[3]) - 1])
print(lines_of_code[int(critical_line[2]) - 2] + lines_of_code[int(critical_line[2]) - 1] + lines_of_code[
int(critical_line[2])])
print(line)
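# Note: the context printout above indexes misuse_line - 2 .. misuse_line, so
# a critical hit on the first or last line of a file can raise IndexError. A
# defensive variant would clamp the window (hypothetical helper):
def context_window(lines_of_code, misuse_line):
    # Clamp a three-line window around the 1-based misuse line.
    start = max(int(misuse_line) - 2, 0)
    end = min(int(misuse_line) + 1, len(lines_of_code))
    return "".join(lines_of_code[start:end])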
def get_github_address(repo_name):
github_address = "https://github.com/" + repo_name.replace("__", "/")
return github_address
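# get_github_address assumes local repository directories are named
# "owner__repo"; e.g. a hypothetical "octocat__hello-world" maps to
# "https://github.com/octocat/hello-world".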
def print_coords_dependencies(top_dependencies, values, last):
if last:
print("\\addplot[color=darkgray,draw=darkgray,fill=darkgray!20] coordinates")
else:
print("\\addplot coordinates")
print("{")
for dependency, misuses in top_dependencies:
print("(" + str(values[dependency]) + "," + dependency + ")")
print("};")
def print_coords_repo(top_repos, values, last):
if last:
print("\\addplot[fill=yellow!50,show sum on top] coordinates")
else:
print("\\addplot coordinates")
print("{")
for repo, misuses in top_repos:
print("(" + str(values[repo]) + "," + get_github_address(repo) + ")")
print("};")
def print_coords_rule(rules, values_all, values_warning, values_critical, i, last, before_last, before_before_last):
all = " ALL"
warning = " WARNING"
critical = " CRITICAL"
if last:
print("\\addplot[color=violet,draw=violet,fill=violet!50] coordinates")
elif before_last:
print("\\addplot[color=darkgray,draw=darkgray,fill=darkgray!20] coordinates")
elif before_before_last:
print("\\addplot[color=brown,draw=brown,fill=brown!20] coordinates")
else:
print("\\addplot coordinates")
print("{")
for rule, hits in rules:
if (rule, i) in values_all.keys():
print("(" + str(values_all[rule, i]) + "," + rule + all + ")")
else:
print("(" + str(0) + "," + rule + all + ")")
if (rule, i) in values_warning.keys():
print("(" + str(values_warning[rule, i]) + "," + rule + warning + ")")
else:
print("(" + str(0) + "," + rule + warning + ")")
if (rule, i) in values_critical.keys():
print("(" + str(values_critical[rule, i]) + "," + rule + critical + ")")
else:
print("(" + str(0) + "," + rule + critical + ")")
print("};")
def top_dependencies_to_tex(top_dependencies, cl, wl):
print("symbolic y coords={")
for dependency, misuses in top_dependencies:
last_dependency, last_misuses = top_dependencies[-1]
if dependency == last_dependency:
print(dependency + "}]")
else:
print(dependency + ",")
print_coords_dependencies(top_dependencies, cl, False)
print_coords_dependencies(top_dependencies, wl, True)
print("\legend{critical misuses in dependencies, warning misuses in dependencies}")
def top_repos_to_tex(top_repos, cr, cl, wr, wl):
print("symbolic y coords={")
for repo, misuses in top_repos:
github_address = get_github_address(repo)
last_repo, last_misuses = top_repos[-1]
if repo == last_repo:
print(github_address + "}]")
else:
print(github_address + ",")
print_coords_repo(top_repos, cr, False)
print_coords_repo(top_repos, cl, False)
print_coords_repo(top_repos, wr, False)
print_coords_repo(top_repos, wl, True)
print("\legend{critical misuses in repositories, critical misuses in dependencies, warning misuses in repositories, warning misuses in dependencies}")
def group_rule_by_lib(rule_distribution):
group_distributions = defaultdict(int)
for distribution in rule_distribution.keys():
if "cryptography" in distribution:
group_distributions["cryptography"] = group_distributions["cryptography"] + rule_distribution[distribution]
elif "M2Crypto" in distribution:
group_distributions["M2Crypto"] = group_distributions["M2Crypto"] + rule_distribution[distribution]
elif "PyCrypto" in distribution:
group_distributions["PyCrypto"] = group_distributions["PyCrypto"] + rule_distribution[distribution]
elif "PyNaCl" in distribution:
group_distributions["PyNaCl"] = group_distributions["PyNaCl"] + rule_distribution[distribution]
return group_distributions
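# Both grouping helpers repeat the same library-name chain; a table-driven
# sketch of the same grouping (tuple order mirrors the elif chain above):
_CRYPTO_LIBS = ("cryptography", "M2Crypto", "PyCrypto", "PyNaCl")
def group_rule_by_lib_table(rule_distribution):
    group_distributions = defaultdict(int)
    for distribution, count in rule_distribution.items():
        for lib in _CRYPTO_LIBS:
            if lib in distribution:
                group_distributions[lib] = group_distributions[lib] + count
                break
    return group_distributions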
def group_rule_by_lib_and_rule(rule_distribution):
group_distributions = defaultdict(int)
for distribution in rule_distribution.keys():
rule_number = re.findall(r'\d+', distribution)[0]
if "cryptography" in distribution:
group_distributions[("cryptography", rule_number)] = group_distributions[("cryptography", rule_number)] + \
rule_distribution[distribution]
elif "M2Crypto" in distribution:
group_distributions[("M2Crypto", rule_number)] = group_distributions[("M2Crypto", rule_number)] + \
rule_distribution[distribution]
elif "PyCrypto" in distribution:
group_distributions[("PyCrypto", rule_number)] = group_distributions[("PyCrypto", rule_number)] + \
rule_distribution[distribution]
elif "PyNaCl" in distribution:
group_distributions[("PyNaCl", rule_number)] = group_distributions[("PyNaCl", rule_number)] + \
rule_distribution[distribution]
return group_distributions
def rule_distribution_to_tex(rule_distribution, cr, cl, wr, wl):
rule_distribution_grouped = group_rule_by_lib(rule_distribution)
rule_distribution_grouped_sorted = get_top_to_sorted_list(rule_distribution_grouped)
all = " ALL"
warning = " WARNING"
critical = " CRITICAL"
print("symbolic y coords={")
for rule, number in rule_distribution_grouped_sorted:
last_rule, last_number = rule_distribution_grouped_sorted[-1]
if rule == last_rule:
print(rule + all + ",")
print(rule + warning + ",")
print(rule + critical + "}]")
else:
print(rule + all + ",")
print(rule + warning + ",")
print(rule + critical + ",")
rule_distribution_grouped_by_lib_and_rule = group_rule_by_lib_and_rule(rule_distribution)
rule_distribution_warning_grouped_by_lib_and_rule = group_rule_by_lib_and_rule(dict_plus(wr, wl))
rule_distribution_critical_grouped_by_lib_and_rule = group_rule_by_lib_and_rule(dict_plus(cr, cl))
for i in ["1", "2", "3", "4", "5"]:
last = False
before_last = False
before_before_last = False
if i == "5":
last = True
elif i == "4":
before_last = True
elif i == "3":
before_before_last = True
print_coords_rule(rule_distribution_grouped_sorted, rule_distribution_grouped_by_lib_and_rule,
rule_distribution_warning_grouped_by_lib_and_rule,
rule_distribution_critical_grouped_by_lib_and_rule, i, last, before_last, before_before_last)
print("\legend{Rule 1, Rule 2, Rule 3, Rule 4, Rule 5}")
print(line)
print("rule-name;rule1;rule2;rule3;rule4;rule5")
for rule, number in rule_distribution_grouped_sorted:
print_rule_distribution_table(rule, "CRITICAL", rule_distribution_critical_grouped_by_lib_and_rule.get((rule, "1")),
rule_distribution_critical_grouped_by_lib_and_rule.get((rule, "2")),
rule_distribution_critical_grouped_by_lib_and_rule.get((rule, "3")),
rule_distribution_critical_grouped_by_lib_and_rule.get((rule, "4")),
rule_distribution_critical_grouped_by_lib_and_rule.get((rule, "5")))
print_rule_distribution_table(rule, "WARNING", rule_distribution_warning_grouped_by_lib_and_rule.get((rule, "1")),
rule_distribution_warning_grouped_by_lib_and_rule.get((rule, "2")),
rule_distribution_warning_grouped_by_lib_and_rule.get((rule, "3")),
rule_distribution_warning_grouped_by_lib_and_rule.get((rule, "4")),
rule_distribution_warning_grouped_by_lib_and_rule.get((rule, "5")))
print_rule_distribution_table(rule, "ALL", rule_distribution_grouped_by_lib_and_rule.get((rule, "1")),
rule_distribution_grouped_by_lib_and_rule.get((rule, "2")),
rule_distribution_grouped_by_lib_and_rule.get((rule, "3")),
rule_distribution_grouped_by_lib_and_rule.get((rule, "4")),
rule_distribution_grouped_by_lib_and_rule.get((rule, "5")))
def print_rule_distribution_table(rule, hit_type, rule1, rule2, rule3, rule4, rule5):
    # dict.get() hands in None for missing cells; print those as 0.
    print(rule + " " + str(hit_type) + ";" + str(rule1 or 0)
          + ";" + str(rule2 or 0)
          + ";" + str(rule3 or 0)
          + ";" + str(rule4 or 0)
          + ";" + str(rule5 or 0))
def dict_plus(dict1, dict2):
new_dict = defaultdict(int)
for key in dict1.keys():
new_dict[key] = new_dict[key] + dict1[key]
for key in dict2.keys():
new_dict[key] = new_dict[key] + dict2[key]
return new_dict
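# dict_plus is the elementwise sum of two int-valued dicts; the standard
# library expresses the same via collections.Counter (Counter addition drops
# non-positive results, which is safe for these non-negative hit counts):
def dict_plus_counter(dict1, dict2):
    from collections import Counter
    return defaultdict(int, Counter(dict1) + Counter(dict2))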
def get_top_to_sorted_list(top_statistics):
top_statistics_list = sorted([(key, top_statistics[key]) for key in top_statistics.keys()],
key=lambda element: (element[1], element[0]), reverse=False)
return top_statistics_list
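# get_top_to_sorted_list orders ascending by count, then key, so the largest
# entries come last; e.g. {"repoA": 3, "repoB": 1} (hypothetical) becomes
# [("repoB", 1), ("repoA", 3)].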
def get_licma_rule_to_sorted_list(top_statistics):
top_statistics_list = sorted([(key, top_statistics[key]) for key in top_statistics.keys()],
key=lambda element: (element[0], element[1]), reverse=True)
return top_statistics_list
def print_top(top_name, top_repos, cr, cl, wr, wl):
print(line)
print(top_name)
print(line)
counter = 0
for repo, misuses in top_repos:
counter = counter + 1
print("==> " + str(counter) + " " + repo + ": " + str(misuses))
print("====> critical repo: " + str(cr[repo]))
print("====> critical lib: " + str(cl[repo]))
print("====> warning repo: " + str(wr[repo]))
print("====> warning lib: " + str(wl[repo]))
def print_top_dependencies(top_name, top_dependencies, cl, wl):
print(line)
print(top_name)
print(line)
sum_all = 0
sum_c = 0
sum_w = 0
counter = 0
for dependency, misuses in top_dependencies:
sum_all = sum_all + misuses
sum_c = sum_c + cl[dependency]
sum_w = sum_w + wl[dependency]
counter = counter + 1
print("==> " + str(counter) + " " + dependency + ": " + str(misuses))
print("====> critical lib: " + str(cl[dependency]))
print("====> warning lib: " + str(wl[dependency]))
print("Sum critical: " + str(sum_c))
print("Sum warning: " + str(sum_w))
print("Sum all: " + str(sum_all))
if __name__ == '__main__':
print("Evaluation of the log file")
print("==================================================")
evaluate_licma_log("FINAL_licma2020-08-06 15:19:38.992667-log.txt")
print("\n")
print("Evaluation of the result file")
print("==================================================")
evaluate_licma_results("FINAL_licma-result-2020-08-06 15:20:20.507612.csv")
# ['File', 'Rule', 'Hit-Type', 'Misuse', 'Misuse-Line', 'Parameter-Value', 'Parameter-Type', 'Parameter-Line']
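    # A hypothetical ';'-separated row matching those columns (field values
    # are illustrative only):
    #   <path>.py;<rule-id>;critical;AES.new;42;ECB;mode;41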
| 46.4
| 228
| 0.653213
|
| true
| true
|
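A minimal standalone sketch of the two helpers used in the evaluation script above (dict_plus sums two integer-valued counters key-wise; get_top_to_sorted_list sorts ascending by count, then name). The sample data is made up for illustration:

from collections import defaultdict

def dict_plus(dict1, dict2):
    # merge two counters by summing values key-wise
    merged = defaultdict(int)
    for d in (dict1, dict2):
        for key, value in d.items():
            merged[key] += value
    return merged

warnings = {"rule1": 3, "rule2": 1}
criticals = {"rule1": 2, "rule3": 5}
combined = dict_plus(warnings, criticals)
assert combined["rule1"] == 5 and combined["rule3"] == 5

# ascending by (count, name), mirroring get_top_to_sorted_list above
ranked = sorted(combined.items(), key=lambda kv: (kv[1], kv[0]))
assert ranked[0] == ("rule2", 1)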
7903c9aa486e83599e115ccf91ffbb25e516c6fd
| 5,488
|
py
|
Python
|
plugins/otp/otp.py
|
hosom/jarvis
|
2eadb3b2d07672af296e7e7c7fe2d6be9db9557f
|
[
"BSD-3-Clause"
] | 1
|
2018-03-08T20:39:51.000Z
|
2018-03-08T20:39:51.000Z
|
plugins/otp/otp.py
|
hosom/jarvis
|
2eadb3b2d07672af296e7e7c7fe2d6be9db9557f
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/otp/otp.py
|
hosom/jarvis
|
2eadb3b2d07672af296e7e7c7fe2d6be9db9557f
|
[
"BSD-3-Clause"
] | 1
|
2018-11-02T01:53:52.000Z
|
2018-11-02T01:53:52.000Z
|
import datetime
import threading
import contextlib
import pyotp
import qrcode
from errbot import BotPlugin, botcmd, arg_botcmd, cmdfilter
# OTP expires every hour
_OTP_EXPIRE = datetime.timedelta(hours=1)
_BASE_TIME = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)
class otp(BotPlugin):
'''
Implement One Time Passwords for command filtering.
'''
# lock protects storage
lock = threading.Lock()
def activate(self):
super(otp, self).activate()
# Set the data directory for the plugin
        self.DATA_DIR = '{0}/'.format(self.bot_config.BOT_DATA_DIR)
if 'commands' not in self:
self['commands'] = set()
if 'secrets' not in self:
self['secrets'] = dict()
@contextlib.contextmanager
def stored(self, key):
'''
This is a convenience tool to make plugin storage easier.
'''
value = self[key]
try:
yield value
finally:
self[key] = value
def get_configuration_template(self):
return dict(
provision_via_chat=False,
max_retries=10
)
def build_qrcode(self, user, url):
'''Internal method used to build the QRCode image for token provisioning.'''
prefix = self.DATA_DIR
qrcode.make(url).save('{0}{1}-qrcode.png'.format(prefix, user), format='png')
def get_identity(self, message):
'''Wrapper to make sure the correct identity object is used.'''
try:
return message.frm.aclattr
except AttributeError:
return message.frm.person
@botcmd(admin_only=True)
def otp_delete_all(self, message, args):
'''
WARNING: This command removes ALL OTP entries.
'''
self['commands'] = set()
self['secrets'] = dict()
return 'Removed **all** OTP tokens and command filters.'
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_add_command')
def otp_add_command(self, message, cmd=None):
'''
Add a command to OTP command filtering.
'''
with self.lock:
with self.stored('commands') as commands:
commands.add(cmd)
return dict(command=cmd)
#return 'Added {0} to OTP filtered commands.'.format(cmd)
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_remove_command')
def otp_remove_command(self, message, cmd=None):
'''
Remove a command from OTP command filtering.
'''
with self.lock:
with self.stored('commands') as commands:
if cmd not in commands:
return dict(err=True, command=cmd)
commands.remove(cmd)
return dict(err=False, command=cmd)
@botcmd(admin_only=True, template='otp_commands')
def otp_commands(self, message, args):
'''
List the commands that are filtered by OTP.
'''
return dict(commands=self['commands'])
@arg_botcmd('user', type=str, admin_only=True, template='otp_secret_create')
def otp_secret_create(self, message, user=None):
'''
Send a new secret for a user.
'''
secret = pyotp.random_base32()
with self.lock:
with self.stored('secrets') as secrets:
secrets[user] = (secret, 0, _BASE_TIME)
totp = pyotp.TOTP(secret)
url = totp.provisioning_uri(user)
self.build_qrcode(user, url)
if self.config:
if self.config.get('provision_via_chat'):
f = open('{0}{1}-qrcode.png'.format(self.DATA_DIR, user), 'rb')
self.send_stream_request(self.build_identifier(user), f, name='OTP-secret.png')
self.send_templated(self.build_identifier(user), 'otp_secret_create_pm', dict(url=url))
return dict(chat_enrollment=True, user=user)
return dict(chat_enrollment=False, user=user)
@arg_botcmd('otp', type=int, template='otp_auth')
def otp_auth(self, message, otp=None):
'''
Authenticate with OTP to the bot to pass OTP filtering.
'''
# OTP shouldn't be done in a group chat channel.
if message.is_group:
return dict(group_chat=True)
identity = self.get_identity(message)
if identity not in self['secrets']:
return dict(not_enrolled=True)
secret, attempts, _ = self['secrets'][identity]
totp = pyotp.TOTP(secret)
if totp.verify(otp):
with self.lock:
with self.stored('secrets') as secrets:
secret, _, _ = secrets[identity]
secrets[identity] = (secret, 0, datetime.datetime.now())
return dict(success=True)
else:
# Increase the number of attempts, or burn secret
with self.lock:
with self.stored('secrets') as secrets:
secret, attempts, ts = secrets[identity]
if attempts > self.config.get('max_retries'):
secret = ''
secrets[identity] = (secret, attempts+1, ts)
return dict(success=False)
@cmdfilter
def otp_filter(self, message, command, args, dry_run):
'''
Filter commands to determine if user has recently validated with OTP.
'''
with self.lock:
if command in self['commands']:
self.log.info('{0} is protected by OTP. Processing.'.format(command))
identity = self.get_identity(message)
secrets = self['secrets']
if identity not in secrets:
# Command is filtered, user doesn't have an OTP token
self.send_templated(message.frm, 'otp_filter', dict(not_enrolled=True))
return None, None, None
_, _, lastotp = secrets[identity]
if datetime.datetime.now() - lastotp > _OTP_EXPIRE:
self.log.info('{0} has not authenticated with OTP since expire'.format(identity))
self.send_templated(message.frm, 'otp_filter', dict(auth_required=True))
return None, None, None
self.log.info('OTP ok, permit command.')
return message, command, args
| 30.831461
| 92
| 0.676749
|
import datetime
import threading
import contextlib
import pyotp
import qrcode
from errbot import BotPlugin, botcmd, arg_botcmd, cmdfilter
_OTP_EXPIRE = datetime.timedelta(hours=1)
_BASE_TIME = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)
class otp(BotPlugin):
lock = threading.Lock()
def activate(self):
super(otp, self).activate()
        self.DATA_DIR = '{0}/'.format(self.bot_config.BOT_DATA_DIR)
if 'commands' not in self:
self['commands'] = set()
if 'secrets' not in self:
self['secrets'] = dict()
@contextlib.contextmanager
def stored(self, key):
value = self[key]
try:
yield value
finally:
self[key] = value
def get_configuration_template(self):
return dict(
provision_via_chat=False,
max_retries=10
)
def build_qrcode(self, user, url):
prefix = self.DATA_DIR
qrcode.make(url).save('{0}{1}-qrcode.png'.format(prefix, user), format='png')
def get_identity(self, message):
try:
return message.frm.aclattr
except AttributeError:
return message.frm.person
@botcmd(admin_only=True)
def otp_delete_all(self, message, args):
self['commands'] = set()
self['secrets'] = dict()
return 'Removed **all** OTP tokens and command filters.'
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_add_command')
def otp_add_command(self, message, cmd=None):
with self.lock:
with self.stored('commands') as commands:
commands.add(cmd)
return dict(command=cmd)
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_remove_command')
def otp_remove_command(self, message, cmd=None):
with self.lock:
with self.stored('commands') as commands:
if cmd not in commands:
return dict(err=True, command=cmd)
commands.remove(cmd)
return dict(err=False, command=cmd)
@botcmd(admin_only=True, template='otp_commands')
def otp_commands(self, message, args):
return dict(commands=self['commands'])
@arg_botcmd('user', type=str, admin_only=True, template='otp_secret_create')
def otp_secret_create(self, message, user=None):
secret = pyotp.random_base32()
with self.lock:
with self.stored('secrets') as secrets:
secrets[user] = (secret, 0, _BASE_TIME)
totp = pyotp.TOTP(secret)
url = totp.provisioning_uri(user)
self.build_qrcode(user, url)
if self.config:
if self.config.get('provision_via_chat'):
f = open('{0}{1}-qrcode.png'.format(self.DATA_DIR, user), 'rb')
self.send_stream_request(self.build_identifier(user), f, name='OTP-secret.png')
self.send_templated(self.build_identifier(user), 'otp_secret_create_pm', dict(url=url))
return dict(chat_enrollment=True, user=user)
return dict(chat_enrollment=False, user=user)
@arg_botcmd('otp', type=int, template='otp_auth')
def otp_auth(self, message, otp=None):
if message.is_group:
return dict(group_chat=True)
identity = self.get_identity(message)
if identity not in self['secrets']:
return dict(not_enrolled=True)
secret, attempts, _ = self['secrets'][identity]
totp = pyotp.TOTP(secret)
if totp.verify(otp):
with self.lock:
with self.stored('secrets') as secrets:
secret, _, _ = secrets[identity]
secrets[identity] = (secret, 0, datetime.datetime.now())
return dict(success=True)
else:
# Increase the number of attempts, or burn secret
with self.lock:
with self.stored('secrets') as secrets:
secret, attempts, ts = secrets[identity]
if attempts > self.config.get('max_retries'):
secret = ''
secrets[identity] = (secret, attempts+1, ts)
return dict(success=False)
@cmdfilter
def otp_filter(self, message, command, args, dry_run):
with self.lock:
if command in self['commands']:
self.log.info('{0} is protected by OTP. Processing.'.format(command))
identity = self.get_identity(message)
secrets = self['secrets']
if identity not in secrets:
# Command is filtered, user doesn't have an OTP token
self.send_templated(message.frm, 'otp_filter', dict(not_enrolled=True))
return None, None, None
_, _, lastotp = secrets[identity]
if datetime.datetime.now() - lastotp > _OTP_EXPIRE:
self.log.info('{0} has not authenticated with OTP since expire'.format(identity))
self.send_templated(message.frm, 'otp_filter', dict(auth_required=True))
return None, None, None
self.log.info('OTP ok, permit command.')
return message, command, args
| true
| true
|
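The plugin above leans on pyotp's documented TOTP API. A minimal sketch of the enrollment and verification round trip it performs (the email address is illustrative):

import pyotp

# enrollment, as in otp_secret_create: shared secret plus provisioning URI for a QR code
secret = pyotp.random_base32()
totp = pyotp.TOTP(secret)
uri = totp.provisioning_uri("alice@example.com")

# verification, as in otp_auth: the submitted code must match the current time window
code = totp.now()          # what the user's authenticator app would display right now
assert totp.verify(code)   # True within the current 30-second window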
7903cb1355b2a599e9572c69f2232806e2320dac
| 1,170
|
py
|
Python
|
Murphi/ModularMurphi/GenModStateFunc.py
|
icsa-caps/HieraGen
|
4026c1718878d2ef69dd13d3e6e10cab69174fda
|
[
"MIT"
] | 6
|
2020-07-07T15:45:13.000Z
|
2021-08-29T06:44:29.000Z
|
Murphi/ModularMurphi/GenModStateFunc.py
|
icsa-caps/HieraGen
|
4026c1718878d2ef69dd13d3e6e10cab69174fda
|
[
"MIT"
] | null | null | null |
Murphi/ModularMurphi/GenModStateFunc.py
|
icsa-caps/HieraGen
|
4026c1718878d2ef69dd13d3e6e10cab69174fda
|
[
"MIT"
] | null | null | null |
from typing import List, Dict
from DataObjects.ClassCluster import Cluster
from Murphi.ModularMurphi.MurphiTokens import MurphiTokens
from Murphi.ModularMurphi.TemplateClass import TemplateHandler
from DataObjects.ClassMachine import Machine
class GenModStateFunc(MurphiTokens, TemplateHandler):
def __init__(self, handler_dir: str):
TemplateHandler.__init__(self, handler_dir)
def gen_mod_state_func(self, clusters: List[Cluster]):
mod_state_func = "--" + __name__ + self.nl
machine_dict: Dict[str, Machine] = {}
for cluster in clusters:
for machine in cluster.system_tuple:
if machine.arch.get_unique_id_str() not in machine_dict:
machine_dict[machine.arch.get_unique_id_str()] = machine
for machine in machine_dict.values():
mod_state_func += self._stringReplKeys(self._openTemplate(self.fmodifystate),
[machine.arch.get_unique_id_str(), self.kmachines,
self.statesuf, self.instsuf, self.iState]) + self.nl
return mod_state_func + self.nl
| 39
| 104
| 0.660684
|
from typing import List, Dict
from DataObjects.ClassCluster import Cluster
from Murphi.ModularMurphi.MurphiTokens import MurphiTokens
from Murphi.ModularMurphi.TemplateClass import TemplateHandler
from DataObjects.ClassMachine import Machine
class GenModStateFunc(MurphiTokens, TemplateHandler):
def __init__(self, handler_dir: str):
TemplateHandler.__init__(self, handler_dir)
def gen_mod_state_func(self, clusters: List[Cluster]):
mod_state_func = "--" + __name__ + self.nl
machine_dict: Dict[str, Machine] = {}
for cluster in clusters:
for machine in cluster.system_tuple:
if machine.arch.get_unique_id_str() not in machine_dict:
machine_dict[machine.arch.get_unique_id_str()] = machine
for machine in machine_dict.values():
mod_state_func += self._stringReplKeys(self._openTemplate(self.fmodifystate),
[machine.arch.get_unique_id_str(), self.kmachines,
self.statesuf, self.instsuf, self.iState]) + self.nl
return mod_state_func + self.nl
| true
| true
|
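The machine_dict loop above is a first-occurrence-wins de-duplication keyed on a unique id. The same pattern in isolation (names here are illustrative, not from HieraGen):

def dedupe_by_key(items, key):
    # keep the first item seen for each key, preserving encounter order
    seen = {}
    for item in items:
        seen.setdefault(key(item), item)
    return list(seen.values())

assert dedupe_by_key(["aa", "ab", "ba"], key=lambda s: s[0]) == ["aa", "ba"]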
7903cb2ed5f224ec5ea81319fd74bde714681376
| 3,322
|
py
|
Python
|
server/main/urls.py
|
somtirtharoy/edd
|
b69c42d6d3f383347054f2df76d4e577642b2021
|
[
"BSD-3-Clause-LBNL"
] | 13
|
2016-11-15T07:33:40.000Z
|
2021-09-22T12:19:13.000Z
|
server/main/urls.py
|
somtirtharoy/edd
|
b69c42d6d3f383347054f2df76d4e577642b2021
|
[
"BSD-3-Clause-LBNL"
] | 40
|
2017-04-04T15:20:14.000Z
|
2022-03-31T04:34:37.000Z
|
server/main/urls.py
|
somtirtharoy/edd
|
b69c42d6d3f383347054f2df76d4e577642b2021
|
[
"BSD-3-Clause-LBNL"
] | 10
|
2017-09-21T07:27:01.000Z
|
2022-03-10T17:02:19.000Z
|
from django.contrib.auth.decorators import login_required
from django.urls import include, path
from . import views
app_name = "main"
# These are the URL endpoints nested under a link to a specific Study, for use with include() in
# the two URL paths for study below. Because this list is included twice, there should be no
# URL with the name kwarg here, as that will result in conflicts looking up URLs by name.
study_url_patterns = [
path("", login_required(views.StudyDetailView.as_view()), name="detail"),
path(
"overview/", login_required(views.StudyOverviewView.as_view()), name="overview"
),
path("description/", login_required(views.StudyLinesView.as_view()), name="lines"),
path("describe/", include("edd.describe.urls", namespace="describe")),
path("load/", include("edd.load.urls", namespace="load")),
    # keep the verbose experiment-description path for backward-compatible links
path("experiment-description/", login_required(views.StudyLinesView.as_view())),
path("assaydata/", login_required(views.study_assay_table_data), name="assaydata"),
path("edddata/", login_required(views.study_edddata), name="edddata"),
path(
"measurements/<int:protocol>/",
include(
[
path("", login_required(views.study_measurements), name="measurements"),
path(
"<int:assay>/",
login_required(views.study_measurements),
name="assay_measurements",
),
]
),
),
path(
"permissions/",
login_required(views.StudyPermissionJSONView.as_view()),
name="permissions",
),
path(
"files/<int:file_id>/",
include(
[
# require the ID in URL
path(
"",
login_required(views.StudyAttachmentView.as_view()),
name="attachment_list",
),
# optional to include file name in URL; reverse() should include it
path(
"<path:file_name>/",
login_required(views.StudyAttachmentView.as_view()),
name="attachment",
),
]
),
),
]
urlpatterns = [
# "homepage" URLs
path("", login_required(views.StudyIndexView.as_view()), name="index"),
path(
"study/", login_required(views.StudyCreateView.as_view()), name="create_study"
),
# Individual study-specific pages loaded by primary key
# reverse('main:edd-pk:overview', kwargs={'pk': pk})
path("study/<int:pk>/", include((study_url_patterns, "edd-pk"))),
# Individual study-specific pages loaded by slug
# reverse('main:overview', kwargs={'slug': slug})
path("s/<slug:slug>/", include(study_url_patterns)),
# edd.describe URLs that can work without a study reference
    # these pages should migrate outside of the application, see EDD-1244
path("describe/", include("edd.describe.flat_urls", namespace="describe_flat")),
path("search/", include("edd.search.urls", namespace="search")),
# Miscellaneous URLs; most/all of these should eventually be delegated to REST API
path("load/", include("edd.load.flat_urls", namespace="load_flat")),
]
| 40.512195
| 96
| 0.614088
|
from django.contrib.auth.decorators import login_required
from django.urls import include, path
from . import views
app_name = "main"
study_url_patterns = [
path("", login_required(views.StudyDetailView.as_view()), name="detail"),
path(
"overview/", login_required(views.StudyOverviewView.as_view()), name="overview"
),
path("description/", login_required(views.StudyLinesView.as_view()), name="lines"),
path("describe/", include("edd.describe.urls", namespace="describe")),
path("load/", include("edd.load.urls", namespace="load")),
path("experiment-description/", login_required(views.StudyLinesView.as_view())),
path("assaydata/", login_required(views.study_assay_table_data), name="assaydata"),
path("edddata/", login_required(views.study_edddata), name="edddata"),
path(
"measurements/<int:protocol>/",
include(
[
path("", login_required(views.study_measurements), name="measurements"),
path(
"<int:assay>/",
login_required(views.study_measurements),
name="assay_measurements",
),
]
),
),
path(
"permissions/",
login_required(views.StudyPermissionJSONView.as_view()),
name="permissions",
),
path(
"files/<int:file_id>/",
include(
[
path(
"",
login_required(views.StudyAttachmentView.as_view()),
name="attachment_list",
),
path(
"<path:file_name>/",
login_required(views.StudyAttachmentView.as_view()),
name="attachment",
),
]
),
),
]
urlpatterns = [
path("", login_required(views.StudyIndexView.as_view()), name="index"),
path(
"study/", login_required(views.StudyCreateView.as_view()), name="create_study"
),
path("study/<int:pk>/", include((study_url_patterns, "edd-pk"))),
path("s/<slug:slug>/", include(study_url_patterns)),
path("describe/", include("edd.describe.flat_urls", namespace="describe_flat")),
path("search/", include("edd.search.urls", namespace="search")),
path("load/", include("edd.load.flat_urls", namespace="load_flat")),
]
| true
| true
|
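Because study_url_patterns is included twice (once under the 'edd-pk' namespace, once directly), the same view reverses in two ways, exactly as the inline comments note. A short sketch, assuming a study with pk 1 and slug 'demo':

from django.urls import reverse

# pk-based include
reverse("main:edd-pk:overview", kwargs={"pk": 1})     # -> /study/1/overview/
# slug-based include
reverse("main:overview", kwargs={"slug": "demo"})     # -> /s/demo/overview/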
7903cc60c6a57a64d4cb67b5b5352e148b0204f1
| 8,766
|
py
|
Python
|
aiida/repository/backend/abstract.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
aiida/repository/backend/abstract.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
aiida/repository/backend/abstract.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2019-12-27T17:34:52.000Z
|
2019-12-27T17:34:52.000Z
|
# -*- coding: utf-8 -*-
"""Class that defines the abstract interface for an object repository.
The scope of this class is intentionally very narrow. Any backend implementation should merely provide the methods to
store binary blobs, or "objects", and return a string-based key that uniquely identifies the object that was just created.
This key can then be used to retrieve the bytes of the corresponding object or to delete it.
"""
import abc
import contextlib
import hashlib
import io
import pathlib
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple, Union
from aiida.common.hashing import chunked_file_hash
__all__ = ('AbstractRepositoryBackend',)
class AbstractRepositoryBackend(metaclass=abc.ABCMeta):
"""Class that defines the abstract interface for an object repository.
The repository backend only deals with raw bytes, both when creating new objects as well as when returning a stream
or the content of an existing object. The encoding and decoding of the byte content should be done by the client
upstream. The file repository backend is also not expected to keep any kind of file hierarchy but must be assumed
to be a simple flat data store. When files are created in the file object repository, the implementation will return
a string-based key with which the content of the stored object can be addressed. This key is guaranteed to be unique
and persistent. Persisting the key or mapping it onto a virtual file hierarchy is again up to the client upstream.
"""
@property
@abc.abstractmethod
def uuid(self) -> Optional[str]:
"""Return the unique identifier of the repository."""
@property
@abc.abstractmethod
def key_format(self) -> Optional[str]:
"""Return the format for the keys of the repository.
        Important when migrating between backends (e.g. archive -> main): if the key formats are not equal, it is
        necessary to re-compute all the `Node.repository_metadata` before importing (otherwise the keys will not
        match those stored in the repository).
"""
@abc.abstractmethod
def initialise(self, **kwargs) -> None:
"""Initialise the repository if it hasn't already been initialised.
:param kwargs: parameters for the initialisation.
"""
@property
@abc.abstractmethod
def is_initialised(self) -> bool:
"""Return whether the repository has been initialised."""
@abc.abstractmethod
def erase(self) -> None:
"""Delete the repository itself and all its contents.
.. note:: This should not merely delete the contents of the repository but any resources it created. For
example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not
just its contents.
"""
@staticmethod
def is_readable_byte_stream(handle) -> bool:
return hasattr(handle, 'read') and hasattr(handle, 'mode') and 'b' in handle.mode
def put_object_from_filelike(self, handle: BinaryIO) -> str:
"""Store the byte contents of a file in the repository.
:param handle: filelike object with the byte content to be stored.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
if not isinstance(handle, io.BufferedIOBase) and not self.is_readable_byte_stream(handle):
raise TypeError(f'handle does not seem to be a byte stream: {type(handle)}.')
return self._put_object_from_filelike(handle)
@abc.abstractmethod
def _put_object_from_filelike(self, handle: BinaryIO) -> str:
pass
def put_object_from_file(self, filepath: Union[str, pathlib.Path]) -> str:
"""Store a new object with contents of the file located at `filepath` on this file system.
:param filepath: absolute path of file whose contents to copy to the repository.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
with open(filepath, mode='rb') as handle:
return self.put_object_from_filelike(handle)
@abc.abstractmethod
def has_objects(self, keys: List[str]) -> List[bool]:
"""Return whether the repository has an object with the given key.
:param keys:
list of fully qualified identifiers for objects within the repository.
:return:
list of logicals, in the same order as the keys provided, with value True if the respective
object exists and False otherwise.
"""
def has_object(self, key: str) -> bool:
"""Return whether the repository has an object with the given key.
:param key: fully qualified identifier for the object within the repository.
:return: True if the object exists, False otherwise.
"""
return self.has_objects([key])[0]
@abc.abstractmethod
def list_objects(self) -> Iterable[str]:
"""Return iterable that yields all available objects by key.
:return: An iterable for all the available object keys.
"""
@contextlib.contextmanager
def open(self, key: str) -> Iterator[BinaryIO]:
"""Open a file handle to an object stored under the given key.
.. note:: this should only be used to open a handle to read an existing file. To write a new file use the method
``put_object_from_filelike`` instead.
:param key: fully qualified identifier for the object within the repository.
:return: yield a byte stream object.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
if not self.has_object(key):
raise FileNotFoundError(f'object with key `{key}` does not exist.')
def get_object_content(self, key: str) -> bytes:
"""Return the content of a object identified by key.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
with self.open(key) as handle: # pylint: disable=not-context-manager
return handle.read()
@abc.abstractmethod
def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
"""Return an iterator over the (read-only) byte streams of objects identified by key.
.. note:: handles should only be read within the context of this iterator.
:param keys: fully qualified identifiers for the objects within the repository.
:return: an iterator over the object byte streams.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if a file could not be opened.
"""
def get_object_hash(self, key: str) -> str:
"""Return the SHA-256 hash of an object stored under the given key.
.. important::
A SHA-256 hash should always be returned,
to ensure consistency across different repository implementations.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
with self.open(key) as handle: # pylint: disable=not-context-manager
return chunked_file_hash(handle, hashlib.sha256)
@abc.abstractmethod
def delete_objects(self, keys: List[str]) -> None:
"""Delete the objects from the repository.
:param keys: list of fully qualified identifiers for the objects within the repository.
:raise FileNotFoundError: if any of the files does not exist.
:raise OSError: if any of the files could not be deleted.
"""
keys_exist = self.has_objects(keys)
if not all(keys_exist):
error_message = 'some of the keys provided do not correspond to any object in the repository:\n'
for indx, key_exists in enumerate(keys_exist):
if not key_exists:
error_message += f' > object with key `{keys[indx]}` does not exist.\n'
raise FileNotFoundError(error_message)
def delete_object(self, key: str) -> None:
"""Delete the object from the repository.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be deleted.
"""
return self.delete_objects([key])
| 44.497462
| 120
| 0.682523
|
import abc
import contextlib
import hashlib
import io
import pathlib
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple, Union
from aiida.common.hashing import chunked_file_hash
__all__ = ('AbstractRepositoryBackend',)
class AbstractRepositoryBackend(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def uuid(self) -> Optional[str]:
@property
@abc.abstractmethod
def key_format(self) -> Optional[str]:
@abc.abstractmethod
def initialise(self, **kwargs) -> None:
@property
@abc.abstractmethod
def is_initialised(self) -> bool:
@abc.abstractmethod
def erase(self) -> None:
@staticmethod
def is_readable_byte_stream(handle) -> bool:
return hasattr(handle, 'read') and hasattr(handle, 'mode') and 'b' in handle.mode
def put_object_from_filelike(self, handle: BinaryIO) -> str:
if not isinstance(handle, io.BufferedIOBase) and not self.is_readable_byte_stream(handle):
raise TypeError(f'handle does not seem to be a byte stream: {type(handle)}.')
return self._put_object_from_filelike(handle)
@abc.abstractmethod
def _put_object_from_filelike(self, handle: BinaryIO) -> str:
pass
def put_object_from_file(self, filepath: Union[str, pathlib.Path]) -> str:
with open(filepath, mode='rb') as handle:
return self.put_object_from_filelike(handle)
@abc.abstractmethod
def has_objects(self, keys: List[str]) -> List[bool]:
def has_object(self, key: str) -> bool:
return self.has_objects([key])[0]
@abc.abstractmethod
def list_objects(self) -> Iterable[str]:
@contextlib.contextmanager
def open(self, key: str) -> Iterator[BinaryIO]:
if not self.has_object(key):
raise FileNotFoundError(f'object with key `{key}` does not exist.')
def get_object_content(self, key: str) -> bytes:
with self.open(key) as handle:
return handle.read()
@abc.abstractmethod
def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
def get_object_hash(self, key: str) -> str:
with self.open(key) as handle:
return chunked_file_hash(handle, hashlib.sha256)
@abc.abstractmethod
def delete_objects(self, keys: List[str]) -> None:
keys_exist = self.has_objects(keys)
if not all(keys_exist):
error_message = 'some of the keys provided do not correspond to any object in the repository:\n'
for indx, key_exists in enumerate(keys_exist):
if not key_exists:
error_message += f' > object with key `{keys[indx]}` does not exist.\n'
raise FileNotFoundError(error_message)
def delete_object(self, key: str) -> None:
return self.delete_objects([key])
| true
| true
|
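To make the abstract contract above concrete, here is a minimal in-memory backend sketch. It is illustrative only (not part of aiida-core); it keys objects by their sha256 hex digest and fills in every abstract member:

import contextlib
import hashlib
import io
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple

from aiida.repository.backend.abstract import AbstractRepositoryBackend

class InMemoryRepositoryBackend(AbstractRepositoryBackend):
    """Toy backend: objects live in a dict keyed by their sha256 hex digest."""

    def __init__(self):
        self._store = {}

    @property
    def uuid(self) -> Optional[str]:
        return None  # an in-memory store has no persistent identity

    @property
    def key_format(self) -> Optional[str]:
        return 'sha256'

    def initialise(self, **kwargs) -> None:
        pass  # nothing to set up

    @property
    def is_initialised(self) -> bool:
        return True

    def erase(self) -> None:
        self._store.clear()

    def _put_object_from_filelike(self, handle: BinaryIO) -> str:
        content = handle.read()
        key = hashlib.sha256(content).hexdigest()
        self._store[key] = content
        return key

    def has_objects(self, keys: List[str]) -> List[bool]:
        return [key in self._store for key in keys]

    def list_objects(self) -> Iterable[str]:
        return list(self._store)

    @contextlib.contextmanager
    def open(self, key: str) -> Iterator[BinaryIO]:
        if key not in self._store:
            raise FileNotFoundError(f'object with key `{key}` does not exist.')
        yield io.BytesIO(self._store[key])

    def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
        for key in keys:
            with self.open(key) as handle:
                yield key, handle

    def delete_objects(self, keys: List[str]) -> None:
        super().delete_objects(keys)  # validates existence, raises FileNotFoundError
        for key in keys:
            del self._store[key]

backend = InMemoryRepositoryBackend()
key = backend.put_object_from_filelike(io.BytesIO(b'hello'))
assert backend.get_object_content(key) == b'hello'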
7903ccfa871f29fa09227f2f965adb2e5fea3146
| 288
|
py
|
Python
|
camel_space.py
|
brennanbrown/code-challenges
|
e7d0f9547ead58ee58f1365f5ea35743525a9a82
|
[
"BSD-2-Clause"
] | 1
|
2021-06-14T07:36:41.000Z
|
2021-06-14T07:36:41.000Z
|
camel_space.py
|
brennanbrown/code-challenges
|
e7d0f9547ead58ee58f1365f5ea35743525a9a82
|
[
"BSD-2-Clause"
] | null | null | null |
camel_space.py
|
brennanbrown/code-challenges
|
e7d0f9547ead58ee58f1365f5ea35743525a9a82
|
[
"BSD-2-Clause"
] | null | null | null |
import re
def camel_space(string):
string = re.sub(r'(?<!^)(?=[A-Z])', ' ', string)
return string
# Codewars-style checks; `Test` is provided by the Codewars runner, and the
# kata's `solution` is this module's `camel_space`.
Test.assert_equals(camel_space("helloWorld"), "hello World")
Test.assert_equals(camel_space("camelCase"), "camel Case")
Test.assert_equals(camel_space("breakCamelCase"), "break Camel Case")
| 32
| 66
| 0.697917
|
import re
def camel_space(string):
string = re.sub(r'(?<!^)(?=[A-Z])', ' ', string)
return string
Test.assert_equals(camel_space("helloWorld"), "hello World")
Test.assert_equals(camel_space("camelCase"), "camel Case")
Test.assert_equals(camel_space("breakCamelCase"), "break Camel Case")
| true
| true
|
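The pattern above matches a zero-width position: (?<!^) rejects the start of the string via negative lookbehind, and (?=[A-Z]) looks ahead for a capital without consuming it, so re.sub inserts a space before every interior capital. A quick self-check:

import re

def camel_space(string):
    return re.sub(r'(?<!^)(?=[A-Z])', ' ', string)

assert camel_space("breakCamelCase") == "break Camel Case"
assert camel_space("Already") == "Already"  # a leading capital is left alone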
7903cdf449b5f4a3d2dfbe161bb74bd3196fd594
| 686
|
py
|
Python
|
cotk/scripts/import_local_resources.py
|
kepei1106/cotk
|
29b25b9469468dfd6d2aba433c2b935831351de7
|
[
"Apache-2.0"
] | null | null | null |
cotk/scripts/import_local_resources.py
|
kepei1106/cotk
|
29b25b9469468dfd6d2aba433c2b935831351de7
|
[
"Apache-2.0"
] | null | null | null |
cotk/scripts/import_local_resources.py
|
kepei1106/cotk
|
29b25b9469468dfd6d2aba433c2b935831351de7
|
[
"Apache-2.0"
] | 1
|
2019-03-21T05:34:24.000Z
|
2019-03-21T05:34:24.000Z
|
#!/usr/bin/env python
'''
A command-line library that helps users import local resources.
'''
import json
import argparse
from .._utils import file_utils
from . import main
def import_local_resources(args):
'''Entrance of importing local resources'''
parser = argparse.ArgumentParser(prog="cotk import", \
description="Import local resources")
parser.add_argument("file_id", type=str, help="Name of resource")
parser.add_argument("file_path", type=str, help="Path to resource")
cargs = parser.parse_args(args)
file_utils.import_local_resources(cargs.file_id, cargs.file_path)
main.LOGGER.info("Successfully import local resource {}.".format(cargs.file_id))
| 34.3
| 82
| 0.749271
|
import json
import argparse
from .._utils import file_utils
from . import main
def import_local_resources(args):
parser = argparse.ArgumentParser(prog="cotk import", \
description="Import local resources")
parser.add_argument("file_id", type=str, help="Name of resource")
parser.add_argument("file_path", type=str, help="Path to resource")
cargs = parser.parse_args(args)
file_utils.import_local_resources(cargs.file_id, cargs.file_path)
main.LOGGER.info("Successfully import local resource {}.".format(cargs.file_id))
| true
| true
|
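The parsing logic above is plain argparse with two positionals. A standalone sketch with the cotk internals stubbed out (the argument values are made up):

import argparse

def parse_import_args(args):
    parser = argparse.ArgumentParser(prog="cotk import",
                                     description="Import local resources")
    parser.add_argument("file_id", type=str, help="Name of resource")
    parser.add_argument("file_path", type=str, help="Path to resource")
    cargs = parser.parse_args(args)
    return cargs.file_id, cargs.file_path

assert parse_import_args(["my_resource", "./my_resource.zip"]) == ("my_resource", "./my_resource.zip")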
7903ce3f4c7aaca57bba6a8d2056f05cd3727fde
| 602
|
py
|
Python
|
main/algoSitemap.py
|
algorithms-gad/algoBook
|
6a4fb34ae0028feab97707843d9c8ebfeb7386cc
|
[
"Apache-2.0"
] | 5
|
2019-06-20T06:59:41.000Z
|
2022-02-08T21:21:32.000Z
|
main/algoSitemap.py
|
algorithms-gad/algoBook
|
6a4fb34ae0028feab97707843d9c8ebfeb7386cc
|
[
"Apache-2.0"
] | null | null | null |
main/algoSitemap.py
|
algorithms-gad/algoBook
|
6a4fb34ae0028feab97707843d9c8ebfeb7386cc
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.sitemaps import Sitemap
from main.models import Algo, Code
class AlgoSitemap(Sitemap):
changefreq = "daily"
priority = 1
def items(self):
return Algo.objects.all()
def lastmod(self, obj):
return obj.created_at
def location(self, obj):
return "/" + obj.slug
class CodeSitemap(Sitemap):
changefreq = "daily"
priority = 1
def items(self):
return Code.objects.all()
def lastmod(self, obj):
return obj.algo.created_at
def location(self, obj):
return "/code/%s/%s" % (obj.id, obj.algo.slug)
| 20.066667
| 54
| 0.626246
|
from django.contrib.sitemaps import Sitemap
from main.models import Algo, Code
class AlgoSitemap(Sitemap):
changefreq = "daily"
priority = 1
def items(self):
return Algo.objects.all()
def lastmod(self, obj):
return obj.created_at
def location(self, obj):
return "/" + obj.slug
class CodeSitemap(Sitemap):
changefreq = "daily"
priority = 1
def items(self):
return Code.objects.all()
def lastmod(self, obj):
return obj.algo.created_at
def location(self, obj):
return "/code/%s/%s" % (obj.id, obj.algo.slug)
| true
| true
|
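Sitemap classes like the two above are wired up through django.contrib.sitemaps' documented view. The class import path comes from this record; the URLconf itself is an assumption about the project:

from django.contrib.sitemaps.views import sitemap
from django.urls import path

from main.algoSitemap import AlgoSitemap, CodeSitemap

sitemaps = {"algos": AlgoSitemap, "code": CodeSitemap}

urlpatterns = [
    path("sitemap.xml", sitemap, {"sitemaps": sitemaps},
         name="django.contrib.sitemaps.views.sitemap"),
]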
7903cf83bf486a7b16e3a98be063096227b14f80
| 3,174
|
py
|
Python
|
flask_kits/sms/__init__.py
|
by46/flask-kits
|
51edfd426fcb8db326d3cc3d7a5b07830d555163
|
[
"MIT"
] | 1
|
2018-05-22T16:27:58.000Z
|
2018-05-22T16:27:58.000Z
|
flask_kits/sms/__init__.py
|
by46/flask-kits
|
51edfd426fcb8db326d3cc3d7a5b07830d555163
|
[
"MIT"
] | null | null | null |
flask_kits/sms/__init__.py
|
by46/flask-kits
|
51edfd426fcb8db326d3cc3d7a5b07830d555163
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import base64
import hmac
import json
import sys
import time
import urllib
import uuid
from hashlib import sha1
import requests
from flask import current_app
from werkzeug.local import LocalProxy
DEFAULT_URL = 'https://sms.aliyuncs.com'
SMS = LocalProxy(lambda: current_app.extensions['kits_sms'])
class SMSSender(object):
def __init__(self, app_key, secret_key, url=DEFAULT_URL):
self.app_key = app_key
self.secret_key = secret_key
self.url = url
@staticmethod
def percent_encode(content):
# content = str(content)
res = urllib.quote(content, '')
res = res.replace('+', '%20')
res = res.replace('*', '%2A')
res = res.replace('%7E', '~')
return res
def sign(self, access_key_secret, params):
params = sorted(params.items(), key=lambda param: param[0])
canonical_querystring = ''
for (k, v) in params:
canonical_querystring += '&' + self.percent_encode(k) + '=' + self.percent_encode(v)
        string_to_sign = 'GET&%2F&' + self.percent_encode(canonical_querystring[1:])  # sign using the GET request method
h = hmac.new(access_key_secret + "&", string_to_sign, sha1)
signature = base64.encodestring(h.digest()).strip()
return signature
def make_url(self, params):
timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
parameters = {
'Format': 'JSON',
'Version': '2016-09-27',
'AccessKeyId': self.app_key,
'SignatureVersion': '1.0',
'SignatureMethod': 'HMAC-SHA1',
'SignatureNonce': str(uuid.uuid1()),
'Timestamp': timestamp,
}
for key in params.keys():
parameters[key] = params[key]
signature = self.sign(self.secret_key, parameters)
parameters['Signature'] = signature
url = self.url + "/?" + urllib.urlencode(parameters)
return url
def do_request(self, params):
url = self.make_url(params)
response = requests.get(url)
print response.ok, response.content
def send(self, template_code, sign_name, receive_num, param):
params = {
'Action': 'SingleSendSms',
'SignName': sign_name,
'TemplateCode': template_code,
'RecNum': receive_num,
'ParamString': json.dumps(param)
}
url = self.make_url(params)
response = requests.get(url)
if not response.ok:
current_app.logger.error(response.content)
return response.ok
def init_extension(kits, app):
url = kits.get_parameter('SMS_URL', default=DEFAULT_URL)
app_key = kits.get_parameter("SMS_APP_KEY")
secret_key = kits.get_parameter('SMS_SECRET_KEY')
app.extensions['kits_sms'] = SMSSender(app_key, secret_key, url)
if __name__ == '__main__':
sender = SMSSender('LTAIWLcy7iT5v7mr', 'gRL1rtYnyfKMDVZs7b4fhbosX0MAAo ')
print sender.send("SMS_49485493", u"testing", "18708140165", param={'code': "123456", 'product': "benjamin"})
| 33.0625
| 114
| 0.600504
|
import base64
import hmac
import json
import sys
import time
import urllib
import uuid
from hashlib import sha1
import requests
from flask import current_app
from werkzeug.local import LocalProxy
DEFAULT_URL = 'https://sms.aliyuncs.com'
SMS = LocalProxy(lambda: current_app.extensions['kits_sms'])
class SMSSender(object):
def __init__(self, app_key, secret_key, url=DEFAULT_URL):
self.app_key = app_key
self.secret_key = secret_key
self.url = url
@staticmethod
def percent_encode(content):
res = urllib.quote(content, '')
res = res.replace('+', '%20')
res = res.replace('*', '%2A')
res = res.replace('%7E', '~')
return res
def sign(self, access_key_secret, params):
params = sorted(params.items(), key=lambda param: param[0])
canonical_querystring = ''
for (k, v) in params:
canonical_querystring += '&' + self.percent_encode(k) + '=' + self.percent_encode(v)
string_to_sign = 'GET&%2F&' + self.percent_encode(canonical_querystring[1:])
h = hmac.new(access_key_secret + "&", string_to_sign, sha1)
signature = base64.encodestring(h.digest()).strip()
return signature
def make_url(self, params):
timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
parameters = {
'Format': 'JSON',
'Version': '2016-09-27',
'AccessKeyId': self.app_key,
'SignatureVersion': '1.0',
'SignatureMethod': 'HMAC-SHA1',
'SignatureNonce': str(uuid.uuid1()),
'Timestamp': timestamp,
}
for key in params.keys():
parameters[key] = params[key]
signature = self.sign(self.secret_key, parameters)
parameters['Signature'] = signature
url = self.url + "/?" + urllib.urlencode(parameters)
return url
def do_request(self, params):
url = self.make_url(params)
response = requests.get(url)
print response.ok, response.content
def send(self, template_code, sign_name, receive_num, param):
params = {
'Action': 'SingleSendSms',
'SignName': sign_name,
'TemplateCode': template_code,
'RecNum': receive_num,
'ParamString': json.dumps(param)
}
url = self.make_url(params)
response = requests.get(url)
if not response.ok:
current_app.logger.error(response.content)
return response.ok
def init_extension(kits, app):
url = kits.get_parameter('SMS_URL', default=DEFAULT_URL)
app_key = kits.get_parameter("SMS_APP_KEY")
secret_key = kits.get_parameter('SMS_SECRET_KEY')
app.extensions['kits_sms'] = SMSSender(app_key, secret_key, url)
if __name__ == '__main__':
sender = SMSSender('LTAIWLcy7iT5v7mr', 'gRL1rtYnyfKMDVZs7b4fhbosX0MAAo ')
print sender.send("SMS_49485493", u"testing", "18708140165", param={'code': "123456", 'product': "benjamin"})
| false
| true
|
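The module above is Python 2 (print statements, urllib.quote, base64.encodestring). A hedged Python 3 sketch of the same Aliyun-style GET signature, kept behaviourally equivalent to SMSSender.sign:

import base64
import hmac
from hashlib import sha1
from urllib.parse import quote

def percent_encode(content):
    res = quote(str(content), safe='')
    return res.replace('+', '%20').replace('*', '%2A').replace('%7E', '~')

def sign(access_key_secret, params):
    # canonicalize: sort by key, percent-encode both key and value
    pairs = sorted(params.items())
    canonical = '&'.join(percent_encode(k) + '=' + percent_encode(v) for k, v in pairs)
    string_to_sign = 'GET&%2F&' + percent_encode(canonical)
    digest = hmac.new((access_key_secret + '&').encode('utf-8'),
                      string_to_sign.encode('utf-8'), sha1).digest()
    return base64.b64encode(digest).decode('ascii')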
7903d1cc8c0485bc5d203c5d86fad6f1c5124ba0
| 6,528
|
py
|
Python
|
pysnmp/EdgeSwitch-IPV6-TUNNEL-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/EdgeSwitch-IPV6-TUNNEL-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/EdgeSwitch-IPV6-TUNNEL-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module EdgeSwitch-IPV6-TUNNEL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EdgeSwitch-IPV6-TUNNEL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:56:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
fastPath, = mibBuilder.importSymbols("EdgeSwitch-REF-MIB", "fastPath")
InetAddressPrefixLength, InetAddressIPv4 = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressPrefixLength", "InetAddressIPv4")
Ipv6Address, Ipv6IfIndex, Ipv6AddressPrefix = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address", "Ipv6IfIndex", "Ipv6AddressPrefix")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, ModuleIdentity, Bits, Gauge32, Integer32, NotificationType, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Counter64, Counter32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ModuleIdentity", "Bits", "Gauge32", "Integer32", "NotificationType", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Counter64", "Counter32", "TimeTicks")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
fastPathIpv6Tunnel = ModuleIdentity((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27))
fastPathIpv6Tunnel.setRevisions(('2011-01-26 00:00', '2007-05-23 00:00',))
if mibBuilder.loadTexts: fastPathIpv6Tunnel.setLastUpdated('201101260000Z')
if mibBuilder.loadTexts: fastPathIpv6Tunnel.setOrganization('Broadcom Inc')
agentTunnelIPV6Group = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1))
agentTunnelIPV6Table = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1), )
if mibBuilder.loadTexts: agentTunnelIPV6Table.setStatus('current')
agentTunnelIPV6Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1), ).setIndexNames((0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelID"))
if mibBuilder.loadTexts: agentTunnelIPV6Entry.setStatus('current')
agentTunnelID = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: agentTunnelID.setStatus('current')
agentTunnelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTunnelIfIndex.setStatus('current')
agentTunnelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("undefined", 1), ("ip6over4", 2), ("ip6to4", 3))).clone('undefined')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelMode.setStatus('current')
agentTunnelLocalIP4Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 4), InetAddressIPv4()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelLocalIP4Addr.setStatus('current')
agentTunnelRemoteIP4Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 5), InetAddressIPv4()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelRemoteIP4Addr.setStatus('current')
agentTunnelLocalIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelLocalIfIndex.setStatus('current')
agentTunnelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelStatus.setStatus('current')
agentTunnelIcmpUnreachableMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelIcmpUnreachableMode.setStatus('current')
agentTunnelIPV6PrefixTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2), )
if mibBuilder.loadTexts: agentTunnelIPV6PrefixTable.setStatus('current')
agentTunnelIPV6PrefixEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1), ).setIndexNames((0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelID"), (0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelIPV6PrefixPrefix"), (0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelIPV6PrefixPrefixLen"))
if mibBuilder.loadTexts: agentTunnelIPV6PrefixEntry.setStatus('current')
agentTunnelIPV6PrefixPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 1), Ipv6AddressPrefix())
if mibBuilder.loadTexts: agentTunnelIPV6PrefixPrefix.setStatus('current')
agentTunnelIPV6PrefixPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 2), InetAddressPrefixLength())
if mibBuilder.loadTexts: agentTunnelIPV6PrefixPrefixLen.setStatus('current')
agentTunnelIPV6PrefixStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelIPV6PrefixStatus.setStatus('current')
mibBuilder.exportSymbols("EdgeSwitch-IPV6-TUNNEL-MIB", agentTunnelIPV6PrefixStatus=agentTunnelIPV6PrefixStatus, agentTunnelIPV6Entry=agentTunnelIPV6Entry, agentTunnelIPV6Table=agentTunnelIPV6Table, agentTunnelIPV6PrefixEntry=agentTunnelIPV6PrefixEntry, agentTunnelLocalIP4Addr=agentTunnelLocalIP4Addr, fastPathIpv6Tunnel=fastPathIpv6Tunnel, agentTunnelID=agentTunnelID, agentTunnelIPV6PrefixPrefix=agentTunnelIPV6PrefixPrefix, agentTunnelIPV6PrefixPrefixLen=agentTunnelIPV6PrefixPrefixLen, agentTunnelIPV6PrefixTable=agentTunnelIPV6PrefixTable, agentTunnelStatus=agentTunnelStatus, agentTunnelIPV6Group=agentTunnelIPV6Group, agentTunnelRemoteIP4Addr=agentTunnelRemoteIP4Addr, agentTunnelLocalIfIndex=agentTunnelLocalIfIndex, agentTunnelMode=agentTunnelMode, PYSNMP_MODULE_ID=fastPathIpv6Tunnel, agentTunnelIcmpUnreachableMode=agentTunnelIcmpUnreachableMode, agentTunnelIfIndex=agentTunnelIfIndex)
| 123.169811
| 896
| 0.780331
|
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
fastPath, = mibBuilder.importSymbols("EdgeSwitch-REF-MIB", "fastPath")
InetAddressPrefixLength, InetAddressIPv4 = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressPrefixLength", "InetAddressIPv4")
Ipv6Address, Ipv6IfIndex, Ipv6AddressPrefix = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address", "Ipv6IfIndex", "Ipv6AddressPrefix")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, ModuleIdentity, Bits, Gauge32, Integer32, NotificationType, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Counter64, Counter32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ModuleIdentity", "Bits", "Gauge32", "Integer32", "NotificationType", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Counter64", "Counter32", "TimeTicks")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
fastPathIpv6Tunnel = ModuleIdentity((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27))
fastPathIpv6Tunnel.setRevisions(('2011-01-26 00:00', '2007-05-23 00:00',))
if mibBuilder.loadTexts: fastPathIpv6Tunnel.setLastUpdated('201101260000Z')
if mibBuilder.loadTexts: fastPathIpv6Tunnel.setOrganization('Broadcom Inc')
agentTunnelIPV6Group = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1))
agentTunnelIPV6Table = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1), )
if mibBuilder.loadTexts: agentTunnelIPV6Table.setStatus('current')
agentTunnelIPV6Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1), ).setIndexNames((0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelID"))
if mibBuilder.loadTexts: agentTunnelIPV6Entry.setStatus('current')
agentTunnelID = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: agentTunnelID.setStatus('current')
agentTunnelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTunnelIfIndex.setStatus('current')
agentTunnelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("undefined", 1), ("ip6over4", 2), ("ip6to4", 3))).clone('undefined')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelMode.setStatus('current')
agentTunnelLocalIP4Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 4), InetAddressIPv4()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelLocalIP4Addr.setStatus('current')
agentTunnelRemoteIP4Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 5), InetAddressIPv4()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelRemoteIP4Addr.setStatus('current')
agentTunnelLocalIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelLocalIfIndex.setStatus('current')
agentTunnelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelStatus.setStatus('current')
agentTunnelIcmpUnreachableMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelIcmpUnreachableMode.setStatus('current')
agentTunnelIPV6PrefixTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2), )
if mibBuilder.loadTexts: agentTunnelIPV6PrefixTable.setStatus('current')
agentTunnelIPV6PrefixEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1), ).setIndexNames((0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelID"), (0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelIPV6PrefixPrefix"), (0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelIPV6PrefixPrefixLen"))
if mibBuilder.loadTexts: agentTunnelIPV6PrefixEntry.setStatus('current')
agentTunnelIPV6PrefixPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 1), Ipv6AddressPrefix())
if mibBuilder.loadTexts: agentTunnelIPV6PrefixPrefix.setStatus('current')
agentTunnelIPV6PrefixPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 2), InetAddressPrefixLength())
if mibBuilder.loadTexts: agentTunnelIPV6PrefixPrefixLen.setStatus('current')
agentTunnelIPV6PrefixStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelIPV6PrefixStatus.setStatus('current')
mibBuilder.exportSymbols("EdgeSwitch-IPV6-TUNNEL-MIB", agentTunnelIPV6PrefixStatus=agentTunnelIPV6PrefixStatus, agentTunnelIPV6Entry=agentTunnelIPV6Entry, agentTunnelIPV6Table=agentTunnelIPV6Table, agentTunnelIPV6PrefixEntry=agentTunnelIPV6PrefixEntry, agentTunnelLocalIP4Addr=agentTunnelLocalIP4Addr, fastPathIpv6Tunnel=fastPathIpv6Tunnel, agentTunnelID=agentTunnelID, agentTunnelIPV6PrefixPrefix=agentTunnelIPV6PrefixPrefix, agentTunnelIPV6PrefixPrefixLen=agentTunnelIPV6PrefixPrefixLen, agentTunnelIPV6PrefixTable=agentTunnelIPV6PrefixTable, agentTunnelStatus=agentTunnelStatus, agentTunnelIPV6Group=agentTunnelIPV6Group, agentTunnelRemoteIP4Addr=agentTunnelRemoteIP4Addr, agentTunnelLocalIfIndex=agentTunnelLocalIfIndex, agentTunnelMode=agentTunnelMode, PYSNMP_MODULE_ID=fastPathIpv6Tunnel, agentTunnelIcmpUnreachableMode=agentTunnelIcmpUnreachableMode, agentTunnelIfIndex=agentTunnelIfIndex)
| true
| true
|
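The compiled MIB above can drive symbolic lookups with pysnmp's high-level API. A hedged sketch, assuming the module is on the pysnmp MIB search path and an agent is reachable at the illustrative address 192.0.2.1:

from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                          ContextData, ObjectType, ObjectIdentity, getCmd)

# query agentTunnelMode for tunnel ID 0, resolved through the MIB above
iterator = getCmd(
    SnmpEngine(),
    CommunityData('public'),
    UdpTransportTarget(('192.0.2.1', 161)),
    ContextData(),
    ObjectType(ObjectIdentity('EdgeSwitch-IPV6-TUNNEL-MIB', 'agentTunnelMode', 0)))

errorIndication, errorStatus, errorIndex, varBinds = next(iterator)
if not errorIndication and not errorStatus:
    for varBind in varBinds:
        print(' = '.join(x.prettyPrint() for x in varBind))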
7903d207889a055df85c2d7142d6f97f376cde22
| 6,715
|
py
|
Python
|
src/dispatch/task/service.py
|
WouldYouKindly/dispatch
|
c3e8467fe36e0bd78f45a3d3292ea36384981468
|
[
"Apache-2.0"
] | null | null | null |
src/dispatch/task/service.py
|
WouldYouKindly/dispatch
|
c3e8467fe36e0bd78f45a3d3292ea36384981468
|
[
"Apache-2.0"
] | null | null | null |
src/dispatch/task/service.py
|
WouldYouKindly/dispatch
|
c3e8467fe36e0bd78f45a3d3292ea36384981468
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta
from typing import List, Optional
from sqlalchemy import or_
from dispatch.plugin import service as plugin_service
from dispatch.event import service as event_service
from dispatch.incident import flows as incident_flows
from dispatch.incident.flows import incident_service
from dispatch.ticket import service as ticket_service
from .models import Task, TaskStatus, TaskUpdate, TaskCreate
def get(*, db_session, task_id: int) -> Optional[Task]:
"""Get a single task by ID."""
return db_session.query(Task).filter(Task.id == task_id).first()
def get_by_resource_id(*, db_session, resource_id: str) -> Optional[Task]:
"""Get a single task by resource id."""
return db_session.query(Task).filter(Task.resource_id == resource_id).first()
def get_all(*, db_session) -> List[Optional[Task]]:
"""Return all tasks."""
return db_session.query(Task)
def get_all_by_incident_id(*, db_session, incident_id: int) -> List[Optional[Task]]:
"""Get all tasks by incident id."""
return db_session.query(Task).filter(Task.incident_id == incident_id)
def get_all_by_incident_id_and_status(
*, db_session, incident_id: int, status: str
) -> List[Optional[Task]]:
"""Get all tasks by incident id and status."""
return (
db_session.query(Task).filter(Task.incident_id == incident_id).filter(Task.status == status)
)
def get_overdue_tasks(*, db_session) -> List[Optional[Task]]:
"""Returns all tasks that have not been resolved and are past due date."""
# TODO ensure that we don't send reminders more than their interval
return (
db_session.query(Task)
.filter(Task.status == TaskStatus.open)
.filter(Task.reminders == True) # noqa
.filter(Task.resolve_by < datetime.utcnow())
.filter(
or_(
Task.last_reminder_at + timedelta(days=1)
< datetime.utcnow(), # daily reminders after due date.
Task.last_reminder_at == None,
)
)
.all()
)
def create(*, db_session, task_in: TaskCreate) -> Task:
"""Create a new task."""
incident = incident_service.get(db_session=db_session, incident_id=task_in.incident.id)
tickets = [
ticket_service.get_or_create_by_weblink(
db_session=db_session, weblink=t.weblink, resource_type="task-ticket"
)
for t in task_in.tickets
]
assignees = []
for i in task_in.assignees:
assignee = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=i.individual.email,
)
        # due to the freeform nature of task assignment, we can sometimes pick up other emails,
        # e.g. a google group that we cannot resolve to an individual assignee
if assignee:
assignees.append(assignee)
creator_email = None
if not task_in.creator:
creator_email = task_in.owner.individual.email
else:
creator_email = task_in.creator.individual.email
# add creator as a participant if they are not one already
creator = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=creator_email,
)
# if we cannot find any assignees, the creator becomes the default assignee
if not assignees:
assignees.append(creator)
# we add owner as a participant if they are not one already
if task_in.owner:
owner = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=task_in.owner.individual.email,
)
else:
owner = incident.commander
task = Task(
**task_in.dict(exclude={"assignees", "owner", "incident", "creator", "tickets"}),
creator=creator,
owner=owner,
assignees=assignees,
incident=incident,
tickets=tickets,
)
event_service.log(
db_session=db_session,
source="Dispatch Core App",
description="New incident task created",
details={"weblink": task.weblink},
incident_id=incident.id,
)
db_session.add(task)
db_session.commit()
return task
def update(*, db_session, task: Task, task_in: TaskUpdate, sync_external: bool = True) -> Task:
"""Update an existing task."""
# ensure we add assignee as participant if they are not one already
assignees = []
for i in task_in.assignees:
assignees.append(
incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=task.incident.id,
user_email=i.individual.email,
)
)
task.assignees = assignees
# we add owner as a participant if they are not one already
if task_in.owner:
task.owner = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=task.incident.id,
user_email=task_in.owner.individual.email,
)
update_data = task_in.dict(
skip_defaults=True, exclude={"assignees", "owner", "creator", "incident", "tickets"}
)
for field in update_data.keys():
setattr(task, field, update_data[field])
# if we have an external task plugin enabled, attempt to update the external resource as well
    # we don't currently have a good way to get the correct file_id (we don't store a task <-> document relationship)
    # let's try both the incident doc and the PIR doc
drive_task_plugin = plugin_service.get_active(db_session=db_session, plugin_type="task")
if drive_task_plugin:
if sync_external:
try:
if task.incident.incident_document:
file_id = task.incident.incident_document.resource_id
drive_task_plugin.instance.update(
file_id, task.resource_id, resolved=task.status
)
except Exception:
if task.incident.incident_review_document:
file_id = task.incident.incident_review_document.resource_id
drive_task_plugin.instance.update(
file_id, task.resource_id, resolved=task.status
)
db_session.add(task)
db_session.commit()
return task
def delete(*, db_session, task_id: int):
"""Delete an existing task."""
task = db_session.query(Task).filter(Task.id == task_id).first()
db_session.delete(task)
db_session.commit()
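# A minimal usage sketch of this service layer (hypothetical session and IDs, not from the source):
#
#   task = get(db_session=session, task_id=42)
#   open_tasks = get_all_by_incident_id_and_status(
#       db_session=session, incident_id=7, status=TaskStatus.open
#   )
#   delete(db_session=session, task_id=task.id)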
| 34.613402
| 108
| 0.656739
|
from datetime import datetime, timedelta
from typing import List, Optional
from sqlalchemy import or_
from dispatch.plugin import service as plugin_service
from dispatch.event import service as event_service
from dispatch.incident import flows as incident_flows
from dispatch.incident.flows import incident_service
from dispatch.ticket import service as ticket_service
from .models import Task, TaskStatus, TaskUpdate, TaskCreate
def get(*, db_session, task_id: int) -> Optional[Task]:
return db_session.query(Task).filter(Task.id == task_id).first()
def get_by_resource_id(*, db_session, resource_id: str) -> Optional[Task]:
return db_session.query(Task).filter(Task.resource_id == resource_id).first()
def get_all(*, db_session) -> List[Optional[Task]]:
return db_session.query(Task)
def get_all_by_incident_id(*, db_session, incident_id: int) -> List[Optional[Task]]:
return db_session.query(Task).filter(Task.incident_id == incident_id)
def get_all_by_incident_id_and_status(
*, db_session, incident_id: int, status: str
) -> List[Optional[Task]]:
return (
db_session.query(Task).filter(Task.incident_id == incident_id).filter(Task.status == status)
)
def get_overdue_tasks(*, db_session) -> List[Optional[Task]]:
return (
db_session.query(Task)
.filter(Task.status == TaskStatus.open)
.filter(Task.reminders == True) # noqa
.filter(Task.resolve_by < datetime.utcnow())
.filter(
or_(
Task.last_reminder_at + timedelta(days=1)
< datetime.utcnow(), # daily reminders after due date.
Task.last_reminder_at == None,
)
)
.all()
)
def create(*, db_session, task_in: TaskCreate) -> Task:
incident = incident_service.get(db_session=db_session, incident_id=task_in.incident.id)
tickets = [
ticket_service.get_or_create_by_weblink(
db_session=db_session, weblink=t.weblink, resource_type="task-ticket"
)
for t in task_in.tickets
]
assignees = []
for i in task_in.assignees:
assignee = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=i.individual.email,
)
# due to the freeform nature of task assignment, we can sometimes pick up other emails
        # e.g. a google group that we cannot resolve to an individual assignee
if assignee:
assignees.append(assignee)
creator_email = None
if not task_in.creator:
creator_email = task_in.owner.individual.email
else:
creator_email = task_in.creator.individual.email
# add creator as a participant if they are not one already
creator = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=creator_email,
)
# if we cannot find any assignees, the creator becomes the default assignee
if not assignees:
assignees.append(creator)
# we add owner as a participant if they are not one already
if task_in.owner:
owner = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=task_in.owner.individual.email,
)
else:
owner = incident.commander
task = Task(
**task_in.dict(exclude={"assignees", "owner", "incident", "creator", "tickets"}),
creator=creator,
owner=owner,
assignees=assignees,
incident=incident,
tickets=tickets,
)
event_service.log(
db_session=db_session,
source="Dispatch Core App",
description="New incident task created",
details={"weblink": task.weblink},
incident_id=incident.id,
)
db_session.add(task)
db_session.commit()
return task
def update(*, db_session, task: Task, task_in: TaskUpdate, sync_external: bool = True) -> Task:
# ensure we add assignee as participant if they are not one already
assignees = []
for i in task_in.assignees:
assignees.append(
incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=task.incident.id,
user_email=i.individual.email,
)
)
task.assignees = assignees
# we add owner as a participant if they are not one already
if task_in.owner:
task.owner = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=task.incident.id,
user_email=task_in.owner.individual.email,
)
update_data = task_in.dict(
skip_defaults=True, exclude={"assignees", "owner", "creator", "incident", "tickets"}
)
for field in update_data.keys():
setattr(task, field, update_data[field])
# if we have an external task plugin enabled, attempt to update the external resource as well
    # we don't currently have a good way to get the correct file_id (we don't store a task <-> document relationship)
    # let's try both the incident doc and the PIR doc
drive_task_plugin = plugin_service.get_active(db_session=db_session, plugin_type="task")
if drive_task_plugin:
if sync_external:
try:
if task.incident.incident_document:
file_id = task.incident.incident_document.resource_id
drive_task_plugin.instance.update(
file_id, task.resource_id, resolved=task.status
)
except Exception:
if task.incident.incident_review_document:
file_id = task.incident.incident_review_document.resource_id
drive_task_plugin.instance.update(
file_id, task.resource_id, resolved=task.status
)
db_session.add(task)
db_session.commit()
return task
def delete(*, db_session, task_id: int):
task = db_session.query(Task).filter(Task.id == task_id).first()
db_session.delete(task)
db_session.commit()
| true
| true
|
7903d2e5a875e3db7b257dc024278b89822207f9
| 1,110
|
py
|
Python
|
Section 5 - Programming Logic/Guess game v7 - while - data validation.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | 10
|
2020-02-14T14:28:15.000Z
|
2022-02-02T18:44:11.000Z
|
Section 5 - Programming Logic/Guess game v7 - while - data validation.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | null | null | null |
Section 5 - Programming Logic/Guess game v7 - while - data validation.py
|
gitjot/python-for-lccs
|
a8a4ae8847abbc33361f80183c06d57b20523382
|
[
"CC0-1.0"
] | 8
|
2020-03-25T09:27:42.000Z
|
2021-11-03T15:24:38.000Z
|
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Name: Guessing Game v7
# Purpose: A program to demonstrate data validation
# Description: The same as version 6, except that the input is validated
# Guess Game v7 - while - go again? - data validation
import random
number = random.randint(1, 10)
# Initialise the loop guard variable
keepGoing = True
# Loop as long as keepGoing is True
while keepGoing:
guess = input("Enter a number between 1 and 10: ")
# Validate. Make sure the value entered is numeric
while not guess.isdigit():
guess = input("Enter a number between 1 and 10: ")
# Convert the string to an integer
guess = int(guess)
if guess == number:
print("Correct")
goAgain = input("Play again? (Y/N): ")
if goAgain.upper() == "N":
keepGoing = False
else:
# Get a new number
number = random.randint(1, 10)
elif guess < number:
print("Too low")
else:
print("Too high")
print("Goodbye")
| 25.227273
| 81
| 0.609009
|
import random
number = random.randint(1, 10)
keepGoing = True
while keepGoing:
guess = input("Enter a number between 1 and 10: ")
while not guess.isdigit():
guess = input("Enter a number between 1 and 10: ")
guess = int(guess)
if guess == number:
print("Correct")
goAgain = input("Play again? (Y/N): ")
if goAgain.upper() == "N":
keepGoing = False
else:
number = random.randint(1, 10)
elif guess < number:
print("Too low")
else:
print("Too high")
print("Goodbye")
| true
| true
|
7903d31164da5343c80ba220d6f56a5d7ca0b66f
| 359
|
py
|
Python
|
tests/test_otter.py
|
tadashi0713/circleci-demo-pytorch-api
|
bd699a44f2a1551d2661ce57f6268183109d7293
|
[
"MIT"
] | 1
|
2022-03-29T02:48:51.000Z
|
2022-03-29T02:48:51.000Z
|
tests/test_otter.py
|
tadashi0713/circleci-demo-pytorch-api
|
bd699a44f2a1551d2661ce57f6268183109d7293
|
[
"MIT"
] | null | null | null |
tests/test_otter.py
|
tadashi0713/circleci-demo-pytorch-api
|
bd699a44f2a1551d2661ce57f6268183109d7293
|
[
"MIT"
] | null | null | null |
from io import BytesIO
import pytest
from app import app
def test_otter():
with open('./images/otter.jpeg', 'rb') as img:
img_string = BytesIO(img.read())
response = app.test_client().post('/predict', data={'file': (img_string, 'otter.jpeg')},
content_type="multipart/form-data")
assert response.json['class_name'] == 'otter'
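# Run with pytest from the repo root (assumes ./images/otter.jpeg exists):
#   pytest tests/test_otter.py -k test_otter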
| 32.636364
| 90
| 0.657382
|
from io import BytesIO
import pytest
from app import app
def test_otter():
with open('./images/otter.jpeg', 'rb') as img:
img_string = BytesIO(img.read())
response = app.test_client().post('/predict', data={'file': (img_string, 'otter.jpeg')},
content_type="multipart/form-data")
assert response.json['class_name'] == 'otter'
| true
| true
|
7903d3538ab24610fe9b15c3423ade1811aed996
| 8,207
|
py
|
Python
|
ax/models/tests/test_torch_model_utils.py
|
trsvchn/Ax
|
0b430641c6b33920757dd09ae4318ea487fb4136
|
[
"MIT"
] | 1,803
|
2019-05-01T16:04:15.000Z
|
2022-03-31T16:01:29.000Z
|
ax/models/tests/test_torch_model_utils.py
|
trsvchn/Ax
|
0b430641c6b33920757dd09ae4318ea487fb4136
|
[
"MIT"
] | 810
|
2019-05-01T07:17:47.000Z
|
2022-03-31T23:58:46.000Z
|
ax/models/tests/test_torch_model_utils.py
|
trsvchn/Ax
|
0b430641c6b33920757dd09ae4318ea487fb4136
|
[
"MIT"
] | 220
|
2019-05-01T05:37:22.000Z
|
2022-03-29T04:30:45.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from ax.exceptions.model import ModelError
from ax.models.torch.utils import (
_generate_sobol_points,
is_noiseless,
normalize_indices,
subset_model,
tensor_callable_to_array_callable,
)
from ax.utils.common.testutils import TestCase
from botorch.models import HeteroskedasticSingleTaskGP, ModelListGP, SingleTaskGP
from torch import Tensor
class TorchUtilsTest(TestCase):
def test_is_noiseless(self):
x = torch.zeros(1, 1)
y = torch.zeros(1, 1)
se = torch.zeros(1, 1)
model = SingleTaskGP(x, y)
self.assertTrue(is_noiseless(model))
model = HeteroskedasticSingleTaskGP(x, y, se)
self.assertFalse(is_noiseless(model))
with self.assertRaises(ModelError):
is_noiseless(ModelListGP())
def testNormalizeIndices(self):
indices = [0, 2]
nlzd_indices = normalize_indices(indices, 3)
self.assertEqual(nlzd_indices, indices)
nlzd_indices = normalize_indices(indices, 4)
self.assertEqual(nlzd_indices, indices)
indices = [0, -1]
nlzd_indices = normalize_indices(indices, 3)
self.assertEqual(nlzd_indices, [0, 2])
with self.assertRaises(ValueError):
nlzd_indices = normalize_indices([3], 3)
with self.assertRaises(ValueError):
nlzd_indices = normalize_indices([-4], 3)
def testSubsetModel(self):
x = torch.zeros(1, 1)
y = torch.rand(1, 2)
obj_t = torch.rand(2)
model = SingleTaskGP(x, y)
self.assertEqual(model.num_outputs, 2)
# basic test, can subset
obj_weights = torch.tensor([1.0, 0.0])
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIsNone(obj_t_sub)
self.assertEqual(model_sub.num_outputs, 1)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
# basic test, cannot subset
obj_weights = torch.tensor([1.0, 2.0])
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIsNone(obj_t_sub)
self.assertIs(model_sub, model) # check identity
self.assertIs(obj_weights_sub, obj_weights) # check identity
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
# test w/ outcome constraints, can subset
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertEqual(model_sub.num_outputs, 1)
self.assertIsNone(obj_t_sub)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]])))
self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0])))
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0])))
# test w/ outcome constraints, cannot subset
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIs(model_sub, model) # check identity
self.assertIsNone(obj_t_sub)
self.assertIs(obj_weights_sub, obj_weights) # check identity
self.assertIs(ocs_sub, ocs) # check identity
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
# test w/ objective thresholds, cannot subset
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs, obj_t)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIs(model_sub, model) # check identity
self.assertIs(obj_t, obj_t_sub)
self.assertIs(obj_weights_sub, obj_weights) # check identity
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
self.assertIs(ocs_sub, ocs) # check identity
# test w/ objective thresholds, can subset
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs, obj_t)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0])))
self.assertEqual(model_sub.num_outputs, 1)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
self.assertTrue(torch.equal(obj_t_sub, obj_t[:1]))
self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]])))
self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0])))
# test unsupported
yvar = torch.ones(1, 2)
model = HeteroskedasticSingleTaskGP(x, y, yvar)
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIs(model_sub, model) # check identity
self.assertIs(obj_weights_sub, obj_weights) # check identity
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
# test error on size inconsistency
obj_weights = torch.ones(3)
with self.assertRaises(RuntimeError):
subset_model(model, obj_weights)
def testGenerateSobolPoints(self):
bounds = [(0.0, 1.0) for _ in range(3)]
linear_constraints = (
torch.tensor([[1, -1, 0]], dtype=torch.double),
torch.tensor([[0]], dtype=torch.double),
)
def test_rounding_func(x: Tensor) -> Tensor:
return x
gen_sobol = _generate_sobol_points(
n_sobol=100,
bounds=bounds,
device=torch.device("cpu"),
linear_constraints=linear_constraints,
rounding_func=test_rounding_func,
)
self.assertEqual(len(gen_sobol), 100)
self.assertIsInstance(gen_sobol, Tensor)
def testTensorCallableToArrayCallable(self):
def tensor_func(x: Tensor) -> Tensor:
return np.exp(x)
new_func = tensor_callable_to_array_callable(
tensor_func=tensor_func, device=torch.device("cpu")
)
self.assertTrue(callable(new_func))
self.assertIsInstance(new_func(np.array([1.0, 2.0])), np.ndarray)
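# Quick reference for normalize_indices, derived from testNormalizeIndices above:
#   normalize_indices([0, 2], 3)  -> [0, 2]     (already normalized)
#   normalize_indices([0, -1], 3) -> [0, 2]     (negative indices wrap around)
#   normalize_indices([3], 3)     -> ValueError (out of bounds)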
| 46.367232
| 88
| 0.680151
|
import numpy as np
import torch
from ax.exceptions.model import ModelError
from ax.models.torch.utils import (
_generate_sobol_points,
is_noiseless,
normalize_indices,
subset_model,
tensor_callable_to_array_callable,
)
from ax.utils.common.testutils import TestCase
from botorch.models import HeteroskedasticSingleTaskGP, ModelListGP, SingleTaskGP
from torch import Tensor
class TorchUtilsTest(TestCase):
def test_is_noiseless(self):
x = torch.zeros(1, 1)
y = torch.zeros(1, 1)
se = torch.zeros(1, 1)
model = SingleTaskGP(x, y)
self.assertTrue(is_noiseless(model))
model = HeteroskedasticSingleTaskGP(x, y, se)
self.assertFalse(is_noiseless(model))
with self.assertRaises(ModelError):
is_noiseless(ModelListGP())
def testNormalizeIndices(self):
indices = [0, 2]
nlzd_indices = normalize_indices(indices, 3)
self.assertEqual(nlzd_indices, indices)
nlzd_indices = normalize_indices(indices, 4)
self.assertEqual(nlzd_indices, indices)
indices = [0, -1]
nlzd_indices = normalize_indices(indices, 3)
self.assertEqual(nlzd_indices, [0, 2])
with self.assertRaises(ValueError):
nlzd_indices = normalize_indices([3], 3)
with self.assertRaises(ValueError):
nlzd_indices = normalize_indices([-4], 3)
def testSubsetModel(self):
x = torch.zeros(1, 1)
y = torch.rand(1, 2)
obj_t = torch.rand(2)
model = SingleTaskGP(x, y)
self.assertEqual(model.num_outputs, 2)
obj_weights = torch.tensor([1.0, 0.0])
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIsNone(obj_t_sub)
self.assertEqual(model_sub.num_outputs, 1)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
obj_weights = torch.tensor([1.0, 2.0])
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIsNone(obj_t_sub)
self.assertIs(model_sub, model)
self.assertIs(obj_weights_sub, obj_weights)
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertEqual(model_sub.num_outputs, 1)
self.assertIsNone(obj_t_sub)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]])))
self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0])))
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0])))
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIs(model_sub, model)
self.assertIsNone(obj_t_sub)
self.assertIs(obj_weights_sub, obj_weights)
self.assertIs(ocs_sub, ocs)
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs, obj_t)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIs(model_sub, model)
self.assertIs(obj_t, obj_t_sub)
self.assertIs(obj_weights_sub, obj_weights)
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
self.assertIs(ocs_sub, ocs)
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs, obj_t)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0])))
self.assertEqual(model_sub.num_outputs, 1)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
self.assertTrue(torch.equal(obj_t_sub, obj_t[:1]))
self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]])))
self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0])))
yvar = torch.ones(1, 2)
model = HeteroskedasticSingleTaskGP(x, y, yvar)
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIs(model_sub, model)
self.assertIs(obj_weights_sub, obj_weights)
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
obj_weights = torch.ones(3)
with self.assertRaises(RuntimeError):
subset_model(model, obj_weights)
def testGenerateSobolPoints(self):
bounds = [(0.0, 1.0) for _ in range(3)]
linear_constraints = (
torch.tensor([[1, -1, 0]], dtype=torch.double),
torch.tensor([[0]], dtype=torch.double),
)
def test_rounding_func(x: Tensor) -> Tensor:
return x
gen_sobol = _generate_sobol_points(
n_sobol=100,
bounds=bounds,
device=torch.device("cpu"),
linear_constraints=linear_constraints,
rounding_func=test_rounding_func,
)
self.assertEqual(len(gen_sobol), 100)
self.assertIsInstance(gen_sobol, Tensor)
def testTensorCallableToArrayCallable(self):
def tensor_func(x: Tensor) -> Tensor:
return np.exp(x)
new_func = tensor_callable_to_array_callable(
tensor_func=tensor_func, device=torch.device("cpu")
)
self.assertTrue(callable(new_func))
self.assertIsInstance(new_func(np.array([1.0, 2.0])), np.ndarray)
| true
| true
|
7903d357942e987d78a3a6e95112c062d3570e3c
| 382
|
py
|
Python
|
setup.py
|
djf604/django-alexa
|
e40ef82e38b918670fec13e51c88e6913bc79bab
|
[
"MIT"
] | 1
|
2019-01-16T01:38:47.000Z
|
2019-01-16T01:38:47.000Z
|
setup.py
|
djf604/django-alexa
|
e40ef82e38b918670fec13e51c88e6913bc79bab
|
[
"MIT"
] | null | null | null |
setup.py
|
djf604/django-alexa
|
e40ef82e38b918670fec13e51c88e6913bc79bab
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from os import path
from sys import version_info
def open_file(fname):
return open(path.join(path.dirname(__file__), fname))
setup_requires = ['pbr']
setup(
license='MIT',
setup_requires=setup_requires,
pbr=True,
auto_version="PBR",
    install_requires=open_file('requirements.txt').readlines(),
)
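# Note (not in the original file): with pbr=True, the package name, version and
# other metadata are read from setup.cfg and git tags rather than from setup() args.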
| 21.222222
| 93
| 0.727749
|
from setuptools import setup
from os import path
from sys import version_info
def open_file(fname):
return open(path.join(path.dirname(__file__), fname))
setup_requires = ['pbr']
setup(
license='MIT',
setup_requires=setup_requires,
pbr=True,
auto_version="PBR",
    install_requires=open_file('requirements.txt').readlines(),
)
| true
| true
|
7903d522171e2ea817d00017c65dacf4c45fc8c1
| 260
|
py
|
Python
|
server_src/modules/handlers/ITM_Core.py
|
uwdata/termite-data-server
|
1085571407c627bdbbd21c352e793fed65d09599
|
[
"BSD-3-Clause"
] | 97
|
2015-01-17T09:41:57.000Z
|
2022-03-15T11:39:03.000Z
|
server_src/modules/handlers/ITM_Core.py
|
afcarl/termite-data-server
|
1085571407c627bdbbd21c352e793fed65d09599
|
[
"BSD-3-Clause"
] | 12
|
2015-02-01T02:59:56.000Z
|
2021-06-09T02:31:34.000Z
|
server_src/modules/handlers/ITM_Core.py
|
afcarl/termite-data-server
|
1085571407c627bdbbd21c352e793fed65d09599
|
[
"BSD-3-Clause"
] | 35
|
2015-01-25T04:48:37.000Z
|
2021-01-29T20:32:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from handlers.Home_Core import Home_Core
class ITM_Core(Home_Core):
def __init__(self, request, response, itm_db):
super(ITM_Core, self).__init__(request, response)
self.itmDB = itm_db
self.db = itm_db.db
| 23.636364
| 51
| 0.723077
|
from handlers.Home_Core import Home_Core
class ITM_Core(Home_Core):
def __init__(self, request, response, itm_db):
super(ITM_Core, self).__init__(request, response)
self.itmDB = itm_db
self.db = itm_db.db
| true
| true
|
7903d59fc5bc3510f7505509ddac1494a90a9278
| 1,171
|
py
|
Python
|
pi/button/button.py
|
kylemcdonald/bsp
|
e33c71f5924bef61a15e2b87230ac27b8f8261aa
|
[
"MIT"
] | 1
|
2021-02-01T18:57:31.000Z
|
2021-02-01T18:57:31.000Z
|
pi/button/button.py
|
kylemcdonald/bsp
|
e33c71f5924bef61a15e2b87230ac27b8f8261aa
|
[
"MIT"
] | 2
|
2021-08-10T01:38:49.000Z
|
2021-10-21T17:15:25.000Z
|
pi/button/button.py
|
kylemcdonald/bsp
|
e33c71f5924bef61a15e2b87230ac27b8f8261aa
|
[
"MIT"
] | 2
|
2021-02-04T19:21:09.000Z
|
2022-01-19T08:45:33.000Z
|
#!/usr/bin/python3
import time
import datetime
from gpiozero import InputDevice, LED
import subprocess
import requests
# Physical RPi pin mapping:
# pins 5 & 6 are used for the button (GPIO 3 & ground)
# pins 7 & 9 are used for the LED (GPIO 4 & ground)
button_pin = 3
led_pin = 4
button = InputDevice(button_pin, pull_up=True)
last_active = False
last_press = None
led = LED(led_pin)
led.on()
def button_hold(now, seconds):
if seconds > 3:
print('button hold')
led.blink(.05, .5)
requests.get('http://localhost:8080/home')
time.sleep(2)
subprocess.call(['shutdown', '-h', 'now'], shell=False)
def button_release(now, seconds):
print('button release')
requests.get('http://localhost:8080/button')
while True:
cur_active = button.is_active
now = datetime.datetime.now()
if cur_active and not last_active:
last_press = now
if cur_active:
duration = now - last_press
button_hold(now, duration.total_seconds())
if not cur_active and last_active:
duration = now - last_press
button_release(now, duration.total_seconds())
last_active = cur_active
time.sleep(1/60)
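# Behavior sketch (derived from the loop above): holding the button for more than
# 3 seconds blinks the LED and shuts the Pi down; a shorter press only fires
# button_release(), which pings http://localhost:8080/button.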
| 25.456522
| 63
| 0.668659
|
import time
import datetime
from gpiozero import InputDevice, LED
import subprocess
import requests
button_pin = 3
led_pin = 4
button = InputDevice(button_pin, pull_up=True)
last_active = False
last_press = None
led = LED(led_pin)
led.on()
def button_hold(now, seconds):
if seconds > 3:
print('button hold')
led.blink(.05, .5)
requests.get('http://localhost:8080/home')
time.sleep(2)
subprocess.call(['shutdown', '-h', 'now'], shell=False)
def button_release(now, seconds):
print('button release')
requests.get('http://localhost:8080/button')
while True:
cur_active = button.is_active
now = datetime.datetime.now()
if cur_active and not last_active:
last_press = now
if cur_active:
duration = now - last_press
button_hold(now, duration.total_seconds())
if not cur_active and last_active:
duration = now - last_press
button_release(now, duration.total_seconds())
last_active = cur_active
time.sleep(1/60)
| true
| true
|
7903d77c2ec02e149f42e2fa3afdfb22fecea4e9
| 2,652
|
py
|
Python
|
var/spack/repos/builtin/packages/r-vgam/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-vgam/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-vgam/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RVgam(RPackage):
"""Vector Generalized Linear and Additive Models.
An implementation of about 6 major classes of statistical regression
models. The central algorithm is Fisher scoring and iterative reweighted
least squares. At the heart of this package are the vector generalized
linear and additive model (VGLM/VGAM) classes. VGLMs can be loosely thought
of as multivariate GLMs. VGAMs are data-driven VGLMs that use smoothing.
The book "Vector Generalized Linear and Additive Models: With an
Implementation in R" (Yee, 2015) <DOI:10.1007/978-1-4939-2818-7> gives
details of the statistical framework and the package. Currently only
fixed-effects models are implemented. Many (150+) models and distributions
are estimated by maximum likelihood estimation (MLE) or penalized MLE. The
other classes are RR-VGLMs (reduced-rank VGLMs), quadratic RR-VGLMs,
reduced-rank VGAMs, RCIMs (row-column interaction models)---these classes
perform constrained and unconstrained quadratic ordination (CQO/UQO) models
in ecology, as well as constrained additive ordination (CAO). Hauck-Donner
effect detection is implemented. Note that these functions are subject to
change; see the NEWS and ChangeLog files for latest changes."""
cran = "VGAM"
version('1.1-6', sha256='446a61bac5dd4794e05d20c2f3901eec54afac52c6e23ce2787c5575170dd417')
version('1.1-5', sha256='30190b150f3e5478137d288a45f575b2654ad7c29254b0a1fe5c954ee010a1bb')
version('1.1-1', sha256='de192bd65a7e8818728008de8e60e6dd3b61a13616c887a43e0ccc8147c7da52')
version('1.0-6', sha256='121820a167411e847b41bdcb0028b55842d0ccc0c3471755c67449837e0fe3b9')
version('1.0-4', sha256='e581985f78ef8b866d0e810b2727061bb9c9bc177b2c9090aebb3a35ae87a964')
version('1.0-3', sha256='23bb6690ae15e9ede3198ef55d5d3236c279aa8fa6bd4f7350242379d9d72673')
version('1.0-2', sha256='03561bf484f97b616b1979132c759c5faa69c5d5a4cfd7aea2ea6d3612ac0961')
version('1.0-1', sha256='c066864e406fcee23f383a28299dba3cf83356e5b68df16324885afac87a05ea')
version('1.0-0', sha256='6acdd7db49c0987c565870afe593160ceba72a6ca4a84e6da3cf6f74d1fa02e1')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r@3.1.0:', type=('build', 'run'), when='@1.0-2:')
depends_on('r@3.4.0:', type=('build', 'run'), when='@1.0-4:')
depends_on('r@3.5.0:', type=('build', 'run'), when='@1.1-5:')
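    # Example install spec (hypothetical, not part of the package recipe):
    #   spack install r-vgam@1.1-6    # newest version here; requires r >= 3.5.0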
| 58.933333
| 95
| 0.761312
|
from spack.package import *
class RVgam(RPackage):
cran = "VGAM"
version('1.1-6', sha256='446a61bac5dd4794e05d20c2f3901eec54afac52c6e23ce2787c5575170dd417')
version('1.1-5', sha256='30190b150f3e5478137d288a45f575b2654ad7c29254b0a1fe5c954ee010a1bb')
version('1.1-1', sha256='de192bd65a7e8818728008de8e60e6dd3b61a13616c887a43e0ccc8147c7da52')
version('1.0-6', sha256='121820a167411e847b41bdcb0028b55842d0ccc0c3471755c67449837e0fe3b9')
version('1.0-4', sha256='e581985f78ef8b866d0e810b2727061bb9c9bc177b2c9090aebb3a35ae87a964')
version('1.0-3', sha256='23bb6690ae15e9ede3198ef55d5d3236c279aa8fa6bd4f7350242379d9d72673')
version('1.0-2', sha256='03561bf484f97b616b1979132c759c5faa69c5d5a4cfd7aea2ea6d3612ac0961')
version('1.0-1', sha256='c066864e406fcee23f383a28299dba3cf83356e5b68df16324885afac87a05ea')
version('1.0-0', sha256='6acdd7db49c0987c565870afe593160ceba72a6ca4a84e6da3cf6f74d1fa02e1')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r@3.1.0:', type=('build', 'run'), when='@1.0-2:')
depends_on('r@3.4.0:', type=('build', 'run'), when='@1.0-4:')
depends_on('r@3.5.0:', type=('build', 'run'), when='@1.1-5:')
| true
| true
|
7903d8542166e8b1d4864abf4a34d51d2976d9e8
| 1,911
|
py
|
Python
|
satsearch/main.py
|
lishrimp/sat-search
|
d81e4774a41990b73b55db4b1e05b21062dd957c
|
[
"MIT"
] | null | null | null |
satsearch/main.py
|
lishrimp/sat-search
|
d81e4774a41990b73b55db4b1e05b21062dd957c
|
[
"MIT"
] | null | null | null |
satsearch/main.py
|
lishrimp/sat-search
|
d81e4774a41990b73b55db4b1e05b21062dd957c
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
from .version import __version__
from satsearch import Search
from satstac import Items
from satsearch.parser import SatUtilsParser
import satsearch.config as config
def main(items=None, printmd=None, printcal=False, found=False,
save=None, download=None, requestor_pays=False, **kwargs):
""" Main function for performing a search """
if items is None:
## if there are no items then perform a search
search = Search.search(**kwargs)
if found:
num = search.found()
print('%s items found' % num)
return num
items = search.items()
else:
# otherwise, load a search from a file
items = Items.load(items)
print('%s items found' % len(items))
# print metadata
if printmd is not None:
print(items.summary(printmd))
# print calendar
if printcal:
print(items.calendar())
# save all metadata in JSON file
if save is not None:
items.save(filename=save)
# download files given `download` keys
if download is not None:
if 'ALL' in download:
# get complete set of assets
download = set([k for i in items for k in i.assets])
for key in download:
items.download(key=key, path=config.DATADIR, filename=config.FILENAME, requestor_pays=requestor_pays)
return items
def cli():
parser = SatUtilsParser.newbie(description='sat-search (v%s)' % __version__)
kwargs = parser.parse_args(sys.argv[1:])
# if a filename, read the GeoJSON file
if 'intersects' in kwargs:
if os.path.exists(kwargs['intersects']):
with open(kwargs['intersects']) as f:
kwargs['intersects'] = json.loads(f.read())
cmd = kwargs.pop('command', None)
if cmd is not None:
main(**kwargs)
if __name__ == "__main__":
cli()
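# Minimal programmatic usage sketch (hypothetical search kwargs, not from this file):
#   items = main(datetime='2020-01-01/2020-12-31', found=True)
# From the shell, the same path runs via the package's console entry point, which calls cli().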
| 27.3
| 113
| 0.628467
|
import os
import sys
import json
from .version import __version__
from satsearch import Search
from satstac import Items
from satsearch.parser import SatUtilsParser
import satsearch.config as config
def main(items=None, printmd=None, printcal=False, found=False,
save=None, download=None, requestor_pays=False, **kwargs):
    if items is None:
        search = Search.search(**kwargs)
        if found:
num = search.found()
print('%s items found' % num)
return num
items = search.items()
else:
items = Items.load(items)
print('%s items found' % len(items))
if printmd is not None:
print(items.summary(printmd))
if printcal:
print(items.calendar())
if save is not None:
items.save(filename=save)
if download is not None:
if 'ALL' in download:
download = set([k for i in items for k in i.assets])
for key in download:
items.download(key=key, path=config.DATADIR, filename=config.FILENAME, requestor_pays=requestor_pays)
return items
def cli():
parser = SatUtilsParser.newbie(description='sat-search (v%s)' % __version__)
kwargs = parser.parse_args(sys.argv[1:])
if 'intersects' in kwargs:
if os.path.exists(kwargs['intersects']):
with open(kwargs['intersects']) as f:
kwargs['intersects'] = json.loads(f.read())
cmd = kwargs.pop('command', None)
if cmd is not None:
main(**kwargs)
if __name__ == "__main__":
cli()
| true
| true
|
7903d86f9167b1a2db5b5df76fddc53ac94b1163
| 2,621
|
py
|
Python
|
torchtext/datasets/amazonreviewpolarity.py
|
abhinavarora/text
|
69f67f3a775f3d3c6f85cfaa4ac3819500b90696
|
[
"BSD-3-Clause"
] | 1
|
2022-01-03T17:30:57.000Z
|
2022-01-03T17:30:57.000Z
|
torchtext/datasets/amazonreviewpolarity.py
|
abhinavarora/text
|
69f67f3a775f3d3c6f85cfaa4ac3819500b90696
|
[
"BSD-3-Clause"
] | null | null | null |
torchtext/datasets/amazonreviewpolarity.py
|
abhinavarora/text
|
69f67f3a775f3d3c6f85cfaa4ac3819500b90696
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
MD5 = "fe39f8b653cada45afd5792e0f0e8f9b"
NUM_LINES = {
"train": 3600000,
"test": 400000,
}
_PATH = "amazon_review_polarity_csv.tar.gz"
_EXTRACTED_FILES = {
"train": os.path.join("amazon_review_polarity_csv", "train.csv"),
"test": os.path.join("amazon_review_polarity_csv", "test.csv"),
}
DATASET_NAME = "AmazonReviewPolarity"
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AmazonReviewPolarity(root: str, split: Union[Tuple[str], str]):
"""AmazonReviewPolarity Dataset
For additional details refer to https://arxiv.org/abs/1509.01626
Number of lines per split:
- train: 3600000
- test: 400000
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)
:returns: DataPipe that yields tuple of label (1 to 2) and text containing the review title and text
:rtype: (int, str)
"""
# TODO Remove this after removing conditional dependency
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _PATH),
hash_dict={os.path.join(root, _PATH): MD5},
hash_type="md5",
)
cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split])
)
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").read_from_tar().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
)
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
return data_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
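# Minimal usage sketch (assumes the optional `torchdata` dependency is installed):
#   train_dp = AmazonReviewPolarity(split="train")
#   label, text = next(iter(train_dp))   # label is 1 or 2, per the docstring above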
| 34.486842
| 119
| 0.720336
|
import os
from typing import Union, Tuple
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
if is_module_available("torchdata"):
from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
MD5 = "fe39f8b653cada45afd5792e0f0e8f9b"
NUM_LINES = {
"train": 3600000,
"test": 400000,
}
_PATH = "amazon_review_polarity_csv.tar.gz"
_EXTRACTED_FILES = {
"train": os.path.join("amazon_review_polarity_csv", "train.csv"),
"test": os.path.join("amazon_review_polarity_csv", "test.csv"),
}
DATASET_NAME = "AmazonReviewPolarity"
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AmazonReviewPolarity(root: str, split: Union[Tuple[str], str]):
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`"
)
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _PATH),
hash_dict={os.path.join(root, _PATH): MD5},
hash_type="md5",
)
cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
filepath_fn=lambda x: os.path.join(root, _EXTRACTED_FILES[split])
)
cache_decompressed_dp = (
FileOpener(cache_decompressed_dp, mode="b").read_from_tar().filter(lambda x: _EXTRACTED_FILES[split] in x[0])
)
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
return data_dp.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
| true
| true
|
7903db7f786e0b67215a9f80a621d24138b89d66
| 3,654
|
py
|
Python
|
release/src-rt-6.x.4708/router/samba-3.5.8/source4/heimdal/lib/wind/gen-errorlist.py
|
afeng11/tomato-arm
|
1ca18a88480b34fd495e683d849f46c2d47bb572
|
[
"FSFAP"
] | 4
|
2017-05-17T11:27:04.000Z
|
2020-05-24T07:23:26.000Z
|
release/src-rt-6.x.4708/router/samba-3.5.8/source4/heimdal/lib/wind/gen-errorlist.py
|
afeng11/tomato-arm
|
1ca18a88480b34fd495e683d849f46c2d47bb572
|
[
"FSFAP"
] | 1
|
2018-08-21T03:43:09.000Z
|
2018-08-21T03:43:09.000Z
|
release/src-rt-6.x.4708/router/samba-3.5.8/source4/heimdal/lib/wind/gen-errorlist.py
|
afeng11/tomato-arm
|
1ca18a88480b34fd495e683d849f46c2d47bb572
|
[
"FSFAP"
] | 5
|
2017-10-11T08:09:11.000Z
|
2020-10-14T04:10:13.000Z
|
#!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id: gen-errorlist.py,v 1.1.1.1 2011/06/10 09:34:43 andrew Exp $
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
import sys
import generate
import rfc3454
import rfc4518
import stringprep
if len(sys.argv) != 3:
print "usage: %s rfc3454.txt out-dir" % sys.argv[0]
sys.exit(1)
tables = rfc3454.read(sys.argv[1])
t2 = rfc4518.read()
for x in t2.iterkeys():
tables[x] = t2[x]
error_list = stringprep.get_errorlist()
errorlist_h = generate.Header('%s/errorlist_table.h' % sys.argv[2])
errorlist_c = generate.Implementation('%s/errorlist_table.c' % sys.argv[2])
errorlist_h.file.write(
'''
#include "windlocl.h"
struct error_entry {
uint32_t start;
unsigned len;
wind_profile_flags flags;
};
extern const struct error_entry _wind_errorlist_table[];
extern const size_t _wind_errorlist_table_size;
''')
errorlist_c.file.write(
'''
#include "errorlist_table.h"
const struct error_entry _wind_errorlist_table[] = {
''')
trans=[]
for t in error_list.iterkeys():
for l in tables[t]:
m = re.search('^ *([0-9A-F]+)-([0-9A-F]+); *(.*) *$', l)
if m:
start = int(m.group(1), 0x10)
end = int(m.group(2), 0x10)
desc = m.group(3)
trans.append([start, end - start + 1, desc, [t]])
else:
m = re.search('^ *([0-9A-F]+); *(.*) *$', l)
if m:
trans.append([int(m.group(1), 0x10), 1, m.group(2), [t]])
trans = stringprep.sort_merge_trans(trans)
for x in trans:
(start, length, description, tables) = x
symbols = stringprep.symbols(error_list, tables)
if len(symbols) == 0:
print "no symbol for %s" % description
sys.exit(1)
errorlist_c.file.write(" {0x%x, 0x%x, %s}, /* %s: %s */\n"
% (start, length, symbols, ",".join(tables), description))
errorlist_c.file.write(
'''};
''')
errorlist_c.file.write(
"const size_t _wind_errorlist_table_size = %u;\n" % len(trans))
errorlist_h.close()
errorlist_c.close()
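# Input format sketch: the rfc3454/rfc4518 table lines parsed above take one of two
# shapes (illustrative examples, not taken from this file):
#   "0221; <description>"        -> a single code point
#   "0234-024F; <description>"   -> an inclusive range of code points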
| 30.198347
| 77
| 0.682813
|
import re
import string
import sys
import generate
import rfc3454
import rfc4518
import stringprep
if len(sys.argv) != 3:
print "usage: %s rfc3454.txt out-dir" % sys.argv[0]
sys.exit(1)
tables = rfc3454.read(sys.argv[1])
t2 = rfc4518.read()
for x in t2.iterkeys():
tables[x] = t2[x]
error_list = stringprep.get_errorlist()
errorlist_h = generate.Header('%s/errorlist_table.h' % sys.argv[2])
errorlist_c = generate.Implementation('%s/errorlist_table.c' % sys.argv[2])
errorlist_h.file.write(
'''
#include "windlocl.h"
struct error_entry {
uint32_t start;
unsigned len;
wind_profile_flags flags;
};
extern const struct error_entry _wind_errorlist_table[];
extern const size_t _wind_errorlist_table_size;
''')
errorlist_c.file.write(
'''
#include "errorlist_table.h"
const struct error_entry _wind_errorlist_table[] = {
''')
trans=[]
for t in error_list.iterkeys():
for l in tables[t]:
m = re.search('^ *([0-9A-F]+)-([0-9A-F]+); *(.*) *$', l)
if m:
start = int(m.group(1), 0x10)
end = int(m.group(2), 0x10)
desc = m.group(3)
trans.append([start, end - start + 1, desc, [t]])
else:
m = re.search('^ *([0-9A-F]+); *(.*) *$', l)
if m:
trans.append([int(m.group(1), 0x10), 1, m.group(2), [t]])
trans = stringprep.sort_merge_trans(trans)
for x in trans:
(start, length, description, tables) = x
symbols = stringprep.symbols(error_list, tables)
if len(symbols) == 0:
print "no symbol for %s" % description
sys.exit(1)
errorlist_c.file.write(" {0x%x, 0x%x, %s}, /* %s: %s */\n"
% (start, length, symbols, ",".join(tables), description))
errorlist_c.file.write(
'''};
''')
errorlist_c.file.write(
"const size_t _wind_errorlist_table_size = %u;\n" % len(trans))
errorlist_h.close()
errorlist_c.close()
| false
| true
|
7903dc1e9eef7d71c41cb8050cc9282a9a2001fe
| 5,677
|
py
|
Python
|
lib/oldlibcode/Utils/parser.py
|
kbasecollaborations/MotifFinderalgoMFMD
|
f1019ecca0d4b4a5d22a902d9a88d7ad45e5c1cb
|
[
"MIT"
] | 2
|
2019-07-19T04:33:45.000Z
|
2019-07-20T05:53:28.000Z
|
lib/oldlibcode/Utils/parser.py
|
man4ish/MotifFinderalgoMFMD
|
f1019ecca0d4b4a5d22a902d9a88d7ad45e5c1cb
|
[
"MIT"
] | null | null | null |
lib/oldlibcode/Utils/parser.py
|
man4ish/MotifFinderalgoMFMD
|
f1019ecca0d4b4a5d22a902d9a88d7ad45e5c1cb
|
[
"MIT"
] | 1
|
2021-03-13T15:13:28.000Z
|
2021-03-13T15:13:28.000Z
|
import sys
import os
import json
import re
import numpy as np
import pandas as pd
from Bio import motifs
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from io import StringIO
def build_mfmd_command(inputFilePath, motiflen, prb):
if not os.path.exists('/kb/module/work/tmp/mfmd'):
os.mkdir('/kb/module/work/tmp/mfmd')
outputFilePath = '/kb/module/work/tmp/mfmd/mfmd_out/mfmd_output.txt'
    command = 'java -jar mfmd.jar ' + inputFilePath + ' ' + str(motiflen) + ' ' + prb + ' > ' + outputFilePath
return command
def run_mfmd_command(command):
os.system(command)
def parse_mfmd_output(path):
pfmList = []
pfmDict={}
outputFileList = []
pfmMatrix=False
seqflag=False
motifList={}
motifDict={}
locList=[]
alphabet=['A','C','G','T']
motifSet=[]
motifList['Condition']='temp'
motifList['SequenceSet_ref']='123'
background={}
background['A']=0.0
background['C']=0.0
background['G']=0.0
background['T']=0.0
motifDict['Motif_Locations'] = []
motifDict['PWM'] = []
motifDict['PFM'] = []
motiflen=0
a=[]
c=[]
g=[]
t=[]
pwmList=[]
pwmDict={}
rowList = []
rowDict={}
for filename in os.listdir(path):
outputFileList.append(path + '/' + filename)
if(filename=="mfmd_out.txt"):
outputFilePath=path+'/'+filename
mfmdFile = open(outputFilePath,'r')
for line in mfmdFile:
if(re.search("PPM Matrix",line)):
pfmMatrix=True
if(pfmMatrix):
if(line[0].isdigit()):
line=line.strip()
out=line.split()
pfmList.append(out)
a.append(out[0])
c.append(out[1])
g.append(out[2])
t.append(out[3])
rowList = []
rowList.append(('A',float(out[0])))
rowList.append(('C',float(out[1])))
rowList.append(('G',float(out[2])))
rowList.append(('T',float(out[3])))
rowDict['A']=float(out[0])
rowDict['C']=float(out[1])
rowDict['G']=float(out[2])
rowDict['T']=float(out[3])
if(re.search("PSSM Matrix",line)):
pfmMatrix=False
if(re.search("Sequences",line)):
seqflag=True
if(seqflag==True):
line=line.strip()
if(re.search('\*',line)):
seqflag=False
if((line) and not (line.startswith("Seq")) and not (line.startswith("*"))):
line=line.rstrip()
seq=line.split()
seqid=seq[0]
seq_start=int(seq[1])
seq_end=int(seq_start)+int(motiflen)
sequence=seq[2]
orientation='+'
locDict={}
                        locDict['sequence_id'] = seqid
                        locDict['start'] = seq_start
                        locDict['end'] = seq_end
                        locDict['sequence'] = sequence
                        locDict['orientation'] = orientation
motifDict['Motif_Locations'].append(locDict)
if(re.search("Width",line)):
arr=line.split(" ")
motiflen=arr[1].split("\t")[0]
a=[float(x) for x in a]
c=[float(x) for x in c]
g=[float(x) for x in g]
t=[float(x) for x in t]
pwmDict['A']=a
pwmDict['C']=c
pwmDict['G']=g
pwmDict['T']=t
pfmDict['A']=[]
pfmDict['C']=[]
pfmDict['G']=[]
pfmDict['T']=[]
motifStr = '>test\n'
motifStr += 'A ' + str(a).replace(',','') + '\n'
motifStr += 'C ' + str(c).replace(',','') + '\n'
motifStr += 'G ' + str(g).replace(',','') + '\n'
motifStr += 'T ' + str(t).replace(',','') + '\n'
handle = StringIO(motifStr)
BioMotif = motifs.read(handle, 'jaspar')
motifDict['PWM']=pwmDict
motifDict['PFM']=pfmDict
motifDict['Iupac_sequence']=str(BioMotif.degenerate_consensus)
motifSet.append(motifDict) #keep in loop for multiple motifs
motifList['Motifs']=motifSet
motifList['Background']=background
motifList['Alphabet']=alphabet
return motifList
output=parse_mfmd_output("/home/manish/Desktop/Data/motifs/man4ish_guptamfmd/test_local/workdir/tmp/mfmd_out")
jsondata = json.dumps(output)
with open('ReportMotif.json', 'w') as outfile:
json.dump(output, outfile)
print(jsondata)
#print(output)
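# Usage sketch (hypothetical path, mirroring the hard-coded call above):
#   motif_list = parse_mfmd_output('/path/to/workdir/tmp/mfmd_out')
#   with open('ReportMotif.json', 'w') as fh:
#       json.dump(motif_list, fh)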
| 35.93038
| 199
| 0.42223
|
import sys
import os
import json
import re
import numpy as np
import pandas as pd
from Bio import motifs
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from io import StringIO
def build_mfmd_command(inputFilePath, motiflen, prb):
if not os.path.exists('/kb/module/work/tmp/mfmd'):
os.mkdir('/kb/module/work/tmp/mfmd')
outputFilePath = '/kb/module/work/tmp/mfmd/mfmd_out/mfmd_output.txt'
    command = 'java -jar mfmd.jar ' + inputFilePath + ' ' + str(motiflen) + ' ' + prb + ' > ' + outputFilePath
return command
def run_mfmd_command(command):
os.system(command)
def parse_mfmd_output(path):
pfmList = []
pfmDict={}
outputFileList = []
pfmMatrix=False
seqflag=False
motifList={}
motifDict={}
locList=[]
alphabet=['A','C','G','T']
motifSet=[]
motifList['Condition']='temp'
motifList['SequenceSet_ref']='123'
background={}
background['A']=0.0
background['C']=0.0
background['G']=0.0
background['T']=0.0
motifDict['Motif_Locations'] = []
motifDict['PWM'] = []
motifDict['PFM'] = []
motiflen=0
a=[]
c=[]
g=[]
t=[]
pwmList=[]
pwmDict={}
rowList = []
rowDict={}
for filename in os.listdir(path):
outputFileList.append(path + '/' + filename)
if(filename=="mfmd_out.txt"):
outputFilePath=path+'/'+filename
mfmdFile = open(outputFilePath,'r')
for line in mfmdFile:
if(re.search("PPM Matrix",line)):
pfmMatrix=True
if(pfmMatrix):
if(line[0].isdigit()):
line=line.strip()
out=line.split()
pfmList.append(out)
a.append(out[0])
c.append(out[1])
g.append(out[2])
t.append(out[3])
rowList = []
rowList.append(('A',float(out[0])))
rowList.append(('C',float(out[1])))
rowList.append(('G',float(out[2])))
rowList.append(('T',float(out[3])))
rowDict['A']=float(out[0])
rowDict['C']=float(out[1])
rowDict['G']=float(out[2])
rowDict['T']=float(out[3])
if(re.search("PSSM Matrix",line)):
pfmMatrix=False
if(re.search("Sequences",line)):
seqflag=True
if(seqflag==True):
line=line.strip()
if(re.search('\*',line)):
seqflag=False
if((line) and not (line.startswith("Seq")) and not (line.startswith("*"))):
line=line.rstrip()
seq=line.split()
seqid=seq[0]
seq_start=int(seq[1])
seq_end=int(seq_start)+int(motiflen)
sequence=seq[2]
orientation='+'
locDict={}
                        locDict['sequence_id'] = seqid
                        locDict['start'] = seq_start
                        locDict['end'] = seq_end
                        locDict['sequence'] = sequence
                        locDict['orientation'] = orientation
motifDict['Motif_Locations'].append(locDict)
if(re.search("Width",line)):
arr=line.split(" ")
motiflen=arr[1].split("\t")[0]
a=[float(x) for x in a]
c=[float(x) for x in c]
g=[float(x) for x in g]
t=[float(x) for x in t]
pwmDict['A']=a
pwmDict['C']=c
pwmDict['G']=g
pwmDict['T']=t
pfmDict['A']=[]
pfmDict['C']=[]
pfmDict['G']=[]
pfmDict['T']=[]
motifStr = '>test\n'
motifStr += 'A ' + str(a).replace(',','') + '\n'
motifStr += 'C ' + str(c).replace(',','') + '\n'
motifStr += 'G ' + str(g).replace(',','') + '\n'
motifStr += 'T ' + str(t).replace(',','') + '\n'
handle = StringIO(motifStr)
BioMotif = motifs.read(handle, 'jaspar')
motifDict['PWM']=pwmDict
motifDict['PFM']=pfmDict
motifDict['Iupac_sequence']=str(BioMotif.degenerate_consensus)
motifSet.append(motifDict)
motifList['Motifs']=motifSet
motifList['Background']=background
motifList['Alphabet']=alphabet
return motifList
output=parse_mfmd_output("/home/manish/Desktop/Data/motifs/man4ish_guptamfmd/test_local/workdir/tmp/mfmd_out")
jsondata = json.dumps(output)
with open('ReportMotif.json', 'w') as outfile:
json.dump(output, outfile)
print(jsondata)
| true
| true
|
7903dc5a753e7ee581edb2da4a0070a95ba83b12
| 4,185
|
py
|
Python
|
userbot/plugins/carbonRGB (2).py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | 1
|
2020-04-14T15:19:47.000Z
|
2020-04-14T15:19:47.000Z
|
userbot/plugins/carbonRGB (2).py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | null | null | null |
userbot/plugins/carbonRGB (2).py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | 2
|
2020-12-01T02:27:27.000Z
|
2022-02-16T08:32:11.000Z
|
"""Carbon Scraper Plugin for Userbot. //text in creative way.
usage: .karb //as a reply to any text message
Thanks to @r4v4n4 for vars,,, Random RGB feature by @PhycoNinja13b"""
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from telethon import events
from urllib.parse import quote_plus
from urllib.error import HTTPError
from time import sleep
import asyncio
import os
import random
from userbot.utils import admin_cmd
#@borg.on(events.NewMessage(pattern=r"\.karb ", outgoing=True))
@borg.on(admin_cmd(pattern="karb"))
async def carbon_api(e):
RED = random.randint(0,256)
GREEN = random.randint(0,256)
BLUE = random.randint(0,256)
THEME= [ "3024-night",
"a11y-dark",
"blackboard",
"base16-dark",
"base16-light",
"cobalt",
"dracula",
"duotone-dark",
"hopscotch",
"lucario",
"material",
"monokai",
"night-owl",
"nord",
"oceanic-next",
"one-light",
"one-dark",
"panda-syntax",
"paraiso-dark",
"seti",
"shades-of-purple",
"solarized",
"solarized%20light",
"synthwave-84",
"twilight",
"verminal",
"vscode",
"yeti",
"zenburn",
]
CUNTHE = random.randint(0, len(THEME) - 1)
The = THEME[CUNTHE]
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
""" A Wrapper for carbon.now.sh """
await e.edit("⬜⬜⬜⬜⬜")
CARBON = 'https://carbon.now.sh/?bg=rgba({R}%2C{G}%2C.{B}%2C1)&t={T}&wt=none&l=auto&ds=false&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Fira%20Code&fs=14px&lh=152%25&si=false&es=2x&wm=false&code={code}'
CARBONLANG = "en"
textx = await e.get_reply_message()
pcode = e.text
if pcode[6:]:
pcode = str(pcode[6:])
elif textx:
pcode = str(textx.message) # Importing message to module
code = quote_plus(pcode) # Converting to urlencoded
url = CARBON.format(code=code, R=RED, G=GREEN, B=BLUE, T=The, lang=CARBONLANG)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = Config.GOOGLE_CHROME_BIN
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('--disable-gpu')
prefs = {'download.default_directory' : './'}
chrome_options.add_experimental_option('prefs', prefs)
await e.edit("⬛⬛⬜⬜⬜")
driver = webdriver.Chrome(executable_path=Config.CHROME_DRIVER, options=chrome_options)
driver.get(url)
download_path = './'
driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_path}}
command_result = driver.execute("send_command", params)
driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
sleep(5) # this might take a bit.
driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
sleep(5)
await e.edit("⬛⬛⬛⬜⬜")
driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
sleep(5) #Waiting for downloading
await e.edit("⬛⬛⬛⬛⬛")
file = './carbon.png'
await e.edit("✅RGB Karbon Completed, Uploading RGB Karbon✅")
await e.client.send_file(
e.chat_id,
file,
caption="Carbonised by [TeleBot](https://t.me/TeleBotHelp)",
force_document=False,
reply_to=e.message.reply_to_msg_id,
)
os.remove('./carbon.png')
# Removing carbon.png after uploading
await e.delete() # Deleting msg
| 20.615764
| 235
| 0.590203
|
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from telethon import events
from urllib.parse import quote_plus
from urllib.error import HTTPError
from time import sleep
import asyncio
import os
import random
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="karb"))
async def carbon_api(e):
    RED = random.randint(0, 255)
    GREEN = random.randint(0, 255)
    BLUE = random.randint(0, 255)
THEME= [ "3024-night",
"a11y-dark",
"blackboard",
"base16-dark",
"base16-light",
"cobalt",
"dracula",
"duotone-dark",
"hopscotch",
"lucario",
"material",
"monokai",
"night-owl",
"nord",
"oceanic-next",
"one-light",
"one-dark",
"panda-syntax",
"paraiso-dark",
"seti",
"shades-of-purple",
"solarized",
"solarized%20light",
"synthwave-84",
"twilight",
"verminal",
"vscode",
"yeti",
"zenburn",
]
CUNTHE = random.randint(0, len(THEME) - 1)
The = THEME[CUNTHE]
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("⬜⬜⬜⬜⬜")
        CARBON = 'https://carbon.now.sh/?bg=rgba({R}%2C{G}%2C{B}%2C1)&t={T}&wt=none&l=auto&ds=false&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Fira%20Code&fs=14px&lh=152%25&si=false&es=2x&wm=false&code={code}'
CARBONLANG = "en"
textx = await e.get_reply_message()
pcode = e.text
if pcode[6:]:
pcode = str(pcode[6:])
elif textx:
pcode = str(textx.message)
code = quote_plus(pcode)
url = CARBON.format(code=code, R=RED, G=GREEN, B=BLUE, T=The, lang=CARBONLANG)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = Config.GOOGLE_CHROME_BIN
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('--disable-gpu')
prefs = {'download.default_directory' : './'}
chrome_options.add_experimental_option('prefs', prefs)
await e.edit("⬛⬛⬜⬜⬜")
driver = webdriver.Chrome(executable_path=Config.CHROME_DRIVER, options=chrome_options)
driver.get(url)
download_path = './'
driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_path}}
command_result = driver.execute("send_command", params)
driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
sleep(5)
driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
sleep(5)
await e.edit("⬛⬛⬛⬜⬜")
driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
sleep(5)
await e.edit("⬛⬛⬛⬛⬛")
file = './carbon.png'
await e.edit("✅RGB Karbon Completed, Uploading RGB Karbon✅")
await e.client.send_file(
e.chat_id,
file,
caption="Carbonised by [TeleBot](https://t.me/TeleBotHelp)",
force_document=False,
reply_to=e.message.reply_to_msg_id,
)
os.remove('./carbon.png')
await e.delete()
| true
| true
|
7903decd15439d23e92ffb110feae9237124ae6a
| 329
|
py
|
Python
|
models/model_NN.py
|
daniloorozco/ufc-predictions
|
0dbf91936587bc9acfea15151ab6845c77483124
|
[
"Apache-2.0"
] | null | null | null |
models/model_NN.py
|
daniloorozco/ufc-predictions
|
0dbf91936587bc9acfea15151ab6845c77483124
|
[
"Apache-2.0"
] | null | null | null |
models/model_NN.py
|
daniloorozco/ufc-predictions
|
0dbf91936587bc9acfea15151ab6845c77483124
|
[
"Apache-2.0"
] | null | null | null |
# Neural network baseline.
# MLPClassifier is scikit-learn's multi-layer perceptron for classification tasks.
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X_train_clean, y_train)
clf.predict(X_test_clean)
scoreN = clf.score(X_test_clean, y_test)
print(scoreN)
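
# --- Hedged, self-contained sketch (editor's addition) ----------------------
# The snippet above assumes X_train_clean / y_train etc. were prepared earlier
# in the repo. The demo below reproduces the same fit/score flow on synthetic
# data; all names and sizes here are illustrative, not the UFC pipeline.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X_demo, y_demo = make_classification(n_samples=500, n_features=10, random_state=1)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.2, random_state=1)
clf_demo = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf_demo.fit(X_tr, y_tr)
print(clf_demo.score(X_te, y_te))  # mean accuracy on the held-out demo split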
| 29.909091
| 90
| 0.808511
|
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X_train_clean, y_train)
clf.predict(X_test_clean)
scoreN = clf.score(X_test_clean, y_test)
print(scoreN)
| true
| true
|
7903df2ce15833bf9882d5da630640c63ef493b1
| 702
|
py
|
Python
|
RLBotPack/BotimusPrime/maneuvers/strikes/aerial_shot.py
|
RLMarvin/RLBotPack
|
c88c4111bf67d324b471ad87ad962e7bc8c2a202
|
[
"MIT"
] | null | null | null |
RLBotPack/BotimusPrime/maneuvers/strikes/aerial_shot.py
|
RLMarvin/RLBotPack
|
c88c4111bf67d324b471ad87ad962e7bc8c2a202
|
[
"MIT"
] | null | null | null |
RLBotPack/BotimusPrime/maneuvers/strikes/aerial_shot.py
|
RLMarvin/RLBotPack
|
c88c4111bf67d324b471ad87ad962e7bc8c2a202
|
[
"MIT"
] | null | null | null |
from maneuvers.kit import *
from maneuvers.strikes.aerial_strike import AerialStrike
class AerialShot(AerialStrike):
def intercept_predicate(self, car: Car, ball: Ball):
return ball.position[2] > 500
def configure(self, intercept: AerialIntercept):
ball = intercept.ball
target_direction = ground_direction(ball, self.target)
hit_dir = direction(ball.velocity, target_direction * 4000)
self.arrive.target = intercept.ground_pos - ground(hit_dir) * 130
self.aerial.target = intercept.ball.position - ground(hit_dir) * 130
self.arrive.time = intercept.time
self.aerial.arrival_time = intercept.time
| 35.1
| 77
| 0.682336
|
from maneuvers.kit import *
from maneuvers.strikes.aerial_strike import AerialStrike
class AerialShot(AerialStrike):
def intercept_predicate(self, car: Car, ball: Ball):
return ball.position[2] > 500
def configure(self, intercept: AerialIntercept):
ball = intercept.ball
target_direction = ground_direction(ball, self.target)
hit_dir = direction(ball.velocity, target_direction * 4000)
self.arrive.target = intercept.ground_pos - ground(hit_dir) * 130
self.aerial.target = intercept.ball.position - ground(hit_dir) * 130
self.arrive.time = intercept.time
self.aerial.arrival_time = intercept.time
| true
| true
|
7903dfbb5a2487eacc59276f7318af20b665ff86
| 4,824
|
py
|
Python
|
tools/perf/benchmarks/dromaeo.py
|
iplo/Chain
|
8bc8943d66285d5258fffc41bed7c840516c4422
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231
|
2015-01-08T09:04:44.000Z
|
2021-12-30T03:03:10.000Z
|
tools/perf/benchmarks/dromaeo.py
|
JasonEric/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2017-02-14T21:55:58.000Z
|
2017-02-14T21:55:58.000Z
|
tools/perf/benchmarks/dromaeo.py
|
JasonEric/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268
|
2015-01-21T05:53:28.000Z
|
2022-03-25T22:09:01.000Z
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from metrics import power
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class _DromaeoMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(_DromaeoMeasurement, self).__init__()
self._power_metric = power.PowerMetric()
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def MeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'window.document.cookie.indexOf("__done=1") >= 0', 600)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
js_get_results = 'JSON.stringify(window.automation.GetResults())'
print js_get_results
score = eval(tab.EvaluateJavaScript(js_get_results))
def Escape(k):
chars = [' ', '-', '/', '(', ')', '*']
for c in chars:
k = k.replace(c, '_')
return k
suffix = page.url[page.url.index('?') + 1 : page.url.index('&')]
for k, v in score.iteritems():
data_type = 'unimportant'
if k == suffix:
data_type = 'default'
results.Add(Escape(k), 'runs/s', float(v), data_type=data_type)
class _DromaeoBenchmark(test.Test):
"""A base class for Dromaeo benchmarks."""
test = _DromaeoMeasurement
def CreatePageSet(self, options):
"""Makes a PageSet for Dromaeo benchmarks."""
# Subclasses are expected to define a class member called query_param.
if not hasattr(self, 'query_param'):
raise NotImplementedError('query_param not in Dromaeo benchmark.')
url = 'file://index.html?%s&automated' % self.query_param
# The docstring of benchmark classes may also be used as a description
# when 'run_benchmarks list' is run.
description = self.__doc__ or 'Dromaeo JavaScript Benchmark'
page_set_dict = {
'description': description,
'pages': [{'url': url}],
}
dromaeo_dir = os.path.join(util.GetChromiumSrcDir(),
'chrome', 'test', 'data', 'dromaeo')
return page_set.PageSet.FromDict(page_set_dict, dromaeo_dir)
class DromaeoDomCoreAttr(_DromaeoBenchmark):
"""Dromaeo DOMCore attr JavaScript benchmark."""
tag = 'domcoreattr'
query_param = 'dom-attr'
class DromaeoDomCoreModify(_DromaeoBenchmark):
"""Dromaeo DOMCore modify JavaScript benchmark."""
tag = 'domcoremodify'
query_param = 'dom-modify'
class DromaeoDomCoreQuery(_DromaeoBenchmark):
"""Dromaeo DOMCore query JavaScript benchmark."""
tag = 'domcorequery'
query_param = 'dom-query'
class DromaeoDomCoreTraverse(_DromaeoBenchmark):
"""Dromaeo DOMCore traverse JavaScript benchmark."""
tag = 'domcoretraverse'
query_param = 'dom-traverse'
class DromaeoJslibAttrJquery(_DromaeoBenchmark):
"""Dromaeo JSLib attr jquery JavaScript benchmark"""
tag = 'jslibattrjquery'
query_param = 'jslib-attr-jquery'
class DromaeoJslibAttrPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib attr prototype JavaScript benchmark"""
tag = 'jslibattrprototype'
query_param = 'jslib-attr-prototype'
class DromaeoJslibEventJquery(_DromaeoBenchmark):
"""Dromaeo JSLib event jquery JavaScript benchmark"""
tag = 'jslibeventjquery'
query_param = 'jslib-event-jquery'
class DromaeoJslibEventPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib event prototype JavaScript benchmark"""
tag = 'jslibeventprototype'
query_param = 'jslib-event-prototype'
class DromaeoJslibModifyJquery(_DromaeoBenchmark):
"""Dromaeo JSLib modify jquery JavaScript benchmark"""
tag = 'jslibmodifyjquery'
query_param = 'jslib-modify-jquery'
class DromaeoJslibModifyPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib modify prototype JavaScript benchmark"""
tag = 'jslibmodifyprototype'
query_param = 'jslib-modify-prototype'
class DromaeoJslibStyleJquery(_DromaeoBenchmark):
"""Dromaeo JSLib style jquery JavaScript benchmark"""
tag = 'jslibstylejquery'
query_param = 'jslib-style-jquery'
class DromaeoJslibStylePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib style prototype JavaScript benchmark"""
tag = 'jslibstyleprototype'
query_param = 'jslib-style-prototype'
class DromaeoJslibTraverseJquery(_DromaeoBenchmark):
"""Dromaeo JSLib traverse jquery JavaScript benchmark"""
tag = 'jslibtraversejquery'
query_param = 'jslib-traverse-jquery'
class DromaeoJslibTraversePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib traverse prototype JavaScript benchmark"""
tag = 'jslibtraverseprototype'
query_param = 'jslib-traverse-prototype'
| 31.122581
| 74
| 0.732172
|
import os
from metrics import power
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class _DromaeoMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(_DromaeoMeasurement, self).__init__()
self._power_metric = power.PowerMetric()
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def MeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'window.document.cookie.indexOf("__done=1") >= 0', 600)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
js_get_results = 'JSON.stringify(window.automation.GetResults())'
print js_get_results
score = eval(tab.EvaluateJavaScript(js_get_results))
def Escape(k):
chars = [' ', '-', '/', '(', ')', '*']
for c in chars:
k = k.replace(c, '_')
return k
suffix = page.url[page.url.index('?') + 1 : page.url.index('&')]
for k, v in score.iteritems():
data_type = 'unimportant'
if k == suffix:
data_type = 'default'
results.Add(Escape(k), 'runs/s', float(v), data_type=data_type)
class _DromaeoBenchmark(test.Test):
"""A base class for Dromaeo benchmarks."""
test = _DromaeoMeasurement
def CreatePageSet(self, options):
"""Makes a PageSet for Dromaeo benchmarks."""
if not hasattr(self, 'query_param'):
raise NotImplementedError('query_param not in Dromaeo benchmark.')
url = 'file://index.html?%s&automated' % self.query_param
description = self.__doc__ or 'Dromaeo JavaScript Benchmark'
page_set_dict = {
'description': description,
'pages': [{'url': url}],
}
dromaeo_dir = os.path.join(util.GetChromiumSrcDir(),
'chrome', 'test', 'data', 'dromaeo')
return page_set.PageSet.FromDict(page_set_dict, dromaeo_dir)
class DromaeoDomCoreAttr(_DromaeoBenchmark):
"""Dromaeo DOMCore attr JavaScript benchmark."""
tag = 'domcoreattr'
query_param = 'dom-attr'
class DromaeoDomCoreModify(_DromaeoBenchmark):
"""Dromaeo DOMCore modify JavaScript benchmark."""
tag = 'domcoremodify'
query_param = 'dom-modify'
class DromaeoDomCoreQuery(_DromaeoBenchmark):
"""Dromaeo DOMCore query JavaScript benchmark."""
tag = 'domcorequery'
query_param = 'dom-query'
class DromaeoDomCoreTraverse(_DromaeoBenchmark):
"""Dromaeo DOMCore traverse JavaScript benchmark."""
tag = 'domcoretraverse'
query_param = 'dom-traverse'
class DromaeoJslibAttrJquery(_DromaeoBenchmark):
"""Dromaeo JSLib attr jquery JavaScript benchmark"""
tag = 'jslibattrjquery'
query_param = 'jslib-attr-jquery'
class DromaeoJslibAttrPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib attr prototype JavaScript benchmark"""
tag = 'jslibattrprototype'
query_param = 'jslib-attr-prototype'
class DromaeoJslibEventJquery(_DromaeoBenchmark):
"""Dromaeo JSLib event jquery JavaScript benchmark"""
tag = 'jslibeventjquery'
query_param = 'jslib-event-jquery'
class DromaeoJslibEventPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib event prototype JavaScript benchmark"""
tag = 'jslibeventprototype'
query_param = 'jslib-event-prototype'
class DromaeoJslibModifyJquery(_DromaeoBenchmark):
"""Dromaeo JSLib modify jquery JavaScript benchmark"""
tag = 'jslibmodifyjquery'
query_param = 'jslib-modify-jquery'
class DromaeoJslibModifyPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib modify prototype JavaScript benchmark"""
tag = 'jslibmodifyprototype'
query_param = 'jslib-modify-prototype'
class DromaeoJslibStyleJquery(_DromaeoBenchmark):
"""Dromaeo JSLib style jquery JavaScript benchmark"""
tag = 'jslibstylejquery'
query_param = 'jslib-style-jquery'
class DromaeoJslibStylePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib style prototype JavaScript benchmark"""
tag = 'jslibstyleprototype'
query_param = 'jslib-style-prototype'
class DromaeoJslibTraverseJquery(_DromaeoBenchmark):
"""Dromaeo JSLib traverse jquery JavaScript benchmark"""
tag = 'jslibtraversejquery'
query_param = 'jslib-traverse-jquery'
class DromaeoJslibTraversePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib traverse prototype JavaScript benchmark"""
tag = 'jslibtraverseprototype'
query_param = 'jslib-traverse-prototype'
| false
| true
|
7903dff9bf2d0705e4e789c3b4cd1f9a8ae62555
| 642
|
py
|
Python
|
data/config/color.py
|
ajbowler/mlb-led-scoreboard
|
f6678649253f5491ccdbcd4703372a0ab739f1de
|
[
"MIT"
] | 35
|
2018-01-28T02:40:08.000Z
|
2018-02-26T21:09:48.000Z
|
data/config/color.py
|
ajbowler/mlb-led-scoreboard
|
f6678649253f5491ccdbcd4703372a0ab739f1de
|
[
"MIT"
] | 53
|
2018-01-28T15:01:32.000Z
|
2018-02-26T22:22:51.000Z
|
data/config/color.py
|
ajbowler/mlb-led-scoreboard
|
f6678649253f5491ccdbcd4703372a0ab739f1de
|
[
"MIT"
] | 10
|
2018-01-28T18:35:29.000Z
|
2018-02-20T11:53:07.000Z
|
try:
from rgbmatrix import graphics
except ImportError:
from RGBMatrixEmulator import graphics
class Color:
def __init__(self, color_json):
self.json = color_json
def color(self, keypath):
return self.__find_at_keypath(keypath)
def graphics_color(self, keypath):
color = self.color(keypath)
if not color:
color = self.color("default.text")
return graphics.Color(color["r"], color["g"], color["b"])
def __find_at_keypath(self, keypath):
keys = keypath.split(".")
rv = self.json
for key in keys:
rv = rv[key]
return rv
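
# --- Hedged usage sketch (editor's addition; illustrative JSON shape, not the
# project's real color config) ---
if __name__ == "__main__":
    demo = Color({"default": {"text": {"r": 255, "g": 255, "b": 255}}})
    print(demo.color("default.text"))           # {'r': 255, 'g': 255, 'b': 255}
    print(demo.graphics_color("default.text"))  # graphics.Color for white
    # Note: a keypath absent from the JSON raises KeyError inside
    # __find_at_keypath before the "default.text" fallback is ever reached.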
| 24.692308
| 65
| 0.61215
|
try:
from rgbmatrix import graphics
except ImportError:
from RGBMatrixEmulator import graphics
class Color:
def __init__(self, color_json):
self.json = color_json
def color(self, keypath):
return self.__find_at_keypath(keypath)
def graphics_color(self, keypath):
color = self.color(keypath)
if not color:
color = self.color("default.text")
return graphics.Color(color["r"], color["g"], color["b"])
def __find_at_keypath(self, keypath):
keys = keypath.split(".")
rv = self.json
for key in keys:
rv = rv[key]
return rv
| true
| true
|
7903e09bc5c002e243a2a7cb960564378bb4e47c
| 14,107
|
py
|
Python
|
RESSPyLab/uvc_model.py
|
ioannis-vm/RESSPyLab
|
306fc24d5f8ece8f2f2de274b56b80ba2019f605
|
[
"MIT"
] | 7
|
2019-10-15T09:16:41.000Z
|
2021-09-24T11:28:45.000Z
|
RESSPyLab/uvc_model.py
|
ioannis-vm/RESSPyLab
|
306fc24d5f8ece8f2f2de274b56b80ba2019f605
|
[
"MIT"
] | 3
|
2020-10-22T14:27:22.000Z
|
2021-11-15T17:46:49.000Z
|
RESSPyLab/uvc_model.py
|
ioannis-vm/RESSPyLab
|
306fc24d5f8ece8f2f2de274b56b80ba2019f605
|
[
"MIT"
] | 6
|
2019-07-22T05:47:10.000Z
|
2021-10-24T02:06:26.000Z
|
"""@package vc_updated
Functions to implement the updated Voce-Chaboche material model and measure its error.
"""
import numpy as np
import pandas as pd
from numdifftools import nd_algopy as nda
def uvc_return_mapping(x_sol, data, tol=1.0e-8, maximum_iterations=1000):
""" Implements the time integration of the updated Voce-Chaboche material model.
:param np.array x_sol: Updated Voce-Chaboche model parameters.
:param pd.DataFrame data: stress-strain data.
:param float tol: Local Newton tolerance.
:param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.
:return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the
updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its').
"""
if len(x_sol) < 8:
raise RuntimeError("No backstresses or using original V-C params.")
n_param_per_back = 2
n_basic_param = 6
# Get material properties
E = x_sol[0] * 1.0
sy_0 = x_sol[1] * 1.0
Q = x_sol[2] * 1.0
b = x_sol[3] * 1.0
D = x_sol[4] * 1.0
a = x_sol[5] * 1.0
# Set up backstresses
n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back)
c_k = []
gamma_k = []
for i in range(0, n_backstresses):
c_k.append(x_sol[n_basic_param + n_param_per_back * i])
gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i])
# Initialize parameters
alpha_components = np.zeros(n_backstresses, dtype=object) # backstress components
strain = 0.
stress = 0.
ep_eq = 0. # equivalent plastic strain
error = 0. # error measure
sum_abs_de = 0. # total strain
stress_sim = 0.0
stress_test = 0.0
area_test = 0.0
stress_track = []
strain_track = []
strain_inc_track = []
iteration_track = []
loading = np.diff(data['e_true'])
for increment_number, strain_inc in enumerate(loading):
strain += strain_inc
alpha = np.sum(alpha_components)
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
trial_stress = stress + E * strain_inc
relative_stress = trial_stress - alpha
flow_dir = np.sign(relative_stress)
yield_condition = np.abs(relative_stress) - yield_stress
if yield_condition > tol:
is_converged = False
else:
is_converged = True
# For error
stress_sim_1 = stress_sim * 1.0
stress_test_1 = stress_test * 1.0
# Return mapping if plastic loading
ep_eq_init = ep_eq
alpha_init = alpha
consist_param = 0.
number_of_iterations = 0
while is_converged is False and number_of_iterations < maximum_iterations:
number_of_iterations += 1
# Isotropic hardening and isotropic modulus
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
# Kinematic hardening and kinematic modulus
alpha = 0.
kin_modulus = 0.
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i]
delta_alpha = alpha - alpha_init
# Local Newton step
numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha)
denominator = -(E + iso_modulus + kin_modulus)
consist_param = consist_param - numerator / denominator
ep_eq = ep_eq_init + consist_param
if np.abs(numerator) < tol:
is_converged = True
# Update the variables
stress = trial_stress - E * flow_dir * consist_param
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \
+ (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
stress_track.append(stress)
strain_track.append(strain)
strain_inc_track.append(strain_inc)
iteration_track.append(number_of_iterations)
# Calculate the error
stress_sim = stress * 1.0
stress_test = data['Sigma_true'].iloc[increment_number + 1]
sum_abs_de += np.abs(strain_inc)
area_test += np.abs(strain_inc) * ((stress_test) ** 2 + (stress_test_1) ** 2) / 2.
error += np.abs(strain_inc) * ((stress_sim - stress_test) ** 2 + (stress_sim_1 - stress_test_1) ** 2) / 2.
if number_of_iterations >= maximum_iterations:
print ("Increment number = ", increment_number)
print ("Parameters = ", x_sol)
print ("Numerator = ", numerator)
raise RuntimeError('Return mapping did not converge in ' + str(maximum_iterations) + ' iterations.')
area = area_test / sum_abs_de
error = error / sum_abs_de
return {'stress': stress_track, 'strain': strain_track, 'error': error, 'num_its': iteration_track,
'area': area}
def sim_curve_uvc(x_sol, test_clean):
""" Returns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input.
:param np.array x_sol: Voce-Chaboche model parameters
:param DataFrame test_clean: stress-strain data
:return DataFrame: Voce-Chaboche approximation
The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
"""
model_output = uvc_return_mapping(x_sol, test_clean)
strain = np.append([0.], model_output['strain'])
stress = np.append([0.], model_output['stress'])
sim_curve = pd.DataFrame(np.array([strain, stress]).transpose(), columns=['e_true', 'Sigma_true'])
return sim_curve
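
# --- Hedged usage sketch (editor's addition; parameter values are illustrative,
# not calibrated). Order follows uvc_return_mapping:
# [E, sy_0, Q, b, D, a, C_1, gamma_1] with a single backstress.
if __name__ == "__main__":
    x_demo = np.array([200.0e3, 350.0, 100.0, 15.0, 50.0, 200.0, 20.0e3, 150.0])
    data_demo = pd.DataFrame({'e_true': np.linspace(0.0, 0.02, 50),
                              'Sigma_true': np.zeros(50)})
    print(sim_curve_uvc(x_demo, data_demo).head())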
def error_single_test_uvc(x_sol, test_clean):
""" Returns the relative error between a test and its approximation using the updated Voce-Chaboche material model.
:param np.array x_sol: Voce-Chaboche model parameters
:param DataFrame test_clean: stress-strain data
:return float: relative error
The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
"""
model_output = uvc_return_mapping(x_sol, test_clean)
return model_output['error']
def normalized_error_single_test_uvc(x_sol, test_clean):
""" Returns the error and the total area of a test and its approximation using the updated Voce-Chaboche
material model.
:param np.array x_sol: Voce-Chaboche model parameters
:param DataFrame test_clean: stress-strain data
:return list: (float) total error, (float) total area
The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
"""
model_output = uvc_return_mapping(x_sol, test_clean)
return [model_output['error'], model_output['area']]
def calc_phi_total(x, data):
""" Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x.
:param np.array x: Updated Voce-Chaboche material model parameters.
:param list data: (pd.DataFrame) Stress-strain history for each test considered.
:return float: Normalized error value expressed as a percent (raw value * 100).
The normalized error is defined in de Sousa and Lignos (2017).
"""
error_total = 0.
area_total = 0.
for d in data:
error, area = normalized_error_single_test_uvc(x, d)
error_total += error
area_total += area
return np.sqrt(error_total / area_total) * 100.
def test_total_area(x, data):
""" Returns the total squared area underneath all the tests.
:param np.array x: Updated Voce-Chaboche material model parameters.
:param list data: (pd.DataFrame) Stress-strain history for each test considered.
:return float: Total squared area.
"""
area_total = 0.
for d in data:
_, area = normalized_error_single_test_uvc(x, d)
area_total += area
return area_total
def uvc_get_hessian(x, data):
""" Returns the Hessian of the material model error function for a given set of test data evaluated at x.
:param np.array x: Updated Voce-Chaboche material model parameters.
:param list data: (pd.DataFrame) Stress-strain history for each test considered.
:return np.array: Hessian matrix of the error function.
"""
def f(xi):
val = 0.
for d in data:
val += error_single_test_uvc(xi, d)
return val
hess_fun = nda.Hessian(f)
return hess_fun(x)
def uvc_consistency_metric(x_base, x_sample, data):
""" Returns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model.
:param np.array x_base: Updated Voce-Chaboche material model parameters from the base case.
:param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case.
:param list data: (pd.DataFrame) Stress-strain history for each test considered.
:return float: Increase in quadratic approximation from the base to the sample case.
"""
x_diff = x_sample - x_base
hess_base = uvc_get_hessian(x_base, data)
numerator = np.dot(x_diff, hess_base.dot(x_diff))
denominator = test_total_area(x_base, data)
return np.sqrt(numerator / denominator)
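# In symbols (editor's note): xi_2 = sqrt( dx^T H(x_base) dx / total_area ),
# with dx = x_sample - x_base, exactly as computed above.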
def uvc_tangent_modulus(x_sol, data, tol=1.0e-8, maximum_iterations=1000):
""" Returns the tangent modulus at each strain step.
:param np.array x_sol: Updated Voce-Chaboche model parameters.
:param pd.DataFrame data: stress-strain data.
:param float tol: Local Newton tolerance.
:param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.
:return np.ndarray: Tangent modulus array.
"""
if len(x_sol) < 8:
raise RuntimeError("No backstresses or using original V-C params.")
n_param_per_back = 2
n_basic_param = 6
# Get material properties
E = x_sol[0] * 1.0
sy_0 = x_sol[1] * 1.0
Q = x_sol[2] * 1.0
b = x_sol[3] * 1.0
D = x_sol[4] * 1.0
a = x_sol[5] * 1.0
# Set up backstresses
n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back)
c_k = []
gamma_k = []
for i in range(0, n_backstresses):
c_k.append(x_sol[n_basic_param + n_param_per_back * i])
gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i])
# Initialize parameters
alpha_components = np.zeros(n_backstresses, dtype=object) # backstress components
strain = 0.
stress = 0.
ep_eq = 0. # equivalent plastic strain
stress_track = []
strain_track = []
strain_inc_track = []
iteration_track = []
tangent_track = []
loading = np.diff(data['e_true'])
for increment_number, strain_inc in enumerate(loading):
strain += strain_inc
alpha = np.sum(alpha_components)
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
trial_stress = stress + E * strain_inc
relative_stress = trial_stress - alpha
flow_dir = np.sign(relative_stress)
yield_condition = np.abs(relative_stress) - yield_stress
if yield_condition > tol:
is_converged = False
else:
is_converged = True
# Return mapping if plastic loading
ep_eq_init = ep_eq
alpha_init = alpha
consist_param = 0.
number_of_iterations = 0
while is_converged is False and number_of_iterations < maximum_iterations:
number_of_iterations += 1
# Isotropic hardening and isotropic modulus
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
# Kinematic hardening and kinematic modulus
alpha = 0.
kin_modulus = 0.
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i]
delta_alpha = alpha - alpha_init
# Local Newton step
numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha)
denominator = -(E + iso_modulus + kin_modulus)
consist_param = consist_param - numerator / denominator
ep_eq = ep_eq_init + consist_param
if np.abs(numerator) < tol:
is_converged = True
# Update the variables
stress = trial_stress - E * flow_dir * consist_param
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \
+ (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
stress_track.append(stress)
strain_track.append(strain)
strain_inc_track.append(strain_inc)
iteration_track.append(number_of_iterations)
# Calculate the tangent modulus
if number_of_iterations > 0:
h_prime = 0.
for i in range(0, n_backstresses):
h_prime += c_k[i] - flow_dir * gamma_k[i] * alpha_components[i]
k_prime = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
tangent_track.append(E * (k_prime + h_prime) / (E + k_prime + h_prime))
else:
# Elastic loading
tangent_track.append(E)
return np.append([0.], np.array(tangent_track))
| 38.755495
| 119
| 0.641455
|
import numpy as np
import pandas as pd
from numdifftools import nd_algopy as nda
def uvc_return_mapping(x_sol, data, tol=1.0e-8, maximum_iterations=1000):
if len(x_sol) < 8:
raise RuntimeError("No backstresses or using original V-C params.")
n_param_per_back = 2
n_basic_param = 6
E = x_sol[0] * 1.0
sy_0 = x_sol[1] * 1.0
Q = x_sol[2] * 1.0
b = x_sol[3] * 1.0
D = x_sol[4] * 1.0
a = x_sol[5] * 1.0
n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back)
c_k = []
gamma_k = []
for i in range(0, n_backstresses):
c_k.append(x_sol[n_basic_param + n_param_per_back * i])
gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i])
alpha_components = np.zeros(n_backstresses, dtype=object)
strain = 0.
stress = 0.
ep_eq = 0.
error = 0.
sum_abs_de = 0.
stress_sim = 0.0
stress_test = 0.0
area_test = 0.0
stress_track = []
strain_track = []
strain_inc_track = []
iteration_track = []
loading = np.diff(data['e_true'])
for increment_number, strain_inc in enumerate(loading):
strain += strain_inc
alpha = np.sum(alpha_components)
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
trial_stress = stress + E * strain_inc
relative_stress = trial_stress - alpha
flow_dir = np.sign(relative_stress)
yield_condition = np.abs(relative_stress) - yield_stress
if yield_condition > tol:
is_converged = False
else:
is_converged = True
stress_sim_1 = stress_sim * 1.0
stress_test_1 = stress_test * 1.0
ep_eq_init = ep_eq
alpha_init = alpha
consist_param = 0.
number_of_iterations = 0
while is_converged is False and number_of_iterations < maximum_iterations:
number_of_iterations += 1
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
alpha = 0.
kin_modulus = 0.
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i]
delta_alpha = alpha - alpha_init
numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha)
denominator = -(E + iso_modulus + kin_modulus)
consist_param = consist_param - numerator / denominator
ep_eq = ep_eq_init + consist_param
if np.abs(numerator) < tol:
is_converged = True
stress = trial_stress - E * flow_dir * consist_param
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \
+ (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
stress_track.append(stress)
strain_track.append(strain)
strain_inc_track.append(strain_inc)
iteration_track.append(number_of_iterations)
stress_sim = stress * 1.0
stress_test = data['Sigma_true'].iloc[increment_number + 1]
sum_abs_de += np.abs(strain_inc)
area_test += np.abs(strain_inc) * ((stress_test) ** 2 + (stress_test_1) ** 2) / 2.
error += np.abs(strain_inc) * ((stress_sim - stress_test) ** 2 + (stress_sim_1 - stress_test_1) ** 2) / 2.
if number_of_iterations >= maximum_iterations:
print ("Increment number = ", increment_number)
print ("Parameters = ", x_sol)
print ("Numerator = ", numerator)
raise RuntimeError('Return mapping did not converge in ' + str(maximum_iterations) + ' iterations.')
area = area_test / sum_abs_de
error = error / sum_abs_de
return {'stress': stress_track, 'strain': strain_track, 'error': error, 'num_its': iteration_track,
'area': area}
def sim_curve_uvc(x_sol, test_clean):
model_output = uvc_return_mapping(x_sol, test_clean)
strain = np.append([0.], model_output['strain'])
stress = np.append([0.], model_output['stress'])
sim_curve = pd.DataFrame(np.array([strain, stress]).transpose(), columns=['e_true', 'Sigma_true'])
return sim_curve
def error_single_test_uvc(x_sol, test_clean):
model_output = uvc_return_mapping(x_sol, test_clean)
return model_output['error']
def normalized_error_single_test_uvc(x_sol, test_clean):
model_output = uvc_return_mapping(x_sol, test_clean)
return [model_output['error'], model_output['area']]
def calc_phi_total(x, data):
error_total = 0.
area_total = 0.
for d in data:
error, area = normalized_error_single_test_uvc(x, d)
error_total += error
area_total += area
return np.sqrt(error_total / area_total) * 100.
def test_total_area(x, data):
area_total = 0.
for d in data:
_, area = normalized_error_single_test_uvc(x, d)
area_total += area
return area_total
def uvc_get_hessian(x, data):
def f(xi):
val = 0.
for d in data:
val += error_single_test_uvc(xi, d)
return val
hess_fun = nda.Hessian(f)
return hess_fun(x)
def uvc_consistency_metric(x_base, x_sample, data):
x_diff = x_sample - x_base
hess_base = uvc_get_hessian(x_base, data)
numerator = np.dot(x_diff, hess_base.dot(x_diff))
denominator = test_total_area(x_base, data)
return np.sqrt(numerator / denominator)
def uvc_tangent_modulus(x_sol, data, tol=1.0e-8, maximum_iterations=1000):
if len(x_sol) < 8:
raise RuntimeError("No backstresses or using original V-C params.")
n_param_per_back = 2
n_basic_param = 6
E = x_sol[0] * 1.0
sy_0 = x_sol[1] * 1.0
Q = x_sol[2] * 1.0
b = x_sol[3] * 1.0
D = x_sol[4] * 1.0
a = x_sol[5] * 1.0
n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back)
c_k = []
gamma_k = []
for i in range(0, n_backstresses):
c_k.append(x_sol[n_basic_param + n_param_per_back * i])
gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i])
alpha_components = np.zeros(n_backstresses, dtype=object)
strain = 0.
stress = 0.
ep_eq = 0.
stress_track = []
strain_track = []
strain_inc_track = []
iteration_track = []
tangent_track = []
loading = np.diff(data['e_true'])
for increment_number, strain_inc in enumerate(loading):
strain += strain_inc
alpha = np.sum(alpha_components)
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
trial_stress = stress + E * strain_inc
relative_stress = trial_stress - alpha
flow_dir = np.sign(relative_stress)
yield_condition = np.abs(relative_stress) - yield_stress
if yield_condition > tol:
is_converged = False
else:
is_converged = True
ep_eq_init = ep_eq
alpha_init = alpha
consist_param = 0.
number_of_iterations = 0
while is_converged is False and number_of_iterations < maximum_iterations:
number_of_iterations += 1
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
alpha = 0.
kin_modulus = 0.
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i]
delta_alpha = alpha - alpha_init
numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha)
denominator = -(E + iso_modulus + kin_modulus)
consist_param = consist_param - numerator / denominator
ep_eq = ep_eq_init + consist_param
if np.abs(numerator) < tol:
is_converged = True
stress = trial_stress - E * flow_dir * consist_param
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \
+ (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
stress_track.append(stress)
strain_track.append(strain)
strain_inc_track.append(strain_inc)
iteration_track.append(number_of_iterations)
if number_of_iterations > 0:
h_prime = 0.
for i in range(0, n_backstresses):
h_prime += c_k[i] - flow_dir * gamma_k[i] * alpha_components[i]
k_prime = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
tangent_track.append(E * (k_prime + h_prime) / (E + k_prime + h_prime))
else:
tangent_track.append(E)
return np.append([0.], np.array(tangent_track))
| true
| true
|
7903e11ab9fec1633c9080aadf7a929866e2d98e
| 1,667
|
py
|
Python
|
Models/Encoders/ID_Encoder.py
|
YuGong123/ID-disentanglement-Pytorch
|
1b110f653a1945ea498b21cd6ed7d7e4fee0f74b
|
[
"MIT"
] | 45
|
2021-03-24T09:18:46.000Z
|
2022-03-15T16:45:13.000Z
|
Models/Encoders/ID_Encoder.py
|
YuGong123/ID-disentanglement-Pytorch
|
1b110f653a1945ea498b21cd6ed7d7e4fee0f74b
|
[
"MIT"
] | 1
|
2022-01-17T14:10:35.000Z
|
2022-01-17T14:10:35.000Z
|
Models/Encoders/ID_Encoder.py
|
YuGong123/ID-disentanglement-Pytorch
|
1b110f653a1945ea498b21cd6ed7d7e4fee0f74b
|
[
"MIT"
] | 9
|
2021-03-31T08:11:38.000Z
|
2022-01-15T10:07:48.000Z
|
import torch
from facenet_pytorch import MTCNN, InceptionResnetV1
from torchvision import transforms
from Configs import Global_Config
IMAGE_SIZE = 220
mtcnn = MTCNN(
image_size=IMAGE_SIZE, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
device=Global_Config.device
)
to_pil = transforms.ToPILImage(mode='RGB')
crop_transform = transforms.Compose([transforms.Resize(IMAGE_SIZE),
transforms.CenterCrop(IMAGE_SIZE)])
resnet = InceptionResnetV1(pretrained='vggface2', classify=False).eval().to(Global_Config.device)
class ID_Encoder(torch.nn.Module):
def __init__(self):
super(ID_Encoder, self).__init__()
def crop_tensor_according_to_bboxes(self, images, bboxes):
cropped_batch = []
for idx, image in enumerate(images):
try:
cropped_image = crop_transform(image[:, int(bboxes[idx][0][1]):int(bboxes[idx][0][3]),
int(bboxes[idx][0][0]):int(bboxes[idx][0][2])].unsqueeze(0))
            except Exception:  # no face box detected; fall back to a plain center crop
cropped_image = crop_transform(image.unsqueeze(0))
cropped_batch.append(cropped_image)
return torch.cat(cropped_batch, dim=0)
def preprocess_images_to_id_encoder(self, images):
bboxes = [mtcnn.detect(to_pil(image))[0] for image in images]
cropped_images = self.crop_tensor_according_to_bboxes(images, bboxes)
return cropped_images
def forward(self, images):
cropped_images = self.preprocess_images_to_id_encoder(images)
img_embeddings = resnet(cropped_images)
return img_embeddings
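
# --- Hedged usage sketch (editor's addition): random tensors stand in for real
# face crops, and this toy run assumes Global_Config.device is the CPU so the
# tensor -> PIL conversion for MTCNN works without extra device handling.
if __name__ == "__main__":
    encoder = ID_Encoder()
    fake_batch = torch.rand(2, 3, 256, 256)  # RGB float images in [0, 1]
    print(encoder(fake_batch).shape)  # expected: torch.Size([2, 512])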
| 38.767442
| 102
| 0.673065
|
import torch
from facenet_pytorch import MTCNN, InceptionResnetV1
from torchvision import transforms
from Configs import Global_Config
IMAGE_SIZE = 220
mtcnn = MTCNN(
image_size=IMAGE_SIZE, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
device=Global_Config.device
)
to_pil = transforms.ToPILImage(mode='RGB')
crop_transform = transforms.Compose([transforms.Resize(IMAGE_SIZE),
transforms.CenterCrop(IMAGE_SIZE)])
resnet = InceptionResnetV1(pretrained='vggface2', classify=False).eval().to(Global_Config.device)
class ID_Encoder(torch.nn.Module):
def __init__(self):
super(ID_Encoder, self).__init__()
def crop_tensor_according_to_bboxes(self, images, bboxes):
cropped_batch = []
for idx, image in enumerate(images):
try:
cropped_image = crop_transform(image[:, int(bboxes[idx][0][1]):int(bboxes[idx][0][3]),
int(bboxes[idx][0][0]):int(bboxes[idx][0][2])].unsqueeze(0))
            except Exception:
cropped_image = crop_transform(image.unsqueeze(0))
cropped_batch.append(cropped_image)
return torch.cat(cropped_batch, dim=0)
def preprocess_images_to_id_encoder(self, images):
bboxes = [mtcnn.detect(to_pil(image))[0] for image in images]
cropped_images = self.crop_tensor_according_to_bboxes(images, bboxes)
return cropped_images
def forward(self, images):
cropped_images = self.preprocess_images_to_id_encoder(images)
img_embeddings = resnet(cropped_images)
return img_embeddings
| true
| true
|
7903e1b04695df81077c2c0893327902d34a6f6f
| 5,010
|
py
|
Python
|
TD/double_q_learning.py
|
hadleyhzy34/reinforcement_learning
|
14371756c2ff8225dc800d146452b7956875410c
|
[
"MIT"
] | null | null | null |
TD/double_q_learning.py
|
hadleyhzy34/reinforcement_learning
|
14371756c2ff8225dc800d146452b7956875410c
|
[
"MIT"
] | null | null | null |
TD/double_q_learning.py
|
hadleyhzy34/reinforcement_learning
|
14371756c2ff8225dc800d146452b7956875410c
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import gym
import random
# hyper parameters
# test 1
# alpha = 0.5
# gamma = 0.95
# epsilon = 0.1
epsilon = 0.1
alpha = 0.1
gamma = 0.1
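# Note (editor's addition): gamma = 0.1 is a very short-sighted discount; the
# commented-out "test 1" settings above use the more conventional gamma = 0.95.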
def update_sarsa_table(sarsa, state, action, reward, next_state, next_action, alpha, gamma):
    '''
    SARSA update for a state-action pair. The key difference from Q-learning is
    that the target uses the value of the next action actually selected by the
    (epsilon-greedy) behavior policy, rather than the greedy max over actions.
    '''
    next_max = sarsa[next_state,next_action]  # value of the selected next state-action pair
# print(f'current status is: {type(q[pre_state,action])},{type(alpha)},{type(reward)},{type(gamma)},{type(next_max)}')
sarsa[state,action] = sarsa[state,action] + alpha * (reward + gamma * next_max - sarsa[state,action])
def epsilon_greedy_policy_sarsa(env, state, sarsa, epsilon):
    '''
    Epsilon-greedy action selection over the SARSA table: explore with
    probability epsilon, otherwise act greedily w.r.t. sarsa[state].
    '''
if random.uniform(0,1) < epsilon:
return env.action_space.sample()
else:
return np.argmax(sarsa[state])
def epsilon_greedy_policy(env, state, q, epsilon):
    '''
    Epsilon-greedy action selection over the Q-table: explore with probability
    epsilon, otherwise act greedily w.r.t. q[state].
    '''
if random.uniform(0,1) < epsilon:
return env.action_space.sample()
else:
return np.argmax(q[state])
def update_q_table(q, pre_state, action, reward, next_state, alpha, gamma):
    '''
    Q-learning update: Q(S,A) <- Q(S,A) + alpha * (R + gamma * max_a Q(S',a) - Q(S,A)).
    '''
next_max = np.max(q[next_state]) # max state-action value for next state
# print(f'current status is: {type(q[pre_state,action])},{type(alpha)},{type(reward)},{type(gamma)},{type(next_max)}')
q[pre_state,action] = q[pre_state,action] + alpha * (reward + gamma * next_max - q[pre_state,action])
#-----------------------q learning-------------------------------------------
env = gym.make("Taxi-v3")
# initialize q table
q = np.zeros((env.observation_space.n, env.action_space.n))
q_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
reward_record = []
error_record = []
# loop for each episode:
for episode in range(5000):
r = 0
state = env.reset()
while True:# loop for each step of episode
# choose A from S using policy derived from Q(e.g, epsilon greedy policy)
action = epsilon_greedy_policy(env,state,q,epsilon)
# take action A, observe R, S'
next_state, reward, done, _ = env.step(action)
# update Q(S,A)
update_q_table(q,state,action,reward,next_state,alpha,gamma)
# S<--S'
state = next_state
r += reward
if done:
break
reward_record.append(r)
error = 0
for i in range(q.shape[0]):
error = error + np.sum(np.abs(q[i]-q_pre[i]))
# print(f'{np.abs(q[i]-q_pre[i])},{np.sum(np.abs(q[i]-q_pre[i]))}')
error_record.append(error)
q_pre = np.copy(q)
if episode%100 == 0:
print(f'{episode}th episode: {r}, {error}')
#close game env
env.close()
#plot diagram
# plt.plot(list(range(5000)),reward_record)
# plt.show()
# plt.plot(list(range(5000)),error_record)
# plt.show()
#double q learning
env = gym.make("Taxi-v3")
# initialize q table
q1 = np.zeros((env.observation_space.n, env.action_space.n))
q2 = np.zeros((env.observation_space.n, env.action_space.n))
q1_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
q2_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
# reward and error record
d_reward_record = []
d_error_record = []
# loop for each episode:
for episode in range(5000):
r = 0
state = env.reset()
while True:# loop for each step of episode
# choose A from S using policy derived from Q1+Q2(e.g, epsilon greedy policy)
action = epsilon_greedy_policy(env,state,q1+q2,epsilon)
# take action A, observe R, S'
next_state, reward, done, _ = env.step(action)
# with 0.5 probability:
if random.uniform(0,1) < 0.5:
update_q_table(q1,state,action,reward,next_state,alpha,gamma)
else:
update_q_table(q2,state,action,reward,next_state,alpha,gamma)
# S<--S'
state = next_state
r += reward
if done:
break
d_reward_record.append(r)
error = 0
for i in range(q.shape[0]):
error = error + 0.5 * np.sum(np.abs(q1[i]-q1_pre[i])) + 0.5 * np.sum(np.abs(q2[i]-q2_pre[i]))
# print(f'{np.abs(q[i]-q_pre[i])},{np.sum(np.abs(q[i]-q_pre[i]))}')
d_error_record.append(error)
q1_pre = np.copy(q1)
q2_pre = np.copy(q2)
if episode%100 == 0:
print(f'{episode}th episode: {r}, {error}')
#close game env
env.close()
#plot diagram
plt.plot(list(range(5000)),reward_record,label='q learning')
plt.plot(list(range(5000)),d_reward_record,label='double q learning')
plt.legend()
plt.show()
plt.plot(list(range(5000)),error_record,label='q learning')
plt.plot(list(range(5000)),d_error_record, label='double q learning')
plt.legend()
plt.show()
| 31.910828
| 122
| 0.645709
|
import numpy as np
import matplotlib.pyplot as plt
import gym
import random
epsilon = 0.1
alpha = 0.1
gamma = 0.1
def update_sarsa_table(sarsa, state, action, reward, next_state, next_action, alpha, gamma):
next_max = sarsa[next_state,next_action]
sarsa[state,action] = sarsa[state,action] + alpha * (reward + gamma * next_max - sarsa[state,action])
def epsilon_greedy_policy_sarsa(env, state, sarsa, epsilon):
if random.uniform(0,1) < epsilon:
return env.action_space.sample()
else:
return np.argmax(sarsa[state])
def epsilon_greedy_policy(env, state, q, epsilon):
if random.uniform(0,1) < epsilon:
return env.action_space.sample()
else:
return np.argmax(q[state])
def update_q_table(q, pre_state, action, reward, next_state, alpha, gamma):
next_max = np.max(q[next_state])
q[pre_state,action] = q[pre_state,action] + alpha * (reward + gamma * next_max - q[pre_state,action])
env = gym.make("Taxi-v3")
q = np.zeros((env.observation_space.n, env.action_space.n))
q_pre = np.zeros((env.observation_space.n, env.action_space.n))
reward_record = []
error_record = []
for episode in range(5000):
r = 0
state = env.reset()
while True:
action = epsilon_greedy_policy(env,state,q,epsilon)
next_state, reward, done, _ = env.step(action)
# update Q(S,A)
update_q_table(q,state,action,reward,next_state,alpha,gamma)
# S<--S'
state = next_state
r += reward
if done:
break
reward_record.append(r)
error = 0
for i in range(q.shape[0]):
error = error + np.sum(np.abs(q[i]-q_pre[i]))
error_record.append(error)
q_pre = np.copy(q)
if episode%100 == 0:
print(f'{episode}th episode: {r}, {error}')
env.close()
env = gym.make("Taxi-v3")
q1 = np.zeros((env.observation_space.n, env.action_space.n))
q2 = np.zeros((env.observation_space.n, env.action_space.n))
q1_pre = np.zeros((env.observation_space.n, env.action_space.n))
q2_pre = np.zeros((env.observation_space.n, env.action_space.n))
d_reward_record = []
d_error_record = []
for episode in range(5000):
r = 0
state = env.reset()
while True:
action = epsilon_greedy_policy(env,state,q1+q2,epsilon)
next_state, reward, done, _ = env.step(action)
# with 0.5 probability:
if random.uniform(0,1) < 0.5:
update_q_table(q1,state,action,reward,next_state,alpha,gamma)
else:
update_q_table(q2,state,action,reward,next_state,alpha,gamma)
# S<--S'
state = next_state
r += reward
if done:
break
d_reward_record.append(r)
error = 0
for i in range(q.shape[0]):
error = error + 0.5 * np.sum(np.abs(q1[i]-q1_pre[i])) + 0.5 * np.sum(np.abs(q2[i]-q2_pre[i]))
d_error_record.append(error)
q1_pre = np.copy(q1)
q2_pre = np.copy(q2)
if episode%100 == 0:
print(f'{episode}th episode: {r}, {error}')
env.close()
plt.plot(list(range(5000)),reward_record,label='q learning')
plt.plot(list(range(5000)),d_reward_record,label='double q learning')
plt.legend()
plt.show()
plt.plot(list(range(5000)),error_record,label='q learning')
plt.plot(list(range(5000)),d_error_record, label='double q learning')
plt.legend()
plt.show()
| true
| true
|
7903e3ec3e23fe818c6c939e0dc1de03ae3eef94
| 1,727
|
py
|
Python
|
tasks.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
tasks.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
tasks.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
import invoke
from pathlib import Path
PACKAGE = "src"
REQUIRED_COVERAGE = 90
BASE_DIR = Path(__file__).resolve().parent
@invoke.task(name="format")
def format_(arg):
autoflake = "autoflake -i --recursive --remove-all-unused-imports --remove-duplicate-keys --remove-unused-variables"
arg.run(f"{autoflake} {PACKAGE}", echo=True)
arg.run(f"isort {PACKAGE}", echo=True)
arg.run(f"black {PACKAGE}", echo=True)
@invoke.task(
help={
"style": "Check style with flake8, isort, and black",
"typing": "Check typing with mypy",
}
)
def check(arg, style=True, typing=True):
if style:
arg.run(f"flake8 {PACKAGE}", echo=True)
arg.run(f"isort --diff {PACKAGE} --check-only", echo=True)
arg.run(f"black --diff {PACKAGE} --check", echo=True)
if typing:
arg.run(f"mypy --no-incremental --cache-dir=/dev/null {PACKAGE}", echo=True)
@invoke.task
def test(arg):
arg.run(
f"pytest",
pty=True,
echo=True,
)
@invoke.task
def makemigrations(arg, message):
arg.run(f"cd {BASE_DIR} && alembic revision --autogenerate -m '{message}'", echo=True, pty=True)
@invoke.task
def migrate(arg):
arg.run(f"cd {BASE_DIR} && alembic upgrade head", echo=True)
@invoke.task
def hooks(arg):
invoke_path = Path(arg.run("which invoke", hide=True).stdout[:-1])
for src_path in Path(".hooks").iterdir():
dst_path = Path(".git/hooks") / src_path.name
print(f"Installing: {dst_path}")
with open(str(src_path), "r") as f:
src_data = f.read()
with open(str(dst_path), "w") as f:
f.write(src_data.format(invoke_path=invoke_path.parent))
arg.run(f"chmod +x {dst_path}")
| 26.569231
| 120
| 0.62652
|
import invoke
from pathlib import Path
PACKAGE = "src"
REQUIRED_COVERAGE = 90
BASE_DIR = Path(__file__).resolve().parent
@invoke.task(name="format")
def format_(arg):
autoflake = "autoflake -i --recursive --remove-all-unused-imports --remove-duplicate-keys --remove-unused-variables"
arg.run(f"{autoflake} {PACKAGE}", echo=True)
arg.run(f"isort {PACKAGE}", echo=True)
arg.run(f"black {PACKAGE}", echo=True)
@invoke.task(
help={
"style": "Check style with flake8, isort, and black",
"typing": "Check typing with mypy",
}
)
def check(arg, style=True, typing=True):
if style:
arg.run(f"flake8 {PACKAGE}", echo=True)
arg.run(f"isort --diff {PACKAGE} --check-only", echo=True)
arg.run(f"black --diff {PACKAGE} --check", echo=True)
if typing:
arg.run(f"mypy --no-incremental --cache-dir=/dev/null {PACKAGE}", echo=True)
@invoke.task
def test(arg):
arg.run(
f"pytest",
pty=True,
echo=True,
)
@invoke.task
def makemigrations(arg, message):
arg.run(f"cd {BASE_DIR} && alembic revision --autogenerate -m '{message}'", echo=True, pty=True)
@invoke.task
def migrate(arg):
arg.run(f"cd {BASE_DIR} && alembic upgrade head", echo=True)
@invoke.task
def hooks(arg):
invoke_path = Path(arg.run("which invoke", hide=True).stdout[:-1])
for src_path in Path(".hooks").iterdir():
dst_path = Path(".git/hooks") / src_path.name
print(f"Installing: {dst_path}")
with open(str(src_path), "r") as f:
src_data = f.read()
with open(str(dst_path), "w") as f:
f.write(src_data.format(invoke_path=invoke_path.parent))
arg.run(f"chmod +x {dst_path}")
| true
| true
|
7903e5d7ac19c9a0922ba869ee3c7668486d2480
| 2,507
|
py
|
Python
|
django-vue/djangoAPI/api/urls.py
|
BeautifulBeer/Youflix
|
751dcf257ce36b7ac597eaabcf4e67ab237f1eff
|
[
"Apache-2.0"
] | 3
|
2021-09-05T14:25:29.000Z
|
2021-12-13T05:06:24.000Z
|
django-vue/djangoAPI/api/urls.py
|
BeautifulBeer/Youflix
|
751dcf257ce36b7ac597eaabcf4e67ab237f1eff
|
[
"Apache-2.0"
] | 11
|
2020-06-06T00:51:00.000Z
|
2022-02-26T20:43:16.000Z
|
django-vue/djangoAPI/api/urls.py
|
BeautifulBeer/Youflix
|
751dcf257ce36b7ac597eaabcf4e67ab237f1eff
|
[
"Apache-2.0"
] | 3
|
2019-11-28T03:19:42.000Z
|
2019-12-04T06:22:33.000Z
|
from django.conf.urls import url
from api.views import movie_views
from api.views import auth_views
from api.views import rating_views
from api.views import recommend_views
from api.views import collabo_test
from api.views import content_based
from api.algorithms import kmeansClustering
urlpatterns = [
    # User access URLs
url(r'auth/signup-many/$', auth_views.signup_many, name='sign_up_many'),
url(r'auth/getUsers/$', auth_views.getUsers, name='get_users'),
url(r'auth/deleteUser/$', auth_views.deleteUser, name='delete_user'),
url(r'auth/similarUser/$', auth_views.similarUser, name='similarUser'),
url(r'^auth/loginmember/$', auth_views.login, name='login_member'),
url(r'^auth/registermember/$', auth_views.register, name='register_member'),
url(r'^auth/logoutmember/$', auth_views.logout, name='logout_member'),
url(r'^auth/session/$', auth_views.session_member, name="session_member"),
url(r'^auth/updateUser/$', auth_views.updateUser, name="update_user"),
url(r'^auth/predictRating/$', auth_views.predictMovieRating, name="predictRating"),
    # Duplicate-check endpoint
url(r'^auth/duplicateInspection/$', auth_views.duplicate_inspection, name="duplicate_inspection"),
    # Movie access URLs
url(r'movies/$', movie_views.movies, name='movie_list'),
url(r'movies/pref/$', movie_views.moviesPref, name='movie_pref'),
url(r'movies/views/$', movie_views.views, name='movie_views'),
url(r'movies/modify/$', movie_views.modify, name='movie_modify'),
url(r'movies/neverSeenMovies/$', movie_views.never_seen_movie_list, name='never_seen_movie_list'),
url(r'movies/faculites/$', movie_views.faculites, name='faculites'),
url(r'movies/rating/$', movie_views.get_rating_movie, name='get_rating_movie'),
    # Recommendation URL
url(r'^auth/recommendMovie/$', recommend_views.RecommendMovie, name='recommendMovie'),
    # Rating access URLs
url(r'rateMovie/$', rating_views.rate_movie, name='rate_movie'),
url(r'getRatings/$', rating_views.get_ratings, name='get_ratings'),
url(r'getEvaluatedRating/$', rating_views.get_evaluate_rating, name='get_evaluate_rating'),
url(r'ratings/comment/$', rating_views.create_comment, name='create_comment'),
    # Clustering execution URL
url('clustering/kmeansClustering/C/', kmeansClustering.C_Cluster, name="c_Cluster"),
# Content-Based Algorithm
url(r'preprocessing/$', content_based.preprocessing_for_cb, name='preprocessing'),
url(r'content_based/$', content_based.algorithm, name='content_based')
]
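# A minimal sketch (hypothetical snippet, not part of this urls.py; assumes
# Django settings are configured and this module is the root URLconf) showing
# how the name= arguments above avoid hard-coding paths:
#   from django.urls import reverse
#   reverse('movie_list')   # might resolve to '/movies/'
#   reverse('rate_movie')   # might resolve to '/rateMovie/'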
| 49.156863
| 102
| 0.735142
|
from django.conf.urls import url
from api.views import movie_views
from api.views import auth_views
from api.views import rating_views
from api.views import recommend_views
from api.views import collabo_test
from api.views import content_based
from api.algorithms import kmeansClustering
urlpatterns = [
url(r'auth/signup-many/$', auth_views.signup_many, name='sign_up_many'),
url(r'auth/getUsers/$', auth_views.getUsers, name='get_users'),
url(r'auth/deleteUser/$', auth_views.deleteUser, name='delete_user'),
url(r'auth/similarUser/$', auth_views.similarUser, name='similarUser'),
url(r'^auth/loginmember/$', auth_views.login, name='login_member'),
url(r'^auth/registermember/$', auth_views.register, name='register_member'),
url(r'^auth/logoutmember/$', auth_views.logout, name='logout_member'),
url(r'^auth/session/$', auth_views.session_member, name="session_member"),
url(r'^auth/updateUser/$', auth_views.updateUser, name="update_user"),
url(r'^auth/predictRating/$', auth_views.predictMovieRating, name="predictRating"),
url(r'^auth/duplicateInspection/$', auth_views.duplicate_inspection, name="duplicate_inspection"),
url(r'movies/$', movie_views.movies, name='movie_list'),
url(r'movies/pref/$', movie_views.moviesPref, name='movie_pref'),
url(r'movies/views/$', movie_views.views, name='movie_views'),
url(r'movies/modify/$', movie_views.modify, name='movie_modify'),
url(r'movies/neverSeenMovies/$', movie_views.never_seen_movie_list, name='never_seen_movie_list'),
url(r'movies/faculites/$', movie_views.faculites, name='faculites'),
url(r'movies/rating/$', movie_views.get_rating_movie, name='get_rating_movie'),
url(r'^auth/recommendMovie/$', recommend_views.RecommendMovie, name='recommendMovie'),
url(r'rateMovie/$', rating_views.rate_movie, name='rate_movie'),
url(r'getRatings/$', rating_views.get_ratings, name='get_ratings'),
url(r'getEvaluatedRating/$', rating_views.get_evaluate_rating, name='get_evaluate_rating'),
url(r'ratings/comment/$', rating_views.create_comment, name='create_comment'),
url('clustering/kmeansClustering/C/', kmeansClustering.C_Cluster, name="c_Cluster"),
url(r'preprocessing/$', content_based.preprocessing_for_cb, name='preprocessing'),
url(r'content_based/$', content_based.algorithm, name='content_based')
]
| true
| true
|
7903e5fc5527bf8e7e55c056bd8cef87c1bc7e04
| 6,476
|
py
|
Python
|
plugins/modules/oci_network_ip_sec_connection_device_status_facts.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_network_ip_sec_connection_device_status_facts.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_network_ip_sec_connection_device_status_facts.py
|
A7rMtWE57x/oci-ansible-collection
|
80548243a085cd53fd5dddaa8135b5cb43612c66
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_ip_sec_connection_device_status_facts
short_description: Fetches details about an IpSecConnectionDeviceStatus resource in Oracle Cloud Infrastructure
description:
    - Fetches details about an IpSecConnectionDeviceStatus resource in Oracle Cloud Infrastructure
- Deprecated. To get the tunnel status, instead use
L(GetIPSecConnectionTunnel,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/IPSecConnectionTunnel/GetIPSecConnectionTunnel).
version_added: "2.9"
author: Oracle (@oracle)
options:
ipsc_id:
description:
- The OCID of the IPSec connection.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific ip_sec_connection_device_status
oci_network_ip_sec_connection_device_status_facts:
ipsc_id: ocid1.ipsc.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
ip_sec_connection_device_status:
description:
- IpSecConnectionDeviceStatus resource
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment containing the IPSec connection.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
id:
description:
- The IPSec connection's Oracle ID (OCID).
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- The date and time the IPSec connection was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
tunnels:
description:
- Two L(TunnelStatus,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/TunnelStatus/) objects.
returned: on success
type: complex
contains:
ip_address:
description:
- The IP address of Oracle's VPN headend.
- "Example: `203.0.113.50`"
returned: on success
type: string
sample: 203.0.113.50
lifecycle_state:
description:
- The tunnel's current state.
returned: on success
type: string
sample: UP
time_created:
description:
- The date and time the IPSec connection was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
time_state_modified:
description:
- When the state of the tunnel last changed, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2016-08-25T21:10:29.600Z",
"tunnels": [{
"ip_address": "203.0.113.50",
"lifecycle_state": "UP",
"time_created": "2016-08-25T21:10:29.600Z",
"time_state_modified": "2016-08-25T21:10:29.600Z"
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.core import VirtualNetworkClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class IpSecConnectionDeviceStatusFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"ipsc_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_ip_sec_connection_device_status,
ipsc_id=self.module.params.get("ipsc_id"),
)
IpSecConnectionDeviceStatusFactsHelperCustom = get_custom_class(
"IpSecConnectionDeviceStatusFactsHelperCustom"
)
class ResourceFactsHelper(
IpSecConnectionDeviceStatusFactsHelperCustom,
IpSecConnectionDeviceStatusFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(dict(ipsc_id=dict(aliases=["id"], type="str", required=True),))
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="ip_sec_connection_device_status",
service_client_class=VirtualNetworkClient,
namespace="core",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(ip_sec_connection_device_status=result)
if __name__ == "__main__":
main()
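# A minimal sketch of the underlying SDK call this module wraps (hypothetical
# standalone snippet, shown as comments; assumes the oci Python SDK is
# installed, a default ~/.oci/config profile exists, and the OCID is a
# placeholder):
#   import oci
#   config = oci.config.from_file()
#   client = oci.core.VirtualNetworkClient(config)
#   status = client.get_ip_sec_connection_device_status(
#       "ocid1.ipsc.oc1..xxxxxxEXAMPLExxxxxx")
#   print(status.data)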
| 33.905759
| 150
| 0.637431
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_ip_sec_connection_device_status_facts
short_description: Fetches details about an IpSecConnectionDeviceStatus resource in Oracle Cloud Infrastructure
description:
    - Fetches details about an IpSecConnectionDeviceStatus resource in Oracle Cloud Infrastructure
- Deprecated. To get the tunnel status, instead use
L(GetIPSecConnectionTunnel,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/IPSecConnectionTunnel/GetIPSecConnectionTunnel).
version_added: "2.9"
author: Oracle (@oracle)
options:
ipsc_id:
description:
- The OCID of the IPSec connection.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific ip_sec_connection_device_status
oci_network_ip_sec_connection_device_status_facts:
ipsc_id: ocid1.ipsc.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
ip_sec_connection_device_status:
description:
- IpSecConnectionDeviceStatus resource
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment containing the IPSec connection.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
id:
description:
- The IPSec connection's Oracle ID (OCID).
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- The date and time the IPSec connection was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
tunnels:
description:
- Two L(TunnelStatus,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/TunnelStatus/) objects.
returned: on success
type: complex
contains:
ip_address:
description:
- The IP address of Oracle's VPN headend.
- "Example: `203.0.113.50`"
returned: on success
type: string
sample: 203.0.113.50
lifecycle_state:
description:
- The tunnel's current state.
returned: on success
type: string
sample: UP
time_created:
description:
- The date and time the IPSec connection was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
time_state_modified:
description:
- When the state of the tunnel last changed, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2016-08-25T21:10:29.600Z",
"tunnels": [{
"ip_address": "203.0.113.50",
"lifecycle_state": "UP",
"time_created": "2016-08-25T21:10:29.600Z",
"time_state_modified": "2016-08-25T21:10:29.600Z"
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.core import VirtualNetworkClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class IpSecConnectionDeviceStatusFactsHelperGen(OCIResourceFactsHelperBase):
def get_required_params_for_get(self):
return [
"ipsc_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_ip_sec_connection_device_status,
ipsc_id=self.module.params.get("ipsc_id"),
)
IpSecConnectionDeviceStatusFactsHelperCustom = get_custom_class(
"IpSecConnectionDeviceStatusFactsHelperCustom"
)
class ResourceFactsHelper(
IpSecConnectionDeviceStatusFactsHelperCustom,
IpSecConnectionDeviceStatusFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(dict(ipsc_id=dict(aliases=["id"], type="str", required=True),))
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="ip_sec_connection_device_status",
service_client_class=VirtualNetworkClient,
namespace="core",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(ip_sec_connection_device_status=result)
if __name__ == "__main__":
main()
| true
| true
|
7903e63cced7635c71fde7de7353f2137a297424
| 1,516
|
py
|
Python
|
tests/benchmark/milvus_benchmark/metrics/models/metric.py
|
NotRyan/milvus
|
1bd3205dbf84ee7734e9849d1e3be30ded1aa619
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark/milvus_benchmark/metrics/models/metric.py
|
NotRyan/milvus
|
1bd3205dbf84ee7734e9849d1e3be30ded1aa619
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark/milvus_benchmark/metrics/models/metric.py
|
NotRyan/milvus
|
1bd3205dbf84ee7734e9849d1e3be30ded1aa619
|
[
"Apache-2.0"
] | null | null | null |
import time
import datetime
import json
import hashlib
from .env import Env
from .server import Server
from .hardware import Hardware
class Metric(object):
def __init__(self):
# format of report data
self._version = '0.1'
self._type = 'metric'
self.run_id = None
self.mode = None
self.server = Server()
self.hardware = Hardware()
self.env = Env()
self.status = "INIT"
self.err_message = ""
self.collection = {}
self.index = {}
self.search = {}
self.run_params = {}
self.metrics = {
"type": "",
"value": None,
}
self.datetime = str(datetime.datetime.now())
def set_run_id(self):
# Get current time as run id, which uniquely identifies this test
self.run_id = int(time.time())
def set_mode(self, mode):
# Set the deployment mode of milvus
self.mode = mode
    # _type values: 'metric' (default) or 'case' (set by set_case_metric_type below)
def set_case_metric_type(self):
self._type = "case"
def json_md5(self):
json_str = json.dumps(vars(self), sort_keys=True)
return hashlib.md5(json_str.encode('utf-8')).hexdigest()
def update_status(self, status):
# Set the final result of the test run: RUN_SUCC or RUN_FAILED
self.status = status
def update_result(self, result):
self.metrics["value"].update(result)
def update_message(self, err_message):
self.err_message = err_message
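# A minimal usage sketch of Metric (comments only; illustrative values):
#   m = Metric()
#   m.set_run_id()                                # run_id = current epoch seconds
#   m.metrics = {"type": "search", "value": {}}   # "value" must be a dict here,
#   m.update_result({"qps": 1234.5})              # since update_result merges into it
#   m.update_status("RUN_SUCC")
# Note: json_md5() serializes vars(self), so nested attributes (server, env,
# hardware) must be JSON-serializable for it to succeed.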
| 27.071429
| 73
| 0.600923
|
import time
import datetime
import json
import hashlib
from .env import Env
from .server import Server
from .hardware import Hardware
class Metric(object):
def __init__(self):
self._version = '0.1'
self._type = 'metric'
self.run_id = None
self.mode = None
self.server = Server()
self.hardware = Hardware()
self.env = Env()
self.status = "INIT"
self.err_message = ""
self.collection = {}
self.index = {}
self.search = {}
self.run_params = {}
self.metrics = {
"type": "",
"value": None,
}
self.datetime = str(datetime.datetime.now())
def set_run_id(self):
self.run_id = int(time.time())
def set_mode(self, mode):
self.mode = mode
def set_case_metric_type(self):
self._type = "case"
def json_md5(self):
json_str = json.dumps(vars(self), sort_keys=True)
return hashlib.md5(json_str.encode('utf-8')).hexdigest()
def update_status(self, status):
self.status = status
def update_result(self, result):
self.metrics["value"].update(result)
def update_message(self, err_message):
self.err_message = err_message
| true
| true
|
7903e67a14415b573a7cb2ac7c96a447d8fc00f9
| 6,757
|
py
|
Python
|
example/gluon/tree_lstm/main.py
|
viper7882/mxnet_win32
|
8b05c0cf83026147efd70a21abb3ac25ca6099f1
|
[
"Apache-2.0"
] | 7
|
2017-08-04T07:10:22.000Z
|
2020-07-02T13:01:28.000Z
|
example/gluon/tree_lstm/main.py
|
yanghaojin/BMXNet
|
102f8d0ed59529bbd162c37bf07ae58ad6c4caa1
|
[
"Apache-2.0"
] | null | null | null |
example/gluon/tree_lstm/main.py
|
yanghaojin/BMXNet
|
102f8d0ed59529bbd162c37bf07ae58ad6c4caa1
|
[
"Apache-2.0"
] | 11
|
2018-02-27T15:32:09.000Z
|
2021-04-21T08:48:17.000Z
|
# This example is inspired by https://github.com/dasguptar/treelstm.pytorch
import argparse, cPickle, math, os, random
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
from tqdm import tqdm
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd as ag
from tree_lstm import SimilarityTreeLSTM
from dataset import Vocab, SICKDataIter
parser = argparse.ArgumentParser(description='TreeLSTM for Sentence Similarity on Dependency Trees')
parser.add_argument('--data', default='data/sick/',
help='path to raw dataset. required when preprocessed dataset is not available.')
parser.add_argument('--word_embed', default='data/glove/glove.840B.300d.txt',
help='directory with word embeddings. required when preprocessed dataset is not available.')
parser.add_argument('--batch_size', type=int, default=25,
help='training batch size per device (CPU/GPU).')
parser.add_argument('--epochs', default=50, type=int,
help='number of total epochs to run')
parser.add_argument('--lr', default=0.02, type=float,
help='initial learning rate')
parser.add_argument('--wd', default=0.0001, type=float,
help='weight decay factor')
parser.add_argument('--optimizer', default='adagrad',
help='optimizer (default: adagrad)')
parser.add_argument('--seed', default=123, type=int,
help='random seed (default: 123)')
parser.add_argument('--use-gpu', action='store_true',
help='whether to use GPU.')
opt = parser.parse_args()
logging.info(opt)
context = [mx.gpu(0) if opt.use_gpu else mx.cpu()]
rnn_hidden_size, sim_hidden_size, num_classes = 150, 50, 5
optimizer = opt.optimizer.lower()
mx.random.seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
batch_size = opt.batch_size
# read dataset
if os.path.exists('dataset.cPickle'):
with open('dataset.cPickle', 'rb') as f:
train_iter, dev_iter, test_iter, vocab = cPickle.load(f)
else:
root_dir = opt.data
segments = ['train', 'dev', 'test']
token_files = [os.path.join(root_dir, seg, '%s.toks'%tok)
for tok in ['a', 'b']
for seg in segments]
vocab = Vocab(filepaths=token_files, embedpath=opt.word_embed)
train_iter, dev_iter, test_iter = [SICKDataIter(os.path.join(root_dir, segment), vocab, num_classes)
for segment in segments]
with open('dataset.cPickle', 'wb') as f:
cPickle.dump([train_iter, dev_iter, test_iter, vocab], f)
logging.info('==> SICK vocabulary size : %d ' % vocab.size)
logging.info('==> Size of train data : %d ' % len(train_iter))
logging.info('==> Size of dev data : %d ' % len(dev_iter))
logging.info('==> Size of test data : %d ' % len(test_iter))
# get network
net = SimilarityTreeLSTM(sim_hidden_size, rnn_hidden_size, vocab.size, vocab.embed.shape[1], num_classes)
# use pearson correlation and mean-square error for evaluation
metric = mx.metric.create(['pearsonr', 'mse'])
def to_target(x):
target = np.zeros((1, num_classes))
ceil = int(math.ceil(x))
floor = int(math.floor(x))
if ceil==floor:
target[0][floor-1] = 1
else:
target[0][floor-1] = ceil - x
target[0][ceil-1] = x - floor
return mx.nd.array(target)
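# Worked example of the soft two-hot encoding above (comments only): for a gold
# similarity score of 3.6, ceil=4 and floor=3, so target[0][2] = 4 - 3.6 = 0.4
# and target[0][3] = 3.6 - 3 = 0.6, i.e. the probability mass is split between
# classes 3 and 4 in proportion to proximity: [0, 0, 0.4, 0.6, 0].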
def to_score(x):
levels = mx.nd.arange(1, 6, ctx=x.context)
return [mx.nd.sum(levels*mx.nd.exp(x), axis=1).reshape((-1,1))]
# when evaluating in validation mode, check and see if pearson-r is improved
# if so, checkpoint and run evaluation on test dataset
def test(ctx, data_iter, best, mode='validation', num_iter=-1):
data_iter.reset()
batches = len(data_iter)
data_iter.set_context(ctx[0])
preds = []
labels = [mx.nd.array(data_iter.labels, ctx=ctx[0]).reshape((-1,1))]
for _ in tqdm(range(batches), desc='Testing in {} mode'.format(mode)):
l_tree, l_sent, r_tree, r_sent, label = data_iter.next()
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
preds.append(z)
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info(mode+' acc: %s=%f'%(name, acc))
if name == 'pearsonr':
test_r = acc
if mode == 'validation' and num_iter >= 0:
if test_r >= best:
best = test_r
logging.info('New optimum found: {}. Checkpointing.'.format(best))
net.collect_params().save('childsum_tree_lstm_{}.params'.format(num_iter))
test(ctx, test_iter, -1, 'test')
return best
def train(epoch, ctx, train_data, dev_data):
# initialization with context
if isinstance(ctx, mx.Context):
ctx = [ctx]
net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx[0])
net.embed.weight.set_data(vocab.embed.as_in_context(ctx[0]))
train_data.set_context(ctx[0])
dev_data.set_context(ctx[0])
# set up trainer for optimizing the network.
trainer = gluon.Trainer(net.collect_params(), optimizer, {'learning_rate': opt.lr, 'wd': opt.wd})
best_r = -1
Loss = gluon.loss.KLDivLoss()
for i in range(epoch):
train_data.reset()
num_batches = len(train_data)
# collect predictions and labels for evaluation metrics
preds = []
labels = [mx.nd.array(train_data.labels, ctx=ctx[0]).reshape((-1,1))]
for j in tqdm(range(num_batches), desc='Training epoch {}'.format(i)):
# get next batch
l_tree, l_sent, r_tree, r_sent, label = train_data.next()
# use autograd to record the forward calculation
with ag.record():
# forward calculation. the output is log probability
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
# calculate loss
loss = Loss(z, to_target(label).as_in_context(ctx[0]))
# backward calculation for gradients.
loss.backward()
preds.append(z)
# update weight after every batch_size samples
if (j+1) % batch_size == 0:
trainer.step(batch_size)
# translate log-probability to scores, and evaluate
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info('training acc at epoch %d: %s=%f'%(i, name, acc))
best_r = test(ctx, dev_data, best_r, num_iter=i)
train(opt.epochs, context, train_iter, dev_iter)
| 39.284884
| 112
| 0.636969
|
import argparse, cPickle, math, os, random
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
from tqdm import tqdm
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd as ag
from tree_lstm import SimilarityTreeLSTM
from dataset import Vocab, SICKDataIter
parser = argparse.ArgumentParser(description='TreeLSTM for Sentence Similarity on Dependency Trees')
parser.add_argument('--data', default='data/sick/',
help='path to raw dataset. required when preprocessed dataset is not available.')
parser.add_argument('--word_embed', default='data/glove/glove.840B.300d.txt',
help='directory with word embeddings. required when preprocessed dataset is not available.')
parser.add_argument('--batch_size', type=int, default=25,
help='training batch size per device (CPU/GPU).')
parser.add_argument('--epochs', default=50, type=int,
help='number of total epochs to run')
parser.add_argument('--lr', default=0.02, type=float,
help='initial learning rate')
parser.add_argument('--wd', default=0.0001, type=float,
help='weight decay factor')
parser.add_argument('--optimizer', default='adagrad',
help='optimizer (default: adagrad)')
parser.add_argument('--seed', default=123, type=int,
help='random seed (default: 123)')
parser.add_argument('--use-gpu', action='store_true',
help='whether to use GPU.')
opt = parser.parse_args()
logging.info(opt)
context = [mx.gpu(0) if opt.use_gpu else mx.cpu()]
rnn_hidden_size, sim_hidden_size, num_classes = 150, 50, 5
optimizer = opt.optimizer.lower()
mx.random.seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
batch_size = opt.batch_size
if os.path.exists('dataset.cPickle'):
with open('dataset.cPickle', 'rb') as f:
train_iter, dev_iter, test_iter, vocab = cPickle.load(f)
else:
root_dir = opt.data
segments = ['train', 'dev', 'test']
token_files = [os.path.join(root_dir, seg, '%s.toks'%tok)
for tok in ['a', 'b']
for seg in segments]
vocab = Vocab(filepaths=token_files, embedpath=opt.word_embed)
train_iter, dev_iter, test_iter = [SICKDataIter(os.path.join(root_dir, segment), vocab, num_classes)
for segment in segments]
with open('dataset.cPickle', 'wb') as f:
cPickle.dump([train_iter, dev_iter, test_iter, vocab], f)
logging.info('==> SICK vocabulary size : %d ' % vocab.size)
logging.info('==> Size of train data : %d ' % len(train_iter))
logging.info('==> Size of dev data : %d ' % len(dev_iter))
logging.info('==> Size of test data : %d ' % len(test_iter))
net = SimilarityTreeLSTM(sim_hidden_size, rnn_hidden_size, vocab.size, vocab.embed.shape[1], num_classes)
metric = mx.metric.create(['pearsonr', 'mse'])
def to_target(x):
target = np.zeros((1, num_classes))
ceil = int(math.ceil(x))
floor = int(math.floor(x))
if ceil==floor:
target[0][floor-1] = 1
else:
target[0][floor-1] = ceil - x
target[0][ceil-1] = x - floor
return mx.nd.array(target)
def to_score(x):
levels = mx.nd.arange(1, 6, ctx=x.context)
return [mx.nd.sum(levels*mx.nd.exp(x), axis=1).reshape((-1,1))]
def test(ctx, data_iter, best, mode='validation', num_iter=-1):
data_iter.reset()
batches = len(data_iter)
data_iter.set_context(ctx[0])
preds = []
labels = [mx.nd.array(data_iter.labels, ctx=ctx[0]).reshape((-1,1))]
for _ in tqdm(range(batches), desc='Testing in {} mode'.format(mode)):
l_tree, l_sent, r_tree, r_sent, label = data_iter.next()
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
preds.append(z)
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info(mode+' acc: %s=%f'%(name, acc))
if name == 'pearsonr':
test_r = acc
if mode == 'validation' and num_iter >= 0:
if test_r >= best:
best = test_r
logging.info('New optimum found: {}. Checkpointing.'.format(best))
net.collect_params().save('childsum_tree_lstm_{}.params'.format(num_iter))
test(ctx, test_iter, -1, 'test')
return best
def train(epoch, ctx, train_data, dev_data):
if isinstance(ctx, mx.Context):
ctx = [ctx]
net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx[0])
net.embed.weight.set_data(vocab.embed.as_in_context(ctx[0]))
train_data.set_context(ctx[0])
dev_data.set_context(ctx[0])
trainer = gluon.Trainer(net.collect_params(), optimizer, {'learning_rate': opt.lr, 'wd': opt.wd})
best_r = -1
Loss = gluon.loss.KLDivLoss()
for i in range(epoch):
train_data.reset()
num_batches = len(train_data)
preds = []
labels = [mx.nd.array(train_data.labels, ctx=ctx[0]).reshape((-1,1))]
for j in tqdm(range(num_batches), desc='Training epoch {}'.format(i)):
l_tree, l_sent, r_tree, r_sent, label = train_data.next()
with ag.record():
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
loss = Loss(z, to_target(label).as_in_context(ctx[0]))
loss.backward()
preds.append(z)
if (j+1) % batch_size == 0:
trainer.step(batch_size)
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info('training acc at epoch %d: %s=%f'%(i, name, acc))
best_r = test(ctx, dev_data, best_r, num_iter=i)
train(opt.epochs, context, train_iter, dev_iter)
| true
| true
|
7903e7a52cbd85ee4be424abff335c43fb6de6c5
| 2,657
|
py
|
Python
|
tensorflow_probability/python/internal/test_combinations_test.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 3,670
|
2018-02-14T03:29:40.000Z
|
2022-03-30T01:19:52.000Z
|
tensorflow_probability/python/internal/test_combinations_test.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 1,395
|
2018-02-24T02:28:49.000Z
|
2022-03-31T16:12:06.000Z
|
tensorflow_probability/python/internal/test_combinations_test.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 1,135
|
2018-02-14T01:51:10.000Z
|
2022-03-28T02:24:11.000Z
|
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests generating test combinations."""
from collections import OrderedDict
# Dependency imports
from tensorflow_probability.python.internal import test_combinations
from tensorflow_probability.python.internal import test_util
class TestingCombinationsTest(test_util.TestCase):
def test_combine(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 1,
"b": 3
}, {
"a": 2,
"b": 2
}, {
"a": 2,
"b": 3
}], test_combinations.combine(a=[1, 2], b=[2, 3]))
def test_arguments_sorted(self):
self.assertEqual([
OrderedDict([("aa", 1), ("ab", 2)]),
OrderedDict([("aa", 1), ("ab", 3)]),
OrderedDict([("aa", 2), ("ab", 2)]),
OrderedDict([("aa", 2), ("ab", 3)])
], test_combinations.combine(ab=[2, 3], aa=[1, 2]))
def test_combine_single_parameter(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 2,
"b": 2
}], test_combinations.combine(a=[1, 2], b=2))
def test_add(self):
self.assertEqual(
[{
"a": 1
}, {
"a": 2
}, {
"b": 2
}, {
"b": 3
}],
(test_combinations.combine(a=[1, 2]) +
test_combinations.combine(b=[2, 3])))
@test_combinations.generate(
test_combinations.combine(a=[1, 0], b=[2, 3], c=[1]))
class CombineTheTestSuite(test_util.TestCase):
def test_add_things(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def test_add_things_one_more(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def not_a_test(self, a=0, b=0, c=0):
del a, b, c
self.fail()
def _test_but_private(self, a=0, b=0, c=0):
del a, b, c
self.fail()
  # Check that nothing funny happens to a non-callable whose name starts with "test".
test_member = 0
if __name__ == "__main__":
test_util.main()
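# A pure-Python sketch of the combine() semantics exercised above (comments
# only; a hypothetical equivalent, not the library implementation): argument
# names are sorted, scalars broadcast to one-element lists, and the cartesian
# product is returned as a list of dicts.
#   import itertools
#   def combine(**kwargs):
#       keys = sorted(kwargs)
#       vals = [kwargs[k] if isinstance(kwargs[k], list) else [kwargs[k]]
#               for k in keys]
#       return [dict(zip(keys, combo)) for combo in itertools.product(*vals)]
#   assert combine(a=[1, 2], b=2) == [{"a": 1, "b": 2}, {"a": 2, "b": 2}]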
| 26.838384
| 80
| 0.579601
|
from collections import OrderedDict
from tensorflow_probability.python.internal import test_combinations
from tensorflow_probability.python.internal import test_util
class TestingCombinationsTest(test_util.TestCase):
def test_combine(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 1,
"b": 3
}, {
"a": 2,
"b": 2
}, {
"a": 2,
"b": 3
}], test_combinations.combine(a=[1, 2], b=[2, 3]))
def test_arguments_sorted(self):
self.assertEqual([
OrderedDict([("aa", 1), ("ab", 2)]),
OrderedDict([("aa", 1), ("ab", 3)]),
OrderedDict([("aa", 2), ("ab", 2)]),
OrderedDict([("aa", 2), ("ab", 3)])
], test_combinations.combine(ab=[2, 3], aa=[1, 2]))
def test_combine_single_parameter(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 2,
"b": 2
}], test_combinations.combine(a=[1, 2], b=2))
def test_add(self):
self.assertEqual(
[{
"a": 1
}, {
"a": 2
}, {
"b": 2
}, {
"b": 3
}],
(test_combinations.combine(a=[1, 2]) +
test_combinations.combine(b=[2, 3])))
@test_combinations.generate(
test_combinations.combine(a=[1, 0], b=[2, 3], c=[1]))
class CombineTheTestSuite(test_util.TestCase):
def test_add_things(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def test_add_things_one_more(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def not_a_test(self, a=0, b=0, c=0):
del a, b, c
self.fail()
def _test_but_private(self, a=0, b=0, c=0):
del a, b, c
self.fail()
test_member = 0
if __name__ == "__main__":
test_util.main()
| true
| true
|
7903e7f157b9f443705e4011e434c2ed6a5dbe99
| 2,685
|
py
|
Python
|
DQM/Integration/python/clients/info_dqm_sourceclient-live_cfg.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 2
|
2020-01-27T15:21:37.000Z
|
2020-05-11T11:13:18.000Z
|
DQM/Integration/python/clients/info_dqm_sourceclient-live_cfg.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 8
|
2020-03-20T23:18:36.000Z
|
2020-05-27T11:00:06.000Z
|
DQM/Integration/python/clients/info_dqm_sourceclient-live_cfg.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 3
|
2019-03-09T13:06:43.000Z
|
2020-07-03T00:47:30.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("DQM")
# message logger
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
cout = cms.untracked.PSet(threshold = cms.untracked.string('WARNING'))
)
#----------------------------
#### Event Source
#----------------------------
# for live online DQM in P5
process.load("DQM.Integration.config.inputsource_cfi")
# for testing in lxplus
#process.load("DQM.Integration.config.fileinputsource_cfi")
# Global tag - Condition for P5 cluster
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
#----------------------------
#### DQM Environment
#----------------------------
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = 'Info'
process.dqmSaver.tag = 'Info'
#-----------------------------
# Digitisation: produce the Scalers digis containing DCS bits
process.load("EventFilter.ScalersRawToDigi.ScalersRawToDigi_cfi")
# Digitisation: produce the TCDS digis containing BST record
from EventFilter.Utilities.tcdsRawToDigi_cfi import *
process.tcdsDigis = tcdsRawToDigi.clone()
# OnlineMetaDataRawToDigi puts a DCSRecord into the event
process.load('EventFilter.OnlineMetaDataRawToDigi.onlineMetaDataRawToDigi_cfi')
process.onlineMetaDataDigis = cms.EDProducer('OnlineMetaDataRawToDigi')
# DQMProvInfo is the DQM module to be run
process.load("DQMServices.Components.DQMProvInfo_cfi")
# DQM Modules
process.dqmmodules = cms.Sequence(process.dqmEnv + process.dqmSaver)
process.evfDQMmodulesPath = cms.Path(
process.scalersRawToDigi*
process.tcdsDigis*
process.onlineMetaDataRawToDigi*
process.dqmProvInfo*
process.dqmmodules
)
process.schedule = cms.Schedule(process.evfDQMmodulesPath)
process.dqmProvInfo.runType = process.runType.getRunTypeName()
# Heavy Ion Specific Fed Raw Data Collection Label
if (process.runType.getRunType() == process.runType.hi_run):
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataRepacker")
process.tcdsDigis.InputLabel = cms.InputTag("rawDataRepacker")
else:
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataCollector")
process.tcdsDigis.InputLabel = cms.InputTag("rawDataCollector")
# Process customizations included here
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
| 39.485294
| 106
| 0.664804
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("DQM")
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
cout = cms.untracked.PSet(threshold = cms.untracked.string('WARNING'))
)
source_cfi")
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
")
process.dqmEnv.subSystemFolder = 'Info'
process.dqmSaver.tag = 'Info'
process.load("EventFilter.ScalersRawToDigi.ScalersRawToDigi_cfi")
from EventFilter.Utilities.tcdsRawToDigi_cfi import *
process.tcdsDigis = tcdsRawToDigi.clone()
process.load('EventFilter.OnlineMetaDataRawToDigi.onlineMetaDataRawToDigi_cfi')
process.onlineMetaDataDigis = cms.EDProducer('OnlineMetaDataRawToDigi')
process.load("DQMServices.Components.DQMProvInfo_cfi")
process.dqmmodules = cms.Sequence(process.dqmEnv + process.dqmSaver)
process.evfDQMmodulesPath = cms.Path(
process.scalersRawToDigi*
process.tcdsDigis*
process.onlineMetaDataRawToDigi*
process.dqmProvInfo*
process.dqmmodules
)
process.schedule = cms.Schedule(process.evfDQMmodulesPath)
process.dqmProvInfo.runType = process.runType.getRunTypeName()
if (process.runType.getRunType() == process.runType.hi_run):
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataRepacker")
process.tcdsDigis.InputLabel = cms.InputTag("rawDataRepacker")
else:
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataCollector")
process.tcdsDigis.InputLabel = cms.InputTag("rawDataCollector")
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
| true
| true
|
7903e806d922d6b01e49d831aa4186dddf3a4e15
| 132,055
|
py
|
Python
|
netpyne/metadata/metadata.py
|
naiduv/netpyne
|
6ecfe1b7223d3e40615274bfec9d53e7d03b534a
|
[
"MIT"
] | 1
|
2021-04-21T16:48:17.000Z
|
2021-04-21T16:48:17.000Z
|
netpyne/metadata/metadata.py
|
bikramkhastgir/netpyne
|
20d2dfdecf303c779d6ab97e6ef579835798beb1
|
[
"MIT"
] | 1
|
2021-05-04T00:42:12.000Z
|
2021-05-04T00:42:12.000Z
|
netpyne/metadata/metadata.py
|
bikramkhastgir/netpyne
|
20d2dfdecf303c779d6ab97e6ef579835798beb1
|
[
"MIT"
] | null | null | null |
"""
Module containing NetPyNE metadata
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
metadata = {
# ---------------------------------------------------------------------------------------------------------------------
# netParams
# ---------------------------------------------------------------------------------------------------------------------
"netParams": {
"label": "Network Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"popParams": {
"label": "Population Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"cellType": {
"label": "Cell type",
"suggestions": "",
"help": "Arbitrary cell type attribute/tag assigned to all cells in this population; can be used as condition to apply specific cell properties. e.g. 'Pyr' (for pyramidal neurons) or 'FS' (for fast-spiking interneurons)",
"hintText": "",
"type": "str"
},
"numCells": {
"label": "Number of cells",
"suggestions": "",
"help": "The total number of cells in this population.",
"hintText": "number of cells",
"type": "int"
},
"density": {
"label": "Cell density (neurons/mm^3)",
"suggestions": "",
"help": "The cell density in neurons/mm3. The volume occupied by each population can be customized (see xRange, yRange and zRange); otherwise the full network volume will be used (defined in netParams: sizeX, sizeY, sizeZ). density can be expressed as a function of normalized location (xnorm, ynorm or znorm), by providing a string with the variable and any common Python mathematical operators/functions. e.g. '1e5 * exp(-ynorm/2)'. ",
"hintText": "density in neurons/mm3",
"type": "str"
},
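                    # Worked example for the string-based density above
                    # (comment only): 'density': '1e5 * exp(-ynorm/2)' yields
                    # 1e5 neurons/mm3 at ynorm=0 and ~6.07e4 (1e5 * exp(-0.5))
                    # at ynorm=1.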
"gridSpacing": {
"label": "Grid spacing (um)",
"suggestions": "",
"help": "Fixed grid spacing between cells (in um). Cells will be placed in a grid, with the total number of cells be determined based on spacing and sizeX, sizeY, sizeZ. e.g. a spacing of 20 with sizeX=sizeY=sizeZ=100 will lead to 5*5*5=125 cells.",
"hintText": "fixed grid spacing",
"type": "int"
},
"cellModel": {
"label": "Cell model",
"help": "Can be either 1) an arbitrary cell model attribute/tag assigned to all cells in this population, and used later as a condition to apply specific cell properties. e.g. 'HH' (standard Hodkgin-Huxley type cell model) or 'Izhi2007' (Izhikevich point neuron model), 2) a point process artificial cell, with its parameters defined directly in this population entry, i.e. no need for cell propoerties (e.g. 'NetStim', VecStim', 'IntFire1')",
"suggestions": [
"VecStim",
"NetStim",
"IntFire1"
],
"type": "str"
},
"xRange": {
"label": "X-axis range (um)",
"help": "Range of neuron positions in x-axis (horizontal length), specified as a 2-element list [min, max] using absolute values in um (e.g.[100, 200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"xnormRange": {
"label": "X-axis normalized range (0-1)",
"help": "Range of neuron positions in x-axis (horizontal length), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeX (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"default": [
0,
1
],
"type": "list(float)"
},
"yRange": {
"label": "Y-axis range (um)",
"help": "Range of neuron positions in y-axis (vertical height=cortical depth), specified as 2-element list [min, max] using absolute values in um (e.g.[100,200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"ynormRange": {
"label": "Y-axis normalized range (0-1)",
"help": "Range of neuron positions in y-axis (vertical height=cortical depth), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeY (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"zRange": {
"label": "Z-axis range (um)",
"help": "Range of neuron positions in z-axis (horizontal depth), specified as a 2-element list [min, max] using absolute value in um (e.g.[100,200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"znormRange": {
"label": "Z-axis normalized range (0-1)",
"help": "Range of neuron positions in z-axis (horizontal depth), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeZ (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"interval": {
"label": "Spike interval (ms)",
"help": "Spike interval in ms.",
"suggestions": "",
"hintText": "50",
"type": "float"
},
"rate": {
"label": "Firing rate (Hz)",
"help": "Firing rate in Hz (note this is the inverse of the NetStim interval property).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"noise": {
"label": "Noise fraction (0-1)",
"help": "Fraction of noise in NetStim (0 = deterministic; 1 = completely random).",
"suggestions": "",
"hintText": "0.5",
"type": "list(float)"
},
"start": {
"label": "Start time (ms)",
"help": "Time of first spike in ms (default = 0).",
"suggestions": "",
"hintText": "0",
"type": "list(float)"
},
"number": {
"label": "Max number of spikes",
"help": "Max number of spikes generated (default = 1e12).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"seed": {
"label": "Randomizer seed (optional)",
"help": " Seed for randomizer (optional; defaults to value set in simConfig.seeds['stim'])",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"spkTimes": {
"label": "Spike times",
"help": "List of spike times (only for 'VecStim') e.g. [1, 10, 40, 50], range(1,500,10), or any variable containing a Python list.",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"pulses": {
"label": "Pulses",
"help": "List of spiking pulses (only for 'VecStim'); each item includes the start (ms), end (ms), rate (Hz), and noise (0 to 1) pulse parameters. ",
"suggestions": "",
"hintText": "",
"type": "list(float)"
}
}
},
"scale": {
"label": "scale factor",
"help": "Scale factor multiplier for number of cells (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"shape": {
"label": "network shape",
"help": "Shape of network: 'cuboid', 'cylinder' or 'ellipsoid' (default: 'cuboid')",
"suggestions": "",
"hintText": "",
"options": [
"cuboid",
"cylinder",
"ellipsoid"
],
"default": "cuboid",
"type": "str"
},
"sizeX": {
"label": "x-dimension",
"help": "x-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"sizeY": {
"label": "y-dimension",
"help": "y-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"sizeZ": {
"label": "z-dimension",
"help": "z-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"rotateCellsRandomly": {
"label": "random rotation",
"help": "Random rotation of cells around y-axis [min,max] radians, e.g. [0, 3.0] (default: False)",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"defaultWeight": {
"label": "default weight connection",
"help": "Default connection weight (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"defaultDelay": {
"label": "default delay",
"help": "Default connection delay, in ms (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"propVelocity": {
"label": "conduction velocity",
"help": "Conduction velocity in um/ms (e.g. 500 um/ms = 0.5 m/s) (default: 500)",
"suggestions": "",
"hintText": "",
"default": 500,
"type": "float"
},
"scaleConnWeight": {
"label": "connection weight scale factor",
"help": "Connection weight scale factor (excludes NetStims) (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"scaleConnWeightNetStims": {
"label": "connection weight scale factor for NetStims",
"help": "Connection weight scale factor for NetStims (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"scaleConnWeightModels": {
"label": "Connection weight scale factor for each cell model",
"help": "Connection weight scale factor for each cell model, e.g. {'HH': 0.1, 'Izhi': 0.2} (default: {})",
"suggestions": "",
"hintText": "",
"type": "dict"
},
"popTagsCopiedToCells": {
"label": "",
"help": "List of tags that will be copied from the population to the cells (default: ['pop', 'cellModel', 'cellType'])}",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
# ---------------------------------------------------------------------------------------------------------------------
# netParams.cellParams
# ---------------------------------------------------------------------------------------------------------------------
"cellParams": {
"label": "Cell Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"conds": {
"label": "Conds",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"pop": {
"label": "Population",
"help": "Apply the cell rule only to cells belonging to this population (or list of populations).",
"suggestions": "",
"hintText": "",
"type": "list(str)"
},
"cellType": {
"label": "Cell type",
"suggestions": "",
"help": "Apply the cell rule only to cells with this cell type attribute/tag.",
"hintText": "",
"type": "list(str)"
},
"cellModel": {
"label": "Cell model",
"suggestions": "",
"help": "Apply the cell rule only to cells with this cell model attribute/tag.",
"hintText": "",
"type": "list(str)"
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these x-axis locations.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these y-axis locations.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these z-axis locations.",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized x-axis locations.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized y-axis locations.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized z-axis locations.",
"hintText": ""
}
}
},
"secs": {
"label": "Sections",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"geom": {
"label": "Cell geometry",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"diam": {
"label": "Diameter (um)",
"default": 10,
"suggestions": "",
"help": "",
"hintText": "10",
"type": "float"
},
"L": {
"label": "Length (um)",
"default": 50,
"suggestions": "",
"help": "",
"hintText": "50",
"type": "float"
},
"Ra": {
"label": "Axial resistance, Ra (ohm-cm)",
"default": 100,
"suggestions": "",
"help": "",
"hintText": "100",
"type": "float"
},
"cm": {
"label": "Membrane capacitance, cm (uF/cm2)",
"suggestions": "",
"help": "",
"hintText": "1",
"type": "float"
},
"pt3d": {
"label": "3D points",
"suggestions": "",
"help": "",
"hintText": "",
"type": "list(list(float))"
},
"nseg": {
"label": "Number of segments, nseg",
"default": 1,
"suggestions": "",
"help": "",
"hintText": "1",
"type": "float"
}
},
"mechs": {
"label": "Mechanisms",
"help": "Dictionary of density/distributed mechanisms, including the name of the mechanism (e.g. hh or pas) and a list of properties of the mechanism (e.g. {'g': 0.003, 'e': -70}).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"ions": {
"label": "Ions",
"help": "Dictionary of ions, including the name of the ion (e.g. hh or pas) and a list of properties of the ion (e.g. {'e': -70}).",
"suggestions": "",
"hintText": ""
},
"pointps": {
"label": "Point processes",
"help": "Dictionary of point processes (excluding synaptic mechanisms). The key contains an arbitrary label (e.g. 'Izhi') The value contains a dictionary with the point process properties (e.g. {'mod':'Izhi2007a', 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}).",
"suggestions": "",
"hintText": "",
"children": {
"mod": {
"label": "Point process name",
"help": "The name of the NEURON mechanism, e.g. 'Izhi2007a'",
"suggestions": "",
"hintText": "",
"type": "float"
},
"loc": {
"label": "Location (0-1)",
"help": "Section location where to place synaptic mechanism, e.g. 1.0, default=0.5.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"vref": {
"label": "Point process variable for voltage (optional)",
"help": "Internal mechanism variable containing the cell membrane voltage, e.g. 'V'.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"synList": {
"label": "Point process list of synapses (optional)",
"help": "list of internal mechanism synaptic mechanism labels, e.g. ['AMPA', 'NMDA', 'GABAB'].",
"suggestions": "",
"hintText": "",
"type": "float"
}
},
"vinit": {
"label": "Initial membrance voltage, vinit (mV)",
"help": "(optional) Initial membrane voltage (in mV) of the section (default: -65).e.g. cellRule['secs']['soma']['vinit'] = -72",
"suggestions": "",
"hintText": ""
},
"spikeGenLoc": {
"label": "Spike generation location (0-1)",
"help": "(optional) Indicates that this section is responsible for spike generation (instead of the default 'soma'), and provides the location (segment) where spikes are generated.e.g. cellRule['secs']['axon']['spikeGenLoc'] = 1.0.",
"suggestions": "",
"hintText": ""
},
"threshold": {
"label": "Spike threshold voltage (mV)",
"help": "(optional) Threshold voltage (in mV) used to detect a spike originating in this section of the cell. If omitted, defaults to netParams.defaultThreshold = 10.0.e.g. cellRule['secs']['soma']['threshold'] = 5.0.",
"suggestions": "",
"hintText": ""
}
},
"secLists": {
"label": "Section lists (optional) ",
"help": "Dictionary of sections lists (e.g. {'all': ['soma', 'dend']})",
"suggestions": "",
"hintText": ""
}
},
"topol": {
"label": "Topology",
"help": "Topological properties, including parentSec (label of parent section), parentX (parent location where to make connection) and childX (current section child location where to make connection).",
"suggestions": "",
"hintText": "",
"children": {
"parentSec": {
"label": "Parent Section",
"suggestions": [
"soma"
],
"help": "label of parent section",
"hintText": "soma",
"type": "str"
},
"parentX": {
"label": "Parent connection location",
"suggestions": [
0,
1
],
"help": "Parent location where to make connection",
"hintText": "1",
"type": "float"
},
"childX": {
"label": "Child connection location",
"suggestions": [
0,
1
],
"help": "Current section child location where to make connection",
"hintText": "1",
"type": "float"
}
}
}
}
}
}
},
# ---------------------------------------------------------------------------------------------------------------------
# netParams.synMechParams
# ---------------------------------------------------------------------------------------------------------------------
"synMechParams": {
"label": "Synaptic mechanism parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"mod": {
"label": "NMODL mechanism name",
"help": "The NMODL mechanism name (e.g. 'ExpSyn'); note this does not always coincide with the name of the mod file.",
"suggestions": "",
"options": [
"ExpSyn",
"Exp2Syn"
],
"hintText": "",
"type": "str"
},
"selfNetCon": {
"label": "Self NetCon parameters",
"help": "Dict with parameters of NetCon between the cell voltage and the synapse, required by some synaptic mechanisms such as the homeostatic synapse (hsyn). e.g. 'selfNetCon': {'sec': 'soma' , threshold: -15, 'weight': -1, 'delay': 0} (by default the source section, 'sec' = 'soma').",
"suggestions": "",
"hintText": ""
},
"tau1": {
"label": "Time constant for exponential 1 (ms)",
"help": "Define the time constant for the first exponential.",
"suggestions": "",
"hintText": "1",
"type": "float"
},
"tau2": {
"label": "Time constant for exponential 2 (ms)",
"help": "Define the time constant for the second exponential.",
"suggestions": "",
"hintText": "5",
"type": "float"
},
"e": {
"label": "Reversal potential (mV)",
"help": "Reversal potential of the synaptic receptors.",
"suggestions": "",
"hintText": "0",
"type": "float"
},
"i": {
"label": "synaptic current (nA)",
"help": "Synaptic current in nA.",
"suggestions": "",
"hintText": "10",
"type": "float"
}
}
},
# ---------------------------------------------------------------------------------------------------------------------
# netParams.connParams
# ---------------------------------------------------------------------------------------------------------------------
"connParams": {
"label": "Connectivity parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"preConds": {
"label": "Conditions for the presynaptic cells",
"help": "Presynaptic cell conditions defined using attributes/tags and the required value e.g. {'cellType': 'PYR'}. Values can be lists, e.g. {'pop': ['Exc1', 'Exc2']}. For location properties, the list values correspond to the min and max values, e.g. {'ynorm': [0.1, 0.6]}.",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Population (multiple selection available)",
"suggestions": "",
"help": "Cells belonging to this population (or list of populations) will be connected pre-synaptically.",
"hintText": ""
},
"cellType": {
"label": "Cell type (multiple selection available)",
"suggestions": "",
"help": "Ccells with this cell type attribute/tag will be connected pre-synaptically.",
"hintText": ""
},
"cellModel": {
"label": "Cell model (multiple selection available)",
"suggestions": "",
"help": "Cells with this cell model attribute/tag will be connected pre-synaptically.",
"hintText": ""
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within these x-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within these y-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within these z-axis locations will be connected pre-synaptically..",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells within these normalized x-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within these normalized y-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within these normalized z-axis locations will be connected pre-synaptically.",
"hintText": ""
}
}
},
"postConds": {
"label": "Conditions for the postsynaptic cells",
"help": "Defined as a dictionary with the attributes/tags of the postsynaptic cell and the required values e.g. {'cellType': 'PYR'}. Values can be lists, e.g. {'pop': ['Exc1', 'Exc2']}. For location properties, the list values correspond to the min and max values, e.g. {'ynorm': [0.1, 0.6]}.",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Population (multiple selection available)",
"suggestions": "",
"help": "Cells belonging to this population (or list of populations) will be connected post-synaptically.",
"hintText": ""
},
"cellType": {
"label": "Cell type (multiple selection available)",
"suggestions": "",
"help": "Ccells with this cell type attribute/tag will be connected post-synaptically.",
"hintText": ""
},
"cellModel": {
"label": "Cell model (multiple selection available)",
"suggestions": "",
"help": "Cells with this cell model attribute/tag will be connected post-synaptically.",
"hintText": ""
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within these x-axis locations will be connected post-synaptically.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within these y-axis locations will be connected post-synaptically.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within these z-axis locations will be connected post-synaptically..",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells within these normalized x-axis locations will be connected post-synaptically.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within these normalized y-axis locations will be connected post-synaptically.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within these normalized z-axis locations will be connected post-synaptically.",
"hintText": ""
}
}
},
"sec": {
"label": "Postsynaptic neuron section",
"help": "Name of target section on the postsynaptic neuron (e.g. 'soma'). If omitted, defaults to 'soma' if exists, otherwise to first section in the cell sections list. If synsPerConn > 1, a list of sections or sectionList can be specified, and synapses will be distributed uniformly along the specified section(s), taking into account the length of each section.",
"suggestions": "",
"hintText": "soma",
"type": "list(str)"
},
"loc": {
"label": "Postsynaptic neuron location (0-1)",
"help": "Location of target synaptic mechanism (e.g. 0.3). If omitted, defaults to 0.5. Can be single value, or list (if have synsPerConn > 1) or list of lists (If have both a list of synMechs and synsPerConn > 1).",
"suggestions": "",
"hintText": "0.5",
"type": "list(float)"
},
"synMech": {
"label": "Synaptic mechanism",
"help": "Label (or list of labels) of target synaptic mechanism on the postsynaptic neuron (e.g. 'AMPA' or ['AMPA', 'NMDA']). If omitted employs first synaptic mechanism in the cell synaptic mechanisms list. If have list, a separate connection is created to each synMech; and a list of weights, delays and or locs can be provided.",
"suggestions": "",
"hintText": ""
},
"synsPerConn": {
"label": "Number of individual synaptic contacts per connection",
"help": "Number of individual synaptic contacts (synapses) per cell-to-cell connection (connection). Can be defined as a function (see Functions as strings). If omitted, defaults to 1.",
"suggestions": "",
"hintText": "",
"default": 1
},
"weight": {
"label": "Weight of synaptic connection",
"help": "Strength of synaptic connection (e.g. 0.01). Associated to a change in conductance, but has different meaning and scale depending on the synaptic mechanism and cell model. Can be defined as a function (see Functions as strings). If omitted, defaults to netParams.defaultWeight = 1.",
"suggestions": "",
"hintText": "",
"type": "func"
},
"delay": {
"label": "Connection delay (ms)",
"help": "Time (in ms) for the presynaptic spike to reach the postsynaptic neuron. Can be defined as a function (see Functions as strings). If omitted, defaults to netParams.defaultDelay = 1.",
"suggestions": "",
"hintText": "",
"type": "func"
},
"probability": {
"label": "Probability of connection (0-1)",
"help": "Probability of connection between each pre and postsynaptic cell (0 to 1). Can be a string that defines as a function, e.g. '0.1*dist_3D+uniform(0.2,0.4)' (see Documentation on 'Functions as strings'). Overrides the convergence, divergence and fromList parameters.",
"suggestions": "0.1",
"hintText": "",
"type": "func"
},
"convergence": {
"label": "Convergence",
"help": "Number of pre-synaptic cells connected to each post-synaptic cell. Can be a string that defines as a function, e.g. '2*dist_3D+uniform(2,4)' (see Documentation on 'Functions as strings'). Overrides the divergence and fromList parameters.",
"suggestions": "5",
"hintText": "",
"type": "func"
},
"divergence": {
"label": "Divergence",
"help": "Number of post-synaptic cells connected to each pre-synaptic cell. Can be a string that defines as a function, e.g. '2*dist_3D+uniform(2,4)' (see Documentation on 'Functions as strings'). Overrides the fromList parameter.",
"suggestions": "5",
"hintText": "",
"type": "func"
},
"connList": {
"label": "Explicit list of one-to-one connections",
"help": "Each connection is indicated with relative ids of cell in pre and post populations, e.g. [[0,1],[3,1]] creates a connection between pre cell 0 and post cell 1; and pre cell 3 and post cell 1. Weights, delays and locs can also be specified as a list for each of the individual cell connection. These lists can be 2D or 3D if combined with multiple synMechs and synsPerConn > 1 (the outer dimension will correspond to the connList).",
"suggestions": "",
"hintText": "list(list(float))"
},
"connFunc": {
"label": "Internal connectivity function to use (not required)",
"help": "Automatically set to probConn, convConn, divConn or fromList, when the probability, convergence, divergence or connList parameters are included, respectively. Otherwise defaults to fullConn, ie. all-to-all connectivity.",
"suggestions": "",
"hintText": ""
},
"shape": {
"label": "Weight shape",
"help": "Modifies the conn weight dynamically during the simulation based on the specified pattern. Contains a dictionary with the following fields: 'switchOnOff' - times at which to switch on and off the weight, 'pulseType' - type of pulse to generate; either 'square' or 'gaussian', 'pulsePeriod' - period (in ms) of the pulse, 'pulseWidth' - width (in ms) of the pulse.",
"suggestions": "",
"hintText": ""
},
"plasticity": {
"label": "Plasticity mechanism",
"help": "Requires 2 fields: mech to specifiy the name of the plasticity mechanism, and params containing a dictionary with the parameters of the mechanism, e.g. {'mech': 'STDP', 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 'tauhebb': 10}}.",
"suggestions": "",
"hintText": "",
"type": "dict"
}
}
},
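    # A minimal connectivity-rule sketch using the parameters documented above
    # (assumed NetPyNE specs API; the rule label and population names are hypothetical):
    #   netParams.connParams['PYR->INT'] = {
    #       'preConds': {'cellType': 'PYR'},   # presynaptic cell conditions
    #       'postConds': {'pop': 'INT'},       # postsynaptic cell conditions
    #       'probability': 0.1,                # overrides convergence/divergence/connList
    #       'weight': 0.01,                    # falls back to netParams.defaultWeight if omitted
    #       'delay': 'dist_3D/propVelocity',   # string evaluated as a function
    #       'synMech': 'AMPA'}                 # label defined in synMechParams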
# ---------------------------------------------------------------------------------------------------------------------
# netParams.stimSourceParams
# ---------------------------------------------------------------------------------------------------------------------
"stimSourceParams": {
"label": "Stimulation source parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"type": {
"label": "Point process used as stimulator",
"help": "Point process used as stimulator; allowed values: 'IClamp', 'VClamp', 'SEClamp', 'NetStim' and 'AlphaSynapse'. Note that NetStims can be added both using this method, or by creating a population of 'cellModel': 'NetStim' and adding the appropriate connections.",
"suggestions": "",
"hintText": "",
"default": "IClamp",
"type": "str"
},
"dur": {
"label": "Current clamp duration (ms)",
"help": "Duration of current clamp injection in ms",
"suggestions": "",
"hintText": "10",
"type": "float"
},
"amp": {
"label": "Current clamp amplitude (nA)",
"help": "Amplitude of current injection in nA",
"suggestions": "",
"hintText": "10",
"type": "float"
},
"del": {
"label": "Current clamp delay (ms)",
"help": "Delay (time when turned on after simulation starts) of current clamp in ms.",
"suggestions": "",
"hintText": "5",
"type": "float"
},
"vClampAmp": {
"label": "Current clamp amplitude (nA)",
"help": "Voltage clamp with three levels. Clamp is on at time 0, and off at time dur[0]+dur[1]+dur[2].",
"suggestions": "",
"hintText": "10",
"type": "list(float)"
},
"vClampDur": {
"label": "Current clamp delay (ms)",
"help": "Voltage clamp with three levels. Clamp is on at time 0, and off at time dur[0]+dur[1]+dur[2].",
"suggestions": "",
"hintText": "5",
"type": "list(float)"
},
"interval": {
"label": "Interval between spikes (ms)",
"help": "Define the mean time interval between spike.",
"suggestions": "10",
"hintText": "",
"type": "float"
},
"rate": {
"label": "Firing rate (Hz)",
"help": "Firing rate in Hz (note this is the inverse of the NetStim interval property).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"rstim": {
"label": "Voltage clamp stimulation resistance",
"help": "Voltage clamp stimulation resistance.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"gain": {
"label": "Voltage clamp amplifier gain",
"help": "Voltage clamp amplifier gain.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"number": {
"label": "Maximum number of spikes",
"help": "Maximum number of spikes generated by the NetStim.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"start": {
"label": "Start time of first spike",
"help": "Define the start time for the first spike.",
"suggestions": "0",
"hintText": "",
"type": "float"
},
"noise": {
"label": "Noise/randomness fraction (0-1)",
"help": "Fractional noise, 0 <= noise <= 1, means that an interval between spikes consists of a fixed interval of duration (1 - noise)*interval plus a negexp interval of mean duration noise*interval. Note that the most likely negexp interval has duration 0.",
"suggestions": "0.5",
"hintText": "",
"type": "float"
},
"tau1": {
"label": "Voltage clamp tau1",
"help": "Voltage clamp tau1.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"tau2": {
"label": "Voltage clamp tau2",
"help": "Voltage clamp tau2.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"i": {
"label": "Voltage clamp current (nA)",
"help": "Voltage clamp injected current in nA.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"onset": {
"label": "Alpha synapse onset time (ms)",
"help": "Alpha synapse onset time.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"tau": {
"label": "Alpha synapse time constant (ms)",
"help": "Alpha synapse time constant (ms).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"gmax": {
"label": "Alpha synapse maximum conductance",
"help": "Alpha synapse maximum conductance.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"e": {
"label": "Alpha synapse equilibrium potential",
"help": "Alpha synapse equilibrium potential.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"rs": {
"label": "Voltage clamp resistance (MOhm)",
"help": "Voltage clamp resistance (MOhm).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"vc": {
"label": "Voltage clamp reference voltage (mV)",
"help": "Voltage clamp reference voltage (mV).",
"suggestions": "",
"hintText": "",
"type": "float"
}
}
},
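    # A minimal stimulation-source sketch (assumed NetPyNE specs API; the
    # 'IClamp1' and 'NetStim1' labels are hypothetical):
    #   netParams.stimSourceParams['IClamp1'] = {
    #       'type': 'IClamp', 'del': 5, 'dur': 10, 'amp': 0.5}   # delay/duration in ms, amplitude in nA
    #   netParams.stimSourceParams['NetStim1'] = {
    #       'type': 'NetStim', 'rate': 10, 'noise': 0.5}         # 10 Hz, half-random spike train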
# ---------------------------------------------------------------------------------------------------------------------
# netParams.stimTargetParams
# ---------------------------------------------------------------------------------------------------------------------
"stimTargetParams": {
"label": "Stimulation target parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"source": {
"label": "Stimulation source",
"help": "Label of the stimulation source (e.g. 'electrode_current').",
"suggestions": "",
"hintText": ""
},
"conds": {
"label": "Conditions of cells where the stimulation will be applied",
"help": "Conditions of cells where the stimulation will be applied. Can include a field 'cellList' with the relative cell indices within the subset of cells selected (e.g. 'conds': {'cellType':'PYR', 'y':[100,200], 'cellList': [1,2,3]}).",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Target population",
"help": "Populations that will receive the stimulation e.g. {'pop': ['Exc1', 'Exc2']}",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"cellType": {
"label": "Target cell type",
"suggestions": "",
"help": "Cell types that will receive the stimulation",
"hintText": "",
"type": "str"
},
"cellModel": {
"label": "Target cell model",
"help": "Cell models that will receive the stimulation.",
"suggestions": "",
"type": "str"
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within this x-axis locations will receive stimulation",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within this y-axis locations will receive stimulation",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within this z-axis locations will receive stimulation",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells withing this normalized x-axis locations will receive stimulation",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within this normalized y-axis locations will receive stimulation",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within this normalized z-axis locations will receive stimulation",
"hintText": ""
},
"cellList": {
"label": "Target cell global indices (gids)",
"help": "Global indices (gids) of neurons to receive stimulation. ([1, 8, 12])",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
}
},
"sec": {
"label": "Target section",
"help": "Target section (default: 'soma').",
"suggestions": "",
"hintText": "",
"type": "str"
},
"loc": {
"label": "Target location",
"help": "Target location (default: 0.5). Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"synMech": {
"label": "Target synaptic mechanism",
"help": "Synaptic mechanism label to connect NetStim to. Optional; only for NetStims.",
"suggestions": "",
"hintText": ""
},
"weight": {
"label": "Weight of connection between NetStim and cell",
"help": "Weight of connection between NetStim and cell. Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
},
"delay": {
"label": "Delay of connection between NetStim and cell",
"help": "Delay of connection between NetStim and cell (default: 1). Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
},
"synsPerConn": {
"label": "Number of synaptic contacts per connection between NetStim and cell",
"help": "Number of synaptic contacts of connection between NetStim and cell (default: 1). Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
}
}
},
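    # A minimal stimulation-target sketch linking a source defined above to a subset
    # of cells (assumed NetPyNE specs API; labels are hypothetical):
    #   netParams.stimTargetParams['IClamp1->PYR'] = {
    #       'source': 'IClamp1',                          # label from stimSourceParams
    #       'conds': {'pop': 'PYR', 'cellList': [0, 1]},  # apply only to these cells
    #       'sec': 'soma', 'loc': 0.5}                    # injection site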
# ---------------------------------------------------------------------------------------------------------------------
# netParams.importCellParams
# ---------------------------------------------------------------------------------------------------------------------
"importCellParams": {
"label": "Import cell from .hoc or .py templates",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"fileName": {
"label": "Absolute path to file",
"help": "Absolute path to .hoc or .py template file.",
"suggestions": "",
"hintText": "",
"type": "str"
},
"cellName": {
"label": "Cell template/class name",
"help": "Template or class name defined inside the .hoc or .py file",
"suggestions": "",
"hintText": "",
"type": "str"
},
"label": {
"label": "Cell rule label",
"help": "Give a name to this cell rule.",
"suggestions": "",
"hintText": "",
"type": "str"
},
"importSynMechs": {
"label": "Import synaptic mechanisms",
"help": "If true, synaptic mechanisms will also be imported from the file. (default: False)",
"suggestions": "",
"hintText": "",
"type": "bool"
},
"compileMod": {
"label": "Compile mod files",
"help": "If true, mod files will be compiled before importing the cell. (default: false)",
"suggestions": "",
"hintText": "",
"type": "bool"
},
"modFolder": {
"label": "Path to mod folder",
"help": "Define the absolute path to the folder containing the mod files.",
"suggestions": "",
"hintText": "",
"type": "str"
},
}
}
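    # A minimal import sketch (assumed NetPyNE specs API; the file, template and
    # rule names are hypothetical):
    #   netParams.importCellParams(
    #       label='PYR_rule',              # name of the new cell rule
    #       conds={'cellType': 'PYR'},     # cells this rule applies to
    #       fileName='cells/pyr.hoc',      # .hoc or .py template file
    #       cellName='PYRcell',            # template/class defined in the file
    #       importSynMechs=False)          # skip importing synaptic mechanisms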
}
},
# ---------------------------------------------------------------------------------------------------------------------
# simConfig
# ---------------------------------------------------------------------------------------------------------------------
"simConfig": {
"label": "Simulation Configuration",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"simLabel": {
"label": "Simulation label",
"help": "Choose a label for this simulation",
"suggestions": "",
"type": "str"
},
"duration": {
"label": "Duration (ms)",
"help": "Simulation duration in ms (default: 1000)",
"suggestions": "",
"default": 1000,
"type": "float"
},
"dt": {
"label": "Time step, dt",
"help": "Simulation time step in ms (default: 0.1)",
"suggestions": "",
"default": 0.025,
"type": "float"
},
"seeds": {
"label": "Randomizer seeds",
"help": "Dictionary with random seeds for connectivity, input stimulation, and cell locations (default: {'conn': 1, 'stim': 1, 'loc': 1}).",
"suggestions": "",
"type": "dict"
},
"addSynMechs": {
"label": "Add synaptic mechanisms",
"help": "Whether to add synaptic mechanisms or not (default: True).",
"suggestions": "",
"type": "bool"
},
"includeParamsLabel": {
"label": "Include parameter rule label",
"help": "Include label of parameters rule that created that cell, conn or stim (default: True).",
"suggestions": "",
"type": "bool"
},
"timing": {
"label": "Show timing",
"help": "Show and record timing of each process (default: True).",
"suggestions": "",
"type": "bool"
},
"verbose": {
"label": "Verbose mode",
"help": "Show detailed messages (default: False).",
"suggestions": "",
"type": "bool"
},
"saveFolder": {
"label": "Output folder",
"help": "Path where to save output data (default: '')",
"suggestions": "",
"type": "str"
},
"filename": {
"label": "Output file name",
"help": "Name of file to save model output (default: 'model_output')",
"suggestions": "",
"default": "model_output",
"type": "str"
},
"saveDataInclude": {
"label": "Data to include in output file",
"help": "Data structures to save to file (default: ['netParams', 'netCells', 'netPops', 'simConfig', 'simData'])",
"suggestions": "",
"type": "list(str)"
},
"timestampFilename": {
"label": "Add timestamp to file name",
"help": "Add timestamp to filename to avoid overwriting (default: False)",
"suggestions": "",
"type": "bool"
},
"savePickle": {
"label": "Save as Pickle",
"help": "Save data to pickle file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveJson": {
"label": "Save as JSON",
"help": "Save dat to json file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveMat": {
"label": "Save as MAT",
"help": "Save data to mat file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveHDF5": {
"label": "Save as HDF5",
"help": "Save data to save to HDF5 file (under development) (default: False).",
"suggestions": "",
"type": "bool"
},
"saveDpk": {
"label": "Save as DPK",
"help": "Save data to .dpk pickled file (default: False).",
"suggestions": "",
"type": "bool"
},
"checkErrors": {
"label": "Check parameter errors",
"help": "check for errors (default: False).",
"suggestions": "",
"type": "bool"
},
"checkErrorsVerbose": {
"label": "Check parameter errors verbose mode",
"help": "check errors vervose (default: False)",
"suggestions": "",
"type": "bool"
},
"backupCfgFile": {
"label": "Copy simulation configuration file to this folder:",
"help": "Copy cfg file to folder, eg. ['cfg.py', 'backupcfg/'] (default: []).",
"suggestions": "",
"type": "list(str)"
},
"recordCells": {
"label": "Cells to record traces from",
"help": "List of cells from which to record traces. Can include cell gids (e.g. 5), population labels (e.g. 'S' to record from one cell of the 'S' population), or 'all', to record from all cells. NOTE: All cells selected in the include argument of simConfig.analysis['plotTraces'] will be automatically included in recordCells. (default: []).",
"suggestions": "",
"type": "list(float)"
},
"recordTraces": {
"label": "Traces to record from cells",
"help": "Dict of traces to record (default: {} ; example: {'V_soma': {'sec':'soma','loc':0.5,'var':'v'} }).",
"suggestions": "",
"type": "dict(dict)",
"default": "{\"V_soma\": {\"sec\": \"soma\", \"loc\": 0.5, \"var\": \"v\"}}"
},
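        # A minimal trace-recording sketch (assumed NetPyNE specs API; the 'PYR'
        # population label is hypothetical):
        #   simConfig = specs.SimConfig()
        #   simConfig.recordTraces = {
        #       'V_soma': {'sec': 'soma', 'loc': 0.5, 'var': 'v'}}  # somatic membrane voltage
        #   simConfig.recordCells = [0, 'PYR']  # gid 0 plus one cell of the 'PYR' pop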
"saveCSV": {
"label": "Save as CSV",
"help": "save cvs file (under development) (default: False)",
"suggestions": "",
"type": "bool"
},
"saveDat": {
"label": "Save as DAT ",
"help": "save .dat file (default: False)",
"suggestions": "",
"type": "bool"
},
"saveCellSecs": {
"label": "Store cell sections after simulation",
"help": "Save cell sections after gathering data from nodes post simulation; set to False to reduce memory required (default: True)",
"suggestions": "",
"type": "bool"
},
"saveCellConns": {
"label": "Store cell connections after simulation",
"help": "Save cell connections after gathering data from nodes post simulation; set to False to reduce memory required (default: True)",
"suggestions": "",
"type": "bool"
},
"recordStim": {
"label": "Record spikes of artificial stimulators (NetStims and VecStims)",
"help": "Record spikes of NetStims and VecStims (default: False).",
"suggestions": "",
"type": "bool"
},
"recordLFP": {
"label": "Record LFP electrode locations",
"help": "3D locations of local field potential (LFP) electrodes, e.g. [[50, 100, 50], [50, 200]] (default: False).",
"suggestions": "",
"type": "list(list(float))"
},
"saveLFPCells": {
"label": "Store LFP of individual cells",
"help": "Store LFP generated individually by each cell in sim.allSimData['LFPCells'].",
"suggestions": "",
"type": "bool"
},
"recordStep": {
"label": "Time step for data recording (ms)",
"help": "Step size in ms for data recording (default: 0.1).",
"suggestions": "",
"default": 0.1,
"type": "float"
},
"printRunTime": {
"label": "Interval to print run time at (s)",
"help": "Print run time at interval (in sec) specified here (eg. 0.1) (default: False).",
"suggestions": "",
"type": "float"
},
"printSynsAfterRule": {
"label": "Print total connections",
"help": "Print total connections after each conn rule is applied.",
"suggestions": "",
"type": "bool"
},
"printPopAvgRates": {
"label": "Print population average firing rates",
"help": "Print population avg firing rates after run (default: False).",
"suggestions": "",
"type": "bool"
},
"connRandomSecFromList": {
"label": "Select random sections from list for connection",
"help": "Select random section (and location) from list even when synsPerConn=1 (default: True).",
"suggestions": "",
"type": "bool"
},
"compactConnFormat": {
"label": "Use compact connection format (list instead of dicT)",
"help": "Replace dict format with compact list format for conns (need to provide list of keys to include) (default: False).",
"suggestions": "",
"type": "bool"
},
"gatherOnlySimData": {
"label": "Gather only simulation output data",
"help": "Omits gathering of net and cell data thus reducing gatherData time (default: False).",
"suggestions": "",
"type": "bool"
},
"createPyStruct": {
"label": "Create Python structure",
"help": "Create Python structure (simulator-independent) when instantiating network (default: True).",
"suggestions": "",
"type": "bool"
},
"createNEURONObj": {
"label": "Create NEURON objects",
"help": "Create runnable network in NEURON when instantiating netpyne network metadata (default: True).",
"suggestions": "",
"type": "bool"
},
"cvode_active": {
"label": "use CVode",
"help": "Use CVode variable time step (default: False).",
"suggestions": "",
"type": "bool"
},
"cache_efficient": {
"label": "use CVode cache_efficient",
"help": "Use CVode cache_efficient option to optimize load when running on many cores (default: False).",
"suggestions": "",
"type": "bool"
},
"hParams": {
"label": "Set global parameters (temperature, initial voltage, etc)",
"help": "Dictionary with parameters of h module (default: {'celsius': 6.3, 'v_init': -65.0, 'clamp_resist': 0.001}).",
"suggestions": "",
"type": "dict"
},
"saveTxt": {
"label": "Save as TXT",
"help": "Save data to txt file (under development) (default: False)",
"suggestions": "",
"type": "bool"
},
"saveTiming": {
"label": "Save timing data to file",
"help": " Save timing data to pickle file (default: False).",
"suggestions": "",
"type": "bool"
},
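        # A minimal save/run configuration sketch using the options above (assumed
        # NetPyNE API; folder and file names are hypothetical):
        #   simConfig.duration = 1000           # ms
        #   simConfig.dt = 0.025                # ms
        #   simConfig.saveFolder = 'output'
        #   simConfig.savePickle = True         # write output data as pickle
        #   from netpyne import sim
        #   sim.createSimulateAnalyze(netParams=netParams, simConfig=simConfig)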
# ---------------------------------------------------------------------------------------------------------------------
# simConfig.analysis
# ---------------------------------------------------------------------------------------------------------------------
"analysis": {
"label": "Analysis",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"plotRaster": {
"label": "Raster plot",
"suggestions": "",
"help": "Plot raster (spikes over time) of network cells.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "str"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"maxSpikes": {
"label": "Maximum number of spikes to plot",
"suggestions": "",
"help": "maximum number of spikes that will be plotted (int).",
"hintText": "",
"type": "float"
},
"orderBy": {
"label": "Order by",
"suggestions": "",
"help": "Unique numeric cell property to order y-axis by, e.g. 'gid', 'ynorm', 'y' ('gid'|'y'|'ynorm'|...)",
"hintText": "",
"options": [
"gid",
"y",
"ynorm"
],
"type": "str"
},
"orderInverse": {
"label": "Invert y-axis",
"suggestions": "",
"help": "Invert the y-axis order (True|False)",
"hintText": "",
"type": "bool"
},
"labels": {
"label": "Population labels",
"suggestions": "",
"help": "Show population labels in a legend or overlayed on one side of raster ('legend'|'overlay'))",
"hintText": "",
"type": "str"
},
"popRates": {
"label": "Include population rates",
"suggestions": "",
"help": "Include population rates ('legend'|'overlay')",
"hintText": "",
"options": [
"legend",
"overlay"
],
"type": "str"
},
"spikeHist": {
"label": "Overlay spike histogram",
"suggestions": "",
"help": "overlay line over raster showing spike histogram (spikes/bin) (None|'overlay'|'subplot')",
"hintText": "",
"options": [
"None",
"overlay",
"subplot"
],
"type": "str"
},
"spikeHistBin": {
"label": "Bin size for histogram",
"suggestions": "",
"help": "Size of bin in ms to use for histogram (int)",
"hintText": "",
"type": "float"
},
"syncLines": {
"label": "Synchronization lines",
"suggestions": "",
"help": "calculate synchorny measure and plot vertical lines for each spike to evidence synchrony (True|False)",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": "str"
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
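                # A minimal raster-plot configuration sketch using the options above
                # (assumed NetPyNE API; the output file name is hypothetical):
                #   simConfig.analysis['plotRaster'] = {
                #       'include': ['all'],       # cells to plot
                #       'orderBy': 'y',           # order y-axis by cortical depth
                #       'popRates': 'legend',     # show population rates in legend
                #       'saveFig': 'raster.png'}  # write figure to file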
"plotSpikeHist": {
"label": "Plot Spike Histogram",
"suggestions": "",
"help": "Plot spike histogram.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "bin size for histogram",
"suggestions": "",
"help": "Size of bin in ms to use for histogram (int)",
"hintText": "",
"type": "int"
},
"overlay": {
"label": "show overlay",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False)",
"hintText": "",
"type": "bool"
},
"graphType": {
"label": "type of Graph",
"suggestions": "",
"help": " Type of graph to use (line graph or bar plot) ('line'|'bar')",
"hintText": "",
"options": [
"line",
"bar"
],
"type": "str"
},
"yaxis": {
"label": "axis units",
"suggestions": "",
"help": "Units of y axis (firing rate in Hz, or spike count) ('rate'|'count')",
"hintText": "",
"options": [
"rate",
"count"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotRatePSD": {
"label": "Plot Rate PSD",
"suggestions": "",
"help": "Plot spikes power spectral density (PSD).",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "Bin size",
"suggestions": "",
"help": "Size of bin in ms to use (int)",
"hintText": "",
"type": "float"
},
"maxFreq": {
"label": "maximum frequency",
"suggestions": "",
"help": " Maximum frequency to show in plot (float).",
"hintText": "",
"type": "float"
},
"NFFT": {
"label": "Number of point",
"suggestions": "",
"help": "The number of data points used in each block for the FFT (power of 2)",
"hintText": "",
"type": "float"
},
"noverlap": {
"label": "Number of overlap points",
"suggestions": "",
"help": "Number of points of overlap between segments (< nperseg).",
"hintText": "",
"type": "float"
},
"smooth": {
"label": "Window size",
"suggestions": "",
"help": "Window size for smoothing; no smoothing if 0.",
"hintText": "",
"type": "float"
},
"overlay": {
"label": "Overlay data",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotSpikeStats": {
"label": "Plot Spike Statistics",
"suggestions": "",
"help": "Plot spike histogram.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"graphType": {
"label": "type of graph",
"suggestions": "",
"help": "Type of graph to use ('boxplot').",
"hintText": "",
"options": [
"boxplot"
],
"type": "str"
},
"stats": {
"label": "meassure type to calculate stats",
"suggestions": "",
"help": "List of types measure to calculate stats over: cell firing rates, interspike interval coefficient of variation (ISI CV), pairwise synchrony, and/or overall synchrony (sync measures calculated using PySpike SPIKE-Synchrony measure) (['rate', |'isicv'| 'pairsync' |'sync'|]).",
"hintText": "",
"options": [
"rate",
"isicv",
"pairsync",
"sync"
],
"type": "str"
},
"popColors": {
"label": "color for each population",
"suggestions": "",
"help": "Dictionary with color (value) used for each population/key.",
"hintText": "",
"type": "dict"
},
"figSize": {
"label": "figure size",
"suggestions": "",
"help": "Size of figure ((width, height)).",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotTraces": {
"label": "Plot Traces",
"suggestions": "",
"help": "Plot recorded traces (specified in simConfig.recordTraces).",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list(float)"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range for shown Traces ; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"overlay": {
"label": "overlay data",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False).",
"hintText": "",
"type": "bool"
},
"oneFigPer": {
"label": "plot one figure per cell/trace",
"suggestions": "",
"help": "Whether to plot one figure per cell or per trace (showing multiple cells) ('cell'|'trace').",
"hintText": "",
"options": [
"cell",
"traces"
],
"type": "str"
},
"rerun": {
"label": "re-run simulation",
"suggestions": "",
"help": "rerun simulation so new set of cells gets recorded (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotLFP": {
"label": "Plot LFP",
"suggestions": "",
"help": "Plot LFP / extracellular electrode recordings (time-resolved, power spectral density, time-frequency and 3D locations).",
"hintText": "",
"children": {
"electrodes": {
"label": "electrode to show",
"suggestions": "",
"help": " List of electrodes to include; 'avg'=avg of all electrodes; 'all'=each electrode separately (['avg', 'all', 0, 1, ...]).",
"hintText": "",
"type": "list"
},
"plots": {
"label": "Select plot types to show (multiple selection available)",
"suggestions": "",
"help": "list of plot types to show (['timeSeries', 'PSD', 'timeFreq', 'locations']).",
"hintText": "",
"options": [
"timeSeries",
"PSD",
"spectrogram",
"locations"
],
"type": "str"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range for shown Traces ; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"NFFT": {
"label": "NFFT",
"suggestions": "",
"help": "The number of data points used in each block for the FFT (power of 2) (float)",
"hintText": "",
"type": "float"
},
"noverlap": {
"label": "Overlap",
"suggestions": "",
"help": "Number of points of overlap between segments (int, < nperseg).",
"hintText": "",
"type": "float"
},
"maxFreq": {
"label": "Maximum Frequency",
"suggestions": "",
"help": "Maximum frequency shown in plot for PSD and time-freq (float).",
"hintText": "",
"type": "float"
},
"nperseg": {
"label": "Segment length (nperseg)",
"suggestions": "",
"help": "Length of each segment for time-freq (int).",
"hintText": "",
"type": "float"
},
"smooth": {
"label": "Window size",
"suggestions": "",
"help": "Window size for smoothing; no smoothing if 0 (int).",
"hintText": "",
"type": "float"
},
"separation": {
"label": "Separation factor",
"suggestions": "",
"help": "Separation factor between time-resolved LFP plots; multiplied by max LFP value (float).",
"hintText": "",
"type": "float"
},
"includeAxon": {
"label": "Include axon",
"suggestions": "",
"help": "Whether to show the axon in the location plot (boolean).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
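                # A minimal LFP recording/plotting sketch (assumed NetPyNE API; electrode
                # coordinates are hypothetical):
                #   simConfig.recordLFP = [[50, 100, 50], [50, 200, 50]]  # two electrodes (um)
                #   simConfig.analysis['plotLFP'] = {
                #       'electrodes': ['all'],           # plot each electrode separately
                #       'plots': ['timeSeries', 'PSD'],  # subset of available plot types
                #       'maxFreq': 100}                  # Hz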
"plotShape": {
"label": "Plot Shape",
"suggestions": "",
"help": "",
"hintText": "Plot 3D cell shape using Matplotlib or NEURON Interviews PlotShape.",
"children": {
"includePre": {
"label": "population (or cell by index) to presyn",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"includePost": {
"label": "population (or cell by index) to postsyn",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"synStyle": {
"label": "synaptic marker style",
"suggestions": "",
"help": "Style of marker to show synapses (Matplotlib markers).",
"hintText": "",
"type": "str"
},
"dist": {
"label": "3D distance",
"suggestions": "",
"help": "3D distance (like zoom).",
"hintText": "",
"type": "float"
},
"synSize": {
"label": "synapses marker size",
"suggestions": "",
"help": "Size of marker to show synapses.",
"hintText": "",
"type": "float"
},
"cvar": {
"label": "variable to represent in shape plot",
"suggestions": "",
"help": "Variable to represent in shape plot ('numSyns'|'weightNorm').",
"hintText": "",
"options": [
"numSyns",
"weightNorm"
],
"type": "str"
},
"cvals": {
"label": "value to represent in shape plot",
"suggestions": "",
"help": "List of values to represent in shape plot; must be same as num segments (list of size num segments; ).",
"hintText": "",
"type": "list(float)"
},
"iv": {
"label": "use NEURON iv",
"suggestions": "",
"help": "Use NEURON Interviews (instead of matplotlib) to show shape plot (True|False).",
"hintText": "",
"type": "bool"
},
"ivprops": {
"label": "properties for iv",
"suggestions": "",
"help": "Dict of properties to plot using Interviews (dict).",
"hintText": "",
"type": "dict"
},
"showSyns": {
"label": "show synaptic connections in 3D",
"suggestions": "",
"help": "Show synaptic connections in 3D (True|False).",
"hintText": "",
"type": "bool"
},
"bkgColor": {
"label": "background color",
"suggestions": "",
"help": "RGBA list/tuple with bakcground color eg. (0.5, 0.2, 0.1, 1.0) (list/tuple with 4 floats).",
"hintText": "",
"type": "list(float)"
},
"showElectrodes": {
"label": "show electrodes",
"suggestions": "",
"help": "Show electrodes in 3D (True|False).",
"hintText": "",
"type": "bool"
},
"includeAxon": {
"label": "include Axon in shape plot",
"suggestions": "",
"help": "Include axon in shape plot (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plot2Dnet": {
"label": "Plot 2D net",
"suggestions": "",
"help": "Plot 2D representation of network cell positions and connections.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to show (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"showConns": {
"label": "show connections",
"suggestions": "",
"help": "Whether to show connections or not (True|False).",
"hintText": "",
"type": "bool"
},
"view": {
"label": "perspective view",
"suggestions": "",
"help": "Perspective view, either front ('xy') or top-down ('xz').",
"hintText": "",
"options": [
"xy",
"xz"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotConn": {
"label": "Plot Connectivity",
"suggestions": "",
"help": "Plot network connectivity.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to show (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"feature": {
"label": "feature to show",
"suggestions": "",
"help": "Feature to show in connectivity matrix; the only features applicable to groupBy='cell' are 'weight', 'delay' and 'numConns'; 'strength' = weight * probability ('weight'|'delay'|'numConns'|'probability'|'strength'|'convergence'|'divergence')g.",
"hintText": "",
"options": [
"weight",
"delay",
"numConns",
"probability",
"strength",
"convergency",
"divergency"
],
"type": "str"
},
"groupBy": {
"label": "group by",
"suggestions": "",
"help": "Show matrix for individual cells or populations ('pop'|'cell').",
"hintText": "",
"options": [
"pop",
"cell"
],
"type": "str"
},
"orderBy": {
"label": "order by",
"suggestions": "",
"help": "Unique numeric cell property to order x and y axes by, e.g. 'gid', 'ynorm', 'y' (requires groupBy='cells') ('gid'|'y'|'ynorm'|...).",
"hintText": "",
"options": [
"gid",
"y",
"ynorm"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"granger": {
"label": "Granger",
"suggestions": "",
"help": "Calculate and optionally plot Granger Causality.",
"hintText": "",
"children": {
"cells1": {
"label": "population (or cell by index) to subset 1",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 1 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"cells2": {
"label": "population (or cell by index cell) to subset 2",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 2 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"spks1": {
"label": "spike times to train 1",
"suggestions": "",
"help": "Spike train 1; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list"
},
"spks2": {
"label": "spike times to train 2",
"suggestions": "",
"help": "Spike train 2; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Range of time to calculate nTE in ms ([min, max]).",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "bin size",
"suggestions": "",
"help": "Bin size used to convert spike times into histogram (int).",
"hintText": "",
"type": "float"
},
"label1": {
"label": "label for train 1",
"suggestions": "",
"help": "Label for spike train 1 to use in plot (string).",
"hintText": "",
"type": "str"
},
"label2": {
"label": "label for train 2",
"suggestions": "",
"help": "Label for spike train 2 to use in plot (string).",
"hintText": "",
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
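                # A minimal Granger-causality configuration sketch, mirroring the analysis
                # entries above (assumed usage; population labels are hypothetical):
                #   simConfig.analysis['granger'] = {
                #       'cells1': ['PYR'], 'cells2': ['INT'],  # spike-train subsets
                #       'binSize': 5,                          # ms
                #       'label1': 'PYR', 'label2': 'INT'}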
"nTE": {
"label": "Normalize Transfer Entropy",
"suggestions": "",
"help": "Calculate normalized transfer entropy.",
"hintText": "",
"children": {
"cell1": {
"label": "Cell Subset 1",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 1 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"cell2": {
"label": "Cell Subset 2",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 2 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"spks1": {
"label": "Spike train 1",
"suggestions": "",
"help": "Spike train 1; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list(float)"
},
"spks2": {
"label": "Spike train 2",
"suggestions": "",
"help": "Spike train 2; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list(float)"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Range of time to calculate nTE in ms ([min, max]).",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "Bin size",
"suggestions": "",
"help": "Bin size used to convert spike times into histogram (int).",
"hintText": "",
"type": "float"
},
"numShuffle": {
"label": "Number of Shuffles",
"suggestions": "",
"help": "Number of times to shuffle spike train 1 to calculate TEshuffled; note: nTE = (TE - TEShuffled)/H(X2F|X2P) (int).",
"hintText": "",
"type": "float"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
}
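                # A minimal normalized-transfer-entropy configuration sketch, mirroring the
                # analysis entries above (assumed usage; population labels are hypothetical):
                #   simConfig.analysis['nTE'] = {
                #       'cell1': ['PYR'], 'cell2': ['INT'],  # spike-train subsets
                #       'binSize': 20,                       # ms
                #       'numShuffle': 30}                    # shuffles for the TEshuffled baseline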
}
}
}
}
}
| 53.659082
| 467
| 0.323153
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
metadata = {
"netParams": {
"label": "Network Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"popParams": {
"label": "Population Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"cellType": {
"label": "Cell type",
"suggestions": "",
"help": "Arbitrary cell type attribute/tag assigned to all cells in this population; can be used as condition to apply specific cell properties. e.g. 'Pyr' (for pyramidal neurons) or 'FS' (for fast-spiking interneurons)",
"hintText": "",
"type": "str"
},
"numCells": {
"label": "Number of cells",
"suggestions": "",
"help": "The total number of cells in this population.",
"hintText": "number of cells",
"type": "int"
},
"density": {
"label": "Cell density (neurons/mm^3)",
"suggestions": "",
"help": "The cell density in neurons/mm3. The volume occupied by each population can be customized (see xRange, yRange and zRange); otherwise the full network volume will be used (defined in netParams: sizeX, sizeY, sizeZ). density can be expressed as a function of normalized location (xnorm, ynorm or znorm), by providing a string with the variable and any common Python mathematical operators/functions. e.g. '1e5 * exp(-ynorm/2)'. ",
"hintText": "density in neurons/mm3",
"type": "str"
},
"gridSpacing": {
"label": "Grid spacing (um)",
"suggestions": "",
"help": "Fixed grid spacing between cells (in um). Cells will be placed in a grid, with the total number of cells be determined based on spacing and sizeX, sizeY, sizeZ. e.g. a spacing of 20 with sizeX=sizeY=sizeZ=100 will lead to 5*5*5=125 cells.",
"hintText": "fixed grid spacing",
"type": "int"
},
"cellModel": {
"label": "Cell model",
"help": "Can be either 1) an arbitrary cell model attribute/tag assigned to all cells in this population, and used later as a condition to apply specific cell properties. e.g. 'HH' (standard Hodkgin-Huxley type cell model) or 'Izhi2007' (Izhikevich point neuron model), 2) a point process artificial cell, with its parameters defined directly in this population entry, i.e. no need for cell propoerties (e.g. 'NetStim', VecStim', 'IntFire1')",
"suggestions": [
"VecStim",
"NetStim",
"IntFire1"
],
"type": "str"
},
"xRange": {
"label": "X-axis range (um)",
"help": "Range of neuron positions in x-axis (horizontal length), specified as a 2-element list [min, max] using absolute values in um (e.g.[100, 200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"xnormRange": {
"label": "X-axis normalized range (0-1)",
"help": "Range of neuron positions in x-axis (horizontal length), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeX (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"default": [
0,
1
],
"type": "list(float)"
},
"yRange": {
"label": "Y-axis range (um)",
"help": "Range of neuron positions in y-axis (vertical height=cortical depth), specified as 2-element list [min, max] using absolute values in um (e.g.[100,200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"ynormRange": {
"label": "Y-axis normalized range (0-1)",
"help": "Range of neuron positions in y-axis (vertical height=cortical depth), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeY (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"zRange": {
"label": "Z-axis range (um)",
"help": "Range of neuron positions in z-axis (horizontal depth), specified as a 2-element list [min, max] using absolute value in um (e.g.[100,200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"znormRange": {
"label": "Z-axis normalized range (0-1)",
"help": "Range of neuron positions in z-axis (horizontal depth), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeZ (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"interval": {
"label": "Spike interval (ms)",
"help": "Spike interval in ms.",
"suggestions": "",
"hintText": "50",
"type": "float"
},
"rate": {
"label": "Firing rate (Hz)",
"help": "Firing rate in Hz (note this is the inverse of the NetStim interval property).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"noise": {
"label": "Noise fraction (0-1)",
"help": "Fraction of noise in NetStim (0 = deterministic; 1 = completely random).",
"suggestions": "",
"hintText": "0.5",
"type": "list(float)"
},
"start": {
"label": "Start time (ms)",
"help": "Time of first spike in ms (default = 0).",
"suggestions": "",
"hintText": "0",
"type": "list(float)"
},
"number": {
"label": "Max number of spikes",
"help": "Max number of spikes generated (default = 1e12).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"seed": {
"label": "Randomizer seed (optional)",
"help": " Seed for randomizer (optional; defaults to value set in simConfig.seeds['stim'])",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"spkTimes": {
"label": "Spike times",
"help": "List of spike times (only for 'VecStim') e.g. [1, 10, 40, 50], range(1,500,10), or any variable containing a Python list.",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"pulses": {
"label": "Pulses",
"help": "List of spiking pulses (only for 'VecStim'); each item includes the start (ms), end (ms), rate (Hz), and noise (0 to 1) pulse parameters. ",
"suggestions": "",
"hintText": "",
"type": "list(float)"
}
}
},
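        # A minimal population sketch using the parameters above (assumed NetPyNE
        # specs API; population labels are hypothetical):
        #   netParams.popParams['PYR'] = {
        #       'cellType': 'PYR', 'numCells': 20,
        #       'ynormRange': [0.2, 0.5]}        # restrict to a normalized depth band
        #   netParams.popParams['background'] = {
        #       'cellModel': 'NetStim', 'rate': 10, 'noise': 0.5}  # artificial spike source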
"scale": {
"label": "scale factor",
"help": "Scale factor multiplier for number of cells (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"shape": {
"label": "network shape",
"help": "Shape of network: 'cuboid', 'cylinder' or 'ellipsoid' (default: 'cuboid')",
"suggestions": "",
"hintText": "",
"options": [
"cuboid",
"cylinder",
"ellipsoid"
],
"default": "cuboid",
"type": "str"
},
"sizeX": {
"label": "x-dimension",
"help": "x-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"sizeY": {
"label": "y-dimension",
"help": "y-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"sizeZ": {
"label": "z-dimension",
"help": "z-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"rotateCellsRandomly": {
"label": "random rotation",
"help": "Random rotation of cells around y-axis [min,max] radians, e.g. [0, 3.0] (default: False)",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"defaultWeight": {
"label": "default weight connection",
"help": "Default connection weight (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"defaultDelay": {
"label": "default delay",
"help": "Default connection delay, in ms (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"propVelocity": {
"label": "conduction velocity",
"help": "Conduction velocity in um/ms (e.g. 500 um/ms = 0.5 m/s) (default: 500)",
"suggestions": "",
"hintText": "",
"default": 500,
"type": "float"
},
"scaleConnWeight": {
"label": "connection weight scale factor",
"help": "Connection weight scale factor (excludes NetStims) (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"scaleConnWeightNetStims": {
"label": "connection weight scale factor for NetStims",
"help": "Connection weight scale factor for NetStims (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"scaleConnWeightModels": {
"label": "Connection weight scale factor for each cell model",
"help": "Connection weight scale factor for each cell model, e.g. {'HH': 0.1, 'Izhi': 0.2} (default: {})",
"suggestions": "",
"hintText": "",
"type": "dict"
},
"popTagsCopiedToCells": {
"label": "",
"help": "List of tags that will be copied from the population to the cells (default: ['pop', 'cellModel', 'cellType'])}",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
# ---------------------------------------------------------------------------------------------------------------------
# netParams.cellParams
# ---------------------------------------------------------------------------------------------------------------------
"cellParams": {
"label": "Cell Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"conds": {
"label": "Conds",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"pop": {
"label": "Population",
"help": "Apply the cell rule only to cells belonging to this population (or list of populations).",
"suggestions": "",
"hintText": "",
"type": "list(str)"
},
"cellType": {
"label": "Cell type",
"suggestions": "",
"help": "Apply the cell rule only to cells with this cell type attribute/tag.",
"hintText": "",
"type": "list(str)"
},
"cellModel": {
"label": "Cell model",
"suggestions": "",
"help": "Apply the cell rule only to cells with this cell model attribute/tag.",
"hintText": "",
"type": "list(str)"
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these x-axis locations.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these y-axis locations.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these z-axis locations.",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized x-axis locations.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized y-axis locations.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized z-axis locations.",
"hintText": ""
}
}
},
"secs": {
"label": "Sections",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"geom": {
"label": "Cell geometry",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"diam": {
"label": "Diameter (um)",
"default": 10,
"suggestions": "",
"help": "",
"hintText": "10",
"type": "float"
},
"L": {
"label": "Length (um)",
"default": 50,
"suggestions": "",
"help": "",
"hintText": "50",
"type": "float"
},
"Ra": {
"label": "Axial resistance, Ra (ohm-cm)",
"default": 100,
"suggestions": "",
"help": "",
"hintText": "100",
"type": "float"
},
"cm": {
"label": "Membrane capacitance, cm (uF/cm2)",
"suggestions": "",
"help": "",
"hintText": "1",
"type": "float"
},
"pt3d": {
"label": "3D points",
"suggestions": "",
"help": "",
"hintText": "",
"type": "list(list(float))"
},
"nseg": {
"label": "Number of segments, nseg",
"default": 1,
"suggestions": "",
"help": "",
"hintText": "1",
"type": "float"
}
}
},
"mechs": {
"label": "Mechanisms",
"help": "Dictionary of density/distributed mechanisms, including the name of the mechanism (e.g. hh or pas) and a list of properties of the mechanism (e.g. {'g': 0.003, 'e': -70}).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"ions": {
"label": "Ions",
"help": "Dictionary of ions, including the name of the ion (e.g. hh or pas) and a list of properties of the ion (e.g. {'e': -70}).",
"suggestions": "",
"hintText": ""
},
"pointps": {
"label": "Point processes",
"help": "Dictionary of point processes (excluding synaptic mechanisms). The key contains an arbitrary label (e.g. 'Izhi') The value contains a dictionary with the point process properties (e.g. {'mod':'Izhi2007a', 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}).",
"suggestions": "",
"hintText": "",
"children": {
"mod": {
"label": "Point process name",
"help": "The name of the NEURON mechanism, e.g. 'Izhi2007a'",
"suggestions": "",
"hintText": "",
"type": "float"
},
"loc": {
"label": "Location (0-1)",
"help": "Section location where to place synaptic mechanism, e.g. 1.0, default=0.5.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"vref": {
"label": "Point process variable for voltage (optional)",
"help": "Internal mechanism variable containing the cell membrane voltage, e.g. 'V'.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"synList": {
"label": "Point process list of synapses (optional)",
"help": "list of internal mechanism synaptic mechanism labels, e.g. ['AMPA', 'NMDA', 'GABAB'].",
"suggestions": "",
"hintText": "",
"type": "float"
}
}
},
"vinit": {
"label": "Initial membrance voltage, vinit (mV)",
"help": "(optional) Initial membrane voltage (in mV) of the section (default: -65).e.g. cellRule['secs']['soma']['vinit'] = -72",
"suggestions": "",
"hintText": ""
},
"spikeGenLoc": {
"label": "Spike generation location (0-1)",
"help": "(optional) Indicates that this section is responsible for spike generation (instead of the default 'soma'), and provides the location (segment) where spikes are generated.e.g. cellRule['secs']['axon']['spikeGenLoc'] = 1.0.",
"suggestions": "",
"hintText": ""
},
"threshold": {
"label": "Spike threshold voltage (mV)",
"help": "(optional) Threshold voltage (in mV) used to detect a spike originating in this section of the cell. If omitted, defaults to netParams.defaultThreshold = 10.0.e.g. cellRule['secs']['soma']['threshold'] = 5.0.",
"suggestions": "",
"hintText": ""
},
"secLists": {
"label": "Section lists (optional) ",
"help": "Dictionary of sections lists (e.g. {'all': ['soma', 'dend']})",
"suggestions": "",
"hintText": ""
},
"topol": {
"label": "Topology",
"help": "Topological properties, including parentSec (label of parent section), parentX (parent location where to make connection) and childX (current section child location where to make connection).",
"suggestions": "",
"hintText": "",
"children": {
"parentSec": {
"label": "Parent Section",
"suggestions": [
"soma"
],
"help": "label of parent section",
"hintText": "soma",
"type": "str"
},
"parentX": {
"label": "Parent connection location",
"suggestions": [
0,
1
],
"help": "Parent location where to make connection",
"hintText": "1",
"type": "float"
},
"childX": {
"label": "Child connection location",
"suggestions": [
0,
1
],
"help": "Current section child location where to make connection",
"hintText": "1",
"type": "float"
}
}
}
}
}
}
},
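# Illustrative sketch: a minimal netParams.cellParams rule combining the
# 'conds' and 'secs' fields above (geom, mechs, topol). Assumes the standard
# NetPyNE specs API; the labels 'PYRrule' and 'PYR' are hypothetical.
#   netParams.cellParams['PYRrule'] = {
#       'conds': {'cellType': 'PYR'},
#       'secs': {
#           'soma': {'geom': {'diam': 18.8, 'L': 18.8, 'Ra': 123.0},
#                    'mechs': {'hh': {'gnabar': 0.12, 'gkbar': 0.036,
#                                     'gl': 0.003, 'el': -70}}},
#           'dend': {'geom': {'diam': 5.0, 'L': 150.0, 'Ra': 150.0},
#                    'topol': {'parentSec': 'soma', 'parentX': 1.0, 'childX': 0},
#                    'mechs': {'pas': {'g': 0.0000357, 'e': -70}}}}}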
# ---------------------------------------------------------------------------------------------------------------------
# netParams.synMechParams
# ---------------------------------------------------------------------------------------------------------------------
"synMechParams": {
"label": "Synaptic mechanism parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"mod": {
"label": "NMODL mechanism name",
"help": "The NMODL mechanism name (e.g. 'ExpSyn'); note this does not always coincide with the name of the mod file.",
"suggestions": "",
"options": [
"ExpSyn",
"Exp2Syn"
],
"hintText": "",
"type": "str"
},
"selfNetCon": {
"label": "Self NetCon parameters",
"help": "Dict with parameters of NetCon between the cell voltage and the synapse, required by some synaptic mechanisms such as the homeostatic synapse (hsyn). e.g. 'selfNetCon': {'sec': 'soma' , threshold: -15, 'weight': -1, 'delay': 0} (by default the source section, 'sec' = 'soma').",
"suggestions": "",
"hintText": ""
},
"tau1": {
"label": "Time constant for exponential 1 (ms)",
"help": "Define the time constant for the first exponential.",
"suggestions": "",
"hintText": "1",
"type": "float"
},
"tau2": {
"label": "Time constant for exponential 2 (ms)",
"help": "Define the time constant for the second exponential.",
"suggestions": "",
"hintText": "5",
"type": "float"
},
"e": {
"label": "Reversal potential (mV)",
"help": "Reversal potential of the synaptic receptors.",
"suggestions": "",
"hintText": "0",
"type": "float"
},
"i": {
"label": "synaptic current (nA)",
"help": "Synaptic current in nA.",
"suggestions": "",
"hintText": "10",
"type": "float"
}
}
},
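# Illustrative sketch: a double-exponential synaptic mechanism using the
# 'mod', 'tau1', 'tau2' and 'e' fields above; the label 'AMPA' is hypothetical.
#   netParams.synMechParams['AMPA'] = {'mod': 'Exp2Syn', 'tau1': 0.1,
#                                      'tau2': 5.0, 'e': 0}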
# ---------------------------------------------------------------------------------------------------------------------
# netParams.connParams
# ---------------------------------------------------------------------------------------------------------------------
"connParams": {
"label": "Connectivity parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"preConds": {
"label": "Conditions for the presynaptic cells",
"help": "Presynaptic cell conditions defined using attributes/tags and the required value e.g. {'cellType': 'PYR'}. Values can be lists, e.g. {'pop': ['Exc1', 'Exc2']}. For location properties, the list values correspond to the min and max values, e.g. {'ynorm': [0.1, 0.6]}.",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Population (multiple selection available)",
"suggestions": "",
"help": "Cells belonging to this population (or list of populations) will be connected pre-synaptically.",
"hintText": ""
},
"cellType": {
"label": "Cell type (multiple selection available)",
"suggestions": "",
"help": "Ccells with this cell type attribute/tag will be connected pre-synaptically.",
"hintText": ""
},
"cellModel": {
"label": "Cell model (multiple selection available)",
"suggestions": "",
"help": "Cells with this cell model attribute/tag will be connected pre-synaptically.",
"hintText": ""
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within these x-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within these y-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within these z-axis locations will be connected pre-synaptically..",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells within these normalized x-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within these normalized y-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within these normalized z-axis locations will be connected pre-synaptically.",
"hintText": ""
}
}
},
"postConds": {
"label": "Conditions for the postsynaptic cells",
"help": "Defined as a dictionary with the attributes/tags of the postsynaptic cell and the required values e.g. {'cellType': 'PYR'}. Values can be lists, e.g. {'pop': ['Exc1', 'Exc2']}. For location properties, the list values correspond to the min and max values, e.g. {'ynorm': [0.1, 0.6]}.",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Population (multiple selection available)",
"suggestions": "",
"help": "Cells belonging to this population (or list of populations) will be connected post-synaptically.",
"hintText": ""
},
"cellType": {
"label": "Cell type (multiple selection available)",
"suggestions": "",
"help": "Ccells with this cell type attribute/tag will be connected post-synaptically.",
"hintText": ""
},
"cellModel": {
"label": "Cell model (multiple selection available)",
"suggestions": "",
"help": "Cells with this cell model attribute/tag will be connected post-synaptically.",
"hintText": ""
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within these x-axis locations will be connected post-synaptically.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within these y-axis locations will be connected post-synaptically.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within these z-axis locations will be connected post-synaptically..",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells within these normalized x-axis locations will be connected post-synaptically.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within these normalized y-axis locations will be connected post-synaptically.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within these normalized z-axis locations will be connected post-synaptically.",
"hintText": ""
}
}
},
"sec": {
"label": "Postsynaptic neuron section",
"help": "Name of target section on the postsynaptic neuron (e.g. 'soma'). If omitted, defaults to 'soma' if exists, otherwise to first section in the cell sections list. If synsPerConn > 1, a list of sections or sectionList can be specified, and synapses will be distributed uniformly along the specified section(s), taking into account the length of each section.",
"suggestions": "",
"hintText": "soma",
"type": "list(str)"
},
"loc": {
"label": "Postsynaptic neuron location (0-1)",
"help": "Location of target synaptic mechanism (e.g. 0.3). If omitted, defaults to 0.5. Can be single value, or list (if have synsPerConn > 1) or list of lists (If have both a list of synMechs and synsPerConn > 1).",
"suggestions": "",
"hintText": "0.5",
"type": "list(float)"
},
"synMech": {
"label": "Synaptic mechanism",
"help": "Label (or list of labels) of target synaptic mechanism on the postsynaptic neuron (e.g. 'AMPA' or ['AMPA', 'NMDA']). If omitted employs first synaptic mechanism in the cell synaptic mechanisms list. If have list, a separate connection is created to each synMech; and a list of weights, delays and or locs can be provided.",
"suggestions": "",
"hintText": ""
},
"synsPerConn": {
"label": "Number of individual synaptic contacts per connection",
"help": "Number of individual synaptic contacts (synapses) per cell-to-cell connection (connection). Can be defined as a function (see Functions as strings). If omitted, defaults to 1.",
"suggestions": "",
"hintText": "",
"default": 1
},
"weight": {
"label": "Weight of synaptic connection",
"help": "Strength of synaptic connection (e.g. 0.01). Associated to a change in conductance, but has different meaning and scale depending on the synaptic mechanism and cell model. Can be defined as a function (see Functions as strings). If omitted, defaults to netParams.defaultWeight = 1.",
"suggestions": "",
"hintText": "",
"type": "func"
},
"delay": {
"label": "Connection delay (ms)",
"help": "Time (in ms) for the presynaptic spike to reach the postsynaptic neuron. Can be defined as a function (see Functions as strings). If omitted, defaults to netParams.defaultDelay = 1.",
"suggestions": "",
"hintText": "",
"type": "func"
},
"probability": {
"label": "Probability of connection (0-1)",
"help": "Probability of connection between each pre and postsynaptic cell (0 to 1). Can be a string that defines as a function, e.g. '0.1*dist_3D+uniform(0.2,0.4)' (see Documentation on 'Functions as strings'). Overrides the convergence, divergence and fromList parameters.",
"suggestions": "0.1",
"hintText": "",
"type": "func"
},
"convergence": {
"label": "Convergence",
"help": "Number of pre-synaptic cells connected to each post-synaptic cell. Can be a string that defines as a function, e.g. '2*dist_3D+uniform(2,4)' (see Documentation on 'Functions as strings'). Overrides the divergence and fromList parameters.",
"suggestions": "5",
"hintText": "",
"type": "func"
},
"divergence": {
"label": "Divergence",
"help": "Number of post-synaptic cells connected to each pre-synaptic cell. Can be a string that defines as a function, e.g. '2*dist_3D+uniform(2,4)' (see Documentation on 'Functions as strings'). Overrides the fromList parameter.",
"suggestions": "5",
"hintText": "",
"type": "func"
},
"connList": {
"label": "Explicit list of one-to-one connections",
"help": "Each connection is indicated with relative ids of cell in pre and post populations, e.g. [[0,1],[3,1]] creates a connection between pre cell 0 and post cell 1; and pre cell 3 and post cell 1. Weights, delays and locs can also be specified as a list for each of the individual cell connection. These lists can be 2D or 3D if combined with multiple synMechs and synsPerConn > 1 (the outer dimension will correspond to the connList).",
"suggestions": "",
"hintText": "list(list(float))"
},
"connFunc": {
"label": "Internal connectivity function to use (not required)",
"help": "Automatically set to probConn, convConn, divConn or fromList, when the probability, convergence, divergence or connList parameters are included, respectively. Otherwise defaults to fullConn, ie. all-to-all connectivity.",
"suggestions": "",
"hintText": ""
},
"shape": {
"label": "Weight shape",
"help": "Modifies the conn weight dynamically during the simulation based on the specified pattern. Contains a dictionary with the following fields: 'switchOnOff' - times at which to switch on and off the weight, 'pulseType' - type of pulse to generate; either 'square' or 'gaussian', 'pulsePeriod' - period (in ms) of the pulse, 'pulseWidth' - width (in ms) of the pulse.",
"suggestions": "",
"hintText": ""
},
"plasticity": {
"label": "Plasticity mechanism",
"help": "Requires 2 fields: mech to specifiy the name of the plasticity mechanism, and params containing a dictionary with the parameters of the mechanism, e.g. {'mech': 'STDP', 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 'tauhebb': 10}}.",
"suggestions": "",
"hintText": "",
"type": "dict"
}
}
},
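# Illustrative sketch: a probabilistic connectivity rule combining the
# preConds/postConds, probability, weight and delay fields above. Population
# labels 'E' and 'I' and the function-string are hypothetical examples.
#   netParams.connParams['E->I'] = {
#       'preConds': {'pop': 'E'}, 'postConds': {'pop': 'I'},
#       'probability': 0.1,  # could also be a string, e.g. '0.1*exp(-dist_3D/100)'
#       'weight': 0.01, 'delay': 5,
#       'sec': 'soma', 'loc': 0.5, 'synMech': 'AMPA'}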
# ---------------------------------------------------------------------------------------------------------------------
# netParams.stimSourceParams
# ---------------------------------------------------------------------------------------------------------------------
"stimSourceParams": {
"label": "Stimulation source parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"type": {
"label": "Point process used as stimulator",
"help": "Point process used as stimulator; allowed values: 'IClamp', 'VClamp', 'SEClamp', 'NetStim' and 'AlphaSynapse'. Note that NetStims can be added both using this method, or by creating a population of 'cellModel': 'NetStim' and adding the appropriate connections.",
"suggestions": "",
"hintText": "",
"default": "IClamp",
"type": "str"
},
"dur": {
"label": "Current clamp duration (ms)",
"help": "Duration of current clamp injection in ms",
"suggestions": "",
"hintText": "10",
"type": "float"
},
"amp": {
"label": "Current clamp amplitude (nA)",
"help": "Amplitude of current injection in nA",
"suggestions": "",
"hintText": "10",
"type": "float"
},
"del": {
"label": "Current clamp delay (ms)",
"help": "Delay (time when turned on after simulation starts) of current clamp in ms.",
"suggestions": "",
"hintText": "5",
"type": "float"
},
"vClampAmp": {
"label": "Current clamp amplitude (nA)",
"help": "Voltage clamp with three levels. Clamp is on at time 0, and off at time dur[0]+dur[1]+dur[2].",
"suggestions": "",
"hintText": "10",
"type": "list(float)"
},
"vClampDur": {
"label": "Current clamp delay (ms)",
"help": "Voltage clamp with three levels. Clamp is on at time 0, and off at time dur[0]+dur[1]+dur[2].",
"suggestions": "",
"hintText": "5",
"type": "list(float)"
},
"interval": {
"label": "Interval between spikes (ms)",
"help": "Define the mean time interval between spike.",
"suggestions": "10",
"hintText": "",
"type": "float"
},
"rate": {
"label": "Firing rate (Hz)",
"help": "Firing rate in Hz (note this is the inverse of the NetStim interval property).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"rstim": {
"label": "Voltage clamp stimulation resistance",
"help": "Voltage clamp stimulation resistance.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"gain": {
"label": "Voltage clamp amplifier gain",
"help": "Voltage clamp amplifier gain.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"number": {
"label": "Maximum number of spikes",
"help": "Maximum number of spikes generated by the NetStim.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"start": {
"label": "Start time of first spike",
"help": "Define the start time for the first spike.",
"suggestions": "0",
"hintText": "",
"type": "float"
},
"noise": {
"label": "Noise/randomness fraction (0-1)",
"help": "Fractional noise, 0 <= noise <= 1, means that an interval between spikes consists of a fixed interval of duration (1 - noise)*interval plus a negexp interval of mean duration noise*interval. Note that the most likely negexp interval has duration 0.",
"suggestions": "0.5",
"hintText": "",
"type": "float"
},
"tau1": {
"label": "Voltage clamp tau1",
"help": "Voltage clamp tau1.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"tau2": {
"label": "Voltage clamp tau2",
"help": "Voltage clamp tau2.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"i": {
"label": "Voltage clamp current (nA)",
"help": "Voltage clamp injected current in nA.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"onset": {
"label": "Alpha synapse onset time (ms)",
"help": "Alpha synapse onset time.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"tau": {
"label": "Alpha synapse time constant (ms)",
"help": "Alpha synapse time constant (ms).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"gmax": {
"label": "Alpha synapse maximum conductance",
"help": "Alpha synapse maximum conductance.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"e": {
"label": "Alpha synapse equilibrium potential",
"help": "Alpha synapse equilibrium potential.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"rs": {
"label": "Voltage clamp resistance (MOhm)",
"help": "Voltage clamp resistance (MOhm).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"vc": {
"label": "Voltage clamp reference voltage (mV)",
"help": "Voltage clamp reference voltage (mV).",
"suggestions": "",
"hintText": "",
"type": "float"
}
}
},
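# Illustrative sketch: an IClamp source and a NetStim source using the
# 'type', 'del', 'dur', 'amp', 'rate' and 'noise' fields above; the source
# labels are hypothetical.
#   netParams.stimSourceParams['Input1'] = {'type': 'IClamp', 'del': 5,
#                                           'dur': 10, 'amp': 0.5}
#   netParams.stimSourceParams['Input2'] = {'type': 'NetStim', 'rate': 10,
#                                           'noise': 0.5}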
# ---------------------------------------------------------------------------------------------------------------------
# netParams.stimTargetParams
# ---------------------------------------------------------------------------------------------------------------------
"stimTargetParams": {
"label": "Stimulation target parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"source": {
"label": "Stimulation source",
"help": "Label of the stimulation source (e.g. 'electrode_current').",
"suggestions": "",
"hintText": ""
},
"conds": {
"label": "Conditions of cells where the stimulation will be applied",
"help": "Conditions of cells where the stimulation will be applied. Can include a field 'cellList' with the relative cell indices within the subset of cells selected (e.g. 'conds': {'cellType':'PYR', 'y':[100,200], 'cellList': [1,2,3]}).",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Target population",
"help": "Populations that will receive the stimulation e.g. {'pop': ['Exc1', 'Exc2']}",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"cellType": {
"label": "Target cell type",
"suggestions": "",
"help": "Cell types that will receive the stimulation",
"hintText": "",
"type": "str"
},
"cellModel": {
"label": "Target cell model",
"help": "Cell models that will receive the stimulation.",
"suggestions": "",
"type": "str"
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within this x-axis locations will receive stimulation",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within this y-axis locations will receive stimulation",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within this z-axis locations will receive stimulation",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells withing this normalized x-axis locations will receive stimulation",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within this normalized y-axis locations will receive stimulation",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within this normalized z-axis locations will receive stimulation",
"hintText": ""
},
"cellList": {
"label": "Target cell global indices (gids)",
"help": "Global indices (gids) of neurons to receive stimulation. ([1, 8, 12])",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
}
},
"sec": {
"label": "Target section",
"help": "Target section (default: 'soma').",
"suggestions": "",
"hintText": "",
"type": "str"
},
"loc": {
"label": "Target location",
"help": "Target location (default: 0.5). Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"synMech": {
"label": "Target synaptic mechanism",
"help": "Synaptic mechanism label to connect NetStim to. Optional; only for NetStims.",
"suggestions": "",
"hintText": ""
},
"weight": {
"label": "Weight of connection between NetStim and cell",
"help": "Weight of connection between NetStim and cell. Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
},
"delay": {
"label": "Delay of connection between NetStim and cell",
"help": "Delay of connection between NetStim and cell (default: 1). Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
},
"synsPerConn": {
"label": "Number of synaptic contacts per connection between NetStim and cell",
"help": "Number of synaptic contacts of connection between NetStim and cell (default: 1). Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
}
}
},
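# Illustrative sketch: targeting a stimulation source to a subset of cells via
# 'conds', as documented above; source and population labels are hypothetical.
#   netParams.stimTargetParams['Input1->PYR'] = {
#       'source': 'Input1',
#       'conds': {'cellType': 'PYR', 'cellList': [1, 2, 3]},
#       'sec': 'soma', 'loc': 0.5}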
# ---------------------------------------------------------------------------------------------------------------------
# netParams.importCellParams
# ---------------------------------------------------------------------------------------------------------------------
"importCellParams": {
"label": "Import cell from .hoc or .py templates",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"fileName": {
"label": "Absolute path to file",
"help": "Absolute path to .hoc or .py template file.",
"suggestions": "",
"hintText": "",
"type": "str"
},
"cellName": {
"label": "Cell template/class name",
"help": "Template or class name defined inside the .hoc or .py file",
"suggestions": "",
"hintText": "",
"type": "str"
},
"label": {
"label": "Cell rule label",
"help": "Give a name to this cell rule.",
"suggestions": "",
"hintText": "",
"type": "str"
},
"importSynMechs": {
"label": "Import synaptic mechanisms",
"help": "If true, synaptic mechanisms will also be imported from the file. (default: False)",
"suggestions": "",
"hintText": "",
"type": "bool"
},
"compileMod": {
"label": "Compile mod files",
"help": "If true, mod files will be compiled before importing the cell. (default: false)",
"suggestions": "",
"hintText": "",
"type": "bool"
},
"modFolder": {
"label": "Path to mod folder",
"help": "Define the absolute path to the folder containing the mod files.",
"suggestions": "",
"hintText": "",
"type": "str"
},
}
}
}
},
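# Illustrative sketch: importing a cell template with the fileName/cellName/
# importSynMechs fields above. File and template names are hypothetical.
#   netParams.importCellParams(label='PYR_HH3D', conds={'cellType': 'PYR'},
#                              fileName='geom.hoc', cellName='E21',
#                              importSynMechs=True)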
# ---------------------------------------------------------------------------------------------------------------------
# simConfig
# ---------------------------------------------------------------------------------------------------------------------
"simConfig": {
"label": "Simulation Configuration",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"simLabel": {
"label": "Simulation label",
"help": "Choose a label for this simulation",
"suggestions": "",
"type": "str"
},
"duration": {
"label": "Duration (ms)",
"help": "Simulation duration in ms (default: 1000)",
"suggestions": "",
"default": 1000,
"type": "float"
},
"dt": {
"label": "Time step, dt",
"help": "Simulation time step in ms (default: 0.1)",
"suggestions": "",
"default": 0.025,
"type": "float"
},
"seeds": {
"label": "Randomizer seeds",
"help": "Dictionary with random seeds for connectivity, input stimulation, and cell locations (default: {'conn': 1, 'stim': 1, 'loc': 1}).",
"suggestions": "",
"type": "dict"
},
"addSynMechs": {
"label": "Add synaptic mechanisms",
"help": "Whether to add synaptic mechanisms or not (default: True).",
"suggestions": "",
"type": "bool"
},
"includeParamsLabel": {
"label": "Include parameter rule label",
"help": "Include label of parameters rule that created that cell, conn or stim (default: True).",
"suggestions": "",
"type": "bool"
},
"timing": {
"label": "Show timing",
"help": "Show and record timing of each process (default: True).",
"suggestions": "",
"type": "bool"
},
"verbose": {
"label": "Verbose mode",
"help": "Show detailed messages (default: False).",
"suggestions": "",
"type": "bool"
},
"saveFolder": {
"label": "Output folder",
"help": "Path where to save output data (default: '')",
"suggestions": "",
"type": "str"
},
"filename": {
"label": "Output file name",
"help": "Name of file to save model output (default: 'model_output')",
"suggestions": "",
"default": "model_output",
"type": "str"
},
"saveDataInclude": {
"label": "Data to include in output file",
"help": "Data structures to save to file (default: ['netParams', 'netCells', 'netPops', 'simConfig', 'simData'])",
"suggestions": "",
"type": "list(str)"
},
"timestampFilename": {
"label": "Add timestamp to file name",
"help": "Add timestamp to filename to avoid overwriting (default: False)",
"suggestions": "",
"type": "bool"
},
"savePickle": {
"label": "Save as Pickle",
"help": "Save data to pickle file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveJson": {
"label": "Save as JSON",
"help": "Save dat to json file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveMat": {
"label": "Save as MAT",
"help": "Save data to mat file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveHDF5": {
"label": "Save as HDF5",
"help": "Save data to save to HDF5 file (under development) (default: False).",
"suggestions": "",
"type": "bool"
},
"saveDpk": {
"label": "Save as DPK",
"help": "Save data to .dpk pickled file (default: False).",
"suggestions": "",
"type": "bool"
},
"checkErrors": {
"label": "Check parameter errors",
"help": "check for errors (default: False).",
"suggestions": "",
"type": "bool"
},
"checkErrorsVerbose": {
"label": "Check parameter errors verbose mode",
"help": "check errors vervose (default: False)",
"suggestions": "",
"type": "bool"
},
"backupCfgFile": {
"label": "Copy simulation configuration file to this folder:",
"help": "Copy cfg file to folder, eg. ['cfg.py', 'backupcfg/'] (default: []).",
"suggestions": "",
"type": "list(str)"
},
"recordCells": {
"label": "Cells to record traces from",
"help": "List of cells from which to record traces. Can include cell gids (e.g. 5), population labels (e.g. 'S' to record from one cell of the 'S' population), or 'all', to record from all cells. NOTE: All cells selected in the include argument of simConfig.analysis['plotTraces'] will be automatically included in recordCells. (default: []).",
"suggestions": "",
"type": "list(float)"
},
"recordTraces": {
"label": "Traces to record from cells",
"help": "Dict of traces to record (default: {} ; example: {'V_soma': {'sec':'soma','loc':0.5,'var':'v'} }).",
"suggestions": "",
"type": "dict(dict)",
"default": "{\"V_soma\": {\"sec\": \"soma\", \"loc\": 0.5, \"var\": \"v\"}}"
},
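# Illustrative sketch: the recordCells/recordTraces settings above applied on
# a SimConfig object. Assumes the standard NetPyNE specs API; the population
# label 'PYR' is hypothetical.
#   from netpyne import specs
#   cfg = specs.SimConfig()
#   cfg.recordCells = [0, ('PYR', 5)]  # gid 0 plus cell 5 of pop 'PYR'
#   cfg.recordTraces = {'V_soma': {'sec': 'soma', 'loc': 0.5, 'var': 'v'}}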
"saveCSV": {
"label": "Save as CSV",
"help": "save cvs file (under development) (default: False)",
"suggestions": "",
"type": "bool"
},
"saveDat": {
"label": "Save as DAT ",
"help": "save .dat file (default: False)",
"suggestions": "",
"type": "bool"
},
"saveCellSecs": {
"label": "Store cell sections after simulation",
"help": "Save cell sections after gathering data from nodes post simulation; set to False to reduce memory required (default: True)",
"suggestions": "",
"type": "bool"
},
"saveCellConns": {
"label": "Store cell connections after simulation",
"help": "Save cell connections after gathering data from nodes post simulation; set to False to reduce memory required (default: True)",
"suggestions": "",
"type": "bool"
},
"recordStim": {
"label": "Record spikes of artificial stimulators (NetStims and VecStims)",
"help": "Record spikes of NetStims and VecStims (default: False).",
"suggestions": "",
"type": "bool"
},
"recordLFP": {
"label": "Record LFP electrode locations",
"help": "3D locations of local field potential (LFP) electrodes, e.g. [[50, 100, 50], [50, 200]] (default: False).",
"suggestions": "",
"type": "list(list(float))"
},
"saveLFPCells": {
"label": "Store LFP of individual cells",
"help": "Store LFP generated individually by each cell in sim.allSimData['LFPCells'].",
"suggestions": "",
"type": "bool"
},
"recordStep": {
"label": "Time step for data recording (ms)",
"help": "Step size in ms for data recording (default: 0.1).",
"suggestions": "",
"default": 0.1,
"type": "float"
},
"printRunTime": {
"label": "Interval to print run time at (s)",
"help": "Print run time at interval (in sec) specified here (eg. 0.1) (default: False).",
"suggestions": "",
"type": "float"
},
"printSynsAfterRule": {
"label": "Print total connections",
"help": "Print total connections after each conn rule is applied.",
"suggestions": "",
"type": "bool"
},
"printPopAvgRates": {
"label": "Print population average firing rates",
"help": "Print population avg firing rates after run (default: False).",
"suggestions": "",
"type": "bool"
},
"connRandomSecFromList": {
"label": "Select random sections from list for connection",
"help": "Select random section (and location) from list even when synsPerConn=1 (default: True).",
"suggestions": "",
"type": "bool"
},
"compactConnFormat": {
"label": "Use compact connection format (list instead of dicT)",
"help": "Replace dict format with compact list format for conns (need to provide list of keys to include) (default: False).",
"suggestions": "",
"type": "bool"
},
"gatherOnlySimData": {
"label": "Gather only simulation output data",
"help": "Omits gathering of net and cell data thus reducing gatherData time (default: False).",
"suggestions": "",
"type": "bool"
},
"createPyStruct": {
"label": "Create Python structure",
"help": "Create Python structure (simulator-independent) when instantiating network (default: True).",
"suggestions": "",
"type": "bool"
},
"createNEURONObj": {
"label": "Create NEURON objects",
"help": "Create runnable network in NEURON when instantiating netpyne network metadata (default: True).",
"suggestions": "",
"type": "bool"
},
"cvode_active": {
"label": "use CVode",
"help": "Use CVode variable time step (default: False).",
"suggestions": "",
"type": "bool"
},
"cache_efficient": {
"label": "use CVode cache_efficient",
"help": "Use CVode cache_efficient option to optimize load when running on many cores (default: False).",
"suggestions": "",
"type": "bool"
},
"hParams": {
"label": "Set global parameters (temperature, initial voltage, etc)",
"help": "Dictionary with parameters of h module (default: {'celsius': 6.3, 'v_init': -65.0, 'clamp_resist': 0.001}).",
"suggestions": "",
"type": "dict"
},
"saveTxt": {
"label": "Save as TXT",
"help": "Save data to txt file (under development) (default: False)",
"suggestions": "",
"type": "bool"
},
"saveTiming": {
"label": "Save timing data to file",
"help": " Save timing data to pickle file (default: False).",
"suggestions": "",
"type": "bool"
},
# ---------------------------------------------------------------------------------------------------------------------
# simConfig.analysis
# ---------------------------------------------------------------------------------------------------------------------
"analysis": {
"label": "Analysis",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"plotRaster": {
"label": "Raster plot",
"suggestions": "",
"help": "Plot raster (spikes over time) of network cells.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "str"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"maxSpikes": {
"label": "Maximum number of spikes to plot",
"suggestions": "",
"help": "maximum number of spikes that will be plotted (int).",
"hintText": "",
"type": "float"
},
"orderBy": {
"label": "Order by",
"suggestions": "",
"help": "Unique numeric cell property to order y-axis by, e.g. 'gid', 'ynorm', 'y' ('gid'|'y'|'ynorm'|...)",
"hintText": "",
"options": [
"gid",
"y",
"ynorm"
],
"type": "str"
},
"orderInverse": {
"label": "Invert y-axis",
"suggestions": "",
"help": "Invert the y-axis order (True|False)",
"hintText": "",
"type": "bool"
},
"labels": {
"label": "Population labels",
"suggestions": "",
"help": "Show population labels in a legend or overlayed on one side of raster ('legend'|'overlay'))",
"hintText": "",
"type": "str"
},
"popRates": {
"label": "Include population rates",
"suggestions": "",
"help": "Include population rates ('legend'|'overlay')",
"hintText": "",
"options": [
"legend",
"overlay"
],
"type": "str"
},
"spikeHist": {
"label": "Overlay spike histogram",
"suggestions": "",
"help": "overlay line over raster showing spike histogram (spikes/bin) (None|'overlay'|'subplot')",
"hintText": "",
"options": [
"None",
"overlay",
"subplot"
],
"type": "str"
},
"spikeHistBin": {
"label": "Bin size for histogram",
"suggestions": "",
"help": "Size of bin in ms to use for histogram (int)",
"hintText": "",
"type": "float"
},
"syncLines": {
"label": "Synchronization lines",
"suggestions": "",
"help": "calculate synchorny measure and plot vertical lines for each spike to evidence synchrony (True|False)",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": "str"
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
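# Illustrative sketch: requesting the raster plot above through
# simConfig.analysis, assuming cfg is a specs.SimConfig instance; the output
# file name is hypothetical.
#   cfg.analysis['plotRaster'] = {'orderBy': 'y', 'orderInverse': True,
#                                 'popRates': 'legend', 'saveFig': 'raster.png'}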
"plotSpikeHist": {
"label": "Plot Spike Histogram",
"suggestions": "",
"help": "Plot spike histogram.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "bin size for histogram",
"suggestions": "",
"help": "Size of bin in ms to use for histogram (int)",
"hintText": "",
"type": "int"
},
"overlay": {
"label": "show overlay",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False)",
"hintText": "",
"type": "bool"
},
"graphType": {
"label": "type of Graph",
"suggestions": "",
"help": " Type of graph to use (line graph or bar plot) ('line'|'bar')",
"hintText": "",
"options": [
"line",
"bar"
],
"type": "str"
},
"yaxis": {
"label": "axis units",
"suggestions": "",
"help": "Units of y axis (firing rate in Hz, or spike count) ('rate'|'count')",
"hintText": "",
"options": [
"rate",
"count"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotRatePSD": {
"label": "Plot Rate PSD",
"suggestions": "",
"help": "Plot spikes power spectral density (PSD).",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "Bin size",
"suggestions": "",
"help": "Size of bin in ms to use (int)",
"hintText": "",
"type": "float"
},
"maxFreq": {
"label": "maximum frequency",
"suggestions": "",
"help": " Maximum frequency to show in plot (float).",
"hintText": "",
"type": "float"
},
"NFFT": {
"label": "Number of point",
"suggestions": "",
"help": "The number of data points used in each block for the FFT (power of 2)",
"hintText": "",
"type": "float"
},
"noverlap": {
"label": "Number of overlap points",
"suggestions": "",
"help": "Number of points of overlap between segments (< nperseg).",
"hintText": "",
"type": "float"
},
"smooth": {
"label": "Window size",
"suggestions": "",
"help": "Window size for smoothing; no smoothing if 0.",
"hintText": "",
"type": "float"
},
"overlay": {
"label": "Overlay data",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotSpikeStats": {
"label": "Plot Spike Statistics",
"suggestions": "",
"help": "Plot spike histogram.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"graphType": {
"label": "type of graph",
"suggestions": "",
"help": "Type of graph to use ('boxplot').",
"hintText": "",
"options": [
"boxplot"
],
"type": "str"
},
"stats": {
"label": "meassure type to calculate stats",
"suggestions": "",
"help": "List of types measure to calculate stats over: cell firing rates, interspike interval coefficient of variation (ISI CV), pairwise synchrony, and/or overall synchrony (sync measures calculated using PySpike SPIKE-Synchrony measure) (['rate', |'isicv'| 'pairsync' |'sync'|]).",
"hintText": "",
"options": [
"rate",
"isicv",
"pairsync",
"sync"
],
"type": "str"
},
"popColors": {
"label": "color for each population",
"suggestions": "",
"help": "Dictionary with color (value) used for each population/key.",
"hintText": "",
"type": "dict"
},
"figSize": {
"label": "figure size",
"suggestions": "",
"help": "Size of figure ((width, height)).",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotTraces": {
"label": "Plot Traces",
"suggestions": "",
"help": "Plot recorded traces (specified in simConfig.recordTraces).",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list(float)"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range for shown Traces ; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"overlay": {
"label": "overlay data",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False).",
"hintText": "",
"type": "bool"
},
"oneFigPer": {
"label": "plot one figure per cell/trace",
"suggestions": "",
"help": "Whether to plot one figure per cell or per trace (showing multiple cells) ('cell'|'trace').",
"hintText": "",
"options": [
"cell",
"traces"
],
"type": "str"
},
"rerun": {
"label": "re-run simulation",
"suggestions": "",
"help": "rerun simulation so new set of cells gets recorded (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotLFP": {
"label": "Plot LFP",
"suggestions": "",
"help": "Plot LFP / extracellular electrode recordings (time-resolved, power spectral density, time-frequency and 3D locations).",
"hintText": "",
"children": {
"electrodes": {
"label": "electrode to show",
"suggestions": "",
"help": " List of electrodes to include; 'avg'=avg of all electrodes; 'all'=each electrode separately (['avg', 'all', 0, 1, ...]).",
"hintText": "",
"type": "list"
},
"plots": {
"label": "Select plot types to show (multiple selection available)",
"suggestions": "",
"help": "list of plot types to show (['timeSeries', 'PSD', 'timeFreq', 'locations']).",
"hintText": "",
"options": [
"timeSeries",
"PSD",
"spectrogram",
"locations"
],
"type": "str"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range for shown Traces ; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"NFFT": {
"label": "NFFT",
"suggestions": "",
"help": "The number of data points used in each block for the FFT (power of 2) (float)",
"hintText": "",
"type": "float"
},
"noverlap": {
"label": "Overlap",
"suggestions": "",
"help": "Number of points of overlap between segments (int, < nperseg).",
"hintText": "",
"type": "float"
},
"maxFreq": {
"label": "Maximum Frequency",
"suggestions": "",
"help": "Maximum frequency shown in plot for PSD and time-freq (float).",
"hintText": "",
"type": "float"
},
"nperseg": {
"label": "Segment length (nperseg)",
"suggestions": "",
"help": "Length of each segment for time-freq (int).",
"hintText": "",
"type": "float"
},
"smooth": {
"label": "Window size",
"suggestions": "",
"help": "Window size for smoothing; no smoothing if 0 (int).",
"hintText": "",
"type": "float"
},
"separation": {
"label": "Separation factor",
"suggestions": "",
"help": "Separation factor between time-resolved LFP plots; multiplied by max LFP value (float).",
"hintText": "",
"type": "float"
},
"includeAxon": {
"label": "Include axon",
"suggestions": "",
"help": "Whether to show the axon in the location plot (boolean).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
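# Illustrative sketch: the LFP plots above require electrode locations to be
# recorded first via simConfig.recordLFP; coordinates are hypothetical.
#   cfg.recordLFP = [[50, 100, 50], [50, 200, 50]]
#   cfg.analysis['plotLFP'] = {'electrodes': ['avg', 0, 1],
#                              'plots': ['timeSeries', 'PSD'], 'maxFreq': 80}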
"plotShape": {
"label": "Plot Shape",
"suggestions": "",
"help": "",
"hintText": "Plot 3D cell shape using Matplotlib or NEURON Interviews PlotShape.",
"children": {
"includePre": {
"label": "population (or cell by index) to presyn",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"includePost": {
"label": "population (or cell by index) to postsyn",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"synStyle": {
"label": "synaptic marker style",
"suggestions": "",
"help": "Style of marker to show synapses (Matplotlib markers).",
"hintText": "",
"type": "str"
},
"dist": {
"label": "3D distance",
"suggestions": "",
"help": "3D distance (like zoom).",
"hintText": "",
"type": "float"
},
"synSize": {
"label": "synapses marker size",
"suggestions": "",
"help": "Size of marker to show synapses.",
"hintText": "",
"type": "float"
},
"cvar": {
"label": "variable to represent in shape plot",
"suggestions": "",
"help": "Variable to represent in shape plot ('numSyns'|'weightNorm').",
"hintText": "",
"options": [
"numSyns",
"weightNorm"
],
"type": "str"
},
"cvals": {
"label": "value to represent in shape plot",
"suggestions": "",
"help": "List of values to represent in shape plot; must be same as num segments (list of size num segments; ).",
"hintText": "",
"type": "list(float)"
},
"iv": {
"label": "use NEURON iv",
"suggestions": "",
"help": "Use NEURON Interviews (instead of matplotlib) to show shape plot (True|False).",
"hintText": "",
"type": "bool"
},
"ivprops": {
"label": "properties for iv",
"suggestions": "",
"help": "Dict of properties to plot using Interviews (dict).",
"hintText": "",
"type": "dict"
},
"showSyns": {
"label": "show synaptic connections in 3D",
"suggestions": "",
"help": "Show synaptic connections in 3D (True|False).",
"hintText": "",
"type": "bool"
},
"bkgColor": {
"label": "background color",
"suggestions": "",
"help": "RGBA list/tuple with bakcground color eg. (0.5, 0.2, 0.1, 1.0) (list/tuple with 4 floats).",
"hintText": "",
"type": "list(float)"
},
"showElectrodes": {
"label": "show electrodes",
"suggestions": "",
"help": "Show electrodes in 3D (True|False).",
"hintText": "",
"type": "bool"
},
"includeAxon": {
"label": "include Axon in shape plot",
"suggestions": "",
"help": "Include axon in shape plot (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plot2Dnet": {
"label": "Plot 2D net",
"suggestions": "",
"help": "Plot 2D representation of network cell positions and connections.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to show (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"showConns": {
"label": "show connections",
"suggestions": "",
"help": "Whether to show connections or not (True|False).",
"hintText": "",
"type": "bool"
},
"view": {
"label": "perspective view",
"suggestions": "",
"help": "Perspective view, either front ('xy') or top-down ('xz').",
"hintText": "",
"options": [
"xy",
"xz"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotConn": {
"label": "Plot Connectivity",
"suggestions": "",
"help": "Plot network connectivity.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to show (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"feature": {
"label": "feature to show",
"suggestions": "",
"help": "Feature to show in connectivity matrix; the only features applicable to groupBy='cell' are 'weight', 'delay' and 'numConns'; 'strength' = weight * probability ('weight'|'delay'|'numConns'|'probability'|'strength'|'convergence'|'divergence')g.",
"hintText": "",
"options": [
"weight",
"delay",
"numConns",
"probability",
"strength",
"convergency",
"divergency"
],
"type": "str"
},
"groupBy": {
"label": "group by",
"suggestions": "",
"help": "Show matrix for individual cells or populations ('pop'|'cell').",
"hintText": "",
"options": [
"pop",
"cell"
],
"type": "str"
},
"orderBy": {
"label": "order by",
"suggestions": "",
"help": "Unique numeric cell property to order x and y axes by, e.g. 'gid', 'ynorm', 'y' (requires groupBy='cells') ('gid'|'y'|'ynorm'|...).",
"hintText": "",
"options": [
"gid",
"y",
"ynorm"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
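# Illustrative sketch: a connectivity matrix grouped by population, using the
# 'feature' and 'groupBy' fields above (cfg as a specs.SimConfig instance).
#   cfg.analysis['plotConn'] = {'feature': 'strength', 'groupBy': 'pop'}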
"granger": {
"label": "Granger",
"suggestions": "",
"help": "Calculate and optionally plot Granger Causality.",
"hintText": "",
"children": {
"cells1": {
"label": "population (or cell by index) to subset 1",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 1 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"cells2": {
"label": "population (or cell by index cell) to subset 2",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 2 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"spks1": {
"label": "spike times to train 1",
"suggestions": "",
"help": "Spike train 1; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list"
},
"spks2": {
"label": "spike times to train 2",
"suggestions": "",
"help": "Spike train 2; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Range of time to calculate nTE in ms ([min, max]).",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "bin size",
"suggestions": "",
"help": "Bin size used to convert spike times into histogram (int).",
"hintText": "",
"type": "float"
},
"label1": {
"label": "label for train 1",
"suggestions": "",
"help": "Label for spike train 1 to use in plot (string).",
"hintText": "",
"type": "str"
},
"label2": {
"label": "label for train 2",
"suggestions": "",
"help": "Label for spike train 2 to use in plot (string).",
"hintText": "",
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"nTE": {
"label": "Normalize Transfer Entropy",
"suggestions": "",
"help": "Calculate normalized transfer entropy.",
"hintText": "",
"children": {
"cell1": {
"label": "Cell Subset 1",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 1 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"cell2": {
"label": "Cell Subset 2",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 2 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"spks1": {
"label": "Spike train 1",
"suggestions": "",
"help": "Spike train 1; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list(float)"
},
"spks2": {
"label": "Spike train 2",
"suggestions": "",
"help": "Spike train 2; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list(float)"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Range of time to calculate nTE in ms ([min, max]).",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "Bin size",
"suggestions": "",
"help": "Bin size used to convert spike times into histogram (int).",
"hintText": "",
"type": "float"
},
"numShuffle": {
"label": "Number of Shuffles",
"suggestions": "",
"help": "Number of times to shuffle spike train 1 to calculate TEshuffled; note: nTE = (TE - TEShuffled)/H(X2F|X2P) (int).",
"hintText": "",
"type": "float"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
}
}
}
}
}
}
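# The metadata above documents NetPyNE's connectivity and spike-train analysis
# functions. Below is a hedged usage sketch, assuming a simulation has already
# been run via `sim`; argument names mirror the metadata (exact signatures may
# differ across NetPyNE versions), and the 'E2' population is a made-up
# placeholder, not something defined in the metadata.
from netpyne import sim

sim.analysis.plotConn(include=['all'], feature='strength', groupBy='pop',
                      orderBy='gid', saveFig='conn.png', showFig=False)
sim.analysis.granger(cells1=['E1'], cells2=['E2'], binSize=5,
                     label1='E1', label2='E2')
sim.analysis.nTE(cells1=['E1'], cells2=['E2'], binSize=5, numShuffle=30)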
| true
| true
|
7903e85f0a981e9fe819f9e5b2d7c9a01b6174c5
| 6,314
|
py
|
Python
|
rdmo/projects/views/project_create.py
|
cbittner/rdmo
|
1d6885ad2a69f6d24c9fca6446536e0c06de5486
|
[
"Apache-2.0"
] | null | null | null |
rdmo/projects/views/project_create.py
|
cbittner/rdmo
|
1d6885ad2a69f6d24c9fca6446536e0c06de5486
|
[
"Apache-2.0"
] | null | null | null |
rdmo/projects/views/project_create.py
|
cbittner/rdmo
|
1d6885ad2a69f6d24c9fca6446536e0c06de5486
|
[
"Apache-2.0"
] | null | null | null |
import logging
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, TemplateView
from django.views.generic.base import View as BaseView
from rdmo.core.imports import handle_uploaded_file
from rdmo.core.plugins import get_plugin, get_plugins
from rdmo.core.views import RedirectViewMixin
from rdmo.questions.models import Catalog
from rdmo.tasks.models import Task
from rdmo.views.models import View
from ..forms import ProjectForm
from ..models import Membership, Project
from ..utils import (save_import_snapshot_values, save_import_tasks,
save_import_values, save_import_views)
logger = logging.getLogger(__name__)
class ProjectCreateView(LoginRequiredMixin, RedirectViewMixin, CreateView):
model = Project
form_class = ProjectForm
def get_form_kwargs(self):
catalogs = Catalog.objects.filter_current_site() \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
form_kwargs = super().get_form_kwargs()
form_kwargs.update({
'catalogs': catalogs
})
return form_kwargs
def form_valid(self, form):
# add current site
form.instance.site = get_current_site(self.request)
# save the project
response = super(ProjectCreateView, self).form_valid(form)
# add all tasks to project
tasks = Task.objects.filter_current_site() \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
for task in tasks:
form.instance.tasks.add(task)
# add all views to project
views = View.objects.filter_current_site() \
.filter_catalog(self.object.catalog) \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
for view in views:
form.instance.views.add(view)
# add current user as owner
membership = Membership(project=form.instance, user=self.request.user, role='owner')
membership.save()
return response
class ProjectCreateUploadView(LoginRequiredMixin, BaseView):
success_url = reverse_lazy('projects')
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(self.success_url)
def post(self, request, *args, **kwargs):
try:
uploaded_file = request.FILES['uploaded_file']
except KeyError:
return HttpResponseRedirect(self.success_url)
else:
import_tmpfile_name = handle_uploaded_file(uploaded_file)
for import_key, import_plugin in get_plugins('PROJECT_IMPORTS').items():
import_plugin.file_name = import_tmpfile_name
if import_plugin.check():
try:
import_plugin.process()
except ValidationError as e:
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': e
}, status=400)
# store information in session for ProjectCreateImportView
request.session['create_import_tmpfile_name'] = import_tmpfile_name
request.session['create_import_key'] = import_key
return render(request, 'projects/project_upload.html', {
'create': True,
'file_name': uploaded_file.name,
'project': import_plugin.project,
'values': import_plugin.values,
'snapshots': import_plugin.snapshots,
'tasks': import_plugin.tasks,
'views': import_plugin.views
})
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': [_('Files of this type cannot be imported.')]
}, status=400)
class ProjectCreateImportView(LoginRequiredMixin, TemplateView):
success_url = reverse_lazy('projects')
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(self.success_url)
def post(self, request, *args, **kwargs):
import_tmpfile_name = request.session.get('create_import_tmpfile_name')
import_key = request.session.get('create_import_key')
checked = [key for key, value in request.POST.items() if 'on' in value]
if import_tmpfile_name and import_key:
import_plugin = get_plugin('PROJECT_IMPORTS', import_key)
import_plugin.file_name = import_tmpfile_name
if import_plugin.check():
try:
import_plugin.process()
except ValidationError as e:
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': e
}, status=400)
# add current site and save project
import_plugin.project.site = get_current_site(self.request)
import_plugin.project.save()
# add user to project
membership = Membership(project=import_plugin.project, user=request.user, role='owner')
membership.save()
save_import_values(import_plugin.project, import_plugin.values, checked)
save_import_snapshot_values(import_plugin.project, import_plugin.snapshots, checked)
save_import_tasks(import_plugin.project, import_plugin.tasks)
save_import_views(import_plugin.project, import_plugin.views)
return HttpResponseRedirect(import_plugin.project.get_absolute_url())
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': [_('There has been an error with your import.')]
}, status=400)
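# A minimal sketch (assumed, not taken from rdmo's own urls.py) of how these
# three views could be routed; the paths and URL names below are placeholders.
from django.urls import path
from rdmo.projects.views.project_create import (ProjectCreateView,
                                                ProjectCreateUploadView,
                                                ProjectCreateImportView)

urlpatterns = [
    path('create/', ProjectCreateView.as_view(), name='project_create'),
    path('create/upload/', ProjectCreateUploadView.as_view(), name='project_create_upload'),
    path('create/import/', ProjectCreateImportView.as_view(), name='project_create_import'),
]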
| 39.217391
| 103
| 0.625436
|
import logging
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, TemplateView
from django.views.generic.base import View as BaseView
from rdmo.core.imports import handle_uploaded_file
from rdmo.core.plugins import get_plugin, get_plugins
from rdmo.core.views import RedirectViewMixin
from rdmo.questions.models import Catalog
from rdmo.tasks.models import Task
from rdmo.views.models import View
from ..forms import ProjectForm
from ..models import Membership, Project
from ..utils import (save_import_snapshot_values, save_import_tasks,
save_import_values, save_import_views)
logger = logging.getLogger(__name__)
class ProjectCreateView(LoginRequiredMixin, RedirectViewMixin, CreateView):
model = Project
form_class = ProjectForm
def get_form_kwargs(self):
catalogs = Catalog.objects.filter_current_site() \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
form_kwargs = super().get_form_kwargs()
form_kwargs.update({
'catalogs': catalogs
})
return form_kwargs
def form_valid(self, form):
form.instance.site = get_current_site(self.request)
response = super(ProjectCreateView, self).form_valid(form)
tasks = Task.objects.filter_current_site() \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
for task in tasks:
form.instance.tasks.add(task)
views = View.objects.filter_current_site() \
.filter_catalog(self.object.catalog) \
.filter_group(self.request.user) \
.filter_availability(self.request.user)
for view in views:
form.instance.views.add(view)
membership = Membership(project=form.instance, user=self.request.user, role='owner')
membership.save()
return response
class ProjectCreateUploadView(LoginRequiredMixin, BaseView):
success_url = reverse_lazy('projects')
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(self.success_url)
def post(self, request, *args, **kwargs):
try:
uploaded_file = request.FILES['uploaded_file']
except KeyError:
return HttpResponseRedirect(self.success_url)
else:
import_tmpfile_name = handle_uploaded_file(uploaded_file)
for import_key, import_plugin in get_plugins('PROJECT_IMPORTS').items():
import_plugin.file_name = import_tmpfile_name
if import_plugin.check():
try:
import_plugin.process()
except ValidationError as e:
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': e
}, status=400)
request.session['create_import_tmpfile_name'] = import_tmpfile_name
request.session['create_import_key'] = import_key
return render(request, 'projects/project_upload.html', {
'create': True,
'file_name': uploaded_file.name,
'project': import_plugin.project,
'values': import_plugin.values,
'snapshots': import_plugin.snapshots,
'tasks': import_plugin.tasks,
'views': import_plugin.views
})
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': [_('Files of this type cannot be imported.')]
}, status=400)
class ProjectCreateImportView(LoginRequiredMixin, TemplateView):
success_url = reverse_lazy('projects')
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(self.success_url)
def post(self, request, *args, **kwargs):
import_tmpfile_name = request.session.get('create_import_tmpfile_name')
import_key = request.session.get('create_import_key')
checked = [key for key, value in request.POST.items() if 'on' in value]
if import_tmpfile_name and import_key:
import_plugin = get_plugin('PROJECT_IMPORTS', import_key)
import_plugin.file_name = import_tmpfile_name
if import_plugin.check():
try:
import_plugin.process()
except ValidationError as e:
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': e
}, status=400)
import_plugin.project.site = get_current_site(self.request)
import_plugin.project.save()
membership = Membership(project=import_plugin.project, user=request.user, role='owner')
membership.save()
save_import_values(import_plugin.project, import_plugin.values, checked)
save_import_snapshot_values(import_plugin.project, import_plugin.snapshots, checked)
save_import_tasks(import_plugin.project, import_plugin.tasks)
save_import_views(import_plugin.project, import_plugin.views)
return HttpResponseRedirect(import_plugin.project.get_absolute_url())
return render(request, 'core/error.html', {
'title': _('Import error'),
'errors': [_('There has been an error with your import.')]
}, status=400)
| true
| true
|
7903ea7c18d842ba9d8b382e763d6d9a217b2eab
| 1,604
|
py
|
Python
|
rootcp/models.py
|
EugeneNdiaye/rootCP
|
a9777d0f4871dbd1bc0afd680889c0a3e73ec7d0
|
[
"BSD-3-Clause"
] | 1
|
2022-01-08T15:30:25.000Z
|
2022-01-08T15:30:25.000Z
|
rootcp/models.py
|
EugeneNdiaye/rootCP
|
a9777d0f4871dbd1bc0afd680889c0a3e73ec7d0
|
[
"BSD-3-Clause"
] | null | null | null |
rootcp/models.py
|
EugeneNdiaye/rootCP
|
a9777d0f4871dbd1bc0afd680889c0a3e73ec7d0
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
class ridge:
""" Ridge estimator.
"""
def __init__(self, lmd=0.1):
self.lmd = lmd
self.hat = None
self.hatn = None
def fit(self, X, y):
if self.hat is None:
G = X.T.dot(X) + self.lmd * np.eye(X.shape[1])
self.hat = np.linalg.solve(G, X.T)
if self.hatn is None:
y0 = np.array(list(y[:-1]) + [0])
self.hatn = self.hat.dot(y0)
self.beta = self.hatn + y[-1] * self.hat[:, -1]
def predict(self, X):
return X.dot(self.beta)
def conformity(self, y, y_pred):
return 0.5 * np.square(y - y_pred)
class regressor:
def __init__(self, model=None, s_eps=0., conform=None):
self.model = model
self.coefs = []
self.s_eps = s_eps
self.conform = conform
def fit(self, X, y):
refit = True
for t in range(len(self.coefs)):
if self.s_eps == 0:
break
if abs(self.coefs[t][0] - y[-1]) <= self.s_eps:
self.beta = self.coefs[t][1].copy()
refit = False
break
if refit:
self.beta = self.model.fit(X, y)
if self.s_eps != 0:
self.coefs += [[y[-1], self.beta.copy()]]
def predict(self, X):
if len(X.shape) == 1:
X = X.reshape(1, -1)
return self.model.predict(X)
def conformity(self, y, y_pred):
if self.conform is None:
return np.abs(y - y_pred)
else:
return self.conform(y, y_pred)
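# A hedged usage sketch (not from the repo): ridge caches the hat matrix
# (X^T X + lmd*I)^{-1} X^T on the first fit, so refitting when only the
# candidate label y[-1] changes reduces to a cheap vector update, as used in
# conformal prediction. The toy data below is illustrative.
import numpy as np

X = np.random.randn(20, 3)
y = np.random.randn(20)

model = ridge(lmd=0.1)
model.fit(X, y)                      # first call builds and caches `hat`
residual_scores = model.conformity(y, model.predict(X))  # 0.5 * squared residuals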
| 20.831169
| 59
| 0.483791
|
import numpy as np
class ridge:
def __init__(self, lmd=0.1):
self.lmd = lmd
self.hat = None
self.hatn = None
def fit(self, X, y):
if self.hat is None:
G = X.T.dot(X) + self.lmd * np.eye(X.shape[1])
self.hat = np.linalg.solve(G, X.T)
if self.hatn is None:
y0 = np.array(list(y[:-1]) + [0])
self.hatn = self.hat.dot(y0)
self.beta = self.hatn + y[-1] * self.hat[:, -1]
def predict(self, X):
return X.dot(self.beta)
def conformity(self, y, y_pred):
return 0.5 * np.square(y - y_pred)
class regressor:
def __init__(self, model=None, s_eps=0., conform=None):
self.model = model
self.coefs = []
self.s_eps = s_eps
self.conform = conform
def fit(self, X, y):
refit = True
for t in range(len(self.coefs)):
if self.s_eps == 0:
break
if abs(self.coefs[t][0] - y[-1]) <= self.s_eps:
self.beta = self.coefs[t][1].copy()
refit = False
break
if refit:
self.beta = self.model.fit(X, y)
if self.s_eps != 0:
self.coefs += [[y[-1], self.beta.copy()]]
def predict(self, X):
if len(X.shape) == 1:
X = X.reshape(1, -1)
return self.model.predict(X)
def conformity(self, y, y_pred):
if self.conform is None:
return np.abs(y - y_pred)
else:
return self.conform(y, y_pred)
| true
| true
|
7903eb73c3f6b1512edfad4b6b076a4433ccc540
| 347
|
py
|
Python
|
pools/eventlet.py
|
JohnStarich/python-pool-performance
|
5a8428ca95240932e0b1b0d7064bf8020e0b1f2e
|
[
"MIT"
] | 32
|
2016-08-05T20:54:57.000Z
|
2021-11-16T19:28:12.000Z
|
pools/eventlet.py
|
ktosiu/python-pool-performance
|
5a8428ca95240932e0b1b0d7064bf8020e0b1f2e
|
[
"MIT"
] | 1
|
2018-10-26T10:43:16.000Z
|
2018-10-31T07:37:20.000Z
|
pools/eventlet.py
|
ktosiu/python-pool-performance
|
5a8428ca95240932e0b1b0d7064bf8020e0b1f2e
|
[
"MIT"
] | 7
|
2017-03-18T21:27:53.000Z
|
2022-02-11T01:40:48.000Z
|
from pools import PoolTest
import eventlet
class EventletPool(PoolTest):
def init_pool(self, worker_count):
return eventlet.GreenPool(worker_count)
def map(self, work_func, inputs):
return self.pool.imap(work_func, inputs)
def init_network_resource(self):
return eventlet.import_patched('requests').Session
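# A standalone sketch of the eventlet idiom this class wraps (assumed usage,
# not part of the benchmark harness): imap fans work out across green threads
# and yields results in input order.
import eventlet

pool = eventlet.GreenPool(10)
for result in pool.imap(lambda n: n * n, range(5)):
    print(result)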
| 24.785714
| 58
| 0.731988
|
from pools import PoolTest
import eventlet
class EventletPool(PoolTest):
def init_pool(self, worker_count):
return eventlet.GreenPool(worker_count)
def map(self, work_func, inputs):
return self.pool.imap(work_func, inputs)
def init_network_resource(self):
return eventlet.import_patched('requests').Session
| true
| true
|
7903ec9c043049b9e677a2917e22d25071fe1f34
| 3,227
|
py
|
Python
|
tracportalopt/project/notification.py
|
isabella232/TracPortalPlugin
|
985581b16aad360cfc78d6b901c93fb922f7bc30
|
[
"MIT"
] | 2
|
2015-01-19T05:53:30.000Z
|
2016-01-08T10:30:02.000Z
|
tracportalopt/project/notification.py
|
iij/TracPortalPlugin
|
985581b16aad360cfc78d6b901c93fb922f7bc30
|
[
"MIT"
] | 1
|
2022-01-20T12:47:18.000Z
|
2022-01-20T12:47:18.000Z
|
tracportalopt/project/notification.py
|
isabella232/TracPortalPlugin
|
985581b16aad360cfc78d6b901c93fb922f7bc30
|
[
"MIT"
] | 3
|
2016-12-08T02:25:36.000Z
|
2022-01-20T12:10:58.000Z
|
#! -*- coding: utf-8 -*-
#
# (C) 2013 Internet Initiative Japan Inc.
# All rights reserved.
#
# Created on 2013/05/15
# @author: yosinobu@iij.ad.jp
"""Notify project owner with email when the project created successfully."""
from pkg_resources import resource_filename
from trac.config import Option, ListOption
from trac.core import Component, implements
from trac.notification import Notify, NotifyEmail
from trac.web.chrome import ITemplateProvider
from tracportal.i18n import _
from tracportal.project.api import IProjectCreationInterceptor
class ProjectCreationNotificationSystem(Component):
implements(ITemplateProvider, IProjectCreationInterceptor)
# options
from_name = Option('tracportal', 'notify_email_from_name', doc=_('Sender name to use in notification emails.'))
from_email = Option('tracportal', 'notify_email_from', doc=_('Sender address to use in notification emails.'))
ccrcpts = ListOption('tracportal', 'notify_email_cc',
doc=_('Email address(es) to always send notifications to, '
'addresses can be seen by all recipients (Cc:).'))
subject = Option('tracportal', 'notify_email_subject', default=_("Ready to start Trac project!"),
doc=_('Subject in notification emails.'))
# ITemplateProvider methods
def get_templates_dirs(self):
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
return []
# IProjectCreationInterceptor methods
def pre_process(self, project_info, owner_info):
pass
def post_process(self, project_info, owner_info, env):
if 'email' in owner_info:
project_info['url'] = env.abs_href()
support = {
'name': self.from_name or self.env.project_name,
'email': self.from_email or self.env.config.get('notification', 'smtp_from'),
}
notify_email = ProjectCreationNotifyEmail(self.env, (owner_info['email'],), tuple(self.ccrcpts),
project_info, owner_info, support)
notify_email.notify('')
class ProjectCreationNotifyEmail(NotifyEmail):
"""Notification of a project creation."""
template_name = 'project_creation_notify_email.txt'
def __init__(self, env, torcpts, ccrcpts, project_info, owner_info, support):
NotifyEmail.__init__(self, env)
self.torcpts = torcpts
self.ccrcpts = ccrcpts
self.project_info = project_info
self.owner_info = owner_info
self.support = support
self.subject = self.subject
def get_recipients(self, resid):
return (self.torcpts, self.ccrcpts,)
def notify(self, resid, subject=None, author=None):
if subject:
self.subject = subject
self.from_name = self.support['name']
self.from_email = self.support['email']
self.replyto_email = self.support['email']
if self.data is None:
self.data = {}
self.data.update({
'owner': self.owner_info,
'project': self.project_info,
'support': self.support,
})
Notify.notify(self, resid)
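# A hedged sketch (all values are placeholders) of setting the [tracportal]
# options read by the component above, via Trac's configuration API; in
# practice these would simply be edited in trac.ini.
from trac.config import Configuration

config = Configuration('conf/trac.ini')  # path is an assumption
config.set('tracportal', 'notify_email_from_name', 'Trac Portal')
config.set('tracportal', 'notify_email_from', 'noreply@example.com')
config.set('tracportal', 'notify_email_cc', 'admin@example.com')
config.set('tracportal', 'notify_email_subject', 'Ready to start Trac project!')
config.save()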
| 37.523256
| 115
| 0.654478
|
from pkg_resources import resource_filename
from trac.config import Option, ListOption
from trac.core import Component, implements
from trac.notification import Notify, NotifyEmail
from trac.web.chrome import ITemplateProvider
from tracportal.i18n import _
from tracportal.project.api import IProjectCreationInterceptor
class ProjectCreationNotificationSystem(Component):
implements(ITemplateProvider, IProjectCreationInterceptor)
from_name = Option('tracportal', 'notify_email_from_name', doc=_('Sender name to use in notification emails.'))
from_email = Option('tracportal', 'notify_email_from', doc=_('Sender address to use in notification emails.'))
ccrcpts = ListOption('tracportal', 'notify_email_cc',
doc=_('Email address(es) to always send notifications to, '
'addresses can be seen by all recipients (Cc:).'))
subject = Option('tracportal', 'notify_email_subject', default=_("Ready to start Trac project!"),
doc=_('Subject in notification emails.'))
def get_templates_dirs(self):
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
return []
def pre_process(self, project_info, owner_info):
pass
def post_process(self, project_info, owner_info, env):
if 'email' in owner_info:
project_info['url'] = env.abs_href()
support = {
'name': self.from_name or self.env.project_name,
'email': self.from_email or self.env.config.get('notification', 'smtp_from'),
}
notify_email = ProjectCreationNotifyEmail(self.env, (owner_info['email'],), tuple(self.ccrcpts),
project_info, owner_info, support)
notify_email.notify('')
class ProjectCreationNotifyEmail(NotifyEmail):
template_name = 'project_creation_notify_email.txt'
def __init__(self, env, torcpts, ccrcpts, project_info, owner_info, support):
NotifyEmail.__init__(self, env)
self.torcpts = torcpts
self.ccrcpts = ccrcpts
self.project_info = project_info
self.owner_info = owner_info
self.support = support
self.subject = self.subject
def get_recipients(self, resid):
return (self.torcpts, self.ccrcpts,)
def notify(self, resid, subject=None, author=None):
if subject:
self.subject = subject
self.from_name = self.support['name']
self.from_email = self.support['email']
self.replyto_email = self.support['email']
if self.data is None:
self.data = {}
self.data.update({
'owner': self.owner_info,
'project': self.project_info,
'support': self.support,
})
Notify.notify(self, resid)
| true
| true
|
7903edee44cb421c689de087d74c9b211ef7a7d7
| 1,022
|
py
|
Python
|
pdip/configuration/services/config_service.py
|
ahmetcagriakca/pdip
|
c4c16d5666a740154cabdc6762cd44d98b7bdde8
|
[
"MIT"
] | 2
|
2021-12-09T21:07:46.000Z
|
2021-12-11T22:18:01.000Z
|
pdip/configuration/services/config_service.py
|
PythonDataIntegrator/pdip
|
c4c16d5666a740154cabdc6762cd44d98b7bdde8
|
[
"MIT"
] | null | null | null |
pdip/configuration/services/config_service.py
|
PythonDataIntegrator/pdip
|
c4c16d5666a740154cabdc6762cd44d98b7bdde8
|
[
"MIT"
] | 3
|
2021-11-15T00:47:00.000Z
|
2021-12-17T11:35:45.000Z
|
from functools import lru_cache
from injector import inject
from .config_parameter_base import ConfigParameterBase
from ...data.repository import RepositoryProvider
from ...dependency import IScoped
from ...exceptions import RequiredClassException
class ConfigService(IScoped):
@inject
def __init__(self,
repository_provider: RepositoryProvider
):
self.repository_provider = repository_provider
config_subclasses = ConfigParameterBase.__subclasses__()
if config_subclasses is None or len(config_subclasses) == 0:
raise RequiredClassException(f'Requires {ConfigParameterBase.__name__} derived class')
config_class = config_subclasses[0]
        self.config_repository = repository_provider.get(config_class)
@lru_cache()
def get_config_by_name(self, name):
        parameter = self.config_repository.first(Name=name)
if parameter is not None:
return parameter.Value
else:
return None
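# A hedged sketch (assumed model shape, not from the pdip docs): ConfigService
# resolves the first ConfigParameterBase subclass as its repository entity and
# looks parameters up by Name, returning their Value (or None if absent).
class ApplicationConfig(ConfigParameterBase):
    pass  # the concrete ORM columns (Name, Value, ...) would be declared here

# service = ConfigService(repository_provider)      # injected in practice
# timeout = service.get_config_by_name('timeout')   # Value string or None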
| 34.066667
| 98
| 0.715264
|
from functools import lru_cache
from injector import inject
from .config_parameter_base import ConfigParameterBase
from ...data.repository import RepositoryProvider
from ...dependency import IScoped
from ...exceptions import RequiredClassException
class ConfigService(IScoped):
@inject
def __init__(self,
repository_provider: RepositoryProvider
):
self.repository_provider = repository_provider
config_subclasses = ConfigParameterBase.__subclasses__()
if config_subclasses is None or len(config_subclasses) == 0:
raise RequiredClassException(f'Requires {ConfigParameterBase.__name__} derived class')
config_class = config_subclasses[0]
        self.config_repository = repository_provider.get(config_class)
@lru_cache()
def get_config_by_name(self, name):
        parameter = self.config_repository.first(Name=name)
if parameter is not None:
return parameter.Value
else:
return None
| true
| true
|
7903ee13b1c151cbfba658ffd4ecad5f8b2eb45f
| 13,689
|
py
|
Python
|
ably/rest/auth.py
|
jvinet/ably-python
|
0d75a7af347bf7c1a8d73739f58fa41ed4eaae23
|
[
"Apache-2.0"
] | 22
|
2015-04-29T13:33:46.000Z
|
2022-01-10T17:51:10.000Z
|
ably/rest/auth.py
|
jvinet/ably-python
|
0d75a7af347bf7c1a8d73739f58fa41ed4eaae23
|
[
"Apache-2.0"
] | 193
|
2015-04-07T22:47:17.000Z
|
2022-03-28T14:52:56.000Z
|
ably/rest/auth.py
|
jvinet/ably-python
|
0d75a7af347bf7c1a8d73739f58fa41ed4eaae23
|
[
"Apache-2.0"
] | 21
|
2015-04-14T13:26:31.000Z
|
2021-10-02T15:30:54.000Z
|
import base64
from datetime import timedelta
import logging
import time
import uuid
import warnings
import httpx
from ably.types.capability import Capability
from ably.types.tokendetails import TokenDetails
from ably.types.tokenrequest import TokenRequest
from ably.util.exceptions import AblyException, IncompatibleClientIdException
__all__ = ["Auth"]
log = logging.getLogger(__name__)
class Auth:
class Method:
BASIC = "BASIC"
TOKEN = "TOKEN"
def __init__(self, ably, options):
self.__ably = ably
self.__auth_options = options
if options.token_details:
self.__client_id = options.token_details.client_id
else:
self.__client_id = options.client_id
self.__client_id_validated = False
self.__basic_credentials = None
self.__auth_params = None
self.__token_details = None
self.__time_offset = None
must_use_token_auth = options.use_token_auth is True
must_not_use_token_auth = options.use_token_auth is False
can_use_basic_auth = options.key_secret is not None
if not must_use_token_auth and can_use_basic_auth:
# We have the key, no need to authenticate the client
# default to using basic auth
log.debug("anonymous, using basic auth")
self.__auth_mechanism = Auth.Method.BASIC
basic_key = "%s:%s" % (options.key_name, options.key_secret)
basic_key = base64.b64encode(basic_key.encode('utf-8'))
self.__basic_credentials = basic_key.decode('ascii')
return
elif must_not_use_token_auth and not can_use_basic_auth:
raise ValueError('If use_token_auth is False you must provide a key')
# Using token auth
self.__auth_mechanism = Auth.Method.TOKEN
if options.token_details:
self.__token_details = options.token_details
elif options.auth_token:
self.__token_details = TokenDetails(token=options.auth_token)
else:
self.__token_details = None
if options.auth_callback:
log.debug("using token auth with auth_callback")
elif options.auth_url:
log.debug("using token auth with auth_url")
elif options.key_secret:
log.debug("using token auth with client-side signing")
elif options.auth_token:
log.debug("using token auth with supplied token only")
elif options.token_details:
log.debug("using token auth with supplied token_details")
else:
raise ValueError("Can't authenticate via token, must provide "
"auth_callback, auth_url, key, token or a TokenDetail")
async def __authorize_when_necessary(self, token_params=None, auth_options=None, force=False):
self.__auth_mechanism = Auth.Method.TOKEN
if token_params is None:
token_params = dict(self.auth_options.default_token_params)
else:
self.auth_options.default_token_params = dict(token_params)
self.auth_options.default_token_params.pop('timestamp', None)
if auth_options is not None:
self.auth_options.replace(auth_options)
auth_options = dict(self.auth_options.auth_options)
if self.client_id is not None:
token_params['client_id'] = self.client_id
token_details = self.__token_details
if not force and not self.token_details_has_expired():
log.debug("using cached token; expires = %d",
token_details.expires)
return token_details
self.__token_details = await self.request_token(token_params, **auth_options)
self._configure_client_id(self.__token_details.client_id)
return self.__token_details
def token_details_has_expired(self):
token_details = self.__token_details
if token_details is None:
return True
expires = token_details.expires
if expires is None:
return False
timestamp = self._timestamp()
if self.__time_offset:
timestamp += self.__time_offset
return expires < timestamp + token_details.TOKEN_EXPIRY_BUFFER
async def authorize(self, token_params=None, auth_options=None):
return await self.__authorize_when_necessary(token_params, auth_options, force=True)
async def authorise(self, *args, **kwargs):
warnings.warn(
"authorise is deprecated and will be removed in v2.0, please use authorize",
DeprecationWarning)
return await self.authorize(*args, **kwargs)
async def request_token(self, token_params=None,
# auth_options
key_name=None, key_secret=None, auth_callback=None,
auth_url=None, auth_method=None, auth_headers=None,
auth_params=None, query_time=None):
token_params = token_params or {}
token_params = dict(self.auth_options.default_token_params,
**token_params)
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
log.debug("Auth callback: %s" % auth_callback)
log.debug("Auth options: %s" % self.auth_options)
if query_time is None:
query_time = self.auth_options.query_time
query_time = bool(query_time)
auth_callback = auth_callback or self.auth_options.auth_callback
auth_url = auth_url or self.auth_options.auth_url
auth_params = auth_params or self.auth_options.auth_params or {}
auth_method = (auth_method or self.auth_options.auth_method).upper()
auth_headers = auth_headers or self.auth_options.auth_headers or {}
log.debug("Token Params: %s" % token_params)
if auth_callback:
log.debug("using token auth with authCallback")
token_request = await auth_callback(token_params)
elif auth_url:
log.debug("using token auth with authUrl")
token_request = await self.token_request_from_auth_url(
auth_method, auth_url, token_params, auth_headers, auth_params)
else:
token_request = await self.create_token_request(
token_params, key_name=key_name, key_secret=key_secret,
query_time=query_time)
if isinstance(token_request, TokenDetails):
return token_request
elif isinstance(token_request, dict) and 'issued' in token_request:
return TokenDetails.from_dict(token_request)
elif isinstance(token_request, dict):
token_request = TokenRequest.from_json(token_request)
elif isinstance(token_request, str):
return TokenDetails(token=token_request)
token_path = "/keys/%s/requestToken" % token_request.key_name
response = await self.ably.http.post(
token_path,
headers=auth_headers,
body=token_request.to_dict(),
skip_auth=True
)
AblyException.raise_for_response(response)
response_dict = response.to_native()
log.debug("Token: %s" % str(response_dict.get("token")))
return TokenDetails.from_dict(response_dict)
async def create_token_request(self, token_params=None,
key_name=None, key_secret=None, query_time=None):
token_params = token_params or {}
token_request = {}
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
if not key_name or not key_secret:
log.debug('key_name or key_secret blank')
raise AblyException("No key specified: no means to generate a token", 401, 40101)
token_request['key_name'] = key_name
if token_params.get('timestamp'):
token_request['timestamp'] = token_params['timestamp']
else:
if query_time is None:
query_time = self.auth_options.query_time
if query_time:
if self.__time_offset is None:
server_time = await self.ably.time()
local_time = self._timestamp()
self.__time_offset = server_time - local_time
token_request['timestamp'] = server_time
else:
local_time = self._timestamp()
token_request['timestamp'] = local_time + self.__time_offset
else:
token_request['timestamp'] = self._timestamp()
token_request['timestamp'] = int(token_request['timestamp'])
ttl = token_params.get('ttl')
if ttl is not None:
if isinstance(ttl, timedelta):
ttl = ttl.total_seconds() * 1000
token_request['ttl'] = int(ttl)
capability = token_params.get('capability')
if capability is not None:
token_request['capability'] = str(Capability(capability))
token_request["client_id"] = (
token_params.get('client_id') or self.client_id)
# Note: There is no expectation that the client
# specifies the nonce; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes
token_request["nonce"] = token_params.get('nonce') or self._random_nonce()
token_request = TokenRequest(**token_request)
if token_params.get('mac') is None:
# Note: There is no expectation that the client
# specifies the mac; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes.
token_request.sign_request(key_secret.encode('utf8'))
else:
token_request.mac = token_params['mac']
return token_request
@property
def ably(self):
return self.__ably
@property
def auth_mechanism(self):
return self.__auth_mechanism
@property
def auth_options(self):
return self.__auth_options
@property
def auth_params(self):
return self.__auth_params
@property
def basic_credentials(self):
return self.__basic_credentials
@property
def token_credentials(self):
if self.__token_details:
token = self.__token_details.token
token_key = base64.b64encode(token.encode('utf-8'))
return token_key.decode('ascii')
@property
def token_details(self):
return self.__token_details
@property
def client_id(self):
return self.__client_id
@property
def time_offset(self):
return self.__time_offset
def _configure_client_id(self, new_client_id):
# If new client ID from Ably is a wildcard, but preconfigured clientId is set,
# then keep the existing clientId
if self.client_id != '*' and new_client_id == '*':
self.__client_id_validated = True
return
# If client_id is defined and not a wildcard, prevent it changing, this is not supported
if self.client_id is not None and self.client_id != '*' and new_client_id != self.client_id:
raise IncompatibleClientIdException(
"Client ID is immutable once configured for a client. "
"Client ID cannot be changed to '{}'".format(new_client_id), 400, 40012)
self.__client_id_validated = True
self.__client_id = new_client_id
def can_assume_client_id(self, assumed_client_id):
if self.__client_id_validated:
return self.client_id == '*' or self.client_id == assumed_client_id
elif self.client_id is None or self.client_id == '*':
return True # client ID is unknown
else:
return self.client_id == assumed_client_id
async def _get_auth_headers(self):
if self.__auth_mechanism == Auth.Method.BASIC:
# RSA7e2
if self.client_id:
return {
'Authorization': 'Basic %s' % self.basic_credentials,
'X-Ably-ClientId': base64.b64encode(self.client_id.encode('utf-8'))
}
return {
'Authorization': 'Basic %s' % self.basic_credentials,
}
else:
await self.__authorize_when_necessary()
return {
'Authorization': 'Bearer %s' % self.token_credentials,
}
def _timestamp(self):
"""Returns the local time in milliseconds since the unix epoch"""
return int(time.time() * 1000)
def _random_nonce(self):
return uuid.uuid4().hex[:16]
async def token_request_from_auth_url(self, method, url, token_params, headers, auth_params):
body = None
params = None
if method == 'GET':
body = {}
params = dict(auth_params, **token_params)
elif method == 'POST':
params = {}
body = dict(auth_params, **token_params)
from ably.http.http import Response
async with httpx.AsyncClient(http2=True) as client:
resp = await client.request(method=method, url=url, headers=headers, params=params, data=body)
response = Response(resp)
AblyException.raise_for_response(response)
try:
token_request = response.to_native()
except ValueError:
token_request = response.text
return token_request
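# A standalone sketch of the basic-auth encoding performed in __init__ above
# (the key below is a made-up placeholder, not a real Ably key): the credential
# is just the base64 of "key_name:key_secret".
import base64

key_name, key_secret = 'appId.keyId', 'keySecret'
basic_key = base64.b64encode(('%s:%s' % (key_name, key_secret)).encode('utf-8'))
headers = {'Authorization': 'Basic %s' % basic_key.decode('ascii')}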
| 38.025
| 106
| 0.633867
|
import base64
from datetime import timedelta
import logging
import time
import uuid
import warnings
import httpx
from ably.types.capability import Capability
from ably.types.tokendetails import TokenDetails
from ably.types.tokenrequest import TokenRequest
from ably.util.exceptions import AblyException, IncompatibleClientIdException
__all__ = ["Auth"]
log = logging.getLogger(__name__)
class Auth:
class Method:
BASIC = "BASIC"
TOKEN = "TOKEN"
def __init__(self, ably, options):
self.__ably = ably
self.__auth_options = options
if options.token_details:
self.__client_id = options.token_details.client_id
else:
self.__client_id = options.client_id
self.__client_id_validated = False
self.__basic_credentials = None
self.__auth_params = None
self.__token_details = None
self.__time_offset = None
must_use_token_auth = options.use_token_auth is True
must_not_use_token_auth = options.use_token_auth is False
can_use_basic_auth = options.key_secret is not None
if not must_use_token_auth and can_use_basic_auth:
log.debug("anonymous, using basic auth")
self.__auth_mechanism = Auth.Method.BASIC
basic_key = "%s:%s" % (options.key_name, options.key_secret)
basic_key = base64.b64encode(basic_key.encode('utf-8'))
self.__basic_credentials = basic_key.decode('ascii')
return
elif must_not_use_token_auth and not can_use_basic_auth:
raise ValueError('If use_token_auth is False you must provide a key')
self.__auth_mechanism = Auth.Method.TOKEN
if options.token_details:
self.__token_details = options.token_details
elif options.auth_token:
self.__token_details = TokenDetails(token=options.auth_token)
else:
self.__token_details = None
if options.auth_callback:
log.debug("using token auth with auth_callback")
elif options.auth_url:
log.debug("using token auth with auth_url")
elif options.key_secret:
log.debug("using token auth with client-side signing")
elif options.auth_token:
log.debug("using token auth with supplied token only")
elif options.token_details:
log.debug("using token auth with supplied token_details")
else:
raise ValueError("Can't authenticate via token, must provide "
"auth_callback, auth_url, key, token or a TokenDetail")
async def __authorize_when_necessary(self, token_params=None, auth_options=None, force=False):
self.__auth_mechanism = Auth.Method.TOKEN
if token_params is None:
token_params = dict(self.auth_options.default_token_params)
else:
self.auth_options.default_token_params = dict(token_params)
self.auth_options.default_token_params.pop('timestamp', None)
if auth_options is not None:
self.auth_options.replace(auth_options)
auth_options = dict(self.auth_options.auth_options)
if self.client_id is not None:
token_params['client_id'] = self.client_id
token_details = self.__token_details
if not force and not self.token_details_has_expired():
log.debug("using cached token; expires = %d",
token_details.expires)
return token_details
self.__token_details = await self.request_token(token_params, **auth_options)
self._configure_client_id(self.__token_details.client_id)
return self.__token_details
def token_details_has_expired(self):
token_details = self.__token_details
if token_details is None:
return True
expires = token_details.expires
if expires is None:
return False
timestamp = self._timestamp()
if self.__time_offset:
timestamp += self.__time_offset
return expires < timestamp + token_details.TOKEN_EXPIRY_BUFFER
async def authorize(self, token_params=None, auth_options=None):
return await self.__authorize_when_necessary(token_params, auth_options, force=True)
async def authorise(self, *args, **kwargs):
warnings.warn(
"authorise is deprecated and will be removed in v2.0, please use authorize",
DeprecationWarning)
return await self.authorize(*args, **kwargs)
async def request_token(self, token_params=None,
# auth_options
key_name=None, key_secret=None, auth_callback=None,
auth_url=None, auth_method=None, auth_headers=None,
auth_params=None, query_time=None):
token_params = token_params or {}
token_params = dict(self.auth_options.default_token_params,
**token_params)
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
log.debug("Auth callback: %s" % auth_callback)
log.debug("Auth options: %s" % self.auth_options)
if query_time is None:
query_time = self.auth_options.query_time
query_time = bool(query_time)
auth_callback = auth_callback or self.auth_options.auth_callback
auth_url = auth_url or self.auth_options.auth_url
auth_params = auth_params or self.auth_options.auth_params or {}
auth_method = (auth_method or self.auth_options.auth_method).upper()
auth_headers = auth_headers or self.auth_options.auth_headers or {}
log.debug("Token Params: %s" % token_params)
if auth_callback:
log.debug("using token auth with authCallback")
token_request = await auth_callback(token_params)
elif auth_url:
log.debug("using token auth with authUrl")
token_request = await self.token_request_from_auth_url(
auth_method, auth_url, token_params, auth_headers, auth_params)
else:
token_request = await self.create_token_request(
token_params, key_name=key_name, key_secret=key_secret,
query_time=query_time)
if isinstance(token_request, TokenDetails):
return token_request
elif isinstance(token_request, dict) and 'issued' in token_request:
return TokenDetails.from_dict(token_request)
elif isinstance(token_request, dict):
token_request = TokenRequest.from_json(token_request)
elif isinstance(token_request, str):
return TokenDetails(token=token_request)
token_path = "/keys/%s/requestToken" % token_request.key_name
response = await self.ably.http.post(
token_path,
headers=auth_headers,
body=token_request.to_dict(),
skip_auth=True
)
AblyException.raise_for_response(response)
response_dict = response.to_native()
log.debug("Token: %s" % str(response_dict.get("token")))
return TokenDetails.from_dict(response_dict)
async def create_token_request(self, token_params=None,
key_name=None, key_secret=None, query_time=None):
token_params = token_params or {}
token_request = {}
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
if not key_name or not key_secret:
log.debug('key_name or key_secret blank')
raise AblyException("No key specified: no means to generate a token", 401, 40101)
token_request['key_name'] = key_name
if token_params.get('timestamp'):
token_request['timestamp'] = token_params['timestamp']
else:
if query_time is None:
query_time = self.auth_options.query_time
if query_time:
if self.__time_offset is None:
server_time = await self.ably.time()
local_time = self._timestamp()
self.__time_offset = server_time - local_time
token_request['timestamp'] = server_time
else:
local_time = self._timestamp()
token_request['timestamp'] = local_time + self.__time_offset
else:
token_request['timestamp'] = self._timestamp()
token_request['timestamp'] = int(token_request['timestamp'])
ttl = token_params.get('ttl')
if ttl is not None:
if isinstance(ttl, timedelta):
ttl = ttl.total_seconds() * 1000
token_request['ttl'] = int(ttl)
capability = token_params.get('capability')
if capability is not None:
token_request['capability'] = str(Capability(capability))
token_request["client_id"] = (
token_params.get('client_id') or self.client_id)
# Note: There is no expectation that the client
# specifies the nonce; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes
token_request["nonce"] = token_params.get('nonce') or self._random_nonce()
token_request = TokenRequest(**token_request)
if token_params.get('mac') is None:
# Note: There is no expectation that the client
# specifies the mac; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes.
token_request.sign_request(key_secret.encode('utf8'))
else:
token_request.mac = token_params['mac']
return token_request
@property
def ably(self):
return self.__ably
@property
def auth_mechanism(self):
return self.__auth_mechanism
@property
def auth_options(self):
return self.__auth_options
@property
def auth_params(self):
return self.__auth_params
@property
def basic_credentials(self):
return self.__basic_credentials
@property
def token_credentials(self):
if self.__token_details:
token = self.__token_details.token
token_key = base64.b64encode(token.encode('utf-8'))
return token_key.decode('ascii')
@property
def token_details(self):
return self.__token_details
@property
def client_id(self):
return self.__client_id
@property
def time_offset(self):
return self.__time_offset
def _configure_client_id(self, new_client_id):
# If new client ID from Ably is a wildcard, but preconfigured clientId is set,
# then keep the existing clientId
if self.client_id != '*' and new_client_id == '*':
self.__client_id_validated = True
return
# If client_id is defined and not a wildcard, prevent it changing, this is not supported
if self.client_id is not None and self.client_id != '*' and new_client_id != self.client_id:
raise IncompatibleClientIdException(
"Client ID is immutable once configured for a client. "
"Client ID cannot be changed to '{}'".format(new_client_id), 400, 40012)
self.__client_id_validated = True
self.__client_id = new_client_id
def can_assume_client_id(self, assumed_client_id):
if self.__client_id_validated:
return self.client_id == '*' or self.client_id == assumed_client_id
elif self.client_id is None or self.client_id == '*':
return True # client ID is unknown
else:
return self.client_id == assumed_client_id
async def _get_auth_headers(self):
if self.__auth_mechanism == Auth.Method.BASIC:
# RSA7e2
if self.client_id:
return {
'Authorization': 'Basic %s' % self.basic_credentials,
'X-Ably-ClientId': base64.b64encode(self.client_id.encode('utf-8'))
}
return {
'Authorization': 'Basic %s' % self.basic_credentials,
}
else:
await self.__authorize_when_necessary()
return {
'Authorization': 'Bearer %s' % self.token_credentials,
}
def _timestamp(self):
return int(time.time() * 1000)
def _random_nonce(self):
return uuid.uuid4().hex[:16]
async def token_request_from_auth_url(self, method, url, token_params, headers, auth_params):
body = None
params = None
if method == 'GET':
body = {}
params = dict(auth_params, **token_params)
elif method == 'POST':
params = {}
body = dict(auth_params, **token_params)
from ably.http.http import Response
async with httpx.AsyncClient(http2=True) as client:
resp = await client.request(method=method, url=url, headers=headers, params=params, data=body)
response = Response(resp)
AblyException.raise_for_response(response)
try:
token_request = response.to_native()
except ValueError:
token_request = response.text
return token_request
| true
| true
|
7903ee1aea86984f986bb719451fe4c7292c3a42
| 407
|
py
|
Python
|
tests/tests_instance.py
|
Antash696/VRP
|
386b84adbe34be37aabc1e638515ce722849a952
|
[
"MIT"
] | 33
|
2017-10-18T01:18:27.000Z
|
2021-10-04T14:17:52.000Z
|
tests/tests_instance.py
|
dj-boy/VRP
|
386b84adbe34be37aabc1e638515ce722849a952
|
[
"MIT"
] | 1
|
2020-12-21T01:59:21.000Z
|
2020-12-21T01:59:21.000Z
|
tests/tests_instance.py
|
dj-boy/VRP
|
386b84adbe34be37aabc1e638515ce722849a952
|
[
"MIT"
] | 19
|
2017-06-26T15:02:00.000Z
|
2022-03-31T08:44:20.000Z
|
import unittest
from code import instance as i
from code import datamapping as dm
class TestProblemInstance(unittest.TestCase):
def setUp(self):
raw_data = dm.Importer()
raw_data.import_data("./tests/cvrp1.test")
data = dm.DataMapper(raw_data)
self.problem = i.ProblemInstance(data)
def test_(self):
pass
if __name__ == "__main__":
unittest.main()
| 19.380952
| 50
| 0.668305
|
import unittest
from code import instance as i
from code import datamapping as dm
class TestProblemInstance(unittest.TestCase):
def setUp(self):
raw_data = dm.Importer()
raw_data.import_data("./tests/cvrp1.test")
data = dm.DataMapper(raw_data)
self.problem = i.ProblemInstance(data)
def test_(self):
pass
if __name__ == "__main__":
unittest.main()
| true
| true
|
7903eef5a8bc5a4a589813ab0d1164bef047564a
| 1,037
|
py
|
Python
|
py_lex.py
|
Spico197/PythonCompilerPrinciplesExp
|
cb06dd7ee50ed7755c18b0684c8b7aa169396e3d
|
[
"MIT"
] | 3
|
2020-12-05T07:39:44.000Z
|
2021-12-06T05:58:49.000Z
|
py_lex.py
|
Spico197/PythonCompilerPrinciplesExp
|
cb06dd7ee50ed7755c18b0684c8b7aa169396e3d
|
[
"MIT"
] | null | null | null |
py_lex.py
|
Spico197/PythonCompilerPrinciplesExp
|
cb06dd7ee50ed7755c18b0684c8b7aa169396e3d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
#coding=utf-8
import ply.lex as lex
# LEX for parsing Python
# Tokens
tokens=('VARIABLE','NUMBER', 'IF', 'ELIF', 'ELSE', 'WHILE', 'FOR', 'PRINT', 'INC', 'LEN', 'GDIV', 'BREAK', 'LET')
literals=['=','+','-','*','(',')','{','}','<','>', ';', ',', '[', ']']
#Define of tokens
def t_NUMBER(t):
r'[0-9]+'
return t
def t_PRINT(t):
r'print'
return t
def t_IF(t):
r'if'
return t
def t_WHILE(t):
r'while'
return t
def t_FOR(t):
r'for'
return t
def t_LEN(t):
r'len'
return t
def t_INC(t):
    r'\+\+'
return t
def t_GDIV(t):
r'//'
return t
def t_BREAK(t):
r'break'
return t
def t_LET(t):
r'<='
return t
def t_ELIF(t):
r'elif'
return t
def t_ELSE(t):
r'else'
return t
def t_VARIABLE(t):
r'[a-zA-Z_]+'
return t
# Ignored
t_ignore = " \t"
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lex.lex()
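# A minimal driving loop (assumed usage, not part of the original script):
# feed source text through the lexer built above and print the tokens it
# yields; the input string only uses tokens and literals defined above.
lexer = lex.lex()
lexer.input('while (x <= 10) { x ++ ; print x ; }')
for tok in iter(lexer.token, None):
    print(tok.type, tok.value)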
| 14.013514
| 114
| 0.479267
|
import ply.lex as lex
tokens=('VARIABLE','NUMBER', 'IF', 'ELIF', 'ELSE', 'WHILE', 'FOR', 'PRINT', 'INC', 'LEN', 'GDIV', 'BREAK', 'LET')
literals=['=','+','-','*','(',')','{','}','<','>', ';', ',', '[', ']']
def t_NUMBER(t):
return t
def t_PRINT(t):
return t
def t_IF(t):
return t
def t_WHILE(t):
return t
def t_FOR(t):
return t
def t_LEN(t):
return t
def t_INC(t):
return t
def t_GDIV(t):
return t
def t_BREAK(t):
return t
def t_LET(t):
return t
def t_ELIF(t):
return t
def t_ELSE(t):
return t
def t_VARIABLE(t):
return t
t_ignore = " \t"
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lex.lex()
| true
| true
|
7903ef0ebf9c38b92d82860265860d491077bbd5
| 783
|
py
|
Python
|
ADT/aclhistory-edit.py
|
UKHomeOffice/dq-ssm_ingest
|
35aafd637e6d7e75e1d558d275b7d0518bfc6c47
|
[
"MIT"
] | 1
|
2018-02-14T10:15:34.000Z
|
2018-02-14T10:15:34.000Z
|
ADT/aclhistory-edit.py
|
UKHomeOffice/dq-ssm_ingest
|
35aafd637e6d7e75e1d558d275b7d0518bfc6c47
|
[
"MIT"
] | 2
|
2018-07-17T07:01:43.000Z
|
2018-11-22T16:33:33.000Z
|
ADT/aclhistory-edit.py
|
UKHomeOffice/dq-ssm_ingest
|
35aafd637e6d7e75e1d558d275b7d0518bfc6c47
|
[
"MIT"
] | 2
|
2018-02-15T11:48:58.000Z
|
2021-04-11T09:24:21.000Z
|
#!/usr/bin/python
import gdbm
import sys
import os
db_filename = "aclhistory.db"
example_filename = "HOMEOFFICEROLL3_20180521.CSV"
example_status = "D"
if len(sys.argv) != 3:
scriptname = os.path.basename(str(sys.argv[0]))
print "usage:", scriptname, "<FILENAME>", "<STATUS>"
print "\t Pass in the filename and status to be set in the .db file(" + db_filename + ")"
print "\t Example: ", scriptname, example_filename, example_status
print "\t to set file", example_filename, "=", example_status, "in", db_filename
os._exit(1)
file_to_set = str(sys.argv[1])
status_to_set = str(sys.argv[2])
db_file = gdbm.open(db_filename,'c')
for f in db_file.keys():
if f == file_to_set:
print "Updating the key", f
db_file[f] = status_to_set
print "File", f, "State", db_file[f]
| 27
| 90
| 0.702427
|
import gdbm
import sys
import os
db_filename = "aclhistory.db"
example_filename = "HOMEOFFICEROLL3_20180521.CSV"
example_status = "D"
if len(sys.argv) != 3:
scriptname = os.path.basename(str(sys.argv[0]))
print "usage:", scriptname, "<FILENAME>", "<STATUS>"
print "\t Pass in the filename and status to be set in the .db file(" + db_filename + ")"
print "\t Example: ", scriptname, example_filename, example_status
print "\t to set file", example_filename, "=", example_status, "in", db_filename
os._exit(1)
file_to_set = str(sys.argv[1])
status_to_set = str(sys.argv[2])
db_file = gdbm.open(db_filename,'c')
for f in db_file.keys():
if f == file_to_set:
print "Updating the key", f
db_file[f] = status_to_set
print "File", f, "State", db_file[f]
| false
| true
|
7903efa0fc0b65d208a01c204b5663cc740a760d
| 3,564
|
py
|
Python
|
test/functional/test_framework/blocktools.py
|
Supernode-SUNO/SUNO
|
6b34a154671597b6e072eeecf336d2d3d38ee6bb
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/blocktools.py
|
Supernode-SUNO/SUNO
|
6b34a154671597b6e072eeecf336d2d3d38ee6bb
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/blocktools.py
|
Supernode-SUNO/SUNO
|
6b34a154671597b6e072eeecf336d2d3d38ee6bb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from test_framework.mininode import *
from test_framework.script import CScript, OP_TRUE, OP_CHECKSIG
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x1e0ffff0 # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(int(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
def cbase_scriptsig(height):
return ser_string(serialize_script_num(height))
def cbase_value(height):
#return ((50 * COIN) >> int(height/150))
return (250 * COIN)
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey = None):
coinbase = CTransaction()
coinbase.vin = [CTxIn(NullOutPoint, cbase_scriptsig(height), 0xffffffff)]
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = cbase_value(height)
if (pubkey != None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
coinbase.calc_sha256()
return coinbase
# Create a transaction.
# If the scriptPubKey is not specified, make it anyone-can-spend.
def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
def create_transaction_from_outpoint(outPoint, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
tx.vin.append(CTxIn(outPoint, sig, 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
def get_legacy_sigopcount_block(block, fAccurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, fAccurate)
return count
def get_legacy_sigopcount_tx(tx, fAccurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(fAccurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
### SupernodeCoin specific blocktools ###
def create_coinbase_pos(height):
coinbase = CTransaction()
coinbase.vin = [CTxIn(NullOutPoint, cbase_scriptsig(height), 0xffffffff)]
coinbase.vout = [CTxOut(0, b"")]
coinbase.calc_sha256()
return coinbase
def is_zerocoin(uniqueness):
ulen = len(uniqueness)
if ulen == 32: return True
if ulen == 36: return False
raise Exception("Wrong uniqueness len: %d" % ulen)
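# A hedged sketch (the previous-block hash is a dummy value, not real chain
# data): build a height-1 coinbase with the helpers above and wrap it in a
# regtest block; create_block computes the merkle root and block hash.
cb = create_coinbase(height=1)
blk = create_block(hashprev=0, coinbase=cb)
print(blk.hashMerkleRoot, blk.sha256)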
| 33
| 83
| 0.695567
|
| true
| true
|
7903f0276fa1659f3ba798ab62438fe906bbb1be
| 5,189
|
py
|
Python
|
src/orders/models.py
|
hellojerry/pizzatime
|
1ddb4667c30b97d1ca832420ba53723c1aa787f1
|
[
"MIT"
] | 1
|
2016-08-24T00:29:11.000Z
|
2016-08-24T00:29:11.000Z
|
src/orders/models.py
|
hellojerry/pizzatime
|
1ddb4667c30b97d1ca832420ba53723c1aa787f1
|
[
"MIT"
] | null | null | null |
src/orders/models.py
|
hellojerry/pizzatime
|
1ddb4667c30b97d1ca832420ba53723c1aa787f1
|
[
"MIT"
] | null | null | null |
from django.db import models
import string, random, datetime
from profiles.models import UserProfile, Location, Surcharges, User
from decimal import *
from menu.models import Product, Entree, Pizza, PizzaTopping, Side
from localflavor.us.models import PhoneNumberField, USStateField, USZipCodeField
# TODO: check generated codes against prior order confirmation numbers; a hedged sketch follows make_conf below.
def make_conf(length=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(length))
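# Hedged sketch of the uniqueness check the TODO above asks for; it is safe to
# call at runtime once the Order model defined below has been loaded, and it
# simply regenerates until the code is unused.
def make_unique_conf(length=8):
    conf = make_conf(length)
    while Order.objects.filter(conf_number=conf).exists():
        conf = make_conf(length)
    return conf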
class Order(models.Model):
customer = models.ForeignKey(User, blank=True, null=True)
created_date = models.DateTimeField(auto_now=False, auto_now_add=True)
stamped = models.BooleanField(default=False)
stamped_time = models.DateTimeField(auto_now=True, auto_now_add=False, blank=True, null=True)
complete = models.BooleanField(default=False)
delivery = models.BooleanField(default=False)
delivery_available = models.BooleanField(default=False)
location = models.ForeignKey(Location, blank=True, null=True)
total = models.DecimalField(max_digits=20, decimal_places=2, default=0)
subtotal = models.DecimalField(max_digits=20, decimal_places=2, default=0)
taxes = models.DecimalField(max_digits=20, decimal_places=2, default=0)
first_name = models.CharField(max_length=120, blank=True, null=True)
last_name = models.CharField(max_length=120, blank=True, null=True)
street_address = models.CharField(max_length=120, blank=True, null=True)
city = models.CharField(max_length=120, blank=True, null=True)
state = USStateField(blank=True, null=True)
zipcode = USZipCodeField(blank=True, null=True)
phone = PhoneNumberField(blank=True, null=True)
email = models.EmailField(max_length=120, blank=True, null=True)
note = models.TextField(max_length=1000,blank=True, null=True)
conf_number = models.CharField(max_length=20, blank=True, null=True)
#delivery charge needs to be separate from lines
def get_delivery_charge(self):
return Location.objects.get(id=str(self.location)).get_delivery_charge()
def compute_subtotal(self):
lineitems = list(OrderLineItem.objects.filter(order=self.id))
delivery_charge = Location.objects.get(id=str(self.location)).get_delivery_charge()
lines = []
for lineitem in lineitems:
lines.append(lineitem.line_price)
        if self.delivery:
            return sum(lines) + delivery_charge
        else:
            return sum(lines)
def compute_taxes(self):
subtotal = self.compute_subtotal()
loc = Surcharges.objects.get(location=self.location).location
tax_rate = Decimal(str(loc.get_tax_rate()))
return Decimal(round(subtotal * tax_rate, 2)).quantize(Decimal('.01'), rounding=ROUND_HALF_UP)
def compute_total(self):
return Decimal(round(self.compute_subtotal() + self.compute_taxes(), 2)).quantize(Decimal('.01'), rounding=ROUND_HALF_UP)
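    # Worked example (hedged; the 8.875% rate is an illustrative assumption,
    # not a value from this codebase): for a subtotal of 19.99, compute_taxes
    # yields round(19.99 * 0.08875, 2) = 1.77 after ROUND_HALF_UP quantization,
    # and compute_total then returns 21.76.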
class Meta:
ordering = ['-stamped_time']
def __unicode__(self):
return str(str(self.created_date) + ' ' + str(self.id)) + str(self.customer)
class OrderLineItem(models.Model):
order = models.ForeignKey(Order)
product = models.ForeignKey('menu.Product')
size = models.CharField(max_length=7, blank=True, null=True)
PIZZA = 'PIZZA'
SIDE = 'SIDE'
SOUP = 'SOUP'
SALAD = 'SALAD'
BREADSTICKS = 'BREADSTICKS'
PASTA = 'PASTA'
WINGS = 'WINGS'
SANDWICH = 'SANDWICH'
BEVERAGE = 'BEVERAGE'
ITEM_TYPES = (
(PIZZA, 'PIZZA'),
(SIDE,'SIDE'),
(SOUP,'SOUP'),
(SALAD,'SALAD'),
(BREADSTICKS,'BREADSTICKS'),
(PASTA, 'PASTA'),
(WINGS, 'WINGS'),
(SANDWICH,'SANDWICH'),
(BEVERAGE, 'BEVERAGE'),
)
product_type = models.CharField(max_length=50, choices=ITEM_TYPES, default=PIZZA)
qty = models.PositiveIntegerField(default=1)
line_price = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
    toppings = models.ManyToManyField(PizzaTopping, blank=True, related_name='topping')  # null has no effect on ManyToManyField
def get_price(self):
if self.product_type == 'PIZZA':
pizza_price = Pizza.objects.get(product_id=self.product, size=self.size).get_price()
pricing = []
pricing.append(pizza_price)
for topping in self.toppings.all():
pricing.append(topping.price)
return sum(pricing)
elif self.product_type == 'ENTREE':
return Entree.objects.get(product_id=self.product, size=self.size).get_price()
elif self.product_type == 'SIDE':
return Side.objects.get(product_id=self.product, size=self.size).price
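    # Hedged worked example: a pizza whose base Pizza price is 10.00 with two
    # 1.50 toppings returns 13.00 here. Note that 'ENTREE' is not among the
    # ITEM_TYPES choices above, and any other product_type falls through,
    # implicitly returning None.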
def __unicode__(self):
return str(self.product)
| 38.437037
| 129
| 0.664097
|
| true
| true
|
7903f0e8cbf52ac530b12e8b6192b08a3c4a90f1
| 82,016
|
py
|
Python
|
test/integration/component/test_stopped_vm.py
|
ksowmya/cloudstack-1
|
f8f779158da056be7da669884ae4ddd109cec044
|
[
"Apache-2.0"
] | 1
|
2020-03-27T22:21:20.000Z
|
2020-03-27T22:21:20.000Z
|
test/integration/component/test_stopped_vm.py
|
ksowmya/cloudstack-1
|
f8f779158da056be7da669884ae4ddd109cec044
|
[
"Apache-2.0"
] | null | null | null |
test/integration/component/test_stopped_vm.py
|
ksowmya/cloudstack-1
|
f8f779158da056be7da669884ae4ddd109cec044
|
[
"Apache-2.0"
] | 1
|
2019-12-26T07:16:06.000Z
|
2019-12-26T07:16:06.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 for stopped Virtual Maschine life cycle
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
#Import System modules
import time
class Services:
"""Test Stopped VM Life Cycle Services
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended in create account to
# ensure unique username generated each time
"password": "password",
},
"virtual_machine":
{
"displayname": "testserver",
"username": "root", # VM creds for SSH
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"service_offering":
{
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"disk_offering": {
"displaytext": "Tiny volume",
"name": "Tiny volume",
"disksize": 1
},
"volume": {
"diskname": "DataDisk",
"url": 'http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2',
"format": 'VHD'
},
"iso": # ISO settings for Attach/Detach ISO tests
{
"displaytext": "Test ISO",
"name": "testISO",
"url": "http://people.apache.org/~tsp/dummy.iso",
# Source URL where ISO is located
"ostype": 'CentOS 5.3 (64-bit)',
"mode": 'HTTP_DOWNLOAD', # Downloading existing ISO
},
"template": {
"url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
"hypervisor": 'XenServer',
"format": 'VHD',
"isfeatured": True,
"ispublic": True,
"isextractable": True,
"displaytext": "Cent OS Template",
"name": "Cent OS Template",
"ostype": 'CentOS 5.3 (64-bit)',
"templatefilter": 'self',
"passwordenabled": True,
},
"sleep": 60,
"timeout": 10,
#Migrate VM to hostid
"ostype": 'CentOS 5.3 (64-bit)',
# CentOS 5.3 (64-bit)
}
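# Hedged helper sketch (not part of the upstream suite): the expunge waits in
# the tests below each read the same two global settings before sleeping, so
# they could share a utility like this; list_configurations is pulled in by
# the wildcard marvin imports above.
def wait_for_expunge(apiclient, multiplier=1):
    interval = list_configurations(apiclient, name='expunge.interval')
    delay = list_configurations(apiclient, name='expunge.delay')
    time.sleep((int(interval[0].value) + int(delay[0].value)) * multiplier)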
class TestDeployVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVM,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["iso"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_vm_no_startvm(self):
"""Test Deploy Virtual Machine with no startVM parameter
"""
# Validate the following:
# 1. deploy Vm without specifying the startvm parameter
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_02_deploy_vm_startvm_true(self):
"""Test Deploy Virtual Machine with startVM=true parameter
"""
# Validate the following:
# 1. deploy Vm with the startvm=true
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=True,
diskofferingid=self.disk_offering.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_03_deploy_vm_startvm_false(self):
"""Test Deploy Virtual Machine with startVM=false parameter
"""
# Validate the following:
# 1. deploy Vm with the startvm=false
# 2. Should not be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 4. Check listRouters call for that account. List routers should
# return empty response
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug("Destroying instance: %s" % self.virtual_machine.name)
self.virtual_machine.delete(self.apiclient)
self.debug("Instance is destroyed!")
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.debug("Instance destroyed..waiting till expunge interval")
interval = list_configurations(
self.apiclient,
name='expunge.interval'
)
delay = list_configurations(
self.apiclient,
name='expunge.delay'
)
# Sleep to ensure that all resources are deleted
time.sleep((int(interval[0].value) + int(delay[0].value)))
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
list_vm_response,
None,
"Check list response returns a valid list"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_04_deploy_startvm_false_attach_volume(self):
"""Test Deploy Virtual Machine with startVM=false and attach volume
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach volume should be successful
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Creating a volume in account: %s" %
self.account.name)
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume in account: %s" % self.account.name)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed!")
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_05_deploy_startvm_false_change_so(self):
"""Test Deploy Virtual Machine with startVM=false and change service offering
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 4. Change service offering
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
medium_service_off = ServiceOffering.create(
self.apiclient,
self.services["service_offering"]
)
self.cleanup.append(medium_service_off)
self.debug("Changing service offering for instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.change_service_offering(
self.apiclient,
medium_service_off.id
)
except Exception as e:
self.fail("Change service offering failed: %s" % e)
self.debug("Starting the instance: %s" % self.virtual_machine.name)
self.virtual_machine.start(self.apiclient)
self.debug("Instance: %s started" % self.virtual_machine.name)
listedvm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
self.assert_(isinstance(listedvm, list))
self.assert_(len(listedvm) > 0)
self.assertEqual(listedvm[0].serviceofferingid, medium_service_off.id, msg="VM did not change service offering")
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_06_deploy_startvm_attach_detach(self):
"""Test Deploy Virtual Machine with startVM=false and
attach detach volumes
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach volume should be successful
# 4. Detach volume from instance. Detach should be successful
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Creating a volume in account: %s" %
self.account.name)
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume in account: %s" % self.account.name)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed!")
self.debug("Detaching the disk: %s" % volume.name)
self.virtual_machine.detach_volume(self.apiclient, volume)
self.debug("Datadisk %s detached!" % volume.name)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
id=volume.id,
listall=True
)
self.assertEqual(
volumes,
None,
"List Volumes should not list any volume for instance"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_07_deploy_startvm_attach_iso(self):
"""Test Deploy Virtual Machine with startVM=false and attach ISO
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach ISO to the instance. Attach ISO should be successful
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Registering a ISO in account: %s" %
self.account.name)
iso = Iso.create(
self.apiclient,
self.services["iso"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Successfully created ISO with ID: %s" % iso.id)
try:
iso.download(self.apiclient)
self.cleanup.append(iso)
except Exception as e:
self.fail("Exception while downloading ISO %s: %s"\
% (iso.id, e))
self.debug("Attach ISO with ID: %s to VM ID: %s" % (
iso.id,
self.virtual_machine.id
))
try:
self.virtual_machine.attach_iso(self.apiclient, iso)
except Exception as e:
self.fail("Attach ISO failed!")
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.isoid,
iso.id,
"The ISO status should be reflected in list Vm call"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_08_deploy_attached_volume(self):
"""Test Deploy Virtual Machine with startVM=false and attach volume already attached to different machine
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Create an instance with datadisk attached to it. Detach DATADISK
# 4. Attach the volume to first virtual machine.
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_1.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_1.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug(
"Fetching DATADISK details for instance: %s" %
self.virtual_machine_2.name)
volumes = Volume.list(
self.apiclient,
type='DATADISK',
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"List volumes should return a valid list"
)
volume = volumes[0]
self.debug("Detaching the disk: %s" % volume.name)
try:
self.virtual_machine_2.detach_volume(self.apiclient, volume)
self.debug("Datadisk %s detached!" % volume.name)
except Exception as e:
self.fail("Detach volume failed!")
self.debug("Attaching volume to instance: %s" %
self.virtual_machine_1.name)
try:
self.virtual_machine_1.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with %s!" % e)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine_1.id,
type='DATADISK',
id=volume.id,
listall=True
)
self.assertNotEqual(
volumes,
None,
"List Volumes should not list any volume for instance"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_09_stop_vm_migrate_vol(self):
"""Test Stopped Virtual Machine's ROOT volume migration
"""
# Validate the following:
# 1. deploy Vm with startvm=true
        # 2. Should be able to login to the VM.
        # 3. listVM command should return the deployed VM. State of this VM
        #    should be "Running".
        # 4. Stop the vm
        # 5. List primary storages in the cluster; should be more than one
        # 6. Migrate volume to another available primary storage
clusters = Cluster.list(
self.apiclient,
zoneid = self.zone.id
)
self.assertEqual(
isinstance(clusters, list),
True,
"Check list response returns a valid list"
)
        self.cluster_id = None
        for cluster in clusters:
            storage_pools = StoragePool.list(
                                             self.apiclient,
                                             clusterid=cluster.id
                                             )
            if storage_pools and len(storage_pools) > 1:
                self.cluster_id = cluster.id
                break
        if self.cluster_id is None:
            self.skipTest("No cluster with more than one primary storage pool to perform migrate volume test")
hosts = Host.list(
self.apiclient,
clusterid = self.cluster_id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
host = hosts[0]
self.debug("Deploying instance on host: %s" % host.id)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
hostid=host.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
self.debug("Stopping instance: %s" % self.virtual_machine.name)
self.virtual_machine.stop(self.apiclient)
self.debug("Instance is stopped!")
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after stoping vm"
)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check volume list response returns a valid list"
)
vol_response = volumes[0]
#get the storage name in which volume is stored
storage_name = vol_response.storage
storage_pools = StoragePool.list(
self.apiclient,
clusterid = self.cluster_id
)
        # Get a storage pool other than the current one to migrate the volume to
        for spool in storage_pools:
            if spool.name == storage_name:
                continue
            self.storage_id = spool.id
            self.storage_name = spool.name
            break
self.debug("Migrating volume to storage pool: %s" % self.storage_name)
Volume.migrate(
self.apiclient,
storageid = self.storage_id,
volumeid = vol_response.id
)
volume = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
volume[0].storage,
self.storage_name,
"Check volume migration response")
return
class TestDeployHaEnabledVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployHaEnabledVM,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Create service, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
offerha=True
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.services["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_ha_vm_startvm_false(self):
"""Test Deploy HA enabled Virtual Machine with startvm=false
"""
# Validate the following:
# 1. deployHA enabled Vm with the startvm parameter = false
# 2. listVM command should return the deployed VM. State of this VM
        # should be "Stopped".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_02_deploy_ha_vm_from_iso(self):
"""Test Deploy HA enabled Virtual Machine from ISO
"""
# Validate the following:
# 1. deployHA enabled Vm using ISO with the startvm parameter=true
# 2. listVM command should return the deployed VM. State of this VM
# should be "Running".
self.iso = Iso.create(
self.apiclient,
self.services["iso"],
account=self.account.name,
domainid=self.account.domainid
)
try:
            # Download the ISO
self.iso.download(self.apiclient)
self.cleanup.append(self.iso)
except Exception as e:
raise Exception("Exception while downloading ISO %s: %s"\
% (self.iso.id, e))
self.debug("Registered ISO: %s" % self.iso.name)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
templateid=self.iso.id,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=True
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_03_deploy_ha_vm_iso_startvm_false(self):
"""Test Deploy HA enabled Virtual Machine from ISO with startvm=false
"""
# Validate the following:
# 1. deployHA enabled Vm using ISO with the startvm parameter=false
# 2. listVM command should return the deployed VM. State of this VM
# should be "Stopped".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Running state after deployment"
)
return
class TestRouterStateAfterDeploy(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestRouterStateAfterDeploy,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.services["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_vm_no_startvm(self):
"""Test Deploy Virtual Machine with no startVM parameter
"""
# Validate the following:
# 1. deploy Vm without specifying the startvm parameter
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_1.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_1.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in stopped state after deployment"
)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug(
"Deploying another instance (startvm=true) in the account: %s" %
self.account.name)
self.virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=True
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"List routers should not return empty response"
)
for router in routers:
self.debug("Router state: %s" % router.state)
self.assertEqual(
router.state,
"Running",
"Router should be in running state when instance is running in the account"
)
self.debug("Destroying the running VM:%s" %
self.virtual_machine_2.name)
self.virtual_machine_2.delete(self.apiclient)
self.debug("Instance destroyed..waiting till expunge interval")
interval = list_configurations(
self.apiclient,
name='expunge.interval'
)
delay = list_configurations(
self.apiclient,
name='expunge.delay'
)
# Sleep to ensure that all resources are deleted
time.sleep((int(interval[0].value) + int(delay[0].value)) * 2)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertNotEqual(
routers,
None,
"Router should get deleted after expunge delay+wait"
)
return
class TestDeployVMBasicZone(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVMBasicZone,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["iso"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
class TestDeployVMFromTemplate(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVMFromTemplate,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
# Create service, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
offerha=True
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.template = Template.register(
self.apiclient,
self.services["template"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
try:
self.template.download(self.apiclient)
except Exception as e:
raise Exception("Template download failed: %s" % e)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_deploy_vm_password_enabled(self):
"""Test Deploy Virtual Machine with startVM=false & enabledpassword in
template
"""
# Validate the following:
# 1. Create the password enabled template
# 2. Deploy Vm with this template and passing startvm=false
# 3. Start VM. Deploy VM should be successful and it should be in Up
# and running state
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in stopped state after deployment"
)
self.debug("Starting the instance: %s" % self.virtual_machine.name)
self.virtual_machine.start(self.apiclient)
self.debug("Started the instance: %s" % self.virtual_machine.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in running state after deployment"
)
return
class TestVMAccountLimit(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestVMAccountLimit,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
# Create Account, VMs etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_vm_per_account(self):
"""Test VM limit per account
"""
# Validate the following
# 1. Set the resource limit for VM per account.
# 2. Deploy VMs more than limit in that account.
        # 3. API should error out
self.debug(
"Updating instance resource limit for account: %s" %
self.account.name)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
0, # Instance
account=self.account.name,
domainid=self.account.domainid,
max=1
)
self.debug(
"Deploying VM instance in account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Stopped',
"Check VM state is Running or not"
)
# Exception should be raised for second instance (account_1)
with self.assertRaises(Exception):
VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
return
class TestUploadAttachVolume(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestUploadAttachVolume,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
# Create Account, VMs etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_upload_attach_volume(self):
"""Test Upload volume and attach to VM in stopped state
"""
# Validate the following
# 1. Upload the volume using uploadVolume API call
# 2. Deploy VM with startvm=false.
# 3. Attach the volume to the deployed VM in step 2
self.debug(
"Uploading the volume: %s" %
self.services["volume"]["diskname"])
try:
volume = Volume.upload(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Uploading the volume: %s" % volume.name)
volume.wait_for_upload(self.apiclient)
self.debug("Volume: %s uploaded successfully")
except Exception as e:
self.fail("Failed to upload the volume: %s" % e)
self.debug(
"Deploying VM instance in account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Stopped',
"Check VM state is Running or not"
)
virtual_machine.attach_volume(self.apiclient, volume)
return
class TestDeployOnSpecificHost(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployOnSpecificHost,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
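        # An admin account is created because the hostid parameter of deployVirtualMachine is honoured only for admins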
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
return
def tearDown(self):
try:
self.account.delete(self.apiclient)
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "simulator",
"api", "basic", "eip", "sg"])
def test_deployVmOnGivenHost(self):
"""Test deploy VM on specific host
"""
# Steps for validation
# 1. as admin list available hosts that are Up
# 2. deployVM with hostid=above host
# 3. listVirtualMachines
# 4. destroy VM
# Validate the following
# 1. listHosts returns at least one host in Up state
# 2. VM should be in Running
# 3. VM should be on the host that it was deployed on
hosts = Host.list(
self.apiclient,
zoneid=self.zone.id,
type='Routing',
state='Up',
listall=True
)
self.assertEqual(
isinstance(hosts, list),
True,
"CS should have atleast one host Up and Running"
)
host = hosts[0]
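        # Pin the deployment to the first Up host by passing its id explicitly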
self.debug("Deploting VM on host: %s" % host.name)
try:
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
hostid=host.id
)
self.debug("Deploy VM succeeded")
except Exception as e:
self.fail("Deploy VM failed with exception: %s" % e)
self.debug("Cheking the state of deployed VM")
vms = VirtualMachine.list(
self.apiclient,
id=vm.id,
listall=True,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vm should return a valid response"
)
vm_response = vms[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in running state after deployment"
)
self.assertEqual(
vm_response.hostid,
host.id,
"Host id where VM is deployed should match"
)
return
| 41.317884
| 120
| 0.452668
|
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
import time
class Services:
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
"password": "password",
},
"virtual_machine":
{
"displayname": "testserver",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"service_offering":
{
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 128,
},
"disk_offering": {
"displaytext": "Tiny volume",
"name": "Tiny volume",
"disksize": 1
},
"volume": {
"diskname": "DataDisk",
"url": 'http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2',
"format": 'VHD'
},
"iso":
{
"displaytext": "Test ISO",
"name": "testISO",
"url": "http://people.apache.org/~tsp/dummy.iso",
"ostype": 'CentOS 5.3 (64-bit)',
"mode": 'HTTP_DOWNLOAD',
},
"template": {
"url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
"hypervisor": 'XenServer',
"format": 'VHD',
"isfeatured": True,
"ispublic": True,
"isextractable": True,
"displaytext": "Cent OS Template",
"name": "Cent OS Template",
"ostype": 'CentOS 5.3 (64-bit)',
"templatefilter": 'self',
"passwordenabled": True,
},
"sleep": 60,
"timeout": 10,
"ostype": 'CentOS 5.3 (64-bit)',
}
class TestDeployVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVM,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["iso"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_vm_no_startvm(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_02_deploy_vm_startvm_true(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=True,
diskofferingid=self.disk_offering.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_03_deploy_vm_startvm_false(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug("Destroying instance: %s" % self.virtual_machine.name)
self.virtual_machine.delete(self.apiclient)
self.debug("Instance is destroyed!")
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.debug("Instance destroyed..waiting till expunge interval")
interval = list_configurations(
self.apiclient,
name='expunge.interval'
)
delay = list_configurations(
self.apiclient,
name='expunge.delay'
)
time.sleep((int(interval[0].value) + int(delay[0].value)))
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
list_vm_response,
None,
"Check list response returns a valid list"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_04_deploy_startvm_false_attach_volume(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Creating a volume in account: %s" %
self.account.name)
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume in account: %s" % self.account.name)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed!")
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_05_deploy_startvm_false_change_so(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
medium_service_off = ServiceOffering.create(
self.apiclient,
self.services["service_offering"]
)
self.cleanup.append(medium_service_off)
self.debug("Changing service offering for instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.change_service_offering(
self.apiclient,
medium_service_off.id
)
except Exception as e:
self.fail("Change service offering failed: %s" % e)
self.debug("Starting the instance: %s" % self.virtual_machine.name)
self.virtual_machine.start(self.apiclient)
self.debug("Instance: %s started" % self.virtual_machine.name)
listedvm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
        self.assertTrue(isinstance(listedvm, list))
        self.assertTrue(len(listedvm) > 0)
self.assertEqual(listedvm[0].serviceofferingid, medium_service_off.id, msg="VM did not change service offering")
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_06_deploy_startvm_attach_detach(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Creating a volume in account: %s" %
self.account.name)
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume in account: %s" % self.account.name)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed!")
self.debug("Detaching the disk: %s" % volume.name)
self.virtual_machine.detach_volume(self.apiclient, volume)
self.debug("Datadisk %s detached!" % volume.name)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
id=volume.id,
listall=True
)
self.assertEqual(
volumes,
None,
"List Volumes should not list any volume for instance"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_07_deploy_startvm_attach_iso(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Registering a ISO in account: %s" %
self.account.name)
iso = Iso.create(
self.apiclient,
self.services["iso"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Successfully created ISO with ID: %s" % iso.id)
try:
iso.download(self.apiclient)
self.cleanup.append(iso)
except Exception as e:
self.fail("Exception while downloading ISO %s: %s"\
% (iso.id, e))
self.debug("Attach ISO with ID: %s to VM ID: %s" % (
iso.id,
self.virtual_machine.id
))
try:
self.virtual_machine.attach_iso(self.apiclient, iso)
except Exception as e:
self.fail("Attach ISO failed!")
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.isoid,
iso.id,
"The ISO status should be reflected in list Vm call"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_08_deploy_attached_volume(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_1.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_1.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug(
"Fetching DATADISK details for instance: %s" %
self.virtual_machine_2.name)
volumes = Volume.list(
self.apiclient,
type='DATADISK',
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"List volumes should return a valid list"
)
volume = volumes[0]
self.debug("Detaching the disk: %s" % volume.name)
try:
self.virtual_machine_2.detach_volume(self.apiclient, volume)
self.debug("Datadisk %s detached!" % volume.name)
except Exception as e:
self.fail("Detach volume failed!")
self.debug("Attaching volume to instance: %s" %
self.virtual_machine_1.name)
try:
self.virtual_machine_1.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with %s!" % e)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine_1.id,
type='DATADISK',
id=volume.id,
listall=True
)
self.assertNotEqual(
volumes,
None,
"List Volumes should not list any volume for instance"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_09_stop_vm_migrate_vol(self):
clusters = Cluster.list(
self.apiclient,
zoneid = self.zone.id
)
self.assertEqual(
isinstance(clusters, list),
True,
"Check list response returns a valid list"
)
i = 0
for cluster in clusters :
storage_pools = StoragePool.list(
self.apiclient,
clusterid = cluster.id
)
if len(storage_pools) > 1 :
self.cluster_id = cluster.id
i += 1
break
if i == 0 :
self.skipTest("No cluster with more than one primary storage pool to perform migrate volume test")
hosts = Host.list(
self.apiclient,
clusterid = self.cluster_id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
host = hosts[0]
self.debug("Deploying instance on host: %s" % host.id)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
hostid=host.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
self.debug("Stopping instance: %s" % self.virtual_machine.name)
self.virtual_machine.stop(self.apiclient)
self.debug("Instance is stopped!")
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after stoping vm"
)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check volume list response returns a valid list"
)
vol_response = volumes[0]
storage_name = vol_response.storage
storage_pools = StoragePool.list(
self.apiclient,
clusterid = self.cluster_id
)
for spool in storage_pools:
if spool.name == storage_name:
continue
else:
self.storage_id = spool.id
self.storage_name = spool.name
break
self.debug("Migrating volume to storage pool: %s" % self.storage_name)
Volume.migrate(
self.apiclient,
storageid = self.storage_id,
volumeid = vol_response.id
)
volume = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
volume[0].storage,
self.storage_name,
"Check volume migration response")
return
class TestDeployHaEnabledVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployHaEnabledVM,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
offerha=True
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.services["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_ha_vm_startvm_false(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_02_deploy_ha_vm_from_iso(self):
self.iso = Iso.create(
self.apiclient,
self.services["iso"],
account=self.account.name,
domainid=self.account.domainid
)
try:
self.iso.download(self.apiclient)
self.cleanup.append(self.iso)
except Exception as e:
raise Exception("Exception while downloading ISO %s: %s"\
% (self.iso.id, e))
self.debug("Registered ISO: %s" % self.iso.name)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
templateid=self.iso.id,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=True
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_03_deploy_ha_vm_iso_startvm_false(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Running state after deployment"
)
return
class TestRouterStateAfterDeploy(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestRouterStateAfterDeploy,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.services["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_vm_no_startvm(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_1.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_1.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in stopped state after deployment"
)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug(
"Deploying another instance (startvm=true) in the account: %s" %
self.account.name)
self.virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=True
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"List routers should not return empty response"
)
for router in routers:
self.debug("Router state: %s" % router.state)
self.assertEqual(
router.state,
"Running",
"Router should be in running state when instance is running in the account"
)
self.debug("Destroying the running VM:%s" %
self.virtual_machine_2.name)
self.virtual_machine_2.delete(self.apiclient)
self.debug("Instance destroyed..waiting till expunge interval")
interval = list_configurations(
self.apiclient,
name='expunge.interval'
)
delay = list_configurations(
self.apiclient,
name='expunge.delay'
)
time.sleep((int(interval[0].value) + int(delay[0].value)) * 2)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertNotEqual(
routers,
None,
"Router should get deleted after expunge delay+wait"
)
return
class TestDeployVMBasicZone(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVMBasicZone,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["iso"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
class TestDeployVMFromTemplate(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVMFromTemplate,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
offerha=True
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.template = Template.register(
self.apiclient,
self.services["template"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
try:
self.template.download(self.apiclient)
except Exception as e:
raise Exception("Template download failed: %s" % e)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_deploy_vm_password_enabled(self):
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in stopped state after deployment"
)
self.debug("Starting the instance: %s" % self.virtual_machine.name)
self.virtual_machine.start(self.apiclient)
self.debug("Started the instance: %s" % self.virtual_machine.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in running state after deployment"
)
return
class TestVMAccountLimit(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestVMAccountLimit,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_vm_per_account(self):
self.debug(
"Updating instance resource limit for account: %s" %
self.account.name)
update_resource_limit(
self.apiclient,
0,
account=self.account.name,
domainid=self.account.domainid,
max=1
)
self.debug(
"Deploying VM instance in account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
self.assertEqual(
virtual_machine.state,
'Stopped',
"Check VM state is Running or not"
)
with self.assertRaises(Exception):
VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
return
class TestUploadAttachVolume(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestUploadAttachVolume,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_upload_attach_volume(self):
self.debug(
"Uploading the volume: %s" %
self.services["volume"]["diskname"])
try:
volume = Volume.upload(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Uploading the volume: %s" % volume.name)
volume.wait_for_upload(self.apiclient)
self.debug("Volume: %s uploaded successfully")
except Exception as e:
self.fail("Failed to upload the volume: %s" % e)
self.debug(
"Deploying VM instance in account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
self.assertEqual(
virtual_machine.state,
'Stopped',
"Check VM state is Running or not"
)
virtual_machine.attach_volume(self.apiclient, volume)
return
class TestDeployOnSpecificHost(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployOnSpecificHost,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
return
def tearDown(self):
try:
self.account.delete(self.apiclient)
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "simulator",
"api", "basic", "eip", "sg"])
def test_deployVmOnGivenHost(self):
hosts = Host.list(
self.apiclient,
zoneid=self.zone.id,
type='Routing',
state='Up',
listall=True
)
self.assertEqual(
isinstance(hosts, list),
True,
"CS should have atleast one host Up and Running"
)
host = hosts[0]
self.debug("Deploting VM on host: %s" % host.name)
try:
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
hostid=host.id
)
self.debug("Deploy VM succeeded")
except Exception as e:
self.fail("Deploy VM failed with exception: %s" % e)
self.debug("Cheking the state of deployed VM")
vms = VirtualMachine.list(
self.apiclient,
id=vm.id,
listall=True,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vm should return a valid response"
)
vm_response = vms[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in running state after deployment"
)
self.assertEqual(
vm_response.hostid,
host.id,
"Host id where VM is deployed should match"
)
return
| true
| true
|
7903f14c63028b965ddddec618df99f3f887854f
| 682
|
py
|
Python
|
Postgress-example/peewee-orm-test.py
|
Raul-Flores/ORM-example
|
ff289f74f858514cebefe7070c3688ad773a0e2a
|
[
"MIT"
] | null | null | null |
Postgress-example/peewee-orm-test.py
|
Raul-Flores/ORM-example
|
ff289f74f858514cebefe7070c3688ad773a0e2a
|
[
"MIT"
] | null | null | null |
Postgress-example/peewee-orm-test.py
|
Raul-Flores/ORM-example
|
ff289f74f858514cebefe7070c3688ad773a0e2a
|
[
"MIT"
] | null | null | null |
from peewee import *
import psycopg2
import datetime
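# Connection values below are examples; adjust host, user and password for your environment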
db = PostgresqlDatabase("prueba", host="localhost", port=5432, user="postgres", password="P@ssw0rd")
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
    Username = CharField(unique=True)
    email = CharField(unique=True)
    created_date = DateTimeField(default=datetime.datetime.now)
class Meta:
db_table = 'Users'
if __name__ == '__main__':
if not User.table_exists():
User.create_table()
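    # .get() returns a single matching row and raises User.DoesNotExist when nothing matches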
    query_1 = User.select().where(User.Username == "Raul").get()
    print(query_1.email)
    for all_users in User.select():
        print(all_users.Username)
| 26.230769
| 100
| 0.670088
|
from peewee import *
import psycopg2
import datetime
db = PostgresqlDatabase("prueba", host="localhost", port=5432, user="postgres", password="P@ssw0rd")
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
    Username = CharField(unique=True)
    email = CharField(unique=True)
    created_date = DateTimeField(default=datetime.datetime.now)
class Meta:
db_table = 'Users'
if __name__ == '__main__':
if not User.table_exists():
User.create_table()
    query_1 = User.select().where(User.Username == "Raul").get()
    print(query_1.email)
    for all_users in User.select():
        print(all_users.Username)
| true
| true
|
7903f253e8075b929b3aebfdfe7655f7bad4b7f8
| 1,900
|
py
|
Python
|
swig_muesli/muesli/da/setup_da.py
|
NinaHerrmann/muesli2py
|
632bb67433c6f67eaa48dc431d51914e0fde8f22
|
[
"MIT"
] | null | null | null |
swig_muesli/muesli/da/setup_da.py
|
NinaHerrmann/muesli2py
|
632bb67433c6f67eaa48dc431d51914e0fde8f22
|
[
"MIT"
] | null | null | null |
swig_muesli/muesli/da/setup_da.py
|
NinaHerrmann/muesli2py
|
632bb67433c6f67eaa48dc431d51914e0fde8f22
|
[
"MIT"
] | 1
|
2021-11-05T11:20:39.000Z
|
2021-11-05T11:20:39.000Z
|
import os
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy as np
from os.path import join as pjoin
from setup_cuda import cuda_setup
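# Ask the MPI compiler wrapper for the compile and link flags needed to build against MPI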
mpi_compile_args = os.popen("mpic++ --showme:compile").read().strip().split(' ')
mpi_link_args = os.popen("mpic++ --showme:link").read().strip().split(' ')
def find_in_path(name, path):
"""Find a file in a search path"""
# Adapted fom http://code.activestate.com/recipes/52224
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
nvcc = find_in_path('nvcc', os.environ['PATH'])
if isinstance(nvcc, str):
print('CUDA')
# setup(name='PackageName',
# author='Nina Herrmann',
# version='1.0',
# description='This is a package for Muesli',
# ext_modules=cythonize(cuda_setup.get_module()),
# cmdclass={'build_ext': cuda_setup.custom_build_ext()}
# )
else:
module = Extension('_da', sources=['da.cxx', 'da_wrap.cxx'],
include_dirs=[np.get_include(), 'src'],
library_dirs=['/usr/include/boost/'],
language="c++",
swig_opts=['-c++'],
                       libraries=['boost_chrono'],  # assumed Boost.Chrono library name; a filesystem path here would not link
extra_compile_args=(["-fopenmp"] + mpi_compile_args),
extra_link_args=(["-fopenmp"] + mpi_link_args)
)
setup(name='da',
author='Nina Herrmann',
version='1.0',
description='This is a package for Muesli',
ext_modules=[module],
py_modules=["da"]
)
| 33.333333
| 80
| 0.587895
|
import os
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy as np
from os.path import join as pjoin
from setup_cuda import cuda_setup
mpi_compile_args = os.popen("mpic++ --showme:compile").read().strip().split(' ')
mpi_link_args = os.popen("mpic++ --showme:link").read().strip().split(' ')
def find_in_path(name, path):
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
nvcc = find_in_path('nvcc', os.environ['PATH'])
if isinstance(nvcc, str):
print('CUDA')
else:
module = Extension('_da', sources=['da.cxx', 'da_wrap.cxx'],
include_dirs=[np.get_include(), 'src'],
library_dirs=['/usr/include/boost/'],
language="c++",
swig_opts=['-c++'],
                       libraries=['boost_chrono'],
extra_compile_args=(["-fopenmp"] + mpi_compile_args),
extra_link_args=(["-fopenmp"] + mpi_link_args)
)
setup(name='da',
author='Nina Herrmann',
version='1.0',
description='This is a package for Muesli',
ext_modules=[module],
py_modules=["da"]
)
| true
| true
|
7903f2d420c11f5c52ca3c107b61adfa92f927b4
| 482
|
py
|
Python
|
main8.py
|
BraffordHunter/E01a-Control-Structues
|
32d3ba66169e2ff1f24d7d4b23c135022637aadb
|
[
"MIT"
] | null | null | null |
main8.py
|
BraffordHunter/E01a-Control-Structues
|
32d3ba66169e2ff1f24d7d4b23c135022637aadb
|
[
"MIT"
] | null | null | null |
main8.py
|
BraffordHunter/E01a-Control-Structues
|
32d3ba66169e2ff1f24d7d4b23c135022637aadb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import utils
utils.check_version((3,7)) # make sure we are running at least Python 3.7
utils.clear() # clear the screen
print('Greetings!')
color = ''
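# Keep prompting until the user guesses the favorite color; normalize the guess before comparing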
while (color != 'red'):
    color = input("What is my favorite color? ")
    color = color.lower().strip()
    if (color == 'red'):
        print('Correct!')
    elif (color == 'pink'):
        print('Close!')
    else:
        print('Sorry, try again.')
| 25.368421
| 82
| 0.558091
|
import utils
utils.check_version((3,7))
utils.clear()
print('Greetings!')
color = ''
while (color != 'red'):
    color = input("What is my favorite color? ")
    color = color.lower().strip()
    if (color == 'red'):
        print('Correct!')
    elif (color == 'pink'):
        print('Close!')
    else:
        print('Sorry, try again.')
| true
| true
|
7903f412d8067347ae393df508dd0039c1b1cec1
| 1,962
|
py
|
Python
|
scripts/objects_to_tags.py
|
scumatteo/rtabmap_ros
|
74abc0e46d9f3977cda386b6fd505b49c4fe5fff
|
[
"BSD-3-Clause"
] | 657
|
2015-01-29T10:50:57.000Z
|
2022-03-31T08:55:39.000Z
|
scripts/objects_to_tags.py
|
scumatteo/rtabmap_ros
|
74abc0e46d9f3977cda386b6fd505b49c4fe5fff
|
[
"BSD-3-Clause"
] | 714
|
2015-01-09T08:43:16.000Z
|
2022-03-30T04:04:00.000Z
|
scripts/objects_to_tags.py
|
scumatteo/rtabmap_ros
|
74abc0e46d9f3977cda386b6fd505b49c4fe5fff
|
[
"BSD-3-Clause"
] | 524
|
2015-02-04T15:23:22.000Z
|
2022-03-30T17:03:06.000Z
|
#!/usr/bin/env python
import rospy
from apriltag_ros.msg import AprilTagDetectionArray
from apriltag_ros.msg import AprilTagDetection
from find_object_2d.msg import ObjectsStamped
import tf
import geometry_msgs.msg
objFramePrefix_ = "object"
distanceMax_ = 0.0
def callback(data):
global objFramePrefix_
global distanceMax_
if len(data.objects.data) > 0:
output = AprilTagDetectionArray()
output.header = data.header
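        # find_object_2d packs each detection as 12 floats: object id, width, height and a 3x3 homography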
for i in range(0,len(data.objects.data),12):
try:
objId = data.objects.data[i]
(trans,quat) = listener.lookupTransform(data.header.frame_id, objFramePrefix_+'_'+str(int(objId)), data.header.stamp)
tag = AprilTagDetection()
tag.id.append(objId)
tag.pose.pose.pose.position.x = trans[0]
tag.pose.pose.pose.position.y = trans[1]
tag.pose.pose.pose.position.z = trans[2]
tag.pose.pose.pose.orientation.x = quat[0]
tag.pose.pose.pose.orientation.y = quat[1]
tag.pose.pose.pose.orientation.z = quat[2]
tag.pose.pose.pose.orientation.w = quat[3]
tag.pose.header = output.header
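                # Keep the detection only if it lies within distanceMax_ along the camera Z axis (0 disables the filter)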
if distanceMax_ <= 0.0 or trans[2] < distanceMax_:
output.detections.append(tag)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
if len(output.detections) > 0:
pub.publish(output)
if __name__ == '__main__':
pub = rospy.Publisher('tag_detections', AprilTagDetectionArray, queue_size=10)
rospy.init_node('objects_to_tags', anonymous=True)
rospy.Subscriber("objectsStamped", ObjectsStamped, callback)
objFramePrefix_ = rospy.get_param('~object_prefix', objFramePrefix_)
distanceMax_ = rospy.get_param('~distance_max', distanceMax_)
listener = tf.TransformListener()
rospy.spin()
| 41.744681
| 133
| 0.646789
|
import rospy
from apriltag_ros.msg import AprilTagDetectionArray
from apriltag_ros.msg import AprilTagDetection
from find_object_2d.msg import ObjectsStamped
import tf
import geometry_msgs.msg
objFramePrefix_ = "object"
distanceMax_ = 0.0
def callback(data):
global objFramePrefix_
global distanceMax_
if len(data.objects.data) > 0:
output = AprilTagDetectionArray()
output.header = data.header
for i in range(0,len(data.objects.data),12):
try:
objId = data.objects.data[i]
(trans,quat) = listener.lookupTransform(data.header.frame_id, objFramePrefix_+'_'+str(int(objId)), data.header.stamp)
tag = AprilTagDetection()
tag.id.append(objId)
tag.pose.pose.pose.position.x = trans[0]
tag.pose.pose.pose.position.y = trans[1]
tag.pose.pose.pose.position.z = trans[2]
tag.pose.pose.pose.orientation.x = quat[0]
tag.pose.pose.pose.orientation.y = quat[1]
tag.pose.pose.pose.orientation.z = quat[2]
tag.pose.pose.pose.orientation.w = quat[3]
tag.pose.header = output.header
if distanceMax_ <= 0.0 or trans[2] < distanceMax_:
output.detections.append(tag)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
if len(output.detections) > 0:
pub.publish(output)
if __name__ == '__main__':
pub = rospy.Publisher('tag_detections', AprilTagDetectionArray, queue_size=10)
rospy.init_node('objects_to_tags', anonymous=True)
rospy.Subscriber("objectsStamped", ObjectsStamped, callback)
objFramePrefix_ = rospy.get_param('~object_prefix', objFramePrefix_)
distanceMax_ = rospy.get_param('~distance_max', distanceMax_)
listener = tf.TransformListener()
rospy.spin()
| true
| true
|
7903f5a22c3ec8d92ce95ff2f09cfe391dcc61f8
| 6,399
|
py
|
Python
|
common-python/oc_provisioning/oc_provision_wrappers/database/v11g/oracle_rdbms_clone.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 28
|
2016-11-07T14:03:25.000Z
|
2022-02-01T08:46:52.000Z
|
common-python/oc_provisioning/oc_provision_wrappers/database/v11g/oracle_rdbms_clone.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 3
|
2016-11-09T13:23:03.000Z
|
2018-04-05T15:49:22.000Z
|
common-python/oc_provisioning/oc_provision_wrappers/database/v11g/oracle_rdbms_clone.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 13
|
2016-10-27T17:59:38.000Z
|
2022-02-18T04:38:38.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Oracle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = "Michael Shanley (Oracle A-Team)"
__copyright__ = "Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved."
__version__ = "1.0.0.0"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
from oc_provision_wrappers import commerce_setup_helper
import os
import time
import logging
logger = logging.getLogger(__name__)
json_key = 'ORACLE_11g_clone'
service_name = "Oracle DB clone"
def clone_oracle(configData, full_path):
if json_key in configData:
jsonData = configData[json_key]
else:
logging.error(json_key + " config data missing from json. will not install")
return
logging.info("installing " + service_name)
INSTALL_OWNER = jsonData['installOwner']
ORACLE_HOME = jsonData['oracleHome']
ORIG_HOST = jsonData['originalHost']
NEW_HOST = jsonData['newHost']
ORACLE_SID = jsonData['oracleSID']
UPDATE_DB_CONSOLE = jsonData['updateDBConsole']
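    # Init scripts used to stop and restart the database and DB console around the rename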
db_script = "/etc/init.d/oracleDatabase"
db_console_script = "/etc/init.d/oracleDBconsole"
stop_db_cmd = db_script + " stop"
stop_db_console_cmd = db_console_script + " stop"
start_db_cmd = db_script + " start"
start_db_console_cmd = db_console_script + " start"
tns_path = ORACLE_HOME + "/network/admin/tnsnames.ora"
lsnr_path = ORACLE_HOME + "/network/admin/listener.ora"
if not os.path.exists(tns_path):
logging.error("tnsnames.ora not found at " + tns_path + " - will not proceed")
return False
# stop db
commerce_setup_helper.exec_cmd(stop_db_cmd)
# stop console
commerce_setup_helper.exec_cmd(stop_db_console_cmd)
tns_replacements = {}
lsnr_replacements = {}
if (ORIG_HOST and NEW_HOST):
tns_replacements[ORIG_HOST] = NEW_HOST
lsnr_replacements[ORIG_HOST] = NEW_HOST
# update tnsnames
if tns_replacements:
if not os.path.exists(tns_path):
logging.warn("tnsnames.ora not found at " + tns_path + " - cannot modify")
else:
# backup tnsnames
timestr = time.strftime("%Y%m%d-%H%M%S")
installCommand = "\"" + "cp " + tns_path + " " + tns_path + "." + timestr + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, installCommand)
commerce_setup_helper.substitute_file_fields(tns_path, tns_path, tns_replacements)
# update listener
if lsnr_replacements:
if not os.path.exists(lsnr_path):
logging.warn("listener.ora not found at " + lsnr_path + " - cannot modify")
else:
# backup listener
timestr = time.strftime("%Y%m%d-%H%M%S")
installCommand = "\"" + "cp " + lsnr_path + " " + lsnr_path + "." + timestr + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, installCommand)
            commerce_setup_helper.substitute_file_fields(lsnr_path, lsnr_path, lsnr_replacements)
# update db name
orig_db_name = ORACLE_HOME + "/" + ORIG_HOST + "_" + ORACLE_SID
new_db_name = ORACLE_HOME + "/" + NEW_HOST + "_" + ORACLE_SID
if not os.path.exists(orig_db_name):
logging.error("db path not found at " + orig_db_name + " - cannot modify")
else:
mv_cmd = "\"" + "mv " + orig_db_name + " " + new_db_name + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, mv_cmd)
# update db console
if (UPDATE_DB_CONSOLE == "true") :
PORT = jsonData['lsnrPort']
ORACLE_PW = jsonData['adminPW']
orig_db_console = ORACLE_HOME + "/oc4j/j2ee/OC4J_DBConsole_" + ORIG_HOST + "_" + ORACLE_SID
new_db_console = ORACLE_HOME + "/oc4j/j2ee/OC4J_DBConsole_" + NEW_HOST + "_" + ORACLE_SID
if not os.path.exists(orig_db_console):
logging.warn("db console not found at " + orig_db_console + " - cannot modify")
else:
mv_cmd = "\"" + "mv " + orig_db_console + " " + new_db_console + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, mv_cmd)
# db must be running for emca to exec. make sure
# start db
commerce_setup_helper.exec_cmd(start_db_cmd)
emca_params = "-SID " + ORACLE_SID + " -PORT " + PORT + " -SYS_PWD " + ORACLE_PW + " -SYSMAN_PWD " + ORACLE_PW + " -DBSNMP_PWD " + ORACLE_PW
drop_repo_cmd = "\"" + ORACLE_HOME + "/bin/emca -deconfig dbcontrol db -repos drop -silent " + emca_params + "\""
create_repo_cmd = "\"" + ORACLE_HOME + "/bin/emca -config dbcontrol db -repos create -silent " + emca_params + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, drop_repo_cmd)
commerce_setup_helper.exec_as_user(INSTALL_OWNER, create_repo_cmd)
# stop db
commerce_setup_helper.exec_cmd(stop_db_cmd)
# stop console
commerce_setup_helper.exec_cmd(stop_db_console_cmd)
# start db
commerce_setup_helper.exec_cmd(start_db_cmd)
if (UPDATE_DB_CONSOLE == "true") :
# start dbconsole
commerce_setup_helper.exec_cmd(start_db_console_cmd)
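# a minimal sketch of the JSON fragment this module expects under the
# "ORACLE_11g_clone" key, inferred from the fields read above; every value is
# an illustrative placeholder, not taken from a real deployment:
#
#   {
#       "ORACLE_11g_clone": {
#           "installOwner": "oracle",
#           "oracleHome": "/u01/app/oracle/product/11.2.0/dbhome_1",
#           "originalHost": "host-a",
#           "newHost": "host-b",
#           "oracleSID": "ORCL",
#           "updateDBConsole": "true",
#           "lsnrPort": "1521",
#           "adminPW": "example-password"
#       }
#   }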
| 42.946309
| 153
| 0.643694
|
__author__ = "Michael Shanley (Oracle A-Team)"
__copyright__ = "Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved."
__version__ = "1.0.0.0"
from oc_provision_wrappers import commerce_setup_helper
import os
import time
import logging
logger = logging.getLogger(__name__)
json_key = 'ORACLE_11g_clone'
service_name = "Oracle DB clone"
def clone_oracle(configData, full_path):
if json_key in configData:
jsonData = configData[json_key]
else:
logging.error(json_key + " config data missing from json. will not install")
return
logging.info("installing " + service_name)
INSTALL_OWNER = jsonData['installOwner']
ORACLE_HOME = jsonData['oracleHome']
ORIG_HOST = jsonData['originalHost']
NEW_HOST = jsonData['newHost']
ORACLE_SID = jsonData['oracleSID']
UPDATE_DB_CONSOLE = jsonData['updateDBConsole']
db_script = "/etc/init.d/oracleDatabase"
db_console_script = "/etc/init.d/oracleDBconsole"
stop_db_cmd = db_script + " stop"
stop_db_console_cmd = db_console_script + " stop"
start_db_cmd = db_script + " start"
start_db_console_cmd = db_console_script + " start"
tns_path = ORACLE_HOME + "/network/admin/tnsnames.ora"
lsnr_path = ORACLE_HOME + "/network/admin/listener.ora"
if not os.path.exists(tns_path):
logging.error("tnsnames.ora not found at " + tns_path + " - will not proceed")
return False
commerce_setup_helper.exec_cmd(stop_db_cmd)
commerce_setup_helper.exec_cmd(stop_db_console_cmd)
tns_replacements = {}
lsnr_replacements = {}
if (ORIG_HOST and NEW_HOST):
tns_replacements[ORIG_HOST] = NEW_HOST
lsnr_replacements[ORIG_HOST] = NEW_HOST
if tns_replacements:
if not os.path.exists(tns_path):
logging.warn("tnsnames.ora not found at " + tns_path + " - cannot modify")
else:
timestr = time.strftime("%Y%m%d-%H%M%S")
installCommand = "\"" + "cp " + tns_path + " " + tns_path + "." + timestr + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, installCommand)
commerce_setup_helper.substitute_file_fields(tns_path, tns_path, tns_replacements)
if lsnr_replacements:
if not os.path.exists(lsnr_path):
logging.warn("listener.ora not found at " + lsnr_path + " - cannot modify")
else:
timestr = time.strftime("%Y%m%d-%H%M%S")
installCommand = "\"" + "cp " + lsnr_path + " " + lsnr_path + "." + timestr + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, installCommand)
            commerce_setup_helper.substitute_file_fields(lsnr_path, lsnr_path, lsnr_replacements)
orig_db_name = ORACLE_HOME + "/" + ORIG_HOST + "_" + ORACLE_SID
new_db_name = ORACLE_HOME + "/" + NEW_HOST + "_" + ORACLE_SID
if not os.path.exists(orig_db_name):
logging.error("db path not found at " + orig_db_name + " - cannot modify")
else:
mv_cmd = "\"" + "mv " + orig_db_name + " " + new_db_name + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, mv_cmd)
if (UPDATE_DB_CONSOLE == "true") :
PORT = jsonData['lsnrPort']
ORACLE_PW = jsonData['adminPW']
orig_db_console = ORACLE_HOME + "/oc4j/j2ee/OC4J_DBConsole_" + ORIG_HOST + "_" + ORACLE_SID
new_db_console = ORACLE_HOME + "/oc4j/j2ee/OC4J_DBConsole_" + NEW_HOST + "_" + ORACLE_SID
if not os.path.exists(orig_db_console):
logging.warn("db console not found at " + orig_db_console + " - cannot modify")
else:
mv_cmd = "\"" + "mv " + orig_db_console + " " + new_db_console + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, mv_cmd)
commerce_setup_helper.exec_cmd(start_db_cmd)
emca_params = "-SID " + ORACLE_SID + " -PORT " + PORT + " -SYS_PWD " + ORACLE_PW + " -SYSMAN_PWD " + ORACLE_PW + " -DBSNMP_PWD " + ORACLE_PW
drop_repo_cmd = "\"" + ORACLE_HOME + "/bin/emca -deconfig dbcontrol db -repos drop -silent " + emca_params + "\""
create_repo_cmd = "\"" + ORACLE_HOME + "/bin/emca -config dbcontrol db -repos create -silent " + emca_params + "\""
commerce_setup_helper.exec_as_user(INSTALL_OWNER, drop_repo_cmd)
commerce_setup_helper.exec_as_user(INSTALL_OWNER, create_repo_cmd)
commerce_setup_helper.exec_cmd(stop_db_cmd)
commerce_setup_helper.exec_cmd(stop_db_console_cmd)
commerce_setup_helper.exec_cmd(start_db_cmd)
if (UPDATE_DB_CONSOLE == "true") :
commerce_setup_helper.exec_cmd(start_db_console_cmd)
| true
| true
|
7903f880b576c98e61ab228f6f1a8866e40b7802
| 1,335
|
py
|
Python
|
tests/changes/expanders/test_commands.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 443
|
2015-01-03T16:28:39.000Z
|
2021-04-26T16:39:46.000Z
|
tests/changes/expanders/test_commands.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 12
|
2015-07-30T19:07:16.000Z
|
2016-11-07T23:11:21.000Z
|
tests/changes/expanders/test_commands.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 47
|
2015-01-09T10:04:00.000Z
|
2020-11-18T17:58:19.000Z
|
from __future__ import absolute_import
import pytest
from changes.expanders.commands import CommandsExpander
from changes.testutils import TestCase
class CommandsExpanderTest(TestCase):
def setUp(self):
super(CommandsExpanderTest, self).setUp()
self.project = self.create_project()
def get_expander(self, data):
return CommandsExpander(self.project, data)
def test_validate(self):
with pytest.raises(AssertionError):
self.get_expander({}).validate()
self.get_expander({'commands': []}).validate()
def test_expand(self):
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
results = list(self.get_expander({'commands': [
{'script': 'echo 1'},
{'script': 'echo 2', 'label': 'foo'}
]}).expand(job=job, max_executors=10))
assert len(results) == 2
assert results[0].label == 'echo 1'
assert len(results[0].commands) == 1
assert results[0].commands[0].label == 'echo 1'
assert results[0].commands[0].script == 'echo 1'
assert results[1].label == 'foo'
assert len(results[1].commands) == 1
assert results[1].commands[0].label == 'foo'
assert results[1].commands[0].script == 'echo 2'
| 31.785714
| 56
| 0.626966
|
from __future__ import absolute_import
import pytest
from changes.expanders.commands import CommandsExpander
from changes.testutils import TestCase
class CommandsExpanderTest(TestCase):
def setUp(self):
super(CommandsExpanderTest, self).setUp()
self.project = self.create_project()
def get_expander(self, data):
return CommandsExpander(self.project, data)
def test_validate(self):
with pytest.raises(AssertionError):
self.get_expander({}).validate()
self.get_expander({'commands': []}).validate()
def test_expand(self):
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
results = list(self.get_expander({'commands': [
{'script': 'echo 1'},
{'script': 'echo 2', 'label': 'foo'}
]}).expand(job=job, max_executors=10))
assert len(results) == 2
assert results[0].label == 'echo 1'
assert len(results[0].commands) == 1
assert results[0].commands[0].label == 'echo 1'
assert results[0].commands[0].script == 'echo 1'
assert results[1].label == 'foo'
assert len(results[1].commands) == 1
assert results[1].commands[0].label == 'foo'
assert results[1].commands[0].script == 'echo 2'
| true
| true
|
7903f921b48d453a32ec92b9ee4383a94eb38785
| 8,861
|
py
|
Python
|
akshare/stock/zh_stock_a_sina.py
|
x109airfighter/akshare
|
5f9600fdba11c933c144e47d551129ec42cb56c5
|
[
"MIT"
] | 1
|
2020-05-31T14:50:35.000Z
|
2020-05-31T14:50:35.000Z
|
akshare/stock/zh_stock_a_sina.py
|
fellowfun/akshare
|
06b553d0a56f54a0e8f8a2031c374366a8b25e91
|
[
"MIT"
] | null | null | null |
akshare/stock/zh_stock_a_sina.py
|
fellowfun/akshare
|
06b553d0a56f54a0e8f8a2031c374366a8b25e91
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2019/10/30 11:28
Desc: Sina Finance - A-shares - real-time quotes and historical daily data (including forward (qfq) and backward (hfq) adjustment factors)
"""
import re
import demjson
import execjs
import pandas as pd
import requests
from tqdm import tqdm
from akshare.stock.cons import (zh_sina_a_stock_payload,
zh_sina_a_stock_url,
zh_sina_a_stock_count_url,
zh_sina_a_stock_hist_url,
hk_js_decode,
zh_sina_a_stock_hfq_url,
zh_sina_a_stock_qfq_url,
zh_sina_a_stock_amount_url)
def _get_zh_a_page_count() -> int:
"""
所有股票的总页数
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
:return: 需要抓取的股票总页数
:rtype: int
"""
res = requests.get(zh_sina_a_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
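# quick sanity check of the ceiling division above (illustrative totals):
#   -(-160 // 80) == 2   # exact multiple of 80: no extra page
#   -(-161 // 80) == 3   # remainder present: one more page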
def stock_zh_a_spot() -> pd.DataFrame:
"""
    Fetch real-time quotes for every A-share from Sina Finance; calling this repeatedly may get the IP temporarily blocked by Sina
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: pandas.DataFrame
symbol code name trade pricechange changepercent buy \
0 sh600000 600000 浦发银行 12.920 -0.030 -0.232 12.920
1 sh600004 600004 白云机场 18.110 -0.370 -2.002 18.110
2 sh600006 600006 东风汽车 4.410 -0.030 -0.676 4.410
3 sh600007 600007 中国国贸 17.240 -0.360 -2.045 17.240
4 sh600008 600008 首创股份 3.320 -0.030 -0.896 3.310
... ... ... ... ... ... ...
3755 sh600096 600096 云天化 5.270 -0.220 -4.007 5.270
3756 sh600097 600097 开创国际 10.180 -0.120 -1.165 10.180
3757 sh600098 600098 广州发展 6.550 -0.040 -0.607 6.540
3758 sh600099 600099 林海股份 6.540 -0.150 -2.242 6.540
3759 sh600100 600100 同方股份 8.200 -0.100 -1.205 8.200
sell settlement open high low volume amount \
0 12.930 12.950 12.950 13.100 12.860 46023920 597016896
1 18.120 18.480 18.510 18.510 17.880 24175071 437419344
2 4.420 4.440 4.490 4.490 4.410 4304900 19130233
3 17.280 17.600 17.670 17.670 17.220 684801 11879731
4 3.320 3.350 3.360 3.360 3.300 8284294 27579688
... ... ... ... ... ... ...
3755 5.280 5.490 5.490 5.500 5.220 16964636 90595172
3756 10.190 10.300 10.220 10.340 10.090 1001676 10231669
3757 6.550 6.590 6.560 6.620 6.500 1996449 13098901
3758 6.580 6.690 6.650 6.680 6.530 1866180 12314997
3759 8.210 8.300 8.300 8.310 8.120 12087236 99281447
ticktime per pb mktcap nmc turnoverratio
0 15:00:00 6.984 0.790 3.792289e+07 3.631006e+07 0.16376
1 15:00:07 32.927 2.365 3.747539e+06 3.747539e+06 1.16826
2 15:00:02 15.926 1.207 8.820000e+05 8.820000e+05 0.21525
3 15:00:02 22.390 2.367 1.736555e+06 1.736555e+06 0.06798
4 15:00:07 22.912 1.730 1.887569e+06 1.600444e+06 0.17185
... ... ... ... ... ...
3755 15:00:00 56.728 1.566 7.523847e+05 6.963668e+05 1.28386
3756 15:00:00 17.552 1.434 2.452734e+05 2.303459e+05 0.44268
3757 15:00:00 25.476 1.059 1.785659e+06 1.785659e+06 0.07323
3758 15:00:00 540.496 3.023 1.433045e+05 1.433045e+05 0.85167
3759 15:00:07 -6.264 1.465 2.430397e+06 2.430397e+06 0.40782
"""
big_df = pd.DataFrame()
page_count = _get_zh_a_page_count()
zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy()
for page in tqdm(range(1, page_count+1), desc="Please wait for a moment"):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(
zh_sina_a_stock_url,
params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
return big_df
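# note: DataFrame.append was removed in pandas 2.0; on modern pandas the loop
# above would instead collect each page frame in a list and finish with e.g.
#   big_df = pd.concat(frames, ignore_index=True)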
def stock_zh_a_daily(symbol: str = "sz000613", adjust: str = "qfq") -> pd.DataFrame:
"""
新浪财经-A股-个股的历史行情数据, 大量抓取容易封IP
:param symbol: sh600000
:type symbol: str
:param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; hfq-factor: 返回前复权因子
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = execjs.compile(hk_js_decode)
dict_list = js_code.call(
'd', res.text.split("=")[1].split(";")[0].replace(
'"', "")) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df["date"] = data_df["date"].str.split("T", expand=True).iloc[:, 0]
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("["): r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how="left")
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = ['open', 'high', 'low', 'close', 'volume', 'outstanding_share', 'turnover']
if adjust == "":
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "hfq-factor":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
return qfq_factor_df
if __name__ == "__main__":
stock_zh_a_daily_hfq_df = stock_zh_a_daily(symbol="sh600582", adjust="qfq-factor")
print(stock_zh_a_daily_hfq_df)
stock_zh_a_daily_df = stock_zh_a_daily(symbol="sz000613", adjust="qfq")
print(stock_zh_a_daily_df)
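    # additional illustrative call (not in the original): backward-adjusted series
    stock_zh_a_daily_hfq_series_df = stock_zh_a_daily(symbol="sz000613", adjust="hfq")
    print(stock_zh_a_daily_hfq_series_df)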
stock_zh_a_spot_df = stock_zh_a_spot()
print(stock_zh_a_spot_df)
| 44.979695
| 107
| 0.586616
|
import re
import demjson
import execjs
import pandas as pd
import requests
from tqdm import tqdm
from akshare.stock.cons import (zh_sina_a_stock_payload,
zh_sina_a_stock_url,
zh_sina_a_stock_count_url,
zh_sina_a_stock_hist_url,
hk_js_decode,
zh_sina_a_stock_hfq_url,
zh_sina_a_stock_qfq_url,
zh_sina_a_stock_amount_url)
def _get_zh_a_page_count() -> int:
    res = requests.get(zh_sina_a_stock_count_url)
    total = int(re.findall(re.compile(r"\d+"), res.text)[0])
    return -(-total // 80)
def stock_zh_a_spot() -> pd.DataFrame:
big_df = pd.DataFrame()
page_count = _get_zh_a_page_count()
zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy()
for page in tqdm(range(1, page_count+1), desc="Please wait for a moment"):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(
zh_sina_a_stock_url,
params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
return big_df
def stock_zh_a_daily(symbol: str = "sz000613", adjust: str = "qfq") -> pd.DataFrame:
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = execjs.compile(hk_js_decode)
dict_list = js_code.call(
'd', res.text.split("=")[1].split(";")[0].replace(
'"', "")) # 执行js解密代码
data_df = pd.DataFrame(dict_list)
data_df["date"] = data_df["date"].str.split("T", expand=True).iloc[:, 0]
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("["): r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how="left")
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = ['open', 'high', 'low', 'close', 'volume', 'outstanding_share', 'turnover']
if adjust == "":
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="left"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
return temp_df.iloc[:, :-1]
if adjust == "hfq-factor":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
return qfq_factor_df
if __name__ == "__main__":
stock_zh_a_daily_hfq_df = stock_zh_a_daily(symbol="sh600582", adjust="qfq-factor")
print(stock_zh_a_daily_hfq_df)
stock_zh_a_daily_df = stock_zh_a_daily(symbol="sz000613", adjust="qfq")
print(stock_zh_a_daily_df)
stock_zh_a_spot_df = stock_zh_a_spot()
print(stock_zh_a_spot_df)
| true
| true
|
7903f9d911ea7281080096d18fbd810021aa2ca6
| 3,796
|
py
|
Python
|
recohut/models/ccpm.py
|
sparsh-ai/recohut
|
4121f665761ffe38c9b6337eaa9293b26bee2376
|
[
"Apache-2.0"
] | null | null | null |
recohut/models/ccpm.py
|
sparsh-ai/recohut
|
4121f665761ffe38c9b6337eaa9293b26bee2376
|
[
"Apache-2.0"
] | 1
|
2022-01-12T05:40:57.000Z
|
2022-01-12T05:40:57.000Z
|
recohut/models/ccpm.py
|
RecoHut-Projects/recohut
|
4121f665761ffe38c9b6337eaa9293b26bee2376
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/models.ccpm.ipynb (unless otherwise specified).
__all__ = ['CCPM']
# Cell
import torch
from torch import nn
from .layers.embedding import EmbeddingLayer
from .layers.common import KMaxPooling
from .bases.ctr import CTRModel
# Internal Cell
def get_activation(activation):
if isinstance(activation, str):
if activation.lower() == "relu":
return nn.ReLU()
elif activation.lower() == "sigmoid":
return nn.Sigmoid()
elif activation.lower() == "tanh":
return nn.Tanh()
else:
return getattr(nn, activation)()
else:
return activation
# Internal Cell
class CCPM_ConvLayer(nn.Module):
"""
Input X: tensor of shape (batch_size, 1, num_fields, embedding_dim)
"""
def __init__(self, num_fields, channels=[3], kernel_heights=[3], activation="Tanh"):
super(CCPM_ConvLayer, self).__init__()
if not isinstance(kernel_heights, list):
kernel_heights = [kernel_heights] * len(channels)
elif len(kernel_heights) != len(channels):
raise ValueError("channels={} and kernel_heights={} should have the same length."\
.format(channels, kernel_heights))
module_list = []
self.channels = [1] + channels
layers = len(kernel_heights)
for i in range(1, len(self.channels)):
in_channels = self.channels[i - 1]
out_channels = self.channels[i]
kernel_height = kernel_heights[i - 1]
module_list.append(nn.ZeroPad2d((0, 0, kernel_height - 1, kernel_height - 1)))
module_list.append(nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_height, 1)))
if i < layers:
k = max(3, int((1 - pow(float(i) / layers, layers - i)) * num_fields))
else:
k = 3
module_list.append(KMaxPooling(k, dim=2))
module_list.append(get_activation(activation))
self.conv_layer = nn.Sequential(*module_list)
def forward(self, X):
return self.conv_layer(X)
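# worked example of the per-layer k-max pooling size computed above, assuming
# num_fields=10 and kernel_heights of length 3 (so layers=3):
#   i=1: k = max(3, int((1 - (1/3)**2) * 10)) = max(3, 8) = 8
#   i=2: k = max(3, int((1 - (2/3)**1) * 10)) = max(3, 3) = 3
#   i=3: k = 3 (the last layer always keeps the top 3)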
# Cell
class CCPM(CTRModel):
def __init__(self,
feature_map,
model_id="CCPM",
task="binary_classification",
learning_rate=1e-3,
embedding_initializer="torch.nn.init.normal_(std=1e-4)",
embedding_dim=10,
channels=[4, 4, 2],
kernel_heights=[6, 5, 3],
activation="Tanh",
**kwargs):
super(CCPM, self).__init__(feature_map,
model_id=model_id,
**kwargs)
self.embedding_layer = EmbeddingLayer(feature_map, embedding_dim)
self.conv_layer = CCPM_ConvLayer(feature_map.num_fields,
channels=channels,
kernel_heights=kernel_heights,
activation=activation)
conv_out_dim = 3 * embedding_dim * channels[-1] # 3 is k-max-pooling size of the last layer
self.fc = nn.Linear(conv_out_dim, 1)
self.output_activation = self.get_final_activation(task)
self.init_weights(embedding_initializer=embedding_initializer)
def forward(self, inputs):
feature_emb = self.embedding_layer(inputs)
conv_in = torch.unsqueeze(feature_emb, 1) # shape (bs, 1, field, emb)
conv_out = self.conv_layer(conv_in)
flatten_out = torch.flatten(conv_out, start_dim=1)
y_pred = self.fc(flatten_out)
if self.output_activation is not None:
y_pred = self.output_activation(y_pred)
return y_pred
| 40.382979
| 102
| 0.58667
|
__all__ = ['CCPM']
import torch
from torch import nn
from .layers.embedding import EmbeddingLayer
from .layers.common import KMaxPooling
from .bases.ctr import CTRModel
def get_activation(activation):
if isinstance(activation, str):
if activation.lower() == "relu":
return nn.ReLU()
elif activation.lower() == "sigmoid":
return nn.Sigmoid()
elif activation.lower() == "tanh":
return nn.Tanh()
else:
return getattr(nn, activation)()
else:
return activation
class CCPM_ConvLayer(nn.Module):
def __init__(self, num_fields, channels=[3], kernel_heights=[3], activation="Tanh"):
super(CCPM_ConvLayer, self).__init__()
if not isinstance(kernel_heights, list):
kernel_heights = [kernel_heights] * len(channels)
elif len(kernel_heights) != len(channels):
raise ValueError("channels={} and kernel_heights={} should have the same length."\
.format(channels, kernel_heights))
module_list = []
self.channels = [1] + channels
layers = len(kernel_heights)
for i in range(1, len(self.channels)):
in_channels = self.channels[i - 1]
out_channels = self.channels[i]
kernel_height = kernel_heights[i - 1]
module_list.append(nn.ZeroPad2d((0, 0, kernel_height - 1, kernel_height - 1)))
module_list.append(nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_height, 1)))
if i < layers:
k = max(3, int((1 - pow(float(i) / layers, layers - i)) * num_fields))
else:
k = 3
module_list.append(KMaxPooling(k, dim=2))
module_list.append(get_activation(activation))
self.conv_layer = nn.Sequential(*module_list)
def forward(self, X):
return self.conv_layer(X)
class CCPM(CTRModel):
def __init__(self,
feature_map,
model_id="CCPM",
task="binary_classification",
learning_rate=1e-3,
embedding_initializer="torch.nn.init.normal_(std=1e-4)",
embedding_dim=10,
channels=[4, 4, 2],
kernel_heights=[6, 5, 3],
activation="Tanh",
**kwargs):
super(CCPM, self).__init__(feature_map,
model_id=model_id,
**kwargs)
self.embedding_layer = EmbeddingLayer(feature_map, embedding_dim)
self.conv_layer = CCPM_ConvLayer(feature_map.num_fields,
channels=channels,
kernel_heights=kernel_heights,
activation=activation)
conv_out_dim = 3 * embedding_dim * channels[-1]
self.fc = nn.Linear(conv_out_dim, 1)
self.output_activation = self.get_final_activation(task)
self.init_weights(embedding_initializer=embedding_initializer)
def forward(self, inputs):
feature_emb = self.embedding_layer(inputs)
conv_in = torch.unsqueeze(feature_emb, 1)
conv_out = self.conv_layer(conv_in)
flatten_out = torch.flatten(conv_out, start_dim=1)
y_pred = self.fc(flatten_out)
if self.output_activation is not None:
y_pred = self.output_activation(y_pred)
return y_pred
| true
| true
|
7903f9f6cf7c12b33ef38fa38569087a4f42e65e
| 4,409
|
py
|
Python
|
Pytorch/ActorCritic/agent_and_model.py
|
FitMachineLearning/FitML
|
a60f49fce1799ca4b11b48307441325b6272719a
|
[
"MIT"
] | 171
|
2017-11-07T09:59:20.000Z
|
2022-03-29T13:59:18.000Z
|
Pytorch/ActorCritic/agent_and_model.py
|
FitMachineLearning/FitML
|
a60f49fce1799ca4b11b48307441325b6272719a
|
[
"MIT"
] | 1
|
2017-12-24T20:08:18.000Z
|
2018-01-31T22:26:49.000Z
|
Pytorch/ActorCritic/agent_and_model.py
|
FitMachineLearning/FitML
|
a60f49fce1799ca4b11b48307441325b6272719a
|
[
"MIT"
] | 44
|
2017-11-07T12:08:05.000Z
|
2022-01-04T15:53:12.000Z
|
## DQN Tutorial
## Implementation from https://github.com/FitMachineLearning
import torch
import gym
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from dataclasses import dataclass
from typing import Any
from random import random
@dataclass
class sars:
state: Any
action: Any
reward: float
next_state: Any
done: bool
qval: float
advantage: float = 0.0
class DQNAgent:
def __init__(self,actor_model,critic_model):
self.actor_model = actor_model
self.critic_model = critic_model
def get_actions(self, observations):
guessed_actions = self.actor_model(torch.Tensor(observations).to(self.actor_model.device))
return guessed_actions
def get_predicted_Q_values(self,observation_and_action):
        guessed_Qs = self.critic_model(torch.Tensor(observation_and_action).to(self.critic_model.device))
        # the original "return guessed_Qs(-1)[1]" called the output tensor and
        # raised a TypeError; return the predicted Q values directly
        return guessed_Qs
    def update_target_model(self):
        # note: assumes companion attributes self.targetModel and self.model,
        # which this class does not define
        self.targetModel.load_state_dict(self.model.state_dict())
class ActorModel(nn.Module):
def __init__(self, obs_shape, action_shape,lr):
super(ActorModel,self).__init__()
assert len(obs_shape) ==1, "This network only works on flat observations"
self.obs_shape = obs_shape
self.action_shape = action_shape
self.net = torch.nn.Sequential(
torch.nn.Linear(obs_shape[0],512),
torch.nn.ReLU(),
torch.nn.Linear(512,action_shape[0])
)
self.opt = optim.Adam(self.net.parameters(),lr=lr)
        if torch.cuda.is_available():
            print("Using CUDA")
        # fall back to CPU when no GPU is present (the original chose 'cuda:1'
        # as the fallback, which fails on CPU-only machines)
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.to(self.device)
def forward(self, x):
return self.net(x)
class CriticModel(nn.Module):
def __init__(self, obs_shape, action_shape,lr):
super(CriticModel,self).__init__()
assert len(obs_shape) ==1, "This network only works on flat observations"
self.obs_shape = obs_shape
self.action_shape = action_shape
self.net = torch.nn.Sequential(
torch.nn.Linear(obs_shape[0]+action_shape[0],512),
torch.nn.ReLU(),
            torch.nn.Linear(512,1)  # one output because we are predicting Q values
)
self.opt = optim.Adam(self.net.parameters(),lr=lr)
        if torch.cuda.is_available():
            print("Using CUDA")
        # fall back to CPU when no GPU is present (the original chose 'cuda:1'
        # as the fallback, which fails on CPU-only machines)
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.to(self.device)
def forward(self, x):
return self.net(x)
class ReplayBuffer:
def __init__(self, buffer_size = 1000):
        self.buffer_size = buffer_size
        self.buffer = np.empty((buffer_size), dtype=object)
self.index = 0
    def insert(self, sars):
        if(self.index == 10):
            print("first 10 ",self.buffer[0:10])
        # ring buffer: overwrite the oldest entry once the buffer is full
        self.buffer[self.index%self.buffer_size] = sars
        self.index+=1
def sample(self, num_samples,current_episode_steps):
a = self.buffer[0:min(self.index,self.buffer_size)]
if len(self.buffer) > 0:
return np.random.choice(a, min(num_samples,self.index))
else:
return []
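# a minimal smoke-test sketch (shapes and hyperparameters are assumptions for
# illustration, not taken from the source repo):
if __name__ == "__main__":
    actor = ActorModel(obs_shape=(3,), action_shape=(1,), lr=1e-3)
    critic = CriticModel(obs_shape=(3,), action_shape=(1,), lr=1e-3)
    agent = DQNAgent(actor, critic)
    obs = np.zeros(3, dtype=np.float32)
    print(agent.get_actions(obs))  # action tensor of shape (1,)
    buffer = ReplayBuffer(buffer_size=100)
    buffer.insert(sars(obs, 0.0, 0.0, obs, False, 0.0))
    print(buffer.sample(1, 0))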
| 34.992063
| 99
| 0.608528
|
import torch
import gym
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from dataclasses import dataclass
from typing import Any
from random import random
@dataclass
class sars:
state: Any
action: Any
reward: float
next_state: Any
done: bool
qval: float
advantage: float = 0.0
class DQNAgent:
def __init__(self,actor_model,critic_model):
self.actor_model = actor_model
self.critic_model = critic_model
def get_actions(self, observations):
guessed_actions = self.actor_model(torch.Tensor(observations).to(self.actor_model.device))
return guessed_actions
def get_predicted_Q_values(self,observation_and_action):
guessed_Qs = self.critic_model(torch.Tensor(observation_and_action))
        return guessed_Qs
def update_target_model(self):
self.targetModel.load_state_dict(self.model.state_dict())
class ActorModel(nn.Module):
def __init__(self, obs_shape, action_shape,lr):
super(ActorModel,self).__init__()
assert len(obs_shape) ==1, "This network only works on flat observations"
self.obs_shape = obs_shape
self.action_shape = action_shape
self.net = torch.nn.Sequential(
torch.nn.Linear(obs_shape[0],512),
torch.nn.ReLU(),
torch.nn.Linear(512,action_shape[0])
)
self.opt = optim.Adam(self.net.parameters(),lr=lr)
if torch.cuda.is_available():
print("Using CUDA")
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, x):
return self.net(x)
class CriticModel(nn.Module):
def __init__(self, obs_shape, action_shape,lr):
super(CriticModel,self).__init__()
assert len(obs_shape) ==1, "This network only works on flat observations"
self.obs_shape = obs_shape
self.action_shape = action_shape
self.net = torch.nn.Sequential(
torch.nn.Linear(obs_shape[0]+action_shape[0],512),
torch.nn.ReLU(),
torch.nn.Linear(512,1)
)
self.opt = optim.Adam(self.net.parameters(),lr=lr)
if torch.cuda.is_available():
print("Using CUDA")
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, x):
return self.net(x)
class ReplayBuffer:
def __init__(self, buffer_size = 1000):
self.buffer_size = buffer_size
self.buffer = np.empty((buffer_size),dtype=object)
self.index = 0
def insert(self, sars):
if(self.index == 10):
print("first 10 ",self.buffer[0:10])
self.buffer[self.index%self.buffer_size] = sars
self.index+=1
    def sample(self, num_samples, current_episode_steps):
a = self.buffer[0:min(self.index,self.buffer_size)]
if len(self.buffer) > 0:
return np.random.choice(a, min(num_samples,self.index))
else:
return []
| true
| true
|
7903fb03c190715ca6439f348e3c6613fbaab8c1
| 1,714
|
py
|
Python
|
Lib/site-packages/django/contrib/messages/storage/session.py
|
ashutoshsuman99/Web-Blog-D19
|
a01a0ccc40e8823110c01ebe4f43d9351df57295
|
[
"bzip2-1.0.6"
] | 123
|
2015-01-15T06:56:45.000Z
|
2022-03-19T22:18:55.000Z
|
Lib/site-packages/django/contrib/messages/storage/session.py
|
ashutoshsuman99/Web-Blog-D19
|
a01a0ccc40e8823110c01ebe4f43d9351df57295
|
[
"bzip2-1.0.6"
] | 68
|
2016-12-12T20:38:47.000Z
|
2020-07-26T18:28:49.000Z
|
Lib/site-packages/django/contrib/messages/storage/session.py
|
ashutoshsuman99/Web-Blog-D19
|
a01a0ccc40e8823110c01ebe4f43d9351df57295
|
[
"bzip2-1.0.6"
] | 120
|
2016-08-18T14:53:03.000Z
|
2020-06-16T13:27:20.000Z
|
import json
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import (
MessageDecoder, MessageEncoder,
)
from django.utils import six
class SessionStorage(BaseStorage):
"""
Stores messages in the session (that is, django.contrib.sessions).
"""
session_key = '_messages'
def __init__(self, request, *args, **kwargs):
assert hasattr(request, 'session'), "The session-based temporary "\
"message storage requires session middleware to be installed, "\
"and come before the message middleware in the "\
"MIDDLEWARE_CLASSES list."
super(SessionStorage, self).__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return self.deserialize_messages(self.request.session.get(self.session_key)), True
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
encoder = MessageEncoder(separators=(',', ':'))
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, six.string_types):
return json.loads(data, cls=MessageDecoder)
return data
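# a minimal round-trip sketch of the two helpers above (illustrative only; a
# real call site needs a request object with session middleware installed):
#
#   from django.contrib.messages.storage.base import Message
#   storage = SessionStorage(request)
#   payload = storage.serialize_messages([Message(20, "saved!")])
#   messages = storage.deserialize_messages(payload)  # -> [Message(20, "saved!")]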
| 34.979592
| 90
| 0.65811
|
import json
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import (
MessageDecoder, MessageEncoder,
)
from django.utils import six
class SessionStorage(BaseStorage):
session_key = '_messages'
def __init__(self, request, *args, **kwargs):
assert hasattr(request, 'session'), "The session-based temporary "\
"message storage requires session middleware to be installed, "\
"and come before the message middleware in the "\
"MIDDLEWARE_CLASSES list."
super(SessionStorage, self).__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
return self.deserialize_messages(self.request.session.get(self.session_key)), True
def _store(self, messages, response, *args, **kwargs):
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
encoder = MessageEncoder(separators=(',', ':'))
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, six.string_types):
return json.loads(data, cls=MessageDecoder)
return data
| true
| true
|
7903fb5373e14a37b633e0a4aaccefc50a37fcdd
| 591
|
py
|
Python
|
sfdoc/publish/migrations/0032_docset.py
|
SFDO-Tooling/sfdoc
|
6bc7277cbc6e01c03581a7217a2c33fbfa91a537
|
[
"BSD-3-Clause"
] | 5
|
2019-08-01T18:53:00.000Z
|
2022-02-07T16:16:09.000Z
|
sfdoc/publish/migrations/0032_docset.py
|
SFDO-Tooling/sfdoc
|
6bc7277cbc6e01c03581a7217a2c33fbfa91a537
|
[
"BSD-3-Clause"
] | 144
|
2019-04-25T21:40:44.000Z
|
2022-03-28T20:43:31.000Z
|
sfdoc/publish/migrations/0032_docset.py
|
SalesforceFoundation/sfdoc
|
6bc7277cbc6e01c03581a7217a2c33fbfa91a537
|
[
"BSD-3-Clause"
] | 1
|
2019-03-28T05:06:06.000Z
|
2019-03-28T05:06:06.000Z
|
# Generated by Django 2.2.1 on 2019-07-06 21:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('publish', '0031_bundle_description'),
]
operations = [
migrations.CreateModel(
name='Docset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('docset_id', models.CharField(max_length=255)),
('name', models.CharField(default='', max_length=255)),
],
),
]
| 26.863636
| 114
| 0.57868
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('publish', '0031_bundle_description'),
]
operations = [
migrations.CreateModel(
name='Docset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('docset_id', models.CharField(max_length=255)),
('name', models.CharField(default='', max_length=255)),
],
),
]
| true
| true
|
7903fb54012d2c5ce80e5a6f76f319ac3b640c93
| 2,845
|
py
|
Python
|
wxcloudrun/common/tabledrawer.py
|
vandyzhou/wxcloudrun-django
|
454f9c1ab827543f2635a549ca7e251ed35d9305
|
[
"MIT"
] | null | null | null |
wxcloudrun/common/tabledrawer.py
|
vandyzhou/wxcloudrun-django
|
454f9c1ab827543f2635a549ca7e251ed35d9305
|
[
"MIT"
] | null | null | null |
wxcloudrun/common/tabledrawer.py
|
vandyzhou/wxcloudrun-django
|
454f9c1ab827543f2635a549ca7e251ed35d9305
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2022/2/9 12:09 PM
# @Author: zhoumengjie
# @File : tabledrawer.py
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
def draw_table(columns_head:[], cell_vals=[]):
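    # note: columns_head and cell_vals are currently unused; the body below
    # draws a fixed demo chart from the hard-coded data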
    # use a font that can render CJK glyphs, and keep minus signs displayable
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # figure
    fig, ax = plt.subplots(figsize=(10, 4), dpi=100)
    # data
data = [
[100, 200, 300, -100, 350],
[-120, 290, -90, 450, 150]
]
    # column and row labels
    columns = ('一', '二', '三', '四', '五')
    rows = ['A', 'B']
    # plotting parameters
index = np.arange(len(columns)) - 0.1
bar_width = 0.4
    # colors
    colors = ['turquoise', 'coral']
    # bar series
bar1 = plt.bar(index, data[0], bar_width, color=colors[0], edgecolor='grey')
bar2 = plt.bar(index + bar_width, data[1], bar_width, color=colors[1], edgecolor='grey')
    # title and y-axis label
    ax.set_title('收益情况', fontsize=16, y=1.1, x=0.44)
    ax.set_ylabel('元', fontsize=12, color='black', alpha=0.7, rotation=360)
    ax.set_ylim(-150, 500)
    # data labels (left disabled)
    # ax.bar_label(bar1, label_type='edge')
    # ax.bar_label(bar2, label_type='edge')
    # hide the x/y tick marks
ax.tick_params(axis=u'both', which=u'both', length=0)
plt.xticks([])
table = plt.table(cellText=data, rowLabels=rows,
rowColours=colors,
colLabels=columns, cellLoc='center', loc='bottom',
bbox=[0, -0.4, 1, 0.24])
cellDict = table.get_celld()
for i in range(0, len(columns)):
cellDict[(0, i)].set_height(0.6)
for j in range(1, len(rows) + 1):
cellDict[(j, i)].set_height(0.4)
cellDict[(1, -1)].set_height(0.4)
cellDict[(2, -1)].set_height(0.4)
table.auto_set_font_size(False)
table.set_fontsize(10)
for key, cell in table.get_celld().items():
cell.set_linewidth(0.6)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
name = ['', '']
ax.legend(name, handlelength=0.7, labelspacing=0.6,
bbox_to_anchor=(-0.1, -0.23), loc='upper left', frameon=False)
plt.show()
if __name__ == '__main__':
# draw_table(['A', 'B'], [['中国', '必胜'], ['你好', '谢谢']])
# print(4800 / 1100 / 1000)
data = {
'linux': [1.2, 2.2, 3.1, '中国', 2.0, 1.0, 2.1, 3.5, 4.0, 2.0, ],
'linuxmi': [5.2, 6.7, 7.9, 8.3, 1.2, 5.7, 6.1, 7.2, 8.3, '-', ],
}
df = pd.DataFrame(data)
fig, ax = plt.subplots(figsize=(3, 3))
ax.axis('off')
ax.axis('tight')
ax.table(cellText=df.values,
colLabels=df.columns,
bbox=[0, 0, 1, 1],
)
# plt.savefig('xx.png')
plt.show()
| 26.588785
| 92
| 0.557118
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
def draw_table(columns_head:[], cell_vals=[]):
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
fig, ax = plt.subplots(figsize=(10, 4), dpi=100)
data = [
[100, 200, 300, -100, 350],
[-120, 290, -90, 450, 150]
]
columns = ('一', '二', '三', '四', '五')
rows = ['A', 'B']
index = np.arange(len(columns)) - 0.1
bar_width = 0.4
colors = ['turquoise', 'coral']
bar1 = plt.bar(index, data[0], bar_width, color=colors[0], edgecolor='grey')
bar2 = plt.bar(index + bar_width, data[1], bar_width, color=colors[1], edgecolor='grey')
ax.set_title('收益情况', fontsize=16, y=1.1, x=0.44)
ax.set_ylabel('元', fontsize=12, color='black', alpha=0.7, rotation=360)
ax.set_ylim(-150, 500)
ax.tick_params(axis=u'both', which=u'both', length=0)
plt.xticks([])
table = plt.table(cellText=data, rowLabels=rows,
rowColours=colors,
colLabels=columns, cellLoc='center', loc='bottom',
bbox=[0, -0.4, 1, 0.24])
cellDict = table.get_celld()
for i in range(0, len(columns)):
cellDict[(0, i)].set_height(0.6)
for j in range(1, len(rows) + 1):
cellDict[(j, i)].set_height(0.4)
cellDict[(1, -1)].set_height(0.4)
cellDict[(2, -1)].set_height(0.4)
table.auto_set_font_size(False)
table.set_fontsize(10)
for key, cell in table.get_celld().items():
cell.set_linewidth(0.6)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
name = ['', '']
ax.legend(name, handlelength=0.7, labelspacing=0.6,
bbox_to_anchor=(-0.1, -0.23), loc='upper left', frameon=False)
plt.show()
if __name__ == '__main__':
data = {
'linux': [1.2, 2.2, 3.1, '中国', 2.0, 1.0, 2.1, 3.5, 4.0, 2.0, ],
'linuxmi': [5.2, 6.7, 7.9, 8.3, 1.2, 5.7, 6.1, 7.2, 8.3, '-', ],
}
df = pd.DataFrame(data)
fig, ax = plt.subplots(figsize=(3, 3))
ax.axis('off')
ax.axis('tight')
ax.table(cellText=df.values,
colLabels=df.columns,
bbox=[0, 0, 1, 1],
)
plt.show()
| true
| true
|
7903fc686ea9db90d55b8295e8e2299266fe40b7
| 635
|
py
|
Python
|
Python3-Basics/Chapter11_Exception02_Warning.py
|
anliven/Reading-Code-Learning-Python
|
a814cab207bbaad6b5c69b9feeb8bf2f459baf2b
|
[
"Apache-2.0"
] | null | null | null |
Python3-Basics/Chapter11_Exception02_Warning.py
|
anliven/Reading-Code-Learning-Python
|
a814cab207bbaad6b5c69b9feeb8bf2f459baf2b
|
[
"Apache-2.0"
] | null | null | null |
Python3-Basics/Chapter11_Exception02_Warning.py
|
anliven/Reading-Code-Learning-Python
|
a814cab207bbaad6b5c69b9feeb8bf2f459baf2b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import warnings
# warnings.filterwarnings("ignore")  # suppress warnings and choose the action to take
warnings.warn("# This is a test warning 111.")
print("Hello One")
warnings.filterwarnings("ignore", category=DeprecationWarning) # 抑制特定类型的警告
warnings.warn("# This is a test warning 222.", DeprecationWarning) # 被抑制
warnings.warn("# Something else.") # 未被抑制
print("Hello Two")
warnings.filterwarnings("error") # 将警告转换为错误
warnings.warn("# This is a test warning 333.", DeprecationWarning) # 指定引发的异常
print("Hello Three")
# ### 警告
# 警告不是异常,不影响程序的运行,可用于指示程序的状态;
# 可根据异常来过滤掉特定类型的警告;
# 发出警告时,可指定引发的异常(告警类别必须是Warning的子类);
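# a small illustrative addition: once promoted to errors, a warning can be
# caught like any other exception, e.g.:
#   try:
#       warnings.warn("promoted", DeprecationWarning)
#   except DeprecationWarning:
#       print("caught the promoted warning")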
| 30.238095
| 78
| 0.707087
|
import warnings
# This is a test warning 111.")
print("Hello One")
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.warn("# This is a test warning 222.", DeprecationWarning)
warnings.warn("# Something else.")
print("Hello Two")
warnings.filterwarnings("error")
warnings.warn("# This is a test warning 333.", DeprecationWarning)
print("Hello Three")
| true
| true
|