hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5dce8eb43814f4b1a92f8e04cfdb8ab66b1647ad
| 7,705
|
py
|
Python
|
astropy/io/fits/hdu/streaming.py
|
jayvdb/astropy
|
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
|
[
"BSD-3-Clause"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
astropy/io/fits/hdu/streaming.py
|
jayvdb/astropy
|
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
|
[
"BSD-3-Clause"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
astropy/io/fits/hdu/streaming.py
|
jayvdb/astropy
|
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
|
[
"BSD-3-Clause"
] | 31
|
2019-03-10T09:51:27.000Z
|
2022-02-14T23:11:12.000Z
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import os
from .base import _BaseHDU, BITPIX2DTYPE
from .hdulist import HDUList
from .image import PrimaryHDU
from astropy.io.fits.file import _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import fileobj_name
class StreamingHDU:
    """
    A class that provides the capability to stream data to a FITS file
    instead of requiring data to all be written at once.
    The following pseudocode illustrates its use::
        header = astropy.io.fits.Header()
        for all the cards you need in the header:
            header[key] = (value, comment)
        shdu = astropy.io.fits.StreamingHDU('filename.fits', header)
        for each piece of data:
            shdu.write(data)
        shdu.close()
    """
    def __init__(self, name, header):
        """
        Construct a `StreamingHDU` object given a file name and a header.
        Parameters
        ----------
        name : file path, file object, or file like object
            The file to which the header and data will be streamed. If opened,
            the file object must be opened in a writeable binary mode such as
            'wb' or 'ab+'.
        header : `Header` instance
            The header object associated with the data to be written
            to the file.
        Notes
        -----
        The file will be opened and the header appended to the end of
        the file. If the file does not already exist, it will be
        created, and if the header represents a Primary header, it
        will be written to the beginning of the file. If the file
        does not exist and the provided header is not a Primary
        header, a default Primary HDU will be inserted at the
        beginning of the file and the provided header will be added as
        the first extension. If the file does already exist, but the
        provided header represents a Primary header, the header will
        be modified to an image extension header and appended to the
        end of the file.
        """
        # Gzip files are not seekable in the way the streaming logic
        # below requires, so reject them up front.
        if isinstance(name, gzip.GzipFile):
            raise TypeError('StreamingHDU not supported for GzipFile objects.')
        # Copy so the caller's header is never mutated by the XTENSION /
        # PCOUNT / GCOUNT rewriting below.
        self._header = header.copy()
        # handle a file object instead of a file name
        filename = fileobj_name(name) or ''
        # Check if the file already exists. If it does not, check to see
        # if we were provided with a Primary Header. If not we will need
        # to prepend a default PrimaryHDU to the file before writing the
        # given header.
        newfile = False
        if filename:
            if not os.path.exists(filename) or os.path.getsize(filename) == 0:
                newfile = True
        elif (hasattr(name, 'len') and name.len == 0):
            # File-like object exposing a length of zero counts as "new".
            newfile = True
        if newfile:
            if 'SIMPLE' not in self._header:
                # Not a Primary header: write a minimal default Primary HDU
                # first so the streamed HDU becomes the first extension.
                hdulist = HDUList([PrimaryHDU()])
                hdulist.writeto(name, 'exception')
        else:
            # This will not be the first extension in the file so we
            # must change the Primary header provided into an image
            # extension header.
            if 'SIMPLE' in self._header:
                self._header.set('XTENSION', 'IMAGE', 'Image extension',
                                 after='SIMPLE')
                del self._header['SIMPLE']
            if 'PCOUNT' not in self._header:
                # PCOUNT must follow the last NAXISn card (or NAXIS itself
                # when NAXIS == 0).
                dim = self._header['NAXIS']
                if dim == 0:
                    dim = ''
                else:
                    dim = str(dim)
                self._header.set('PCOUNT', 0, 'number of parameters',
                                 after='NAXIS' + dim)
            if 'GCOUNT' not in self._header:
                self._header.set('GCOUNT', 1, 'number of groups',
                                 after='PCOUNT')
        self._ffo = _File(name, 'append')
        # TODO : Fix this once the HDU writing API is cleaned up
        tmp_hdu = _BaseHDU()
        # Passing self._header as an argument to _BaseHDU() will cause its
        # values to be modified in undesired ways...need to have a better way
        # of doing this
        tmp_hdu._header = self._header
        # Record where the header and the data section begin so write()
        # can track how many data bytes have been streamed so far.
        self._header_offset = tmp_hdu._writeheader(self._ffo)[0]
        self._data_offset = self._ffo.tell()
        self._size = self.size
        if self._size != 0:
            self.writecomplete = False
        else:
            self.writecomplete = True
    # Support the 'with' statement
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def write(self, data):
        """
        Write the given data to the stream.
        Parameters
        ----------
        data : ndarray
            Data to stream to the file.
        Returns
        -------
        writecomplete : int
            Flag that when `True` indicates that all of the required
            data has been written to the stream.
        Notes
        -----
        Only the amount of data specified in the header provided to the class
        constructor may be written to the stream. If the provided data would
        cause the stream to overflow, an `OSError` exception is
        raised and the data is not written. Once sufficient data has been
        written to the stream to satisfy the amount specified in the header,
        the stream is padded to fill a complete FITS block and no more data
        will be accepted. An attempt to write more data after the stream has
        been filled will raise an `OSError` exception. If the
        dtype of the input data does not match what is expected by the header,
        a `TypeError` exception is raised.
        """
        # Bytes of data already streamed since the data section began.
        size = self._ffo.tell() - self._data_offset
        if self.writecomplete or size + data.nbytes > self._size:
            raise OSError('Attempt to write more data to the stream than the '
                          'header specified.')
        if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name:
            raise TypeError('Supplied data does not match the type specified '
                            'in the header.')
        if data.dtype.str[0] != '>':
            # byteswap little endian arrays before writing
            output = data.byteswap()
        else:
            output = data
        self._ffo.writearray(output)
        if self._ffo.tell() - self._data_offset == self._size:
            # the stream is full so pad the data to the next FITS block
            self._ffo.write(_pad_length(self._size) * '\0')
            self.writecomplete = True
        self._ffo.flush()
        return self.writecomplete
    @property
    def size(self):
        """
        Return the size (in bytes) of the data portion of the HDU.
        """
        size = 0
        naxis = self._header.get('NAXIS', 0)
        if naxis > 0:
            # Random-groups files (SIMPLE=T with GROUPS=T) skip NAXIS1,
            # which is 0 by convention for that format.
            simple = self._header.get('SIMPLE', 'F')
            random_groups = self._header.get('GROUPS', 'F')
            if simple == 'T' and random_groups == 'T':
                groups = 1
            else:
                groups = 0
            size = 1
            for idx in range(groups, naxis):
                size = size * self._header['NAXIS' + str(idx + 1)]
            bitpix = self._header['BITPIX']
            gcount = self._header.get('GCOUNT', 1)
            pcount = self._header.get('PCOUNT', 0)
            # Standard FITS data-size formula; BITPIX is bits per element.
            size = abs(bitpix) * gcount * (pcount + size) // 8
        return size
    def close(self):
        """
        Close the physical FITS file.
        """
        self._ffo.close()
| 33.5
| 79
| 0.573134
| 972
| 7,705
| 4.469136
| 0.257202
| 0.048343
| 0.014963
| 0.01174
| 0.101289
| 0.057781
| 0.034761
| 0.021409
| 0
| 0
| 0
| 0.004393
| 0.350032
| 7,705
| 229
| 80
| 33.646288
| 0.863019
| 0.416483
| 0
| 0.096774
| 0
| 0
| 0.094165
| 0
| 0
| 0
| 0
| 0.004367
| 0
| 1
| 0.064516
| false
| 0
| 0.086022
| 0.010753
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dce95b004d795178936b1032e10425b07f77812
| 3,815
|
py
|
Python
|
geoprisma/tests/test_templatetags.py
|
groupe-conseil-nutshimit-nippour/django-geoprisma
|
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
|
[
"BSD-3-Clause"
] | null | null | null |
geoprisma/tests/test_templatetags.py
|
groupe-conseil-nutshimit-nippour/django-geoprisma
|
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
|
[
"BSD-3-Clause"
] | 5
|
2020-02-12T00:23:17.000Z
|
2021-12-13T19:46:33.000Z
|
geoprisma/tests/test_templatetags.py
|
groupe-conseil-nutshimit-nippour/django-geoprisma
|
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
|
[
"BSD-3-Clause"
] | null | null | null |
import django
from django.test import TestCase
from django.template import Template, Context
class genericObj(object):
    """A minimal stand-in object used to exercise the templatetag filters."""

    def __init__(self):
        # Fixed values the template tests assert against.
        self.name = "test"
        self.status = "ready"

    def getOption(self, optionName):
        """Return the attribute matching ``optionName`` ('name' or 'status')."""
        if optionName == "status":
            return self.status
        if optionName == "name":
            return self.name

    def getName(self):
        """Return the object's name."""
        return self.name
def render(template_string, context_dict=None):
    """
    Render ``template_string`` against ``context_dict`` and return the
    output with surrounding whitespace stripped.

    A shortcut for testing template output.
    """
    # Avoid a mutable default argument by building the context lazily.
    context = Context({} if context_dict is None else context_dict)
    template = Template(template_string)
    return template.render(context).strip()
class object_extrasTests(TestCase):
    """Tests for the ``object_extras`` templatetag filters (``call``/``args``/``obj_type``)."""
    def test_callMethod(self):
        # ``args`` stages an argument list; ``call`` invokes the named
        # method on the context object with those arguments.
        genObj = genericObj()
        template = """
            {% load object_extras %}
            {{ obj|args:"name"|call:"getOption" }}
            """
        context = {
            'obj': genObj
        }
        self.assertEqual(render(template, context), "test")
        # ``call`` alone works for zero-argument methods.
        template = """
            {% load object_extras %}
            {{ obj|call:"getName" }}
            """
        context = {
            'obj': genObj
        }
        self.assertEqual(render(template, context), "test")
    def test_check_type(self):
        # ``obj_type`` renders "True"/"False" depending on whether the
        # object's type name matches the filter argument.
        genObj = genericObj()
        template = """
            {% load object_extras %}
            {{ obj|obj_type:"genericObj" }}
            """
        context = {
            'obj': genObj
        }
        self.assertEqual(render(template, context), "True")
        template = """
            {% load object_extras %}
            {{ obj|obj_type:"notexist" }}
            """
        context = {
            'obj': genObj
        }
        self.assertEqual(render(template, context), "False")
class static_extrasTests(TestCase):
    """Tests for the ``static_extras`` templatetags that resolve widget assets."""
    def setUp(self):
        # One widget type per asset kind: JS lookup and CSS lookup.
        self.widgetTypeSetJs = set()
        self.widgetTypeSetJs.add('queryonclick')
        self.widgetTypeSetCss = set()
        self.widgetTypeSetCss.add('geoexttoolbar')
    def test_getJsStatics(self):
        # ``getJsStatics`` should expand a widget-type set into the widget's
        # JS file paths, rendered here through the ``static`` tag.
        template = """
            {% load staticfiles %}
            {% load static_extras %}
            {% getJsStatics widgetTypeSet as widget_js %}
            {% for static_path in widget_js %}
            <script src="{% static static_path %}" type="text/javascript"></script>
            {% endfor %}
            """
        context = {
            'widgetTypeSet': self.widgetTypeSetJs
        }
        out = '<script src="/static/geoprisma/widgets/queryonclick/js/QueryOnClick.js" type="text/javascript"></script>'
        self.assertEqual(render(template, context), out)
    def test_getCssStatics(self):
        # Same resolution as above but for stylesheet links.
        template = """
            {% load staticfiles %}
            {% load static_extras %}
            {% getCssStatics widgetTypeSet as widget_css %}
            {% for static_path in widget_css %}
            <link rel="stylesheet" type="text/css" href="{% static static_path %}" />
            {% endfor %}
            """
        context = {
            'widgetTypeSet': self.widgetTypeSetCss
        }
        out = '<link rel="stylesheet" type="text/css" href="/static/geoprisma/widgets/geoexttoolbar/css/GeoExtToolbar.css" />'
        self.assertEqual(render(template, context), out)
    def test_template_exist(self):
        # ``template_exists`` renders "True"/"False" for a template path.
        template = """
            {% load static_extras %}
            {{ "geoprisma/widgets/queryonclick/queryonclick.html"|template_exists }}
            """
        self.assertEqual(render(template), "True")
        template = """
            {% load static_extras %}
            {{ "geoprisma/widgets/queryonclick/queryonclicknotexist.html"|template_exists }}
            """
        self.assertEqual(render(template), "False")
| 28.901515
| 127
| 0.550459
| 341
| 3,815
| 6.043988
| 0.249267
| 0.061135
| 0.081514
| 0.112567
| 0.423096
| 0.389617
| 0.389617
| 0.231926
| 0.054343
| 0
| 0
| 0
| 0.32346
| 3,815
| 131
| 128
| 29.122137
| 0.798528
| 0.021232
| 0
| 0.45098
| 0
| 0.019608
| 0.408009
| 0.116214
| 0
| 0
| 0
| 0
| 0.078431
| 1
| 0.098039
| false
| 0
| 0.029412
| 0.009804
| 0.196078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dceeb675241617c8282ee5a28736fe976ad2fa2
| 4,447
|
py
|
Python
|
src/ggrc_workflows/models/task_group.py
|
acidburn0zzz/ggrc-core
|
386781d08172102eb51030b65db8212974651628
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2016-11-06T05:21:24.000Z
|
2016-11-06T05:21:24.000Z
|
src/ggrc_workflows/models/task_group.py
|
acidburn0zzz/ggrc-core
|
386781d08172102eb51030b65db8212974651628
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-02-02T23:09:40.000Z
|
2021-02-08T21:00:48.000Z
|
src/ggrc_workflows/models/task_group.py
|
Acidburn0zzz/ggrc-core
|
386781d08172102eb51030b65db8212974651628
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A module containing the workflow TaskGroup model."""
from sqlalchemy import or_
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models.associationproxy import association_proxy
from ggrc.models.mixins import (
Titled, Slugged, Described, Timeboxed, WithContact
)
from ggrc.models.reflection import AttributeInfo
from ggrc.models.reflection import PublishOnly
from ggrc.models import all_models
from ggrc_workflows.models.task_group_object import TaskGroupObject
class TaskGroup(
    WithContact, Timeboxed, Described, Titled, Slugged, db.Model):
  """Workflow TaskGroup model."""
  __tablename__ = 'task_groups'
  # Titles need not be unique across task groups.
  _title_uniqueness = False
  # Owning workflow; rows are removed when the workflow is deleted.
  workflow_id = db.Column(
      db.Integer,
      db.ForeignKey('workflows.id', ondelete="CASCADE"),
      nullable=False,
  )
  lock_task_order = db.Column(db.Boolean(), nullable=True)
  task_group_objects = db.relationship(
      'TaskGroupObject', backref='task_group', cascade='all, delete-orphan')
  # Convenience proxy exposing the mapped objects directly.
  objects = association_proxy(
      'task_group_objects', 'object', 'TaskGroupObject')
  task_group_tasks = db.relationship(
      'TaskGroupTask', backref='task_group', cascade='all, delete-orphan')
  cycle_task_groups = db.relationship(
      'CycleTaskGroup', backref='task_group')
  # NOTE(review): sort order is stored as a string key — presumably for
  # lexicographic fractional ordering; confirm against the UI code.
  sort_index = db.Column(
      db.String(length=250), default="", nullable=False)
  # Attributes exposed through the JSON (publish) API.
  _publish_attrs = [
      'workflow',
      'task_group_objects',
      PublishOnly('objects'),
      'task_group_tasks',
      'lock_task_order',
      'sort_index',
      # Intentionally do not include `cycle_task_groups`
      # 'cycle_task_groups',
  ]
  # Import/export column aliases and custom filter hooks.
  _aliases = {
      "title": "Summary",
      "description": "Details",
      "contact": {
          "display_name": "Assignee",
          "mandatory": True,
          "filter_by": "_filter_by_contact",
      },
      "secondary_contact": None,
      "start_date": None,
      "end_date": None,
      "workflow": {
          "display_name": "Workflow",
          "mandatory": True,
          "filter_by": "_filter_by_workflow",
      },
      "task_group_objects": {
          "display_name": "Objects",
          "type": AttributeInfo.Type.SPECIAL_MAPPING,
          "filter_by": "_filter_by_objects",
      },
  }
  def copy(self, _other=None, **kwargs):
    """Copy this task group, optionally cloning its people, objects and tasks."""
    columns = [
        'title', 'description', 'workflow', 'sort_index', 'modified_by',
        'context'
    ]
    # Keep the original assignee only when explicitly asked to clone people
    # and one is set; otherwise the copy is assigned to the current user.
    if kwargs.get('clone_people', False) and getattr(self, "contact"):
      columns.append("contact")
    else:
      kwargs["contact"] = get_current_user()
    target = self.copy_into(_other, columns, **kwargs)
    if kwargs.get('clone_objects', False):
      self.copy_objects(target, **kwargs)
    if kwargs.get('clone_tasks', False):
      self.copy_tasks(target, **kwargs)
    return target
  def copy_objects(self, target, **kwargs):
    """Copy all mapped objects onto ``target``; returns ``target``."""
    # pylint: disable=unused-argument
    for task_group_object in self.task_group_objects:
      target.task_group_objects.append(task_group_object.copy(
          task_group=target,
          context=target.context,
      ))
    return target
  def copy_tasks(self, target, **kwargs):
    """Copy all tasks onto ``target``, honoring ``clone_people``; returns ``target``."""
    for task_group_task in self.task_group_tasks:
      target.task_group_tasks.append(task_group_task.copy(
          None,
          task_group=target,
          context=target.context,
          clone_people=kwargs.get("clone_people", False),
      ))
    return target
  @classmethod
  def _filter_by_workflow(cls, predicate):
    """EXISTS filter matching the owning workflow's slug or title."""
    from ggrc_workflows.models import Workflow
    return Workflow.query.filter(
        (Workflow.id == cls.workflow_id) &
        (predicate(Workflow.slug) | predicate(Workflow.title))
    ).exists()
  @classmethod
  def _filter_by_objects(cls, predicate):
    """EXISTS filter matching any mapped object's slug (or email) by predicate.

    Builds one EXISTS subquery per model type, since TaskGroupObject mappings
    are polymorphic (object_type/object_id pairs).
    """
    parts = []
    for model_name in all_models.__all__:
      model = getattr(all_models, model_name)
      query = getattr(model, "query", None)
      # Prefer slug; fall back to email for person-like models.
      field = getattr(model, "slug", getattr(model, "email", None))
      if query is None or field is None or not hasattr(model, "id"):
        continue
      parts.append(query.filter(
          (TaskGroupObject.object_type == model_name) &
          (model.id == TaskGroupObject.object_id) &
          predicate(field)
      ).exists())
    return TaskGroupObject.query.filter(
        (TaskGroupObject.task_group_id == cls.id) &
        or_(*parts)
    ).exists()
| 29.256579
| 78
| 0.659996
| 505
| 4,447
| 5.570297
| 0.289109
| 0.067188
| 0.034127
| 0.017064
| 0.127266
| 0.07252
| 0.027017
| 0
| 0
| 0
| 0
| 0.002595
| 0.220148
| 4,447
| 151
| 79
| 29.450331
| 0.808535
| 0.064313
| 0
| 0.128205
| 0
| 0
| 0.156536
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042735
| false
| 0
| 0.08547
| 0
| 0.273504
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dcf0b13e0d53d6745a01c7cc15df8b5de13bc88
| 1,248
|
py
|
Python
|
src/tests/app_functions/menu/test_change_auto_login.py
|
DanielNoord/DuolingoPomodoro
|
307b386daf3216fb9ba86f983f0e39f6647ffd64
|
[
"MIT"
] | null | null | null |
src/tests/app_functions/menu/test_change_auto_login.py
|
DanielNoord/DuolingoPomodoro
|
307b386daf3216fb9ba86f983f0e39f6647ffd64
|
[
"MIT"
] | 4
|
2021-04-25T15:39:32.000Z
|
2022-02-18T20:58:00.000Z
|
src/tests/app_functions/menu/test_change_auto_login.py
|
DanielNoord/DuolingoPomodoro
|
307b386daf3216fb9ba86f983f0e39f6647ffd64
|
[
"MIT"
] | null | null | null |
import pytest
import rumps
from src.app_functions.menu.change_auto_login import change_auto_login
@pytest.fixture(name="basic_app")
def create_app():
    """Build a bare ``rumps.App`` carrying an empty settings dict.

    Returns:
        rumps.App: Basic app for passing into the menu functions under test.
    """
    application = rumps.App("TestApp")
    application.settings = {}
    return application
def test_setting_is_true(mocker, basic_app):
    """Toggling from True flips the setting to False and refreshes the menu."""
    basic_app.settings["auto_login"] = True
    # Stub out persistence and the menu refresh so only the toggle runs.
    mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
    update_menu_mock = mocker.patch(
        "src.app_functions.menu.change_auto_login.update_menu"
    )
    change_auto_login(basic_app)
    assert basic_app.settings["auto_login"] is False
    update_menu_mock.assert_called_once_with(basic_app)
def test_setting_is_false(mocker, basic_app):
    """Toggling from False flips the setting to True and refreshes the menu."""
    basic_app.settings["auto_login"] = False
    # Stub out persistence and the menu refresh so only the toggle runs.
    mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
    update_menu_mock = mocker.patch(
        "src.app_functions.menu.change_auto_login.update_menu"
    )
    change_auto_login(basic_app)
    assert basic_app.settings["auto_login"] is True
    update_menu_mock.assert_called_once_with(basic_app)
| 34.666667
| 88
| 0.758814
| 183
| 1,248
| 4.863388
| 0.251366
| 0.116854
| 0.134831
| 0.106742
| 0.735955
| 0.640449
| 0.640449
| 0.602247
| 0.51236
| 0.51236
| 0
| 0
| 0.143429
| 1,248
| 35
| 89
| 35.657143
| 0.832554
| 0.15625
| 0
| 0.363636
| 0
| 0
| 0.261719
| 0.207031
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dcf455584ab00f2818650ba6fb4636dff7442e6
| 3,105
|
py
|
Python
|
deepobs/tensorflow/testproblems/cifar100_vgg19.py
|
H0merJayS1mpson/deepobscustom
|
e85816ce42466326dac18841c58b79f87a4a1a7c
|
[
"MIT"
] | null | null | null |
deepobs/tensorflow/testproblems/cifar100_vgg19.py
|
H0merJayS1mpson/deepobscustom
|
e85816ce42466326dac18841c58b79f87a4a1a7c
|
[
"MIT"
] | null | null | null |
deepobs/tensorflow/testproblems/cifar100_vgg19.py
|
H0merJayS1mpson/deepobscustom
|
e85816ce42466326dac18841c58b79f87a4a1a7c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""VGG 19 architecture for CIFAR-100."""
import tensorflow as tf
from ._vgg import _vgg
from ..datasets.cifar100 import cifar100
from .testproblem import TestProblem
class cifar100_vgg19(TestProblem):
    """DeepOBS test problem class for the VGG 19 network on Cifar-100.
    The CIFAR-100 images are resized to ``224`` by ``224`` to fit the input
    dimension of the original VGG network, which was designed for ImageNet.
    Details about the architecture can be found in the `original paper`_.
    VGG 19 consists of 19 weight layers, of mostly convolutions. The model uses
    cross-entroy loss. A weight decay is used on the weights (but not the biases)
    which defaults to ``5e-4``.
    .. _original paper: https://arxiv.org/abs/1409.1556
    Args:
      batch_size (int): Batch size to use.
      weight_decay (float): Weight decay factor. Weight decay (L2-regularization)
          is used on the weights but not the biases.
          Defaults to ``5e-4``.
    Attributes:
      dataset: The DeepOBS data set class for Cifar-100.
      train_init_op: A tensorflow operation initializing the test problem for the
          training phase.
      train_eval_init_op: A tensorflow operation initializing the test problem for
          evaluating on training data.
      test_init_op: A tensorflow operation initializing the test problem for
          evaluating on test data.
      losses: A tf.Tensor of shape (batch_size, ) containing the per-example loss
          values.
      regularizer: A scalar tf.Tensor containing a regularization term.
      accuracy: A scalar tf.Tensor containing the mini-batch mean accuracy.
    """

    def __init__(self, batch_size, weight_decay=5e-4):
        """Create a new VGG 19 test problem instance on Cifar-100.
        Args:
          batch_size (int): Batch size to use.
          weight_decay (float): Weight decay factor. Weight decay (L2-regularization)
              is used on the weights but not the biases.
              Defaults to ``5e-4``.
        """
        super(cifar100_vgg19, self).__init__(batch_size, weight_decay)

    def set_up(self):
        """Set up the VGG 19 test problem on Cifar-100."""
        self.dataset = cifar100(self._batch_size)
        # Expose the dataset's initializer ops so runners can switch phases.
        self.train_init_op = self.dataset.train_init_op
        self.train_eval_init_op = self.dataset.train_eval_init_op
        self.valid_init_op = self.dataset.valid_init_op
        self.test_init_op = self.dataset.test_init_op
        # Boolean tensor: True during the training phase (controls e.g.
        # dropout inside the shared _vgg builder).
        training = tf.equal(self.dataset.phase, "train")
        x, y = self.dataset.batch
        linear_outputs = _vgg(
            x,
            training,
            variant=19,
            num_outputs=100,
            weight_decay=self._weight_decay,
        )
        # Per-example cross-entropy on the raw (pre-softmax) outputs.
        self.losses = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=y, logits=linear_outputs
        )
        y_pred = tf.argmax(linear_outputs, 1)
        y_correct = tf.argmax(y, 1)
        correct_prediction = tf.equal(y_pred, y_correct)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Sum of all weight-decay terms registered by the _vgg layers.
        self.regularizer = tf.losses.get_regularization_loss()
| 37.865854
| 85
| 0.679549
| 438
| 3,105
| 4.650685
| 0.319635
| 0.059401
| 0.034364
| 0.033382
| 0.299951
| 0.242514
| 0.242514
| 0.242514
| 0.242514
| 0.226313
| 0
| 0.035593
| 0.239936
| 3,105
| 81
| 86
| 38.333333
| 0.827542
| 0.544605
| 0
| 0
| 0
| 0
| 0.003867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dcfe247dd1cc19b83a077ac143e29f6729063b0
| 192
|
py
|
Python
|
write-a-function.py
|
TheHumanGoogle/Hackerrank-python-solution
|
ab2fa515444d7493340d7c7fbb88c3a090a3a8f5
|
[
"MIT"
] | 1
|
2022-01-12T16:05:01.000Z
|
2022-01-12T16:05:01.000Z
|
write-a-function.py
|
TheHumanGoogle/Hackerrank-python-solution
|
ab2fa515444d7493340d7c7fbb88c3a090a3a8f5
|
[
"MIT"
] | null | null | null |
write-a-function.py
|
TheHumanGoogle/Hackerrank-python-solution
|
ab2fa515444d7493340d7c7fbb88c3a090a3a8f5
|
[
"MIT"
] | null | null | null |
def is_leap(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except century
    years, which are leap years only when divisible by 400.

    Args:
        year (int): Year to test.

    Returns:
        bool: True for leap years, False otherwise.
    """
    # Return the condition directly instead of threading a flag variable
    # through an if/elif/else chain.
    return year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)
year = int(input())
| 16
| 35
| 0.546875
| 30
| 192
| 3.466667
| 0.566667
| 0.153846
| 0.173077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077519
| 0.328125
| 192
| 11
| 36
| 17.454545
| 0.728682
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd0559b06c4b507ddd6a8e8abd9d084e5c41c75
| 3,483
|
py
|
Python
|
paasta_tools/async_utils.py
|
sobolevn/paasta
|
8b87e0b13816c09b3d063b6d3271e6c7627fd264
|
[
"Apache-2.0"
] | 1,711
|
2015-11-10T18:04:56.000Z
|
2022-03-23T08:53:16.000Z
|
paasta_tools/async_utils.py
|
sobolevn/paasta
|
8b87e0b13816c09b3d063b6d3271e6c7627fd264
|
[
"Apache-2.0"
] | 1,689
|
2015-11-10T17:59:04.000Z
|
2022-03-31T20:46:46.000Z
|
paasta_tools/async_utils.py
|
sobolevn/paasta
|
8b87e0b13816c09b3d063b6d3271e6c7627fd264
|
[
"Apache-2.0"
] | 267
|
2015-11-10T19:17:16.000Z
|
2022-02-08T20:59:52.000Z
|
import asyncio
import functools
import time
import weakref
from collections import defaultdict
from typing import AsyncIterable
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
def async_ttl_cache(
    ttl: Optional[float] = 300,
    cleanup_self: bool = False,
    *,
    cache: Optional[Dict] = None,
) -> Callable[
    [Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
    """Decorator factory: cache an async function's results for ``ttl`` seconds.

    ttl=None caches forever. With ``cleanup_self=True`` each instance gets its
    own cache keyed on a weakref to ``self``, dropped when the instance dies.
    An explicit ``cache`` dict may be supplied to share/inspect the cache.
    """
    async def call_or_get_from_cache(cache, async_func, args_for_key, args, kwargs):
        # Please note that anything which is put into `key` will be in the
        # cache forever, potentially causing memory leaks. The most common
        # case is the `self` arg pointing to a huge object. To mitigate that
        # we're using `args_for_key`, which is supposed not contain any huge
        # objects.
        key = functools._make_key(args_for_key, kwargs, typed=False)
        try:
            future, last_update = cache[key]
            if ttl is not None and time.time() - last_update > ttl:
                # Entry expired: fall through to recompute.
                raise KeyError
        except KeyError:
            future = asyncio.ensure_future(async_func(*args, **kwargs))
            # set the timestamp to +infinity so that we always wait on the in-flight request.
            cache[key] = (future, float("Inf"))
        try:
            value = await future
        except Exception:
            # Only update the cache if it's the same future we awaited and
            # it hasn't already been updated by another coroutine
            # Note also that we use get() in case the key was deleted from the
            # cache by another coroutine
            if cache.get(key) == (future, float("Inf")):
                del cache[key]
            raise
        else:
            if cache.get(key) == (future, float("Inf")):
                # First awaiter to finish stamps the real completion time.
                cache[key] = (future, time.time())
        return value
    if cleanup_self:
        instance_caches: Dict = cache if cache is not None else defaultdict(dict)
        def on_delete(w):
            # Weakref callback: drop the per-instance cache with the instance.
            del instance_caches[w]
        def outer(wrapped):
            @functools.wraps(wrapped)
            async def inner(self, *args, **kwargs):
                w = weakref.ref(self, on_delete)
                self_cache = instance_caches[w]
                # Exclude `self` from the cache key to avoid keeping it alive.
                return await call_or_get_from_cache(
                    self_cache, wrapped, args, (self,) + args, kwargs
                )
            return inner
    else:
        cache2: Dict = cache if cache is not None else {} # Should be Dict[Any, T] but that doesn't work.
        def outer(wrapped):
            @functools.wraps(wrapped)
            async def inner(*args, **kwargs):
                return await call_or_get_from_cache(cache2, wrapped, args, args, kwargs)
            return inner
    return outer
async def aiter_to_list(aiter: AsyncIterable[T],) -> List[T]:
    """Drain the async iterable *aiter* and return its items as a list."""
    collected: List[T] = []
    async for item in aiter:
        collected.append(item)
    return collected
def async_timeout(
    seconds: int = 10,
) -> Callable[
    [Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
    """Return a decorator that aborts the wrapped coroutine after *seconds*.

    The produced wrapper delegates to ``asyncio.wait_for``, so a coroutine
    exceeding the deadline raises ``asyncio.TimeoutError``.
    """
    def decorator(wrapped):
        @functools.wraps(wrapped)
        async def timed(*args, **kwargs):
            return await asyncio.wait_for(wrapped(*args, **kwargs), timeout=seconds)
        return timed
    return decorator
| 32.858491
| 106
| 0.611829
| 447
| 3,483
| 4.677852
| 0.326622
| 0.038259
| 0.053563
| 0.018651
| 0.228599
| 0.21999
| 0.21999
| 0.171688
| 0.14395
| 0.066954
| 0
| 0.002867
| 0.29888
| 3,483
| 105
| 107
| 33.171429
| 0.853399
| 0.211025
| 0
| 0.337838
| 0
| 0
| 0.003663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.162162
| 0
| 0.378378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd235954e00e3353720380ad5e4fd1579960a8d
| 3,788
|
py
|
Python
|
examples/scripts/sc/bpdn.py
|
manvhah/sporco
|
9237d7fc37e75089a2a65ebfe02b7491410da7d4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/scripts/sc/bpdn.py
|
manvhah/sporco
|
9237d7fc37e75089a2a65ebfe02b7491410da7d4
|
[
"BSD-3-Clause"
] | null | null | null |
examples/scripts/sc/bpdn.py
|
manvhah/sporco
|
9237d7fc37e75089a2a65ebfe02b7491410da7d4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Basis Pursuit DeNoising
=======================
This example demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic`
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{x} \|_1 \;,$$
where $D$ is the dictionary, $\mathbf{x}$ is the sparse representation, and $\mathbf{s}$ is the signal to be represented. In this example the BPDN problem is used to estimate the reference sparse representation that generated a signal from a noisy version of the signal.
"""
from __future__ import print_function
from builtins import input
import numpy as np
from sporco.admm import bpdn
from sporco import util
from sporco import plot
"""
Configure problem size, sparsity, and noise level.
"""
N = 512 # Signal size
M = 4*N # Dictionary size
L = 32 # Number of non-zero coefficients in generator
sigma = 0.5 # Noise level
"""
Construct random dictionary, reference random sparse representation, and test signal consisting of the synthesis of the reference sparse representation with additive Gaussian noise.
"""
# Construct random dictionary and random sparse coefficients
np.random.seed(12345)
D = np.random.randn(N, M)
x0 = np.zeros((M, 1))
si = np.random.permutation(list(range(0, M-1)))
x0[si[0:L]] = np.random.randn(L, 1)
# Construct reference and noisy signal
s0 = D.dot(x0)
s = s0 + sigma*np.random.randn(N,1)
"""
Set BPDN solver class options.
"""
opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}})
"""
Select regularization parameter $\lambda$ by evaluating the error in recovering the sparse representation over a logarithmicaly spaced grid. (The reference representation is assumed to be known, which is not realistic in a real application.) A function is defined that evalues the BPDN recovery error for a specified $\lambda$, and this function is evaluated in parallel by :func:`sporco.util.grid_search`.
"""
# Function computing reconstruction error at lmbda
def evalerr(prm):
    """Return the l1 distance between the BPDN solution for lmbda = prm[0]
    and the known reference representation x0 (module globals D, s, opt)."""
    lmbda = prm[0]
    solver = bpdn.BPDN(D, s, lmbda, opt)
    coef = solver.solve()
    return np.sum(np.abs(coef - x0))
# Parallel evalution of error function on lmbda grid
lrng = np.logspace(1, 2, 20)
sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,))
lmbda = sprm[0]
print('Minimum ℓ1 error: %5.2f at 𝜆 = %.2e' % (sfvl, lmbda))
"""
Once the best $\lambda$ has been determined, run BPDN with verbose display of ADMM iteration statistics.
"""
# Initialise and run BPDN object for best lmbda
opt['Verbose'] = True
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()
print("BPDN solve time: %.2fs" % b.timer.elapsed('solve'))
"""
Plot comparison of reference and recovered representations.
"""
plot.plot(np.hstack((x0, x)), title='Sparse representation',
lgnd=['Reference', 'Reconstructed'])
"""
Plot lmbda error curve, functional value, residuals, and rho
"""
its = b.getitstat()
fig = plot.figure(figsize=(15, 10))
plot.subplot(2, 2, 1)
plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\lambda$',
ylbl='Error', fig=fig)
plot.subplot(2, 2, 2)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(2, 2, 3)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(2, 2, 4)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()
# Wait for enter on keyboard
input()
| 30.063492
| 406
| 0.694298
| 570
| 3,788
| 4.596491
| 0.412281
| 0.00458
| 0.018321
| 0.019847
| 0.041603
| 0.041603
| 0.019847
| 0.019847
| 0.019847
| 0.019847
| 0
| 0.022166
| 0.166315
| 3,788
| 125
| 407
| 30.304
| 0.807473
| 0.297254
| 0
| 0.081633
| 0
| 0
| 0.154664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.122449
| 0
| 0.163265
| 0.061224
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd337ba7906e3c3c7b8bae81a44d4305edc633f
| 1,361
|
py
|
Python
|
tests/auto_test_class_creation_spec.py
|
MountainField/uspec
|
a4f8908b1a3af519d9d2ce7b85a4b4cca7b85883
|
[
"MIT"
] | 2
|
2020-03-02T01:58:05.000Z
|
2022-01-25T08:44:40.000Z
|
tests/auto_test_class_creation_spec.py
|
MountainField/uspec
|
a4f8908b1a3af519d9d2ce7b85a4b4cca7b85883
|
[
"MIT"
] | null | null | null |
tests/auto_test_class_creation_spec.py
|
MountainField/uspec
|
a4f8908b1a3af519d9d2ce7b85a4b4cca7b85883
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# =================================================================
# uspec
#
# Copyright (c) 2020 Takahide Nogayama
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# =================================================================
from __future__ import unicode_literals, print_function, division
import unittest
import uspec
from uspec import describe, context, it
###################################
class TestGame(unittest.TestCase): pass
with describe("Game", test_class=TestGame):
assert test_class is TestGame
@it("hoge")
def _(self):
self.assertTrue(True)
assert TestGame is not None
##################################
TEST_CLASS_NAME_GAME2 = None
with describe("Game2"):
TEST_CLASS_NAME_GAME2 = test_class.__name__
@it("hoge")
def _(self):
self.assertTrue(True)
assert TEST_CLASS_NAME_GAME2 in globals()
##################################
def wrap():
    """Exercise uspec's describe() from inside a function scope.

    uspec injects the generated TestCase subclass into the *caller's*
    namespace under the name ``test_class``; inside a function that name
    is a local created at runtime, so it must be read via ``locals()``
    rather than as a bare identifier.
    """
    global TEST_CLASS_NAME_GAME3
    with describe("Game3"):
        # Fetch the injected class object from this function's locals.
        TEST_CLASS_NAME_GAME3 = locals()["test_class"].__name__
        @it("hoge")
        def _(self):
            self.assertTrue(True)
wrap()
assert TEST_CLASS_NAME_GAME3 in globals()
if __name__ == '__main__':
import unittest
unittest.main(verbosity=2)
| 20.621212
| 67
| 0.556209
| 141
| 1,361
| 5.035461
| 0.432624
| 0.126761
| 0.146479
| 0.05493
| 0.184507
| 0.184507
| 0.184507
| 0.184507
| 0.123944
| 0.123944
| 0
| 0.01267
| 0.188097
| 1,361
| 65
| 68
| 20.938462
| 0.629864
| 0.214548
| 0
| 0.366667
| 0
| 0
| 0.046025
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 1
| 0.133333
| false
| 0.033333
| 0.166667
| 0
| 0.333333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd4998614beb1247cc3bb983c52f0476fab9cb0
| 495
|
py
|
Python
|
main.py
|
Matthewk01/Snake-AI
|
d5f211334436676966f17bb6dbfea8aba61ee6b4
|
[
"MIT"
] | null | null | null |
main.py
|
Matthewk01/Snake-AI
|
d5f211334436676966f17bb6dbfea8aba61ee6b4
|
[
"MIT"
] | null | null | null |
main.py
|
Matthewk01/Snake-AI
|
d5f211334436676966f17bb6dbfea8aba61ee6b4
|
[
"MIT"
] | null | null | null |
import pygame
from game.game_logic.game import Game
import matplotlib.pyplot as plt
def main():
    """Play a fixed number of Snake games, then chart the score history."""
    results = []
    NUM_GAMES = 2
    for round_no in range(NUM_GAMES):
        session = Game(400, "Snake AI")
        results.append(session.start())
        print("Game:", round_no)

    # Plot snake length per game on a fixed 0-36 axis.
    plt.ylim(0, 36)
    plt.plot(range(len(results)), results)
    plt.ylabel('Snake length')
    plt.xlabel('Game count')
    plt.show()
if __name__ == "__main__":
main()
| 20.625
| 56
| 0.628283
| 68
| 495
| 4.352941
| 0.544118
| 0.175676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018667
| 0.242424
| 495
| 23
| 57
| 21.521739
| 0.770667
| 0
| 0
| 0
| 0
| 0
| 0.086869
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.222222
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd4d65be6fbb2b5be1a2991fade5b69cc8efed5
| 792
|
py
|
Python
|
closed/Intel/code/resnet50/openvino-cpu/src/tools/create_image_list.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 19
|
2020-10-26T17:37:22.000Z
|
2022-01-20T09:32:38.000Z
|
closed/Intel/code/resnet50/openvino-cpu/src/tools/create_image_list.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 24
|
2021-07-19T01:09:35.000Z
|
2022-03-17T11:44:02.000Z
|
closed/Intel/code/resnet50/openvino-cpu/src/tools/create_image_list.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 19
|
2020-10-21T19:15:17.000Z
|
2022-01-04T08:32:08.000Z
|
import os
import sys
from glob import glob
def create_list(images_dir, output_file, img_ext=".jpg"):
    """Write the stem (filename without its final extension) of every entry
    in ``images_dir`` to ``output_file`` inside that same directory, one
    stem per line with no trailing newline after the last entry.

    Fixes over the original:
      - filenames containing extra dots (e.g. "a.b.jpg") no longer raise
        ValueError from tuple unpacking; os.path.splitext is used instead;
      - an empty directory no longer raises IndexError (writes empty file).

    :param images_dir: directory whose entries are listed
    :param output_file: name of the list file created inside images_dir
    :param img_ext: retained for backward compatibility; entries are not
        filtered by extension (matching the original behavior, where the
        parameter was accepted but never used)
    """
    stems = [os.path.splitext(name)[0] for name in os.listdir(images_dir)]
    with open(os.path.join(images_dir, output_file), 'w') as fid:
        fid.write("\n".join(stems))
def main():
    """CLI entry point: ``<images_dir> [output_file]``.

    Exits with status 1 when no directory is given; the output filename
    defaults to ``image_list.txt``.
    """
    argv = sys.argv
    if len(argv) < 2:
        print("Requires images directory")
        sys.exit(1)
    images_dir = argv[1]
    output_file = argv[2] if len(argv) >= 3 else "image_list.txt"
    create_list(images_dir, output_file)
if __name__=="__main__":
main()
| 22.628571
| 64
| 0.582071
| 114
| 792
| 3.807018
| 0.421053
| 0.124424
| 0.103687
| 0.131336
| 0.258065
| 0.258065
| 0.124424
| 0
| 0
| 0
| 0
| 0.014159
| 0.286616
| 792
| 35
| 65
| 22.628571
| 0.753982
| 0
| 0
| 0.076923
| 0
| 0
| 0.069357
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.192308
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd5c073bdc1758efc5e43f31738feb8fc1ef917
| 4,434
|
py
|
Python
|
AI/others/churn/churn_2.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | null | null | null |
AI/others/churn/churn_2.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | 3
|
2020-03-24T16:26:35.000Z
|
2020-04-15T19:40:41.000Z
|
AI/others/churn/churn_2.py
|
honchardev/Fun
|
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/
# In[ ]:
# Показатель оттока клиентов – бизнес-термин, описывающий
# насколько интенсивно клиенты покидают компанию или
# прекращают оплачивать товары или услуги.
# Это ключевой показатель для многих компаний, потому что
# зачастую приобретение новых клиентов обходится намного дороже,
# чем удержание старых (в некоторых случаях от 5 до 20 раз дороже).
# Примеры использования:
# 1. мобильные операторы, операторы кабельного телевидения и
# компании, обслуживающие прием платежей с помощью кредитных карт
# 2. казино используют прогнозные модели, чтобы предсказать
# идеальные условия в зале, позволяющие удержать игроков
# в Блэкджек за столом.
# 3. Aвиакомпании могут предложить клиентам, у которых есть
# жалобы, заменить их билет на билет первого класса.
# Эффективное удержание клиентов сводится к задаче, в рамках
# которой, используя имеющиеся данные, необходимо отличить
# клиентов, собирающихся уйти, от тех, кто этого делать
# не собирается.
# In[ ]:
# datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv
# In[88]:
# Load libraries
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import KFold, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
# In[3]:
# Load dataset
raw_churn_df = pd.read_csv('churn.csv')
# In[17]:
display(raw_churn_df.shape)
display(raw_churn_df.head(), raw_churn_df.tail())
display(raw_churn_df.columns.values)
display(raw_churn_df.dtypes)
display(raw_churn_df.isnull().sum())
# In[78]:
# Isolate target data
y = raw_churn_df['Churn?']
X = raw_churn_df.drop('Churn?', axis=1)
# In[79]:
# Drop irrelevant features
features_to_drop = ['State', 'Area Code', 'Phone']
X = X.drop(features_to_drop, axis=1)
# In[80]:
# Encode yes/no with 1/0 values
X["Int'l Plan"] = X["Int'l Plan"].map({'no': 0, 'yes': 1})
X["VMail Plan"] = X["VMail Plan"].map({'no': 0, 'yes': 1})
# In[81]:
# Scale everything
std_scaler = StandardScaler(with_mean=True)
X = std_scaler.fit_transform(X)
display(X.shape)
# In[90]:
# Perform CV for SVM, random forest and kNN
def try_clf(X, y, clf_nofit):
    """Fit ``clf_nofit`` on a train split of (X, y) and show holdout metrics.

    Displays, in order: the classifier class name, holdout accuracy, the
    confusion matrix, and precision/recall/F1/support.

    NOTE(review): relies on the notebook-global ``display`` function
    (IPython); calling this outside Jupyter raises NameError.
    """
    # Fixed random_state so every classifier sees the same split.
    X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
    clf = clf_nofit.fit(X_tr, y_tr)
    y_pred = clf.predict(X_val)
    display(clf_nofit.__class__.__name__)
    display(accuracy_score(y_val, y_pred))
    display(confusion_matrix(y_val, y_pred))
    display("prec, rec, f1, support", precision_recall_fscore_support(y_val, y_pred))
try_clf(X, y, SVC(gamma='scale'))
try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
try_clf(X, y, KNeighborsClassifier())
# std scaler with_mean=False accuracies:
# 0.9256594724220624
# 0.9484412470023981
# 0.8896882494004796
# std scaler with_mean=True accuracies:
# 0.9256594724220624
# 0.9496402877697842
# 0.8896882494004796
# In[86]:
# Recall
# Каково отношение количества правильно спрогнозированных уходов
# к общему количеству фактических уходов?
# Precision
# Каково отношение количества правильно спрогнозированных уходов
# к общему количеству спрогнозированных уходов?
# In[101]:
# # Predict probabilities
# def try_probab(X, y, clf_nofit):
# X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
# clf = clf_nofit.fit(X_tr, y_tr)
# y_prob = clf.predict_proba(X_val)
# # for i in range(len(X)):
# # display("y_true={0}, Predicted={1}".format(y[i], y_prob[i]))
# display(pd.value_counts(y_prob[:, 1]))
# try_probab(X, y, SVC(gamma='scale', probability=True))
# # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
# # try_probab(X, y, KNeighborsClassifier())
# # for i in range(len(Xnew)):
# # print("X=%s, Predicted=%s" % (Xnew[i], ynew[i]))
# In[ ]:
# todo: calibration and discrimination
# https://github.com/ghuiber/churn/blob/master/churn_measurements.py
# from churn_measurements import calibration, discrimination
| 21.735294
| 104
| 0.728913
| 637
| 4,434
| 4.908948
| 0.464678
| 0.006396
| 0.028782
| 0.027183
| 0.168212
| 0.133674
| 0.12472
| 0.12472
| 0.12472
| 0.07739
| 0
| 0.040947
| 0.151782
| 4,434
| 203
| 105
| 21.842365
| 0.790215
| 0.580289
| 0
| 0
| 0
| 0
| 0.074719
| 0
| 0
| 0
| 0
| 0.004926
| 0
| 1
| 0.027778
| false
| 0
| 0.25
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd63a69cf7b02ed5bd4b36b349a9d84dec480ac
| 4,518
|
py
|
Python
|
pytrivia/trivia.py
|
Dnewman9/Python-Trivia-API
|
0af7f999cc4ab278fb0ac6fd64733ab168984e60
|
[
"MIT"
] | 6
|
2018-01-15T15:17:56.000Z
|
2021-06-16T19:48:14.000Z
|
pytrivia/trivia.py
|
MaT1g3R/Python-Trivia-API
|
0af7f999cc4ab278fb0ac6fd64733ab168984e60
|
[
"MIT"
] | null | null | null |
pytrivia/trivia.py
|
MaT1g3R/Python-Trivia-API
|
0af7f999cc4ab278fb0ac6fd64733ab168984e60
|
[
"MIT"
] | 7
|
2017-05-15T23:41:43.000Z
|
2021-07-10T01:09:09.000Z
|
"""
A simple python api wrapper for https://opentdb.com/
"""
from aiohttp import ClientSession
from requests import get
from pytrivia.__helpers import decode_dict, get_token, make_request
from pytrivia.enums import *
class Trivia:
    def __init__(self, with_token: bool):
        """
        Initialize an instance of the Trivia class
        :param with_token: If True then the instance will uses a session token
        """
        self.token = get_token() if with_token else None

    def request(self, num_questions: int, category: Category = None,
                diffculty: Diffculty = None, type_: Type = None) -> dict:
        """
        Send an api request to https://opentdb.com/
        Limitations:
        Only 1 Category can be requested per API Call.
        To get questions from any category, don't specify a category.
        A Maximum of 50 Questions can be retrieved per call.
        :param num_questions: the number of questions,
        must be between 1 and 50 (inclusive)
        :param category: the category of the question. None for any category
        :param diffculty: the diffculty of the question. None for any diffculty
        :param type_: the type of the question. None for any type
        :return: the api call response
        :rtype: dict
        :raises: ValueError when the num_questions parameter is less than 1
        or greater than 50
        """
        result = get(
            self.__url(num_questions, category, diffculty, type_)).json()
        if result['response_code'] in (3, 4):
            # Codes 3/4 signal an invalid or exhausted session token:
            # fetch a fresh token and retry the same request once more.
            self.token = get_token()
            return self.request(num_questions, category, diffculty, type_)
        else:
            return decode_dict(result)

    async def request_async(self, session: ClientSession, close_session: bool,
                            num_questions: int, category: Category = None,
                            diffculty: Diffculty = None,
                            type_: Type = None) -> dict:
        """
        Send an api request to https://opentdb.com/
        Limitations:
        Only 1 Category can be requested per API Call.
        To get questions from any category, don't specify a category.
        A Maximum of 50 Questions can be retrieved per call.
        :param session: an Aiohttp client session.
        :param close_session: True to close the session after the request.
        :param num_questions: the number of questions,
        must be between 1 and 50 (inclusive)
        :param category: the category of the question. None for any category
        :param diffculty: the diffculty of the question. None for any diffculty
        :param type_: the type of the question. None for any type
        :return: the api call response
        :rtype: dict
        :raises: ValueError when the num_questions parameter is less than 1
        or greater than 50
        :raises ClientResponseError if the HTTP response code isn't 200
        """
        try:
            return await self.__request(
                session, num_questions, category, diffculty, type_)
        finally:
            if close_session:
                # BUG FIX: ClientSession.close() is a coroutine in aiohttp;
                # it must be awaited or the session (and its connector) is
                # never actually closed.
                await session.close()

    async def __request(self, session: ClientSession, num_questions: int,
                        category: Category = None, diffculty: Diffculty = None,
                        type_: Type = None) -> dict:
        """
        Helper method for the async request.
        """
        resp = await make_request(
            session, self.__url(num_questions, category, diffculty, type_))
        result = await resp.json()
        if result['response_code'] in (3, 4):
            # Same token-refresh-and-retry logic as the sync path.
            self.token = get_token()
            return await self.__request(
                session, num_questions, category, diffculty, type_)
        else:
            return decode_dict(result)

    def __url(self, num_questions, category, diffculty, type_):
        """
        Helper method to generate request url.
        :raises: ValueError when num_questions is outside [1, 50]
        """
        if num_questions < 1 or num_questions > 50:
            raise ValueError
        url = 'https://opentdb.com/api.php?amount={}&encode=base64'.format(
            num_questions)
        if category is not None:
            url += '&category={}'.format(category.value)
        if diffculty is not None:
            url += '&difficulty={}'.format(diffculty.value)
        if type_ is not None:
            url += '&type={}'.format(type_.value)
        if self.token is not None:
            url += '&token={}'.format(self.token)
        return url
| 35.857143
| 79
| 0.607791
| 547
| 4,518
| 4.90128
| 0.212066
| 0.071615
| 0.029094
| 0.038046
| 0.609474
| 0.597165
| 0.597165
| 0.567326
| 0.567326
| 0.567326
| 0
| 0.009699
| 0.315405
| 4,518
| 125
| 80
| 36.144
| 0.857097
| 0.18703
| 0
| 0.269231
| 0
| 0
| 0.04886
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.076923
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd6aca7ea5896f561da5d7ef0e8b1303417fa33
| 1,249
|
py
|
Python
|
utils.py
|
py-ranoid/practical-nlp
|
514fd4da3b72f26597d91cdb89704a849bf6b36d
|
[
"MIT"
] | null | null | null |
utils.py
|
py-ranoid/practical-nlp
|
514fd4da3b72f26597d91cdb89704a849bf6b36d
|
[
"MIT"
] | null | null | null |
utils.py
|
py-ranoid/practical-nlp
|
514fd4da3b72f26597d91cdb89704a849bf6b36d
|
[
"MIT"
] | null | null | null |
import requests
import tarfile
import os
def download_file(url, directory):
    """Stream ``url`` into ``directory`` and return the local file path.

    The local filename is the last path segment of the URL; the download
    is written in 8 KiB chunks so large files never sit fully in memory.
    """
    dest = os.path.join(directory, url.split('/')[-1])
    print("Downloading %s --> %s" % (url, dest))
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(dest, 'wb') as out:
            for block in response.iter_content(chunk_size=8192):
                out.write(block)
    return dest
def extract_tar(fpath):
    """Extract a ``.tar`` or ``.tar.gz`` archive into its own directory.

    Returns the expected destination path: the archive's directory joined
    with the filename up to the first dot ("a.tar.gz" -> ".../a"). Files
    with any other extension are left untouched (the path is still
    returned).

    :param fpath: path to the archive file
    :raises tarfile.ReadError: if the file is not a readable tar archive
    """
    fname_dir, fname = os.path.split(fpath)
    dest_path = os.path.join(fname_dir, fname.split('.')[0])
    print("Extracting %s --> %s" % (fpath, dest_path))
    if fname.endswith("tar.gz"):
        # Context manager guarantees the archive handle is closed even if
        # extraction raises.
        with tarfile.open(fpath, "r:gz") as tar:
            tar.extractall(path=fname_dir)
    elif fname.endswith("tar"):
        # BUG FIX: the original opened `fname` (basename only), which only
        # worked when the CWD happened to be the archive's directory.
        with tarfile.open(fpath, "r:") as tar:
            tar.extractall(path=fname_dir)
    return dest_path
def list_files(startpath):
    """Print a tree view of ``startpath``.

    Each directory is printed with a trailing '/', indented 4 spaces per
    nesting level; its files follow, indented one level deeper.
    """
    for root, _dirs, filenames in os.walk(startpath):
        depth = root.replace(startpath, '').count(os.sep)
        pad = ' ' * 4 * depth
        print(f"{pad}{os.path.basename(root)}/")
        child_pad = ' ' * 4 * (depth + 1)
        for name in filenames:
            print(f"{child_pad}{name}")
| 34.694444
| 64
| 0.602082
| 167
| 1,249
| 4.39521
| 0.401198
| 0.070845
| 0.027248
| 0.059946
| 0.089918
| 0.089918
| 0.089918
| 0
| 0
| 0
| 0
| 0.009464
| 0.238591
| 1,249
| 36
| 65
| 34.694444
| 0.762355
| 0
| 0
| 0.121212
| 0
| 0
| 0.0568
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.242424
| 0.121212
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd72494fca93c6bb84fb81618dd74141e12e413
| 5,733
|
py
|
Python
|
plotting/make_bar_graph.py
|
DanielTakeshi/debridement-code
|
d1a946d1fa3c60b60284c977ecb2d6584e524ae2
|
[
"MIT"
] | 3
|
2017-09-29T01:41:20.000Z
|
2021-03-29T01:51:18.000Z
|
plotting/make_bar_graph.py
|
DanielTakeshi/debridement-code
|
d1a946d1fa3c60b60284c977ecb2d6584e524ae2
|
[
"MIT"
] | null | null | null |
plotting/make_bar_graph.py
|
DanielTakeshi/debridement-code
|
d1a946d1fa3c60b60284c977ecb2d6584e524ae2
|
[
"MIT"
] | 3
|
2017-09-29T01:42:35.000Z
|
2019-10-20T07:10:44.000Z
|
""" A bar graph.
(c) September 2017 by Daniel Seita
"""
import argparse
from collections import defaultdict
from keras.models import Sequential
from keras.layers import Dense, Activation
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
np.set_printoptions(suppress=True, linewidth=200)
# Some matplotlib settings.
plt.style.use('seaborn-darkgrid')
titlesize = 21
labelsize = 17
legendsize = 15
ticksize = 15
bar_width = 0.80
opacity = 1.0
error_config = {'ecolor': '0.0', 'linewidth':3.0}
def deprecated():
    """
    This is a deprecated method, only to show how to possibly combine these into
    one plot. However, I find this unwieldly.
    """
    # NOTE(review): this function references module-level names (means_lin,
    # std_lin, means_rfs, std_rfs, means_dnn, std_dnn) that are not defined
    # anywhere in this file; calling it raises NameError. Kept only as an
    # illustration of the single-figure layout.
    fig, ax = plt.subplots()
    bar_width = 0.80
    opacity = 0.5
    error_config = {'ecolor': '0.3'}
    # Three bar groups at hand-picked x offsets (0-1, 3-7, 9-10) so the
    # algorithm families are visually separated on one axis.
    rects1 = plt.bar(np.array([0,1]), means_lin, bar_width,
                     alpha=opacity,
                     color='b',
                     yerr=std_lin,
                     error_kw=error_config,
                     label='Lin')
    rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width,
                     alpha=opacity,
                     color='r',
                     yerr=std_rfs,
                     error_kw=error_config,
                     label='RF')
    rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width,
                     alpha=opacity,
                     color='y',
                     yerr=std_dnn,
                     error_kw=error_config,
                     label='DNN')
    plt.xticks(np.arange(11) + bar_width / 2,
               ('A','B','','D','E','F','G','','','J','K'))
    plt.xlabel('Group')
    plt.ylabel('Scores')
    plt.title('Scores by group and gender')
    plt.tight_layout()
    plt.legend()
    plt.savefig('figures/validation_set_results.png')
def plot(results, vv):
    """Bar-chart k-fold validation losses grouped by algorithm family.

    ``results`` maps model-name keys (containing 'Lin', 'RFs', or 'DNN')
    to lists of dicts with a 'loss' entry; one bar per model shows the
    mean loss with a std-dev error bar, split across three subplots (one
    per family). DNN models whose mean loss exceeds a threshold are
    omitted. The figure is saved to
    figures/validation_set_results_v<vv>.png.

    :param results: dict of model name -> list of {'loss': float, ...}
    :param vv: version string used in the output filename
    """
    # Accumulate per-family means/stds/keys in sorted-key order so bars
    # appear in a deterministic order.
    lin_mean = []
    lin_std = []
    lin_keys = []
    rfs_mean = []
    rfs_std = []
    rfs_keys = []
    dnn_mean = []
    dnn_std = []
    dnn_keys = []
    sorted_keys = sorted(results.keys())
    for key in sorted_keys:
        info = [ss['loss'] for ss in results[key]]
        if 'Lin' in key:
            lin_mean.append(np.mean(info))
            lin_std.append(np.std(info))
            lin_keys.append(key)
        elif 'RFs' in key:
            rfs_mean.append(np.mean(info))
            rfs_std.append(np.std(info))
            rfs_keys.append(key)
        elif 'DNN' in key:
            dnn_mean.append(np.mean(info))
            dnn_std.append(np.std(info))
            dnn_keys.append(key)
    print("\nlin_mean: {}".format(lin_mean))
    print("lin_std: {}".format(lin_std))
    print("lin_keys: {}".format(lin_keys))
    print("\nrfs_mean: {}".format(rfs_mean))
    print("rfs_std: {}".format(rfs_std))
    print("rfs_keys: {}".format(rfs_keys))
    print("\nDNN results:")
    for (mean,std,key) in zip(dnn_mean,dnn_std,dnn_keys):
        print("{:.2f}\t{:.2f}\t{}".format(mean,std,key))
    # sys.exit()

    # Use this to determine which DNN models should be here.
    # (Counts how many DNN bars survive the threshold so the third
    # subplot can be sized proportionally.)
    dnn_threshold = 3.0
    real_index = 0
    for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
        if mean > dnn_threshold:
            continue
        real_index += 1

    # Gah! Now I can finally make the bar chart. I think it's easiest to have it
    # split across three different subplots, one per algorithm category.
    width_ratio = [len(lin_keys),len(rfs_keys),real_index]
    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16,5),
                           gridspec_kw={'width_ratios':width_ratio})
    # key[4:] strips the family prefix (e.g. "Lin-") from legend labels.
    for ii,(mean,std,key) in enumerate(zip(lin_mean,lin_std,lin_keys)):
        ax[0].bar(np.array([ii]), mean, bar_width,
                  alpha=opacity,
                  yerr=std,
                  error_kw=error_config,
                  label=key[4:])
    for ii,(mean,std,key) in enumerate(zip(rfs_mean,rfs_std,rfs_keys)):
        ax[1].bar(np.array([ii]), mean, bar_width,
                  alpha=opacity,
                  yerr=std,
                  error_kw=error_config,
                  label=key[4:])
    real_index = 0
    for ii,(mean,std,key) in enumerate(zip(dnn_mean,dnn_std,dnn_keys)):
        if mean > dnn_threshold:
            continue
        ax[2].bar(np.array([real_index]), mean, bar_width,
                  alpha=opacity,
                  yerr=std,
                  error_kw=error_config,
                  label=key[4:])
        real_index += 1

    # Some rather tedious but necessary stuff to make it publication-quality.
    ax[0].set_title('Linear', fontsize=titlesize)
    ax[1].set_title('Random Forests', fontsize=titlesize)
    ax[2].set_title('Deep Neural Networks', fontsize=titlesize)
    ax[0].set_ylabel('Average Squared $L_2$, 10-Fold CV', fontsize=labelsize)
    for i in range(3):
        ax[i].set_xlabel('Algorithm', fontsize=labelsize)
        ax[i].set_ylim([0.0,9.0])
        ax[i].tick_params(axis='y', labelsize=ticksize)
        ax[i].set_xticklabels([])
    ax[0].legend(loc="best", ncol=1, prop={'size':legendsize})
    ax[1].legend(loc="best", ncol=2, prop={'size':legendsize})
    ax[2].legend(loc="best", ncol=3, prop={'size':legendsize})
    plt.tight_layout()
    plt.savefig('figures/validation_set_results_v'+vv+'.png')
if __name__ == "__main__":
    # CLI: --version selects which results file to load; --kfolds is
    # parsed but not read anywhere below (the filename hard-codes
    # "kfolds10").
    pp = argparse.ArgumentParser()
    pp.add_argument('--version', type=int)
    pp.add_argument('--kfolds', type=int, default=10)
    args = pp.parse_args()
    assert args.version is not None
    VERSION = str(args.version).zfill(2)
    file_name = 'results/results_kfolds10_v'+VERSION+'.npy'
    # The .npy holds a pickled dict inside a 0-d object array; indexing
    # with [()] unwraps it back into the dict.
    results = np.load(file_name)[()]
    print("results has keys: {}".format(results.keys()))
    plot(results, VERSION)
| 33.138728
| 80
| 0.580499
| 784
| 5,733
| 4.08801
| 0.307398
| 0.022465
| 0.018721
| 0.037442
| 0.288924
| 0.197192
| 0.142902
| 0.135725
| 0.117629
| 0.117629
| 0
| 0.02141
| 0.2749
| 5,733
| 172
| 81
| 33.331395
| 0.749579
| 0.082679
| 0
| 0.230216
| 0
| 0
| 0.093188
| 0.017604
| 0
| 0
| 0
| 0
| 0.007194
| 1
| 0.014388
| false
| 0
| 0.057554
| 0
| 0.071942
| 0.071942
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dd728898f384c5addbd3fc04712cc8f4bb79103
| 998
|
py
|
Python
|
setup.py
|
tzengerink/groceries-api
|
a22cc3503006b87b731b956f6341d730b143bf10
|
[
"MIT"
] | null | null | null |
setup.py
|
tzengerink/groceries-api
|
a22cc3503006b87b731b956f6341d730b143bf10
|
[
"MIT"
] | null | null | null |
setup.py
|
tzengerink/groceries-api
|
a22cc3503006b87b731b956f6341d730b143bf10
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import find_packages, setup
import os
import re
ROOT = os.path.dirname(__file__)
VERSION_RE = re.compile(r'''__version__ = \'([0-9.]+)\'''')
def get_version():
    """Return the package version parsed from application/__init__.py.

    :raises AttributeError: if no ``__version__ = '...'`` line is found
        (VERSION_RE.search returns None)
    """
    # Context manager closes the file handle; the original leaked it via
    # a bare open(...).read().
    with open(os.path.join(ROOT, 'application', '__init__.py')) as f:
        init = f.read()
    return VERSION_RE.search(init).group(1)
setup(
    name='groceries-api',
    version=get_version(),
    license='MIT',
    packages=find_packages(),
    include_package_data=True,
    # Runtime dependencies, pinned to exact versions.
    install_requires=[
        'alembic==0.7.5.post2',
        'APScheduler==3.1.0',
        'Flask==0.10.1',
        'Flask-Cors==2.0.0',
        'Flask-SQLAlchemy==2.0',
        'gunicorn==19.3.0',
        'psycopg2==2.6.1',
        'PyJWT==1.1.0',
        'requests==2.8.1',
        'six==1.9.0',
    ],
    # Development/test extras: install with `pip install .[dev]`.
    # NOTE(review): the 'dev' value is a set literal, not a list — verify
    # the installed setuptools version accepts it.
    extras_require={
        'dev': {
            'coverage==3.7.1',
            'coveralls==0.5',
            'flake8==2.4.0',
            'mock==1.0.1',
            'pytest==2.7.0',
            'tox==2.1.1',
        },
    },
)
| 22.177778
| 72
| 0.516032
| 133
| 998
| 3.706767
| 0.548872
| 0.01217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074792
| 0.276553
| 998
| 44
| 73
| 22.681818
| 0.608033
| 0.02004
| 0
| 0
| 0
| 0
| 0.307062
| 0.021494
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.081081
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dda086e2a6749797c92ff4afeb274d3586e3b33
| 536
|
py
|
Python
|
cookie-cutter/src/templates/template.py
|
noname34/CHARM_Project_Hazard_Perception_I
|
2d03d9e8911afad21818c6f837558503508a59bd
|
[
"Unlicense",
"MIT"
] | null | null | null |
cookie-cutter/src/templates/template.py
|
noname34/CHARM_Project_Hazard_Perception_I
|
2d03d9e8911afad21818c6f837558503508a59bd
|
[
"Unlicense",
"MIT"
] | null | null | null |
cookie-cutter/src/templates/template.py
|
noname34/CHARM_Project_Hazard_Perception_I
|
2d03d9e8911afad21818c6f837558503508a59bd
|
[
"Unlicense",
"MIT"
] | null | null | null |
#!/user/bin/env python3
# -*- coding: utf-8 -*-
#!/user/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Kevin Bürgisser
# @Email: kevin.buergisser@edu.hefr.ch
# @Date: 04.2020
# Context: CHARM PROJECT - Harzard perception
"""
Module documentation.
"""
# Imports
import sys
#import os
# Global variables
# Class declarations
# Function declarations
def main():
    """Entry point: print usage and exit(1) when no CLI arguments are given."""
    cli_args = sys.argv[1:]
    if not cli_args:
        print('usage: [--flags options] [inputs] ')
        sys.exit(1)
    # Main body
if __name__ == '__main__':
main()
| 14.888889
| 51
| 0.630597
| 67
| 536
| 4.925373
| 0.731343
| 0.042424
| 0.060606
| 0.10303
| 0.163636
| 0.163636
| 0.163636
| 0
| 0
| 0
| 0
| 0.028169
| 0.205224
| 536
| 36
| 52
| 14.888889
| 0.746479
| 0.589552
| 0
| 0
| 0
| 0
| 0.21
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dde2db2c5518f1b83b708f088e5f614029ac9a9
| 2,794
|
py
|
Python
|
Module_III/PySparkNetworkSimilarityClass.py
|
wuchiehhan/KDD2019-HandsOn-Tutorial
|
0377ae4b2a74e9cc08b15c983e4e0f59ab02debe
|
[
"MIT"
] | null | null | null |
Module_III/PySparkNetworkSimilarityClass.py
|
wuchiehhan/KDD2019-HandsOn-Tutorial
|
0377ae4b2a74e9cc08b15c983e4e0f59ab02debe
|
[
"MIT"
] | null | null | null |
Module_III/PySparkNetworkSimilarityClass.py
|
wuchiehhan/KDD2019-HandsOn-Tutorial
|
0377ae4b2a74e9cc08b15c983e4e0f59ab02debe
|
[
"MIT"
] | null | null | null |
# Databricks notebook source
from pyspark.sql.types import *
from pyspark.sql import functions as F
import base64
import array
# COMMAND ----------
# s is a base64 encoded float[] with first element being the magnitude
def Base64ToFloatArray(s):
    """Decode a base64 payload into (magnitude, components).

    The payload is a float32 array whose first element is the vector
    magnitude; the remaining elements are the vector components.
    """
    decoded = array.array('f', base64.b64decode(s))
    magnitude = decoded[0]
    components = decoded[1:]
    return (magnitude, components)


def cosineSimilarity(s1, s2):
    """Cosine similarity between two base64-encoded vectors.

    Returns 0 when either vector has zero magnitude, avoiding a
    division by zero.
    """
    (mag_a, vec_a) = Base64ToFloatArray(s1)
    (mag_b, vec_b) = Base64ToFloatArray(s2)
    if mag_a == 0 or mag_b == 0:
        return 0
    dot = sum(a * b for a, b in zip(vec_a, vec_b))
    return dot / (mag_a * mag_b)
# Register udf functions so that it could be used in dataframe
#
# Perform same computation as cosineSimilarity()
#
@F.udf("float")
def udfCosineSimilarity(s1, s2):
    # Spark UDF wrapper so cosineSimilarity can be applied column-wise
    # inside DataFrame expressions (returns a float column).
    return cosineSimilarity(s1, s2)
# COMMAND ----------
# MAGIC %md **NetworkSimilarity** class to compute Network Similarity
# COMMAND ----------
# Parameters:
# resource: resource stream path
# container: container name in Azure Storage (AS) account
# account: Azure Storage (AS) account
# sas: complete 'Blob service SAS URL' of the shared access signature (sas) for the container
# key: access key for the container, if sas is specified, key is ignored
#
# Note:
# resource does not have header
# you need to provide value for either sas or key
#
class NetworkSimilarity(AzureStorageAccess):
    """Network-similarity queries over an entity table in Azure Storage.

    The resource is a headerless, tab-separated file with columns
    EntityId, EntityType, Data (base64-encoded embedding). Provide
    either a SAS token or an access key; when ``sas`` is given,
    ``key`` is ignored (see AzureStorageAccess).
    """

    def __init__(self, resource, container, account, sas='', key=''):
        AzureStorageAccess.__init__(self, container, account, sas, key)
        # Declare the three-column layout explicitly: the file has no header.
        layout = StructType()
        layout.add(StructField('EntityId', LongType(), False))
        layout.add(StructField('EntityType', StringType(), False))
        layout.add(StructField('Data', StringType(), False))
        self.df = spark.read.format('csv').options(header='false', delimiter='\t').schema(layout).load(self.getFullpath(resource))

    def getDataframe(self):
        """Return the underlying entity dataframe."""
        return self.df

    def raiseErrorIfNotFound(self, row, e):
        """Raise KeyError when the lookup for entity *e* returned no row."""
        if row is None:
            raise KeyError('entity ' + str(e) + ' not found')

    def getSimilarity(self, e1, e2):
        """Cosine similarity between the embeddings of entities e1 and e2."""
        frame = self.df
        first = frame.where(frame.EntityId == e1).first()
        self.raiseErrorIfNotFound(first, e1)
        second = frame.where(frame.EntityId == e2).first()
        self.raiseErrorIfNotFound(second, e2)
        return cosineSimilarity(first.Data, second.Data)

    def getTopEntities(self, e, targetType = '', maxCount = 20, minScore = 0.0):
        """Entities most similar to *e*, best first.

        Optionally restricted to ``targetType``; capped at ``maxCount``
        rows with Score >= ``minScore``. The anchor entity itself is
        always excluded.
        """
        frame = self.df
        anchor = frame.where(frame.EntityId == e).first()
        self.raiseErrorIfNotFound(anchor, e)
        if targetType == '':
            candidates = frame.where(frame.EntityId != e)
        else:
            candidates = frame.where((frame.EntityId != e) & (frame.EntityType == targetType))
        scored = candidates.select(candidates.EntityId, candidates.EntityType, udfCosineSimilarity(F.lit(anchor.Data), candidates.Data).alias('Score'))
        return scored.where(scored.Score >= minScore).orderBy(scored.Score.desc()).limit(maxCount)
| 33.261905
| 126
| 0.678597
| 364
| 2,794
| 5.186813
| 0.425824
| 0.012712
| 0.03178
| 0.030191
| 0.034958
| 0.024364
| 0
| 0
| 0
| 0
| 0
| 0.0323
| 0.180029
| 2,794
| 83
| 127
| 33.662651
| 0.791794
| 0.269864
| 0
| 0.042553
| 0
| 0
| 0.029747
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170213
| false
| 0
| 0.085106
| 0.042553
| 0.425532
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5ddff0c682bfeb9cf9d9bdcf324ee0733eb92a14
| 2,899
|
py
|
Python
|
Animation/Main.py
|
olesmith/SmtC
|
dfae5097f02192b60aae05b9d02404fcfe893be3
|
[
"CC0-1.0"
] | null | null | null |
Animation/Main.py
|
olesmith/SmtC
|
dfae5097f02192b60aae05b9d02404fcfe893be3
|
[
"CC0-1.0"
] | null | null | null |
Animation/Main.py
|
olesmith/SmtC
|
dfae5097f02192b60aae05b9d02404fcfe893be3
|
[
"CC0-1.0"
] | null | null | null |
import gd,os,time
from Html import Animation_Html
from Iteration import Animation_Iteration
from Write import Animation_Write
from Base import *
from Canvas2 import *
from Canvas2 import Canvas2
from Image import Image
from HTML import HTML
# Module-level singleton canvas, lazily created by Animation.Canvas().
__Canvas__=None
class Animation(
        Animation_Html,
        Animation_Iteration,
        Animation_Write,
        Base,HTML
        ):
    # Animation driver: mixes in HTML generation, iteration and file
    # writing; frames are assembled into animated GIFs via ImageMagick.
    Convert_Bin="/usr/bin/convert"
    # Web roots used when publishing generated graphics.
    HTML_Root="http://127.0.0.1/Graphics"
    CGI_Root="http://127.0.0.1/cgi-bin/Graphics/Display.py"
    # Command-line switch table: maps a switch to the attribute it sets,
    # its help text and its value type (None = plain flag/string).
    __Switches__={
        "v": {
            "Attr": "Verbose",
            "Text": "Verbosity level. Augment to see more numbers...",
            "Type": None,
        },
        "-clean": {
            "Attr": "Clean",
            "Text": "Remove PNGs generated",
            "Type": "int",
        },
        "-rewrite": {
            "Attr": "Images_Rewrite",
            "Text": "Rewrite image file between iterations",
            "Type": None,
        },
        "l": {
            "Attr": "Loop",
            "Text": "Animated GIF no of loops (passed to convert)",
            "Type": None,
        },
        "d": {
            "Attr": "Delay",
            "Text": "Animated GIF delay (passed to convert)",
            "Type": None,
        },
        "W": {
            "Attr": "W",
            "Text": "White background",
            "Type": "bool",
        },
    }
    __Args__=[]
    Indent="   "
    # Defaults below are overridable from the command line (see __Switches__).
    W=False
    Verbose=1
    Delay="5"
    Loop="0"
    Path="curves"
    Curve_Parms_Path=""
    FileName="Curve"
    Name="Curve"
    Parameters=["a","b","c"]
    Parameter_Names=["a","b","c"]
    Clean=0 #Clean up afterwards
    Iteration_Files=[]
    Images_Rewrite=1

    def __init__(self,pmin,pmax,vals={}):
        # NOTE(review): mutable default argument vals={} is shared between
        # calls — safe only if callees never mutate it; confirm.
        self.Hash2Obj(vals)
        self.__Canvas__=Canvas2(vals,[ pmin,pmax ])
        self.Canvas([ pmin,pmax ]).CLI2Obj()

    ##!
    ##! Override __str__ to print some useful info.
    ##!
    def __str__(self):
        text="Animation, Path: "+self.Path
        text+="\n\tFileName: "+self.FileName
        text+="\n\tParms: "+self.Curve_Parms_Path
        text+="\n\tLoop: "+self.Loop
        text+="\n\tDelay: "+self.Delay
        text+="\n\tClean: "+str(self.Clean)
        text+="\n"+str(self.Canvas())
        return text

    ##!
    ##! Returns Canvas object, stored in self.__Canvas__
    ##!
    def Canvas(self,pexts=[]):
        global __Canvas__ # Needed to modify global copy of __Canvas__
        if (not __Canvas__):
            parms={
            }
            __Canvas__=Canvas2(parms,pexts)
        return __Canvas__

    def BackGround_Color(self):
        # Background follows the "W" ("White background") switch.
        if (self.W):
            return "White"
        else:
            return "Black"

    def Initialize(self):
        # NOTE(review): self.Resolution is not defined in this class body;
        # presumably populated from CLI switches elsewhere — confirm.
        self.Canvas().Resolution=self.Resolution
        self.Canvas().Image_Rewrite()
| 23.762295
| 73
| 0.519489
| 305
| 2,899
| 4.704918
| 0.393443
| 0.041812
| 0.019512
| 0.032056
| 0.051568
| 0.019512
| 0
| 0
| 0
| 0
| 0
| 0.012539
| 0.339772
| 2,899
| 121
| 74
| 23.958678
| 0.7372
| 0.055881
| 0
| 0.042553
| 0
| 0
| 0.196107
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053191
| false
| 0.021277
| 0.095745
| 0
| 0.404255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5de1c133ca3046f5ca60bc9f85bbcefa4f2854dd
| 1,839
|
py
|
Python
|
pytorch_metric_learning/miners/distance_weighted_miner.py
|
junjungoal/pytorch_metric_learning
|
e56bb440d1ec63e13622025209135a788c6f51c1
|
[
"MIT"
] | 1
|
2019-11-28T19:31:29.000Z
|
2019-11-28T19:31:29.000Z
|
pytorch_metric_learning/miners/distance_weighted_miner.py
|
junjungoal/pytorch_metric_learning
|
e56bb440d1ec63e13622025209135a788c6f51c1
|
[
"MIT"
] | null | null | null |
pytorch_metric_learning/miners/distance_weighted_miner.py
|
junjungoal/pytorch_metric_learning
|
e56bb440d1ec63e13622025209135a788c6f51c1
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
from .base_miner import BasePostGradientMiner
import torch
from ..utils import loss_and_miner_utils as lmu
# adapted from
# https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/
# /embedding_learning/model.py
class DistanceWeightedMiner(BasePostGradientMiner):
    """Triplet miner using distance-weighted negative sampling.

    Negatives are drawn with probability derived from the pairwise
    distance distribution: distances are clipped below at ``cutoff``
    (variance control) and weights are zeroed at or above
    ``nonzero_loss_cutoff``.
    """

    def __init__(self, cutoff, nonzero_loss_cutoff, **kwargs):
        # cutoff: lower clip applied to pairwise distances.
        # nonzero_loss_cutoff: distances >= this get zero sampling weight.
        super().__init__(**kwargs)
        self.cutoff = cutoff
        self.nonzero_loss_cutoff = nonzero_loss_cutoff

    def mine(self, embeddings, labels):
        """Return sampled triplet indices for the batch.

        Args:
            embeddings: tensor of shape (n, d) — n vectors of dimension d
                (established by ``n, d = embeddings.size()`` below).
            labels: length-n label tensor aligned with ``embeddings``.
        """
        label_set = torch.unique(labels)
        n, d = embeddings.size()
        dist_mat = lmu.dist_mat(embeddings)
        # Add the identity to keep the diagonal non-zero...
        dist_mat = dist_mat + torch.eye(dist_mat.size(0)).to(embeddings.device)
        # so that we don't get log(0). We mask the diagonal out later anyway
        # Cut off to avoid high variance.
        dist_mat = torch.max(dist_mat, torch.tensor(self.cutoff).to(dist_mat.device))
        # Subtract max(log(distance)) for stability.
        # See the first equation from Section 4 of the paper
        log_weights = (2.0 - float(d)) * torch.log(dist_mat) - (
            float(d - 3) / 2
        ) * torch.log(1.0 - 0.25 * (dist_mat ** 2.0))
        weights = torch.exp(log_weights - torch.max(log_weights))
        # Sample only negative examples by setting weights of
        # the same-class examples to 0.
        mask = torch.ones(weights.size()).to(embeddings.device)
        for i in label_set:
            idx = (labels == i).nonzero()
            mask[torch.meshgrid(idx.squeeze(1), idx.squeeze(1))] = 0
        # Zero out same-class pairs and pairs beyond the loss cutoff,
        # then normalize each row into a sampling distribution.
        weights = weights * mask * ((dist_mat < self.nonzero_loss_cutoff).float())
        weights = weights / torch.sum(weights, dim=1, keepdim=True)
        np_weights = weights.cpu().numpy()
        return lmu.get_random_triplet_indices(labels, weights=np_weights)
| 39.978261
| 85
| 0.657423
| 252
| 1,839
| 4.630952
| 0.460317
| 0.065981
| 0.058269
| 0.039417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014055
| 0.22621
| 1,839
| 45
| 86
| 40.866667
| 0.806044
| 0.222947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5de40eed6f013ca3b73d1af645e0c517f3a9ec93
| 4,728
|
py
|
Python
|
pulsar/apps/data/redis/store.py
|
goodboy/pulsar
|
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
|
[
"BSD-3-Clause"
] | 1
|
2020-11-30T07:36:57.000Z
|
2020-11-30T07:36:57.000Z
|
pulsar/apps/data/redis/store.py
|
goodboy/pulsar
|
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
|
[
"BSD-3-Clause"
] | null | null | null |
pulsar/apps/data/redis/store.py
|
goodboy/pulsar
|
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import partial
from pulsar import Connection, Pool, get_actor
from pulsar.utils.pep import to_string
from pulsar.apps.data import RemoteStore
from pulsar.apps.ds import redis_parser
from .client import RedisClient, Pipeline, Consumer, ResponseError
from .pubsub import RedisPubSub, RedisChannels
class RedisStoreConnection(Connection):
    """Connection that attaches a redis protocol parser and provides
    single-command and pipeline execution helpers."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Each connection gets its own parser instance from the producer.
        self.parser = self._producer._parser_class()

    async def execute(self, *args, **options):
        """Send one redis command and await its result.

        Re-raises the wrapped exception when the server answered with an
        error response.
        """
        consumer = self.current_consumer()
        await consumer.start((args, options))
        result = await consumer.on_finished
        if isinstance(result, ResponseError):
            raise result.exception
        return result

    async def execute_pipeline(self, commands, raise_on_error=True):
        """Send a batch of commands as a pipeline and await the results.

        NOTE(review): unlike execute(), consumer.start() is not awaited
        here — confirm whether that is intentional.
        """
        consumer = self.current_consumer()
        consumer.start((commands, raise_on_error, []))
        result = await consumer.on_finished
        if isinstance(result, ResponseError):
            raise result.exception
        return result
class RedisStore(RemoteStore):
    '''Redis :class:`.Store` implementation.
    '''
    # Every new connection is a RedisStoreConnection driven by a Consumer.
    protocol_factory = partial(RedisStoreConnection, Consumer)
    supported_queries = frozenset(('filter', 'exclude'))

    def _init(self, namespace=None, parser_class=None, pool_size=50,
              decode_responses=False, **kwargs):
        # Store-specific setup: pick a redis parser, record the key
        # namespace, build the connection pool and normalise the database.
        self._decode_responses = decode_responses
        if not parser_class:
            actor = get_actor()
            pyparser = actor.cfg.redis_py_parser if actor else False
            parser_class = redis_parser(pyparser)
        self._parser_class = parser_class
        if namespace:
            self._urlparams['namespace'] = namespace
        self._pool = Pool(self.connect, pool_size=pool_size, loop=self._loop)
        if self._database is None:
            self._database = 0
        self._database = int(self._database)
        self.loaded_scripts = set()

    @property
    def pool(self):
        # Connection pool used by execute()/execute_pipeline().
        return self._pool

    @property
    def namespace(self):
        '''The prefix namespace to append to all transaction on keys
        '''
        n = self._urlparams.get('namespace')
        return '%s:' % n if n else ''

    def key(self):
        # Identity tuple for this store (DNS + encoding).
        return (self._dns, self._encoding)

    def client(self):
        '''Get a :class:`.RedisClient` for the Store'''
        return RedisClient(self)

    def pipeline(self):
        '''Get a :class:`.Pipeline` for the Store'''
        return Pipeline(self)

    def pubsub(self, protocol=None):
        return RedisPubSub(self, protocol=protocol)

    def channels(self, protocol=None, **kw):
        return RedisChannels(self.pubsub(protocol=protocol), **kw)

    def ping(self):
        return self.client().ping()

    async def execute(self, *args, **options):
        # Check a connection out of the pool for the duration of one command.
        connection = await self._pool.connect()
        with connection:
            result = await connection.execute(*args, **options)
        return result

    async def execute_pipeline(self, commands, raise_on_error=True):
        conn = await self._pool.connect()
        with conn:
            result = await conn.execute_pipeline(commands, raise_on_error)
        return result

    async def connect(self, protocol_factory=None):
        # Create a new connection; authenticate and select the configured
        # database when credentials/database are set.
        protocol_factory = protocol_factory or self.create_protocol
        if isinstance(self._host, tuple):
            host, port = self._host
            transport, connection = await self._loop.create_connection(
                protocol_factory, host, port)
        else:
            # Only TCP (host, port) endpoints are supported here.
            raise NotImplementedError('Could not connect to %s' %
                                      str(self._host))
        if self._password:
            await connection.execute('AUTH', self._password)
        if self._database:
            await connection.execute('SELECT', self._database)
        return connection

    def flush(self):
        return self.execute('flushdb')

    def close(self):
        '''Close all open connections.'''
        return self._pool.close()

    def has_query(self, query_type):
        return query_type in self.supported_queries

    def basekey(self, meta, *args):
        # Build '<namespace><table>[:p1:p2...]', skipping None parts.
        key = '%s%s' % (self.namespace, meta.table_name)
        postfix = ':'.join((to_string(p) for p in args if p is not None))
        return '%s:%s' % (key, postfix) if postfix else key

    def meta(self, meta):
        '''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''
        # indices = dict(((idx.attname, idx.unique) for idx in meta.indices))
        data = meta.as_dict()
        data['namespace'] = self.basekey(meta)
        return data
class CompiledQuery:
    """Holder for a compiled redis query.

    NOTE(review): __init__ accepts *query* but only stores *pipe* in the
    visible source — the class body appears truncated; confirm against
    the upstream file.
    """
    def __init__(self, pipe, query):
        self.pipe = pipe
| 33.295775
| 78
| 0.635787
| 548
| 4,728
| 5.322993
| 0.270073
| 0.022626
| 0.020569
| 0.027425
| 0.139184
| 0.122729
| 0.10216
| 0.10216
| 0.10216
| 0.10216
| 0
| 0.00086
| 0.262267
| 4,728
| 141
| 79
| 33.531915
| 0.835436
| 0.073816
| 0
| 0.174757
| 0
| 0
| 0.021389
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15534
| false
| 0.019417
| 0.067961
| 0.067961
| 0.446602
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5de5910c5b5ea17215e0b0e1f87d78465a65ecbe
| 2,683
|
py
|
Python
|
pcg_libraries/src/pcg_gazebo/parsers/types/vector.py
|
boschresearch/pcg_gazebo_pkgs
|
1c112d01847ca4f8da61ce9b273e13d13bc7eb73
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 42
|
2019-06-26T09:46:03.000Z
|
2022-03-18T17:56:26.000Z
|
pcg_libraries/src/pcg_gazebo/parsers/types/vector.py
|
boschresearch/pcg_gazebo_pkgs
|
1c112d01847ca4f8da61ce9b273e13d13bc7eb73
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 9
|
2019-07-18T10:36:05.000Z
|
2020-10-02T15:26:32.000Z
|
pcg_libraries/src/pcg_gazebo/parsers/types/vector.py
|
boschresearch/pcg_gazebo_pkgs
|
1c112d01847ca4f8da61ce9b273e13d13bc7eb73
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2019-11-01T03:20:11.000Z
|
2020-10-15T23:23:44.000Z
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import collections.abc

from . import XMLBase
class XMLVector(XMLBase):
    """Fixed-size numeric vector element for XML parsing.

    Holds ``size`` int/float components; values default to zeros.
    Validation failures in setters raise AssertionError (preserving the
    original contract); ``is_valid`` reports problems and returns False.
    """
    _NAME = ''

    def __init__(self, size=None):
        # size: positive integer number of vector components.
        XMLBase.__init__(self)
        assert size is not None, 'Vector size cannot be None'
        assert isinstance(size, int), \
            '[{}] Vector size input must be an integer, received={}'.format(
                self.xml_element_name, size)
        assert size > 0, '[{}] Size must be greater than zero'.format(
            self.xml_element_name)
        self._size = size
        self._value = [0 for _ in range(self._size)]

    def _set_value(self, value):
        """Replace the stored vector after validating size and types."""
        assert isinstance(value, collections.abc.Iterable), \
            'Input must be iterable, element={}, received={}, type={}'.format(
                self._NAME, value, type(value))
        # Materialize exactly once: the original iterated `value` several
        # times, which silently produced an empty vector for one-shot
        # iterators such as generators.
        items = list(value)
        assert len(items) == self._size, \
            'Input vector has the wrong size, element={}, received={}, ' \
            'size of received={}, expected length={}'.format(
                self._NAME, value, len(items), self._size)
        for item in items:
            assert isinstance(item, (float, int))
        self._value = items

    def reset(self):
        """Restore the zero vector and reset base-class state."""
        self._value = [0 for _ in range(self._size)]
        XMLBase.reset(self)

    def is_valid(self):
        """Return True if the value is a list of exactly ``self._size``
        numeric (int/float) entries; print a reason and return False
        otherwise."""
        if not isinstance(self._value, list):
            print('Vector object must have a list as value')
            return False
        if len(self._value) != self._size:
            # The original message hard-coded "3 elements" regardless of
            # the configured size; report the actual expected length.
            print('Vector value must be a list with {} elements'.format(
                self._size))
            return False
        for item in self._value:
            if not isinstance(item, (float, int)):
                print('Each vector element must be a float or integer')
                return False
        return True

    def get_formatted_value_as_str(self):
        """Serialize the vector as a space-separated string using
        locale-independent 'n' number formatting."""
        assert self.is_valid(), 'Invalid vector'
        output_str = ' '.join(['{}'] * self._size)
        return output_str.format(*[format(x, 'n') for x in self._value])
| 40.044776
| 78
| 0.633619
| 355
| 2,683
| 4.673239
| 0.371831
| 0.038577
| 0.023508
| 0.019289
| 0.086799
| 0.033755
| 0.033755
| 0.033755
| 0
| 0
| 0
| 0.006094
| 0.26612
| 2,683
| 66
| 79
| 40.651515
| 0.836465
| 0.244875
| 0
| 0.113636
| 0
| 0
| 0.205868
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 1
| 0.113636
| false
| 0
| 0.045455
| 0
| 0.318182
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5de5b5ee5bf23c10f66da04af7327075aad14c24
| 9,531
|
py
|
Python
|
tests/main/helpers/test_buyers_helpers.py
|
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
|
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
|
[
"MIT"
] | 1
|
2021-05-06T22:37:05.000Z
|
2021-05-06T22:37:05.000Z
|
tests/main/helpers/test_buyers_helpers.py
|
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
|
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
|
[
"MIT"
] | 108
|
2017-06-14T10:48:10.000Z
|
2021-06-11T08:55:25.000Z
|
tests/main/helpers/test_buyers_helpers.py
|
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
|
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
|
[
"MIT"
] | 5
|
2017-06-27T15:13:11.000Z
|
2021-04-10T18:06:29.000Z
|
import mock
import pytest
from werkzeug.exceptions import NotFound
import app.main.helpers as helpers
from dmcontent.content_loader import ContentLoader
from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub
# Shared fixtures for this module: load the 'dos' framework's 'edit_brief'
# manifest once from the test content directory.
content_loader = ContentLoader('tests/fixtures/content')
content_loader.load_manifest('dos', 'data', 'edit_brief')
questions_builder = content_loader.get_manifest('dos', 'edit_brief')
class TestBuyersHelpers(object):
    """Unit tests for the functions in app.main.helpers.buyers_helpers."""

    def test_get_framework_and_lot(self):
        provided_lot = LotStub(slug='digital-specialists', allows_brief=True).response()
        data_api_client = mock.Mock()
        data_api_client.get_framework.return_value = FrameworkStub(
            slug='digital-outcomes-and-specialists-4',
            status='live',
            lots=[provided_lot],
        ).single_result_response()
        framework, lot = helpers.buyers_helpers.get_framework_and_lot('digital-outcomes-and-specialists-4',
                                                                      'digital-specialists',
                                                                      data_api_client)
        assert framework['status'] == "live"
        assert framework['name'] == 'Digital Outcomes and Specialists 4'
        assert framework['slug'] == 'digital-outcomes-and-specialists-4'
        assert framework['clarificationQuestionsOpen'] is True
        assert lot == provided_lot

    def test_get_framework_and_lot_404s_for_wrong_framework_status(self):
        # Framework is 'open' but only 'live' is allowed -> NotFound.
        data_api_client = mock.Mock()
        data_api_client.get_framework.return_value = FrameworkStub(
            slug='digital-outcomes-and-specialists-4',
            status='open',
            lots=[
                LotStub(slug='digital-specialists', allows_brief=True).response()
            ]
        ).single_result_response()
        with pytest.raises(NotFound):
            helpers.buyers_helpers.get_framework_and_lot(
                'digital-outcomes-and-specialists-4',
                'digital-specialists',
                data_api_client,
                allowed_statuses=['live'],
            )

    def test_get_framework_and_lot_404s_if_allows_brief_required(self):
        # Lot does not allow briefs but must_allow_brief=True -> NotFound.
        data_api_client = mock.Mock()
        data_api_client.get_framework.return_value = FrameworkStub(
            slug='digital-outcomes-and-specialists-4',
            status='live',
            lots=[
                LotStub(slug='digital-specialists', allows_brief=False).response()
            ]
        ).single_result_response()
        with pytest.raises(NotFound):
            helpers.buyers_helpers.get_framework_and_lot(
                'digital-outcomes-and-specialists-4',
                'digital-specialists',
                data_api_client,
                must_allow_brief=True,
            )

    @pytest.mark.parametrize(
        ['framework', 'lot', 'user', 'result'],
        [
            ('digital-outcomes-and-specialists-4', 'digital-specialists', 123, True),
            ('not-digital-outcomes-and-specialists', 'digital-specialists', 123, False),
            ('digital-outcomes-and-specialists-4', 'not-digital-specialists', 123, False),
            ('digital-outcomes-and-specialists-4', 'digital-specialists', 124, False),
        ]
    )
    def test_is_brief_correct(self, framework, lot, user, result):
        # Correct only when framework slug, lot slug and user id all match.
        brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response()
        assert helpers.buyers_helpers.is_brief_correct(brief, framework, lot, user) is result

    @pytest.mark.parametrize(
        ['status', 'allow_withdrawn', 'result'],
        [
            ('withdrawn', True, True),
            ('withdrawn', False, False),
            ('live', True, True),
            ('live', False, True),
        ]
    )
    def test_if_brief_correct_allow_withdrawn(self, status, allow_withdrawn, result):
        brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status=status).response()
        assert helpers.buyers_helpers.is_brief_correct(
            brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allow_withdrawn=allow_withdrawn
        ) is result

    @pytest.mark.parametrize(
        'allowed_statuses, result', [
            (['live', 'closed'], True),
            (['closed'], False)
        ]
    )
    def test_is_brief_correct_allowed_statuses(self, allowed_statuses, result):
        brief = BriefStub(framework_slug='digital-outcomes-and-specialists-4', user_id=123, status='live').response()
        assert helpers.buyers_helpers.is_brief_correct(
            brief, 'digital-outcomes-and-specialists-4', 'digital-specialists', 123, allowed_statuses=allowed_statuses
        ) is result

    def test_is_brief_associated_with_user(self):
        brief = BriefStub(user_id=123).response()
        assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 123) is True
        assert helpers.buyers_helpers.is_brief_associated_with_user(brief, 234) is False

    def test_brief_can_be_edited(self):
        # Only draft briefs are editable.
        assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='draft').response()) is True
        assert helpers.buyers_helpers.brief_can_be_edited(BriefStub(status='live').response()) is False

    def test_brief_is_withdrawn(self):
        assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='withdrawn').response()) is True
        assert helpers.buyers_helpers.brief_is_withdrawn(BriefStub(status='live').response()) is False

    def test_section_has_at_least_one_required_question(self):
        content = content_loader.get_manifest('dos', 'edit_brief').filter(
            {'lot': 'digital-specialists'}
        )
        # Expected per-section answers for the digital-specialists manifest.
        sections_with_required_questions = {
            'section-1': True,
            'section-2': True,
            'section-4': False,
            'section-5': True
        }
        for section in content.sections:
            assert helpers.buyers_helpers.section_has_at_least_one_required_question(section) \
                == sections_with_required_questions[section.slug]

    def test_count_unanswered_questions(self):
        brief = {
            'status': 'draft',
            'frameworkSlug': 'dos',
            'lotSlug': 'digital-specialists',
            'required1': True
        }
        content = content_loader.get_manifest('dos', 'edit_brief').filter(
            {'lot': 'digital-specialists'}
        )
        sections = content.summary(brief)
        unanswered_required, unanswered_optional = helpers.buyers_helpers.count_unanswered_questions(sections)
        assert unanswered_required == 2
        assert unanswered_optional == 2

    def test_add_unanswered_counts_to_briefs(self):
        briefs = [{
            'status': 'draft',
            'frameworkSlug': 'dos',
            'lotSlug': 'digital-specialists',
            'required1': True
        }]
        assert helpers.buyers_helpers.add_unanswered_counts_to_briefs(briefs, content_loader) == [{
            'status': 'draft',
            'frameworkSlug': 'dos',
            'lotSlug': 'digital-specialists',
            'required1': True,
            'unanswered_required': 2,
            'unanswered_optional': 2
        }]

    def test_get_sorted_responses_for_brief(self):
        # Responses are sorted by number of nice-to-have requirements met.
        data_api_client = mock.Mock()
        data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {"id": "five", "niceToHaveRequirements": [True, True, True, True, True]},
                {"id": "zero", "niceToHaveRequirements": [False, False, False, False, False]},
                {"id": "three", "niceToHaveRequirements": [True, True, False, False, True]},
                {"id": "five", "niceToHaveRequirements": [True, True, True, True, True]},
                {"id": "four", "niceToHaveRequirements": [True, True, True, True, False]},
                {"id": "one", "niceToHaveRequirements": [False, False, False, True, False]},
                {"id": "four", "niceToHaveRequirements": [True, True, True, True, False]},
            ]
        }
        brief = {"id": 1, "niceToHaveRequirements": ["Nice", "to", "have", "yes", "please"]}
        assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [
            {'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]},
            {'id': 'five', 'niceToHaveRequirements': [True, True, True, True, True]},
            {'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},
            {'id': 'four', 'niceToHaveRequirements': [True, True, True, True, False]},
            {'id': 'three', 'niceToHaveRequirements': [True, True, False, False, True]},
            {"id": "one", "niceToHaveRequirements": [False, False, False, True, False]},
            {'id': 'zero', 'niceToHaveRequirements': [False, False, False, False, False]}
        ]

    def test_get_sorted_responses_does_not_sort_if_no_nice_to_haves(self):
        # Original order is preserved when the brief lists no nice-to-haves.
        data_api_client = mock.Mock()
        data_api_client.find_brief_responses.return_value = {
            "briefResponses": [
                {"id": "five"},
                {"id": "zero"},
                {"id": "three"},
                {"id": "five"}
            ]
        }
        brief = {"id": 1, "niceToHaveRequirements": []}
        assert helpers.buyers_helpers.get_sorted_responses_for_brief(brief, data_api_client) == [
            {"id": "five"},
            {"id": "zero"},
            {"id": "three"},
            {"id": "five"}
        ]
| 44.125
| 118
| 0.615255
| 977
| 9,531
| 5.746162
| 0.136131
| 0.0456
| 0.04275
| 0.087816
| 0.732633
| 0.669576
| 0.653367
| 0.594585
| 0.530103
| 0.424296
| 0
| 0.01005
| 0.258735
| 9,531
| 215
| 119
| 44.330233
| 0.784572
| 0
| 0
| 0.42246
| 0
| 0
| 0.203127
| 0.101668
| 0
| 0
| 0
| 0
| 0.106952
| 1
| 0.074866
| false
| 0
| 0.032086
| 0
| 0.112299
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5de70a07393091d4b0d1b81bb83f4335c31b6482
| 3,329
|
py
|
Python
|
Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | null | null | null |
Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | 1
|
2022-03-03T21:24:40.000Z
|
2022-03-03T21:24:54.000Z
|
Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py
|
devinrsmith/deephaven-core
|
3a6930046faf1cd556f62a914ce1cfd7860147b9
|
[
"MIT"
] | null | null | null |
import deephaven.TableTools as tt
import deephaven.Plot as plt
t = tt.emptyTable(50)\
.update("X = i + 5", "XLow = X -1", "XHigh = X + 1", "Y = Math.random() * 5", "YLow = Y - 1", "YHigh = Y + 1", "USym = i % 2 == 0 ? `AAPL` : `MSFT`")
p = plt.plot("S1", t, "X", "Y").lineColor("black").show()
p2 = plt.plot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
p3 = plt.plot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
p4 = plt.plot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
p4 = plt.plot3d("S1", t, "X", "X", "Y").show()
pBy = plt.plotBy("S1", t, "X", "Y", "USym").show()
pBy = plt.plot3dBy("S1", t, "X", "X", "Y", "USym").show()
cp = plt.catPlot("S1", t, "X", "Y").lineColor("black").show()
cp2 = plt.catPlot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
cp3 = plt.catPlot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
cp4 = plt.catPlot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()
cp = plt.catPlot3d("S1", t, "X", "X", "Y").show()
cpBy = plt.catPlotBy("S1", t, "X", "Y", "USym").show()
cpBy = plt.catPlot3dBy("S1", t, "X", "X", "Y", "USym").show()
pp = plt.piePlot("S1", t, "X", "Y")
chp = plt.catHistPlot("S1", t, "X").show()
hp = plt.histPlot("S1", t, "X", 5).show()
hp = plt.histPlot("S1", t, "X", 0, 10, 5).show()
ep = plt.errorBarXY("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh").show()
epBy = plt.errorBarXYBy("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh", "USym").show()
ep2 = plt.errorBarX("S1", t, "X", "XLow", "XHigh", "Y").show()
epBy2 = plt.errorBarXBy("S1", t, "X", "XLow", "XHigh", "Y", "USym").show()
ep3 = plt.errorBarY("S1", t, "X", "Y", "YLow", "YHigh").show()
epBy3 = plt.errorBarYBy("S1", t, "X", "Y", "YLow", "YHigh", "USym").show()
doubles = [3, 4, 3, 5, 4, 5]
time = 1491946585000000000
t = tt.newTable(tt.col("USym", ["A", "B", "A", "B", "A", "B"]),
tt.doubleCol("Open", doubles), tt.doubleCol("High", doubles),
tt.doubleCol("Low", doubles), tt.doubleCol("Close", doubles))
t = t.updateView("Time = new DBDateTime(time + (MINUTE * i))")
ohlc = plt.ohlcPlot("Test1", t, "Time", "Open", "High", "Low", "Close")
ohlcPlotBy = plt.figure().newChart(0)\
.chartTitle("Chart Title")\
.newAxes()\
.xLabel("X")\
.yLabel("Y")\
.ohlcPlotBy("Test1", t, "Time", "Open", "High", "Low", "Close", "USym")
categories = ["Samsung", "Others", "Nokia", "Apple", "MSFT"]
valuesD = [27.8, 55.3, 16.8, 17.1, 23.1]
valuesI = [27, 55, 16, 17, 15]
ap = plt.plot("S1", valuesD, valuesI).show()
ap = plt.plot3d("S1", valuesI, valuesI, valuesI).show()
acp = plt.catPlot("S1", categories, valuesI).show()
acp2 = plt.catPlot3d("S1", categories, categories, valuesD).show()
achp = plt.catHistPlot("S1", categories).show()
app = plt.figure().xLabel("X").yLabel("Y").piePlot("S1", categories, valuesI).pointLabelFormat("{0}").show()
aep = plt.errorBarXY("S1", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show()
aep2 = plt.errorBarX("S1", valuesD, valuesD, valuesD, valuesD).show()
aep3 = plt.errorBarY("S1", valuesD, valuesD, valuesD, valuesD).show()
hp = plt.histPlot("S1", valuesD, 5).show()
hp = plt.histPlot("S1", valuesD, 0, 10, 5).show()
hp = plt.histPlot("S1", valuesI, 5).show()
| 37.829545
| 153
| 0.578252
| 494
| 3,329
| 3.896761
| 0.263158
| 0.037403
| 0.04987
| 0.033766
| 0.435325
| 0.402078
| 0.268571
| 0.154805
| 0.130909
| 0.051948
| 0
| 0.046455
| 0.139982
| 3,329
| 87
| 154
| 38.264368
| 0.625917
| 0
| 0
| 0
| 0
| 0
| 0.167017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035714
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5deb3af9396589471b73ff049da7ac957d8d19d7
| 14,680
|
py
|
Python
|
anyway/parsers/united.py
|
ayalapol/anyway
|
ebf2436a8f9b152ae8f4d051c129bac754cb8cc1
|
[
"BSD-3-Clause"
] | null | null | null |
anyway/parsers/united.py
|
ayalapol/anyway
|
ebf2436a8f9b152ae8f4d051c129bac754cb8cc1
|
[
"BSD-3-Clause"
] | null | null | null |
anyway/parsers/united.py
|
ayalapol/anyway
|
ebf2436a8f9b152ae8f4d051c129bac754cb8cc1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import csv
import logging
import math
import os
from datetime import datetime, timedelta
from xml.dom import minidom

import requests
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import and_

from ..constants import CONST
from ..models import AccidentMarker
from ..utilities import init_flask, decode_hebrew, open_utf8
from .. import importmail
############################################################################################
# United.py is responsible for the parsing and deployment of "united hatzala" data to the DB
############################################################################################
# provider code recorded on every marker produced by this parser
PROVIDER_CODE = CONST.UNITED_HATZALA_CODE
# base offset (hours) of israel local time from UTC (winter clock)
TIME_ZONE = 2
# convert IMS hours code to hours
RAIN_DURATION_CODE_TO_HOURS = {"1": 6, "2": 12, "3": 18, "4": 24, "/": 24, "5": 1, "6": 2, "7": 3, "8": 9, "9": 15}
# map IMS "WW" present-weather codes (strings) to the project's weather codes
WEATHER = {"0": 1, "1": 2, "3": 3, "4": 4, "5": 5, "7": 6, "8": 6, "9": 7, "10": 8, "11": 9,
           "12": 10, "17": 11, "18": 12, "19": 13, "20": 14, "21": 15, "22": 16, "23": 17, "24": 18,
           "25": 19, "26": 20, "27": 21, "28": 22, "29": 23, "30": 24, "31": 24, "32": 24, "33": 7,
           "34": 7, "35": 7, "36": 25, "37": 25, "38": 25, "39": 25, "40": 26, "41": 27, "42": 28,
           "43": 29, "44": 9, "45": 30, "46": 30, "47": 30, "48": 31, "49": 32, "50": 33, "51": 34,
           "52": 33, "53": 35, "54": 36, "55": 37, "56": 38, "57": 39, "58": 37, "59": 37, "61": 37, "60": 36,
           "62": 40, "63": 15, "64": 41, "65": 19, "66": 42, "67": 43, "68": 44, "69": 45, "70": 46, "71": 47,
           "72": 48, "73": 16, "74": 50, "75": 51, "76": 52, "77": 53, "78": 54, "79": 55, "80": 56, "81": 57,
           "82": 58, "83": 59, "84": 60, "85": 61, "86": 62, "87": 63, "88": 64, "89": 65, "90": 66, "91": 67,
           "92": 68, "93": 69, "94": 70, "95": 71, "96": 72, "97": 73, "98": 74, "99": 75}
def retrieve_ims_xml():
    """Fetch the IMS (israel meteorological service) observations xml
    and return its document element."""
    logging.basicConfig(level=logging.DEBUG)
    session = requests.session()
    response = session.get('http://www.ims.gov.il/ims/PublicXML/observ.xml')
    return minidom.parseString(response.text).documentElement
def parse_date(created):
    """Parse a csv timestamp string into a datetime.

    :param created: Date & Time string from csv (either 24-hour with a
                    trailing fractional part, or 12-hour with " AM"/" PM")
    :return: Python datetime object
    :raises AttributeError: if no known format matches (instead of silently
                            reusing a previous call's result via globals)
    """
    DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M']
    time = None
    hour = None
    for date_format in DATE_FORMATS:
        try:
            if date_format == '%Y-%m-%d %H:%M:%S':
                # 24-hour format: strip the trailing 4-char fractional part
                time = datetime.strptime(str(created)[:-4], date_format)
                hour = time.hour
            else:
                # 12-hour format: strip the trailing " AM"/" PM" marker
                time = datetime.strptime(str(created)[:-3], date_format)
                # normalize to 24-hour: 12 AM -> 0, 12 PM -> 12.  The original
                # "+12 unless AM" turned 12 PM into the invalid hour 24 and
                # left 12 AM as 12.
                hour = time.hour % 12
                if not str(created).endswith('AM'):
                    hour += 12
            break
        except ValueError:
            pass
    return datetime(time.year, time.month, time.day, hour, time.minute, 0)
def is_nth_weekday(nth, daynum, year,
                   month):  # find if date is the nth occurrence of the daynum day of the week (ex: the forth sunday of april 2016)
    # start counting the daynum from monday = 0
    # NOTE(review): despite the name, this returns a datetime.date (row `nth`,
    # column `daynum` of the month's date matrix), not a boolean.  Also,
    # calendar.Calendar(nth) sets the calendar's *first weekday* to `nth`,
    # which looks unintended -- confirm against callers before changing.
    return calendar.Calendar(nth).monthdatescalendar(
        year,
        month
    )[nth][daynum]
def get_parent_object_node(node):
    """Walk up from `node` and return its nearest ancestor whose nodeName is
    "Object", or None if no such ancestor exists."""
    ancestor = node.parentNode
    while ancestor:
        if ancestor.nodeName == "Object":
            return ancestor
        ancestor = ancestor.parentNode
def accident_time_zone_adjustment(created):  # return accident time in UTC time
    """Convert a local accident timestamp into a UTC-based station lookup key.

    :param created: Date & Time string from the csv (same input as parse_date)
    :return: concatenated "<year><month><day><hour>" string (no zero padding)
             used to match IMS station 'date_selected' values
    """
    accident_date = parse_date(created)
    # DST switch anchors for the accident's year
    # NOTE(review): relies on is_nth_weekday as-is; see the note on that helper
    daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3)
    winter_clock = is_nth_weekday(4, 6, accident_date.year, 10)
    # weather is given in UTC time
    # therefore in daylight_saving_time we deduct 3 hours from the local time and in winter clock 2 hours
    # use timedelta so early-morning hours roll over to the previous day; the
    # original replace(hour=hour - TIME_ZONE) raised ValueError for hour < 2
    accident_date -= timedelta(hours=TIME_ZONE)
    # the original used bitwise &/| here, which bind tighter than comparisons
    # and evaluated the wrong expressions; it also discarded the results of
    # accident_date.replace(...).  Use boolean logic and apply the deduction.
    if 3 < accident_date.month < 10:
        # accident happened between april and september (daylight saving)
        accident_date -= timedelta(hours=1)
    elif accident_date.month == 10 and (
            winter_clock.day > accident_date.day or (
            winter_clock.day == accident_date.day and accident_date.hour < 2)):
        # accident happened before the last sunday of october at 2:00
        accident_date -= timedelta(hours=1)
    elif accident_date.month == 3 and (
            daylight_saving_time.day < accident_date.day or (
            daylight_saving_time.day == accident_date.day and accident_date.hour >= 2)):
        # accident happened after the last friday of march at 2:00
        accident_date -= timedelta(hours=1)
    return ''.join(
        (str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour)))
def all_station_in_date_frame(collection, created):  # return the stations data in the time of the accident
    """Collect the station "Object" nodes whose date matches the accident.

    :param collection: IMS xml document element (see retrieve_ims_xml)
    :param created: accident Date & Time string from the csv
    :return: a new <accident_date> element holding the matching station nodes
    """
    doc = minidom.Document()
    base = doc.createElement('accident_date')
    doc.appendChild(base)
    station_data_in_date = collection.getElementsByTagName('date_selected')
    accident_date = accident_time_zone_adjustment(created)
    # Bugfix: the original iterated `for station in enumerate(...)` and then
    # accessed station.childNodes on the (index, node) tuple -> AttributeError.
    # The .sort() call on the node list was dropped too: Element nodes are not
    # orderable, and order does not affect the membership test below.
    for station in station_data_in_date:
        if accident_date in str(station.childNodes[0].nodeValue):
            base.appendChild(get_parent_object_node(station))
    return base
def find_station_by_coordinate(collection, latitude, longitude):
    """Return the index (within the 'surface_station' node list) of the
    station closest to the given coordinates, or -1 if there are none.

    Distance is the plain euclidean distance in degrees, which is good
    enough for picking the nearest station.
    """
    best_index = -1
    # start with an infinite distance so any real station beats it
    best_distance = float("inf")
    stations = collection.getElementsByTagName('surface_station')
    for index, station_node in enumerate(stations):
        lon_nodes = station_node.getElementsByTagName('station_lon')
        assert len(lon_nodes) == 1
        lat_nodes = station_node.getElementsByTagName('station_lat')
        assert len(lat_nodes) == 1
        delta_lon = float(lon_nodes[0].childNodes[0].nodeValue) - float(longitude)
        delta_lat = float(lat_nodes[0].childNodes[0].nodeValue) - float(latitude)
        distance = math.sqrt(delta_lat ** 2 + delta_lon ** 2)
        if distance < best_distance:
            best_distance = distance
            best_index = index
    return best_index
def convert_xml_values_to_numbers(rain):
    """Convert an IMS xml rain amount string into millimeters.

    The xml stores amounts as zero-padded three digit strings ("004" -> 4mm).
    Values of 990 and above encode sub-millimeter amounts: 991 = 0.1mm,
    992 = 0.2mm, ..., 999 = 0.9mm (standard SYNOP RRR encoding).

    :param rain: rain amount string from the xml
    :return: rain amount in millimeters as a float
    """
    # float() already ignores leading zeroes, so the original's manual
    # (and no-op, since str.replace's result was discarded) stripping is gone
    rain_in_millimeters = float(rain)
    if rain_in_millimeters >= 990:
        # Bugfix: the original multiplied by 0.01 (991 -> 9.91), contradicting
        # its own comment "991 = 0.1"; decode the documented encoding instead.
        rain_in_millimeters = (rain_in_millimeters - 990) * 0.1
    return rain_in_millimeters
def get_weather_element(station, weather_data, tag):
    """Return the text value of `tag` under weather_data[station], or None
    when the station has no such element."""
    matches = weather_data[station].getElementsByTagName(tag)
    return matches[0].childNodes[0].nodeValue if matches else None
def process_weather_data(collection, latitude, longitude):
    """Derive the accident's weather code from the nearest IMS station.

    :param collection: IMS xml document element (see retrieve_ims_xml)
    :param latitude: accident latitude (string or number)
    :param longitude: accident longitude (string or number)
    :return: integer weather code; 1 (clear sky) by default
    """
    weather = 1  # default weather is clear sky
    station = find_station_by_coordinate(collection, latitude, longitude)
    weather_data = collection.getElementsByTagName('surface_observation')
    wind_force = get_weather_element(station, weather_data, 'FF')
    rain = get_weather_element(station, weather_data, 'RRR')
    # TR is the duration of time in which the rain amount was measured
    rain_duration = get_weather_element(station, weather_data, 'TR')
    weather_code = get_weather_element(station, weather_data, 'WW')
    # a reported present-weather code wins outright
    if weather_code is not None:
        return WEATHER[weather_code.strip()]
    if wind_force is not None:
        if int(wind_force) > 8:
            weather = 76  # wind storm
        elif int(wind_force) > 5:
            weather = 77  # strong winds
    if rain is not None and rain_duration is not None:
        rain_in_millimeters = convert_xml_values_to_numbers(rain)
        rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()]
        rain_rate = rain_in_millimeters / rain_hours
        # rain amount is between 0.1 and 0.5 millimeter
        if 0.0 < rain_in_millimeters <= 0.5 or 0.0 < rain_rate <= 0.5:
            if weather == 76:
                weather = 80  # wind storm, light rain
            elif weather == 77:
                weather = 84  # strong winds, light rain
            else:
                weather = 37  # light rain
        # average rain amount per hour is between 0.5 and 4.0 millimeters
        if 0.5 < rain_rate <= 4:
            if weather == 76:
                weather = 81  # rain and wind storm
            elif weather == 77:
                weather = 85  # rain and strong winds
            else:
                weather = 15  # rain
        # average rain amount per hour is between 4.0 and 8.0 millimeters
        elif 4 < rain_rate <= 8:
            if weather == 76:
                weather = 82  # wind storm, heavy rain
            # Bugfix: this was a separate `if` in the original, so its `else`
            # clobbered the 82 just assigned for weather == 76 with 78.
            elif weather == 77:
                weather = 86  # strong winds, heavy rain
            else:
                weather = 78  # heavy rain
        # average rain amount per hour is more than 8.0 millimeters
        elif rain_rate > 8:
            if weather == 76:
                weather = 83  # wind storm, torrential rain
            # Bugfix: same if/elif clobbering as the 4-8mm branch above.
            elif weather == 77:
                weather = 87  # strong winds, torrential rain
            else:
                weather = 79  # torrential rain
    return weather
# Column-index maps for the two united hatzala csv layouts.
# Index 0 is the original format (its header contains "MissionID"); index 1
# is the newer format -- see the header sniff in create_accidents.
CSVMAP = [
    {"id": 0, "time": 1, "lat": 2, "long": 3, "street": 4, "city": 6, "comment": 7, "type": 8, "casualties": 9},
    {"id": 0, "time": 1, "type": 2, "long": 3, "lat": 4, "city": 5, "street": 6, "comment": 7, "casualties": 8},
]
def create_accidents(collection, file_location):
    """Parse a united hatzala csv file into accident marker dicts.

    :param collection: IMS xml document element, used for weather lookups
    :param file_location: local location of .csv
    :return: Yields a marker object with every iteration
    """
    logging.info("\tReading accidents data from '%s'..." % file_location)
    # NOTE(review): mode 'rU' was removed in Python 3.11; universal newlines
    # are the py3 text-mode default, so switch to 'r' once open_utf8's
    # behavior is verified.
    with open_utf8(file_location, 'rU') as f:
        reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab)
        for line, accident in enumerate(reader):
            if line == 0:  # header row determines the column layout
                # guard against an empty first row (IndexError in the original)
                format_version = 0 if accident and "MissionID" in accident[0] else 1
                continue
            if not accident:  # empty line
                continue
            if line == 1 and accident[0] == "":
                # logging.warn is a deprecated alias of logging.warning
                logging.warning("\t\tEmpty File!")
                continue
            csvmap = CSVMAP[format_version]
            if accident[csvmap["lat"]] == "" or accident[csvmap["long"]] == "" or \
                    accident[csvmap["lat"]] is None or accident[csvmap["long"]] is None or \
                    accident[csvmap["lat"]] == "NULL" or accident[csvmap["long"]] == "NULL":
                logging.warning("\t\tMissing coordinates in line {0}. Moving on...".format(line + 1))
                continue
            created = parse_date(accident[csvmap["time"]])
            marker = {'id': accident[csvmap["id"]], 'latitude': accident[csvmap["lat"]],
                      'longitude': accident[csvmap["long"]], 'created': created, 'provider_code': PROVIDER_CODE,
                      'title': decode_hebrew(accident[csvmap["type"]], encoding="utf-8")[:100],
                      'address': decode_hebrew((accident[csvmap["street"]] + ' ' + accident[csvmap["city"]]), encoding="utf-8"),
                      'accident_severity': 2 if u"קשה" in decode_hebrew(accident[csvmap["type"]], encoding="utf-8") else 3,
                      'location_accuracy': 1, 'accident_type': 21, 'type': CONST.MARKER_TYPE_ACCIDENT,
                      'description': decode_hebrew(accident[csvmap["comment"]], encoding="utf-8"),
                      'weather': process_weather_data(collection, accident[csvmap["lat"]],
                                                      accident[csvmap["long"]])}
            if format_version == 0:
                casualties = accident[csvmap["casualties"]]
                # historical quirk: the casualty count is stored in the
                # road_intactness field -- TODO confirm against the model
                marker['road_intactness'] = casualties if casualties.isdigit() else 0
            yield marker
def import_to_db(collection, path):
    """Insert accidents from one csv file into the database.

    :param collection: IMS xml document element, used for weather enrichment
    :param path: Local files directory ('united_path' on main() below)
    :return: number of newly inserted rows (0 if the file was empty or every
             accident already existed for this provider)
    """
    app = init_flask()
    db = SQLAlchemy(app)
    accidents = list(create_accidents(collection, path))
    if not accidents:
        return 0
    # keep only ids not yet present for this provider
    # NOTE(review): this issues one COUNT query per accident; consider a
    # single IN query if the csv files grow large
    new_ids = [m["id"] for m in accidents
               if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m["id"],
                                                                    AccidentMarker.provider_code == m["provider_code"])).count()]
    if not new_ids:
        logging.info("\t\tNothing loaded, all accidents already in DB")
        return 0
    # bulk insert only the new markers
    db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if m["id"] in new_ids])
    db.session.commit()
    return len(new_ids)
def update_db(collection):
    """Backfill the weather field on existing united hatzala markers.

    :param collection: IMS xml document element, used for weather lookups
    """
    app = init_flask()
    db = SQLAlchemy(app)
    # NOTE(review): hard-coded 2 -- presumably equal to PROVIDER_CODE
    # (CONST.UNITED_HATZALA_CODE); confirm and replace with the constant
    united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2)
    for accident in united:
        # only fill in markers that have no weather yet
        if not accident.weather:
            accident.weather = process_weather_data(collection, accident.latitude, accident.longitude)
    db.session.commit()
    logging.info("\tFinished commiting the changes")
def main(light=True, username='', password='', lastmail=False):
    """
    Calls importmail.py prior to importing to DB
    """
    collection = retrieve_ims_xml()
    if not light:
        logging.info("Importing data from mail...")
        importmail.main(username, password, lastmail)
    united_path = "static/data/united/"
    logging.info("Loading United accidents...")
    # import every csv file in the directory, summing the new-row counts
    total = sum(
        import_to_db(collection, united_path + filename)
        for filename in os.listdir(united_path)
        if filename.endswith(".csv")
    )
    logging.info("\tImported {0} items".format(total))
    update_db(collection)
| 40.891365
| 136
| 0.596322
| 1,910
| 14,680
| 4.44555
| 0.243979
| 0.040985
| 0.020021
| 0.014133
| 0.208456
| 0.158521
| 0.10517
| 0.067837
| 0.057944
| 0.041456
| 0
| 0.053124
| 0.265259
| 14,680
| 358
| 137
| 41.005587
| 0.7341
| 0.141894
| 0
| 0.132
| 0
| 0
| 0.088903
| 0
| 0
| 0
| 0
| 0
| 0.008
| 1
| 0.056
| false
| 0.012
| 0.076
| 0.004
| 0.188
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dec35ee70a7a827dfe8596bcb69fa8833b6491d
| 15,992
|
py
|
Python
|
hysds/log_utils.py
|
fgreg/hysds
|
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
|
[
"Apache-2.0"
] | null | null | null |
hysds/log_utils.py
|
fgreg/hysds
|
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
|
[
"Apache-2.0"
] | null | null | null |
hysds/log_utils.py
|
fgreg/hysds
|
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
import os
import re
import json
import copy
import socket
import msgpack
import traceback
import types
import backoff
from datetime import datetime
from uuid import uuid4
from redis import BlockingConnectionPool, StrictRedis, RedisError
from celery.utils.log import get_task_logger
import hysds
from hysds.celery import app
from prov_es.model import get_uuid, ProvEsDocument
# module-level logger bound to the celery task logging machinery
logger = get_task_logger(__name__)

# redis connection pools; created lazily by the set_redis_*_pool helpers below
JOB_STATUS_POOL = None
JOB_INFO_POOL = None
WORKER_STATUS_POOL = None
EVENT_STATUS_POOL = None

# job status key template
JOB_STATUS_KEY_TMPL = "hysds-job-status-%s"

# worker status key template
WORKER_STATUS_KEY_TMPL = "hysds-worker-status-%s"

# task worker key template
TASK_WORKER_KEY_TMPL = "hysds-task-worker-%s"
def backoff_max_value():
    """Return max value for backoff.

    Passed uncalled to @backoff.on_exception so the config value is read at
    retry time rather than at import time.
    """
    return app.conf.BACKOFF_MAX_VALUE
def backoff_max_tries():
    """Return max tries for backoff.

    Passed uncalled to @backoff.on_exception so the config value is read at
    retry time rather than at import time.
    """
    return app.conf.BACKOFF_MAX_TRIES
def hard_time_limit_gap():
    """Return minimum gap time after soft time limit (from celery config)."""
    return app.conf.HARD_TIME_LIMIT_GAP
def ensure_hard_time_limit_gap(soft_time_limit, time_limit):
    """Ensure the hard time limit exceeds the soft limit by the configured
    gap; returns the (possibly adjusted) (soft_time_limit, time_limit) pair."""
    gap = hard_time_limit_gap()
    if soft_time_limit is None:
        # nothing to enforce without a soft limit
        return soft_time_limit, time_limit
    minimum_hard_limit = soft_time_limit + gap
    if time_limit is None or time_limit <= minimum_hard_limit:
        time_limit = minimum_hard_limit
    return soft_time_limit, time_limit
def set_redis_job_status_pool():
    """Lazily create the redis connection pool for job status."""
    global JOB_STATUS_POOL
    if JOB_STATUS_POOL is not None:
        return
    JOB_STATUS_POOL = BlockingConnectionPool.from_url(
        app.conf.REDIS_JOB_STATUS_URL)
def set_redis_job_info_pool():
    """Lazily create the redis connection pool for job info metrics."""
    global JOB_INFO_POOL
    if JOB_INFO_POOL is not None:
        return
    JOB_INFO_POOL = BlockingConnectionPool.from_url(
        app.conf.REDIS_JOB_INFO_URL)
def set_redis_worker_status_pool():
    """Lazily create the redis connection pool for worker status.

    NOTE(review): this pool points at REDIS_JOB_STATUS_URL, not a dedicated
    worker-status url -- presumably intentional (same redis instance); confirm.
    """
    global WORKER_STATUS_POOL
    if WORKER_STATUS_POOL is not None:
        return
    WORKER_STATUS_POOL = BlockingConnectionPool.from_url(
        app.conf.REDIS_JOB_STATUS_URL)
def set_redis_event_status_pool():
    """Lazily create the redis connection pool for event status.

    NOTE(review): uses REDIS_JOB_STATUS_URL (shared with job status) -- the
    events are pushed onto the job-status redis; confirm this is intended.
    """
    global EVENT_STATUS_POOL
    if EVENT_STATUS_POOL is not None:
        return
    EVENT_STATUS_POOL = BlockingConnectionPool.from_url(
        app.conf.REDIS_JOB_STATUS_URL)
@backoff.on_exception(backoff.expo,
                      RedisError,
                      max_tries=backoff_max_tries,
                      max_value=backoff_max_value)
def log_task_worker(task_id, worker):
    """Log task worker for task ID in redis."""
    set_redis_worker_status_pool()
    # record which worker picked up this task, with the standard expiry
    redis_client = StrictRedis(connection_pool=WORKER_STATUS_POOL)
    redis_client.setex(
        TASK_WORKER_KEY_TMPL % task_id,
        app.conf.HYSDS_JOB_STATUS_EXPIRES,
        worker,
    )
@backoff.on_exception(backoff.expo,
                      RedisError,
                      max_tries=backoff_max_tries,
                      max_value=backoff_max_value)
def get_task_worker(task_id):
    """Retrieve task worker by task ID from redis."""
    set_redis_worker_status_pool()
    redis_client = StrictRedis(connection_pool=WORKER_STATUS_POOL)
    # None if the key expired or was never set
    return redis_client.get(TASK_WORKER_KEY_TMPL % task_id)
@backoff.on_exception(backoff.expo,
                      RedisError,
                      max_tries=backoff_max_tries,
                      max_value=backoff_max_value)
def get_worker_status(worker):
    """Retrieve worker status by worker ID from redis."""
    set_redis_worker_status_pool()
    redis_client = StrictRedis(connection_pool=WORKER_STATUS_POOL)
    # None if the key expired or was never set
    return redis_client.get(WORKER_STATUS_KEY_TMPL % worker)
@backoff.on_exception(backoff.expo,
                      RedisError,
                      max_tries=backoff_max_tries,
                      max_value=backoff_max_value)
def get_job_status(task_id):
    """Retrieve job status by task ID from redis."""
    set_redis_job_status_pool()
    redis_client = StrictRedis(connection_pool=JOB_STATUS_POOL)
    # None if the key expired or was never set
    return redis_client.get(JOB_STATUS_KEY_TMPL % task_id)
@backoff.on_exception(backoff.expo,
                      RedisError,
                      max_tries=backoff_max_tries,
                      max_value=backoff_max_value)
def log_job_status(job):
    """Log job status to redis: a per-uuid dedup key plus an ES-bound queue
    entry.  Note: annotates the caller's `job` dict in place."""
    set_redis_job_status_pool()
    # stamp logging metadata onto the job dict
    job['resource'] = 'job'
    job['type'] = job.get('job', {}).get('type', 'unknown')
    job['@version'] = '1'
    job['@timestamp'] = "%sZ" % datetime.utcnow().isoformat()
    job_params = job.get('job', {})
    if 'tag' in job_params:
        tags = job.setdefault('tags', [])
        if isinstance(tags, str):
            # normalize a lone string tag into a list
            tags = [tags]
        tags.append(job_params['tag'])
        job['tags'] = tags
    # send update to redis
    redis_client = StrictRedis(connection_pool=JOB_STATUS_POOL)
    redis_client.setex(JOB_STATUS_KEY_TMPL % job['uuid'],
                       app.conf.HYSDS_JOB_STATUS_EXPIRES,
                       job['status'])  # for dedup
    redis_client.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(job))  # for ES
    logger.info("job_status_json:%s" % json.dumps(job))
@backoff.on_exception(backoff.expo,
                      RedisError,
                      max_tries=backoff_max_tries,
                      max_value=backoff_max_value)
def log_job_info(job):
    """Push a filtered subset of job info onto the redis metrics queue."""
    set_redis_job_info_pool()
    # whitelist of job fields worth keeping for metrics
    keep = ('job_info', 'job_id', 'task_id', 'delivery_info', 'tag',
            'priority', 'container_image_name', 'container_image_url',
            'name')
    filtered_info = {field: job[field] for field in keep if field in job}
    job_info = {'type': 'job_info',
                '@version': '1',
                '@timestamp': "%sZ" % datetime.utcnow().isoformat(),
                'job': filtered_info,
                'job_type': job['type']}
    # send update to redis
    redis_client = StrictRedis(connection_pool=JOB_INFO_POOL)
    redis_client.rpush(app.conf.REDIS_JOB_INFO_KEY, msgpack.dumps(job_info))
    logger.info("job_info_json:%s" % json.dumps(job_info))
@backoff.on_exception(backoff.expo,
                      RedisError,
                      max_tries=backoff_max_tries,
                      max_value=backoff_max_value)
def log_custom_event(event_type, event_status, event, tags=None, hostname=None):
    """Log custom event to redis.

    :param event_type: event type string
    :param event_status: event status string
    :param event: JSON-serializable event payload
    :param tags: optional list of tags (defaults to an empty list)
    :param hostname: reporting host; resolved via socket when not given
    :return: the generated event uuid string
    """
    set_redis_event_status_pool()
    # Bugfix: the original used a mutable default argument (tags=[]), which is
    # shared across calls -- appends elsewhere would leak between events.
    if tags is None:
        tags = []
    uuid = str(uuid4())
    if hostname is None:
        # best-effort hostname resolution; fall back to '' if both fail.
        # Bugfix: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        try:
            hostname = socket.getfqdn()
        except Exception:
            try:
                hostname = socket.gethostbyname(socket.gethostname())
            except Exception:
                hostname = ''
    info = {'resource': 'event',
            'type': event_type,
            'status': event_status,
            '@timestamp': "%sZ" % datetime.utcnow().isoformat(),
            'hostname': hostname,
            'uuid': uuid,
            'tags': tags,
            '@version': '1',
            'event': event}
    # send update to redis (events share the job-status queue)
    r = StrictRedis(connection_pool=EVENT_STATUS_POOL)
    r.rpush(app.conf.REDIS_JOB_STATUS_KEY, msgpack.dumps(info))
    logger.info("hysds.custom_event:%s" % json.dumps(info))
    return uuid
def log_prov_es(job, prov_es_info, prov_es_file):
    """Log PROV-ES document. Create temp PROV-ES document to populate
    attributes that only the worker has access to (e.g. PID).

    :param job: job dict; reads job_id, type and job_info (execute_node, pid,
                cmd_start, cmd_end, job_url) and optionally username
    :param prov_es_info: existing PROV-ES document (dict) to merge into;
                         mutated in place
    :param prov_es_file: path the merged JSON document is written to
    """
    # create PROV-ES doc to generate attributes that only verdi knows
    ps_id = "hysds:%s" % get_uuid(job['job_id'])
    bundle_id = "hysds:%s" % get_uuid('bundle-%s' % job['job_id'])
    doc = ProvEsDocument()
    # bundle support is disabled for now; attributes land at document level
    #bndl = doc.bundle(bundle_id)
    bndl = None
    # create software agent describing this wrapper process (node/pid/time)
    sa_label = "hysds:pge_wrapper/%s/%d/%s" % (job['job_info']['execute_node'],
                                               job['job_info']['pid'],
                                               datetime.utcnow().isoformat())
    sa_id = "hysds:%s" % get_uuid(sa_label)
    doc.softwareAgent(sa_id, str(job['job_info']['pid']),
                      job['job_info']['execute_node'],
                      role=job.get('username', None),
                      label=sa_label, bundle=bndl)
    # create processStep spanning the command execution window
    doc.processStep(ps_id, job['job_info']['cmd_start'],
                    job['job_info']['cmd_end'], [], sa_id,
                    None, [], [], bundle=bndl,
                    prov_type="hysds:%s" % job['type'])
    # serialize the temp doc so its generated attributes can be merged below
    pd = json.loads(doc.serialize())
    # merge the generated agent/activity data into prov_es_info, handling
    # both the bundled and unbundled document layouts
    if 'bundle' in prov_es_info:
        # only merge when there is exactly one bundle to merge into
        if len(prov_es_info['bundle']) == 1:
            bundle_id_orig = list(prov_es_info['bundle'].keys())[0]
            # update software agent
            prov_es_info['bundle'][bundle_id_orig].setdefault(
                'agent', {}).update(pd['bundle'][bundle_id]['agent'])
            # update wasAssociatedWith
            prov_es_info['bundle'][bundle_id_orig].setdefault(
                'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith'])
            # update activity
            if 'activity' in prov_es_info['bundle'][bundle_id_orig]:
                # exactly one pre-existing activity: graft the generated
                # timing/job attributes onto it, keeping its original id
                if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1:
                    ps_id_orig = list(
                        prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0]
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
                        'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
                        'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']
                    prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL
                    # keep the existing prov:type if one is already set
                    if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]:
                        prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][
                            'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type']
                    # repoint wasAssociatedWith references from the generated
                    # activity id to the original one
                    for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']:
                        if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:
                            prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig
                else:
                    # multiple activities: just merge the generated one in
                    prov_es_info['bundle'][bundle_id_orig]['activity'].update(
                        pd['bundle'][bundle_id]['activity'])
            else:
                # no activities yet: take the generated ones wholesale
                prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity']
    else:
        # unbundled layout: identical merge at the document's top level
        # update software agent
        prov_es_info.setdefault('agent', {}).update(pd['agent'])
        # update wasAssociatedWith
        prov_es_info.setdefault('wasAssociatedWith', {}).update(
            pd['wasAssociatedWith'])
        # update process step
        if 'activity' in prov_es_info:
            if len(prov_es_info['activity']) == 1:
                ps_id_orig = list(prov_es_info['activity'].keys())[0]
                prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime']
                prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime']
                prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id']
                prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type']
                prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url']
                prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL
                if 'prov:type' not in prov_es_info['activity'][ps_id_orig]:
                    prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type']
                # repoint wasAssociatedWith activity ids
                for waw_id in prov_es_info['wasAssociatedWith']:
                    if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id:
                        prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig
            else:
                prov_es_info['activity'].update(pd['activity'])
        else:
            prov_es_info['activity'] = pd['activity']
    # write merged prov document
    with open(prov_es_file, 'w') as f:
        json.dump(prov_es_info, f, indent=2)
def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls,
                        prod_metrics, objectid):
    """Log publish step in PROV-ES document.

    :param prov_es_info: existing PROV-ES document (dict); its 'prefix' and
                         'entity' sections are read
    :param prov_es_file: path the resulting JSON document is written to
    :param prod_path: local product path, recorded as the input entity
    :param pub_urls: list of published URLs; pub_urls[0] becomes the output
    :param prod_metrics: dict with 'time_start'/'time_end' of the publish step
    :param objectid: label for the output entity
    """
    # create PROV-ES doc
    doc = ProvEsDocument(namespaces=prov_es_info['prefix'])
    # bundle support is disabled for now
    #bndl = doc.bundle(bundle_id)
    bndl = None
    # add input entity: the local file that was published
    execute_node = socket.getfqdn()
    prod_url = "file://%s%s" % (execute_node, prod_path)
    input_id = "hysds:%s" % get_uuid(prod_url)
    input_ent = doc.granule(input_id, None, [prod_url], [], None, None, None,
                            label=os.path.basename(prod_url), bundle=bndl)
    # add output entity: the first published URL
    output_id = "hysds:%s" % get_uuid(pub_urls[0])
    output_ent = doc.product(output_id, None, [pub_urls[0]], [], None, None,
                             None, label=objectid, bundle=bndl)
    # software and algorithm identifying this hysds release as the publisher
    algorithm = "eos:product_publishing"
    software_version = hysds.__version__
    software_title = "%s v%s" % (hysds.__description__, software_version)
    software = "eos:HySDS-%s" % software_version
    software_location = hysds.__url__
    doc.software(software, [algorithm], software_version, label=software_title,
                 location=software_location, bundle=bndl)
    # create software agent describing this publishing process (node/pid/time)
    pid = os.getpid()
    sa_label = "hysds:publish_dataset/%s/%d/%s" % (execute_node, pid,
                                                   prod_metrics['time_start'])
    sa_id = "hysds:%s" % get_uuid(sa_label)
    doc.softwareAgent(sa_id, str(pid), execute_node, role="invoked",
                      label=sa_label, bundle=bndl)
    # create processStep linking input to output over the publish window
    job_id = "publish_dataset-%s" % os.path.basename(prod_path)
    doc.processStep("hysds:%s" % get_uuid(job_id), prod_metrics['time_start'],
                    prod_metrics['time_end'], [software], sa_id, None,
                    [input_id], [output_id], label=job_id, bundle=bndl,
                    prov_type="hysds:publish_dataset")
    # serialize to a plain dict for attribute patching below
    pd = json.loads(doc.serialize())
    # carry over any attributes the existing doc already had for the input
    orig_ent = prov_es_info.get('entity', {}).get(input_id, {})
    pd['entity'][input_id].update(orig_ent)
    # propagate the input's custom attributes to the output entity, excluding
    # location/label/type which legitimately differ between the two
    for attr in orig_ent:
        if attr in ('prov:location', 'prov:label', 'prov:type'):
            continue
        pd['entity'][output_id][attr] = orig_ent[attr]
    # write prov
    with open(prov_es_file, 'w') as f:
        json.dump(pd, f, indent=2)
| 36.763218
| 128
| 0.619497
| 2,035
| 15,992
| 4.55086
| 0.106143
| 0.036281
| 0.047511
| 0.034554
| 0.552748
| 0.482885
| 0.440341
| 0.386459
| 0.320808
| 0.273297
| 0
| 0.001266
| 0.25938
| 15,992
| 434
| 129
| 36.847926
| 0.780648
| 0.100113
| 0
| 0.286713
| 0
| 0
| 0.12948
| 0.00996
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059441
| false
| 0
| 0.08042
| 0
| 0.167832
| 0.003497
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5deeffa5857206493c1d342dae064f6fd87a3184
| 8,920
|
py
|
Python
|
openstack_dashboard/api/rest/swift.py
|
CplusShen/aurora-horizon
|
8df16b3b87097d5a19bae3752d4b341ac64bda75
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/api/rest/swift.py
|
CplusShen/aurora-horizon
|
8df16b3b87097d5a19bae3752d4b341ac64bda75
|
[
"Apache-2.0"
] | 12
|
2022-03-22T07:28:29.000Z
|
2022-03-22T07:29:55.000Z
|
openstack_dashboard/api/rest/swift.py
|
CplusShen/aurora-horizon
|
8df16b3b87097d5a19bae3752d4b341ac64bda75
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for the swift service.
"""
import os
from django import forms
from django.http import StreamingHttpResponse
from django.utils.http import urlunquote
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
import six
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api import swift
@urls.register
class Info(generic.View):
    """API for information about the Swift installation."""
    url_regex = r'swift/info/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get information about the Swift installation."""
        return {'info': api.swift.swift_get_capabilities(request)}
@urls.register
class Containers(generic.View):
    """API for swift container listing for an account."""
    url_regex = r'swift/containers/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get the list of containers for this account.

        TODO(neillc): Add pagination
        """
        containers, has_more = api.swift.swift_get_containers(request)
        items = [container.to_dict() for container in containers]
        return {'items': items, 'has_more': has_more}
@urls.register
class Container(generic.View):
    """API for swift container level information."""
    url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$'

    @rest_utils.ajax()
    def get(self, request, container):
        """Get the container details."""
        return api.swift.swift_get_container(request, container).to_dict()

    @rest_utils.ajax()
    def post(self, request, container):
        """Create the named container; 409 if it already exists."""
        if 'is_public' in request.DATA:
            metadata = {'is_public': request.DATA['is_public']}
        else:
            metadata = {}
        try:
            # This will raise an exception if the container already exists
            api.swift.swift_create_container(request, container,
                                             metadata=metadata)
        except exceptions.AlreadyExists as e:
            # 409 Conflict
            return rest_utils.JSONResponse(str(e), 409)
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s' % container,
        )

    @rest_utils.ajax()
    def delete(self, request, container):
        """Delete the named container; 409 if it is not empty."""
        try:
            api.swift.swift_delete_container(request, container)
        except exceptions.Conflict as e:
            # It cannot be deleted if it's not empty.
            return rest_utils.JSONResponse(str(e), 409)

    @rest_utils.ajax(data_required=True)
    def put(self, request, container):
        """Update the container's is_public metadata flag."""
        metadata = {'is_public': request.DATA['is_public']}
        api.swift.swift_update_container(request, container, metadata=metadata)
@urls.register
class Objects(generic.View):
    """API for a list of swift objects."""
    url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$'

    @rest_utils.ajax()
    def get(self, request, container):
        """Get object information.

        :param request:
        :param container:
        :return:
        """
        path = request.GET.get('path')
        if path is not None:
            path = urlunquote(path)
        objects = api.swift.swift_get_objects(
            request,
            container,
            prefix=path
        )
        # filter out the folder from the listing if we're filtering for
        # contents of a (pseudo) folder
        contents = []
        for obj in objects[0]:
            if obj.name == path:
                continue
            is_folder = isinstance(obj, swift.PseudoFolder)
            contents.append({
                'path': obj.subdir if is_folder else obj.name,
                'name': obj.name.split('/')[-1],
                'bytes': obj.bytes,
                'is_subdir': is_folder,
                'is_object': not is_folder,
                'content_type': getattr(obj, 'content_type', None),
            })
        return {'items': contents}
class UploadObjectForm(forms.Form):
    # raw file payload for object uploads; optional because pseudo-folder
    # creation posts no file content (see Object.post below)
    file = forms.FileField(required=False)
@urls.register
class Object(generic.View):
    """API for a single swift object or pseudo-folder."""
    url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \
                '(?P<object_name>.+)$'

    # note: not an AJAX request - the body will be raw file content
    @csrf_exempt
    def post(self, request, container, object_name):
        """Create or replace an object or pseudo-folder

        :param request:
        :param container:
        :param object_name:

        If the object_name (ie. POST path) ends in a '/' then a folder is
        created, rather than an object. Any file content passed along with
        the request will be ignored in that case.

        POST parameter:

        :param file: the file data for the upload.

        :return:
        """
        form = UploadObjectForm(request.POST, request.FILES)
        if not form.is_valid():
            raise rest_utils.AjaxError(500, 'Invalid request')
        data = form.clean()
        # a trailing '/' in the POST path selects pseudo-folder creation;
        # the uploaded file content (if any) is deliberately ignored then
        if object_name[-1] == '/':
            result = api.swift.swift_create_pseudo_folder(
                request,
                container,
                object_name
            )
        else:
            result = api.swift.swift_upload_object(
                request,
                container,
                object_name,
                data['file']
            )
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s/object/%s' % (container, result.name)
        )

    @rest_utils.ajax()
    def delete(self, request, container, object_name):
        # a trailing '/' means a pseudo-folder rather than a plain object
        if object_name[-1] == '/':
            try:
                api.swift.swift_delete_folder(request, container, object_name)
            except exceptions.Conflict as e:
                # In case the given object is pseudo folder
                # It cannot be deleted if it's not empty.
                return rest_utils.JSONResponse(str(e), 409)
        else:
            api.swift.swift_delete_object(request, container, object_name)

    def get(self, request, container, object_name):
        """Stream the object contents back as a file download."""
        obj = api.swift.swift_get_object(
            request,
            container,
            object_name
        )

        # Add the original file extension back on if it wasn't preserved in the
        # name given to the object.
        filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1]
        if not os.path.splitext(obj.name)[1] and obj.orig_name:
            name, ext = os.path.splitext(obj.orig_name)
            filename = "%s%s" % (filename, ext)
        response = StreamingHttpResponse(obj.data)
        # commas are stripped from the filename -- presumably to avoid
        # confusing Content-Disposition parsing in some browsers; confirm
        safe = filename.replace(",", "")
        if six.PY2:
            # py2 header values must be byte strings
            safe = safe.encode('utf-8')
        response['Content-Disposition'] = 'attachment; filename="%s"' % safe
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Length'] = obj.bytes
        return response
@urls.register
class ObjectMetadata(generic.View):
    """API for the metadata of a single swift object."""
    url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \
        '(?P<object_name>.+)$'

    @rest_utils.ajax()
    def get(self, request, container, object_name):
        """Return the object's metadata without downloading its content."""
        obj = api.swift.swift_get_object(
            request,
            container_name=container,
            object_name=object_name,
            with_data=False,
        )
        return obj.to_dict()
@urls.register
class ObjectCopy(generic.View):
    """API to copy a swift object into another container."""
    url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \
        '(?P<object_name>.+)$'

    @rest_utils.ajax()
    def post(self, request, container, object_name):
        """Copy the object; answers 409 when the destination already exists."""
        target_container = request.DATA['dest_container']
        target_name = request.DATA['dest_name']
        try:
            copied = api.swift.swift_copy_object(
                request,
                container,
                object_name,
                target_container,
                target_name,
            )
        except exceptions.AlreadyExists as e:
            return rest_utils.JSONResponse(str(e), 409)
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s/object/%s' % (target_container,
                                                     copied.name)
        )
| 31.971326
| 79
| 0.610762
| 1,053
| 8,920
| 5.068376
| 0.222222
| 0.065955
| 0.034102
| 0.053588
| 0.368934
| 0.267004
| 0.232528
| 0.170883
| 0.100056
| 0.067454
| 0
| 0.005322
| 0.283744
| 8,920
| 278
| 80
| 32.086331
| 0.83002
| 0.218946
| 0
| 0.365269
| 0
| 0
| 0.101899
| 0.05132
| 0
| 0
| 0
| 0.003597
| 0
| 1
| 0.071856
| false
| 0
| 0.071856
| 0.005988
| 0.317365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5def303cbd1f1433f2580e86e412f8af092aba1f
| 5,621
|
py
|
Python
|
datagen.py
|
kuangliu/pytorch-ssd
|
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
|
[
"MIT"
] | 124
|
2017-02-16T01:53:14.000Z
|
2022-02-22T12:48:13.000Z
|
datagen.py
|
droogg/pytorch-ssd
|
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
|
[
"MIT"
] | 10
|
2017-07-04T01:38:56.000Z
|
2021-08-03T09:34:34.000Z
|
datagen.py
|
droogg/pytorch-ssd
|
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
|
[
"MIT"
] | 43
|
2017-07-31T10:46:23.000Z
|
2021-02-16T14:12:42.000Z
|
'''Load image/class/box from a annotation file.
The annotation file is organized as:
image_name #obj xmin ymin xmax ymax class_index ..
'''
from __future__ import print_function
import os
import sys
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from encoder import DataEncoder
from PIL import Image, ImageOps
class ListDataset(data.Dataset):
    # Every image is resized to img_size x img_size before encoding.
    img_size = 300

    def __init__(self, root, list_file, train, transform):
        '''
        Args:
          root: (str) directory to images.
          list_file: (str) path to index file.
          train: (boolean) train or test.
          transform: ([transforms]) image transforms.
        '''
        self.root = root
        self.train = train
        self.transform = transform

        self.fnames = []
        self.boxes = []
        self.labels = []

        self.data_encoder = DataEncoder()

        with open(list_file) as f:
            lines = f.readlines()
            self.num_samples = len(lines)

        for line in lines:
            splited = line.strip().split()
            self.fnames.append(splited[0])

            # Each object occupies 5 fields after the count:
            # xmin ymin xmax ymax class_index
            num_objs = int(splited[1])
            box = []
            label = []
            for i in range(num_objs):
                xmin = splited[2+5*i]
                ymin = splited[3+5*i]
                xmax = splited[4+5*i]
                ymax = splited[5+5*i]
                c = splited[6+5*i]
                box.append([float(xmin),float(ymin),float(xmax),float(ymax)])
                label.append(int(c))
            self.boxes.append(torch.Tensor(box))
            self.labels.append(torch.LongTensor(label))

    def __getitem__(self, idx):
        '''Load a image, and encode its bbox locations and class labels.

        Args:
          idx: (int) image index.

        Returns:
          img: (tensor) image tensor.
          loc_target: (tensor) location targets, sized [8732,4].
          conf_target: (tensor) label targets, sized [8732,].
        '''
        # Load image and bbox locations.
        fname = self.fnames[idx]
        img = Image.open(os.path.join(self.root, fname))
        # Clone so augmentation does not mutate the cached boxes.
        boxes = self.boxes[idx].clone()
        labels = self.labels[idx]

        # Data augmentation while training.
        if self.train:
            img, boxes = self.random_flip(img, boxes)
            img, boxes, labels = self.random_crop(img, boxes, labels)

        # Scale bbox locations to [0,1].
        w,h = img.size
        boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes)

        img = img.resize((self.img_size,self.img_size))
        img = self.transform(img)

        # Encode loc & conf targets.
        loc_target, conf_target = self.data_encoder.encode(boxes, labels)
        return img, loc_target, conf_target

    def random_flip(self, img, boxes):
        '''Randomly flip the image and adjust the bbox locations.

        For bbox (xmin, ymin, xmax, ymax), the flipped bbox is:
        (w-xmax, ymin, w-xmin, ymax).

        Args:
          img: (PIL.Image) image.
          boxes: (tensor) bbox locations, sized [#obj, 4].

        Returns:
          img: (PIL.Image) randomly flipped image.
          boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4].
        '''
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            w = img.width
            # Mirror the x-coordinates; y-coordinates are unchanged.
            xmin = w - boxes[:,2]
            xmax = w - boxes[:,0]
            boxes[:,0] = xmin
            boxes[:,2] = xmax
        return img, boxes

    def random_crop(self, img, boxes, labels):
        '''Randomly crop the image and adjust the bbox locations.

        For more details, see 'Chapter2.2: Data augmentation' of the paper.

        Args:
          img: (PIL.Image) image.
          boxes: (tensor) bbox locations, sized [#obj, 4].
          labels: (tensor) bbox labels, sized [#obj,].

        Returns:
          img: (PIL.Image) cropped image.
          selected_boxes: (tensor) selected bbox locations.
          labels: (tensor) selected bbox labels.
        '''
        imw, imh = img.size
        while True:
            # A sampled min_iou of None means "keep the original image".
            min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])
            if min_iou is None:
                return img, boxes, labels

            # Try at most 100 crops for this min_iou before resampling it.
            for _ in range(100):
                w = random.randrange(int(0.1*imw), imw)
                h = random.randrange(int(0.1*imh), imh)

                # Reject overly elongated crops (aspect ratio beyond 2:1).
                if h > 2*w or w > 2*h:
                    continue

                x = random.randrange(imw - w)
                y = random.randrange(imh - h)
                roi = torch.Tensor([[x, y, x+w, y+h]])

                # Keep only boxes whose center falls inside the crop.
                center = (boxes[:,:2] + boxes[:,2:]) / 2  # [N,2]
                roi2 = roi.expand(len(center), 4)  # [N,4]
                mask = (center > roi2[:,:2]) & (center < roi2[:,2:])  # [N,2]
                mask = mask[:,0] & mask[:,1]  #[N,]
                if not mask.any():
                    continue

                selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1))

                # Require sufficient overlap between surviving boxes and crop.
                iou = self.data_encoder.iou(selected_boxes, roi)
                if iou.min() < min_iou:
                    continue

                img = img.crop((x, y, x+w, y+h))
                # Shift boxes into crop coordinates and clip to its bounds.
                selected_boxes[:,0].add_(-x).clamp_(min=0, max=w)
                selected_boxes[:,1].add_(-y).clamp_(min=0, max=h)
                selected_boxes[:,2].add_(-x).clamp_(min=0, max=w)
                selected_boxes[:,3].add_(-y).clamp_(min=0, max=h)

                return img, selected_boxes, labels[mask]

    def __len__(self):
        # Number of lines in the annotation file == number of samples.
        return self.num_samples
| 31.9375
| 81
| 0.534424
| 706
| 5,621
| 4.161473
| 0.233711
| 0.035398
| 0.019061
| 0.016338
| 0.117767
| 0.096664
| 0.09258
| 0.081007
| 0.056501
| 0.036079
| 0
| 0.021633
| 0.34211
| 5,621
| 175
| 82
| 32.12
| 0.77285
| 0.254047
| 0
| 0.031579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.115789
| 0.010526
| 0.242105
| 0.010526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5defd443987097ce80f96a0e6f43dc63945abf24
| 13,258
|
py
|
Python
|
lingvo/core/builder.py
|
allenwang28/lingvo
|
26d3d6672d3f46d8f281c2aa9f57166ef6296738
|
[
"Apache-2.0"
] | 2,611
|
2018-10-16T20:14:10.000Z
|
2022-03-31T14:48:41.000Z
|
lingvo/core/builder.py
|
allenwang28/lingvo
|
26d3d6672d3f46d8f281c2aa9f57166ef6296738
|
[
"Apache-2.0"
] | 249
|
2018-10-27T06:02:29.000Z
|
2022-03-30T18:00:39.000Z
|
lingvo/core/builder.py
|
allenwang28/lingvo
|
26d3d6672d3f46d8f281c2aa9f57166ef6296738
|
[
"Apache-2.0"
] | 436
|
2018-10-25T05:31:45.000Z
|
2022-03-31T07:26:03.000Z
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to build composite layers.
WARNING:
The builder pattern is still experimental and we need to gain experience
on when to use and when not to use.
Please discuss w/ teammates before using it to build complicated
layers.
"""
import functools
from lingvo.core import activations
from lingvo.core import builder_layers
from lingvo.core import hyperparams
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import tshape
class Base:
  """Model builder with commonly used layers.

  A method in a builder class constructs a layer param. FProp of a layer
  constructed by a builder takes a tuple of tf.Tensor (one or more) and returns
  a tuple of tf.Tensor (one or more). Even though certain layers support FProp
  argument being None (e.g., Conv2DLayer), builder should not depend on such a
  support.

  The constructed layer is often a composition of multiple sub-layers connected
  in certain patterns. We expect to have a few methods to facilitate building
  these patterns. For example, _Seq() helps to build a sequential layer that
  calls its sub-layer one after another.

  TODO(zhifengc): Adds a more concrete example.
  """

  @classmethod
  def Params(cls):
    """The params of this layer."""
    p = hyperparams.InstantiableParams(cls)
    p.Define('deterministic_dropout', False,
             'Used deterministic dropout or not.')
    p.Define(
        'fprop_dtype', None,
        'Activations datatype to use. To enable bfloat16 activations for '
        'layers built using model builder, set fprop_dtype to '
        'tf.bfloat16, which will be propagated to layers that support '
        'bfloat16 activations. Default is None, which will use float32 '
        'activations.')
    # SPMD partition related params.
    p.Define(
        'device_mesh', None,
        'A numpy.ndarray specifying the topology of a device mesh to place the '
        'computations onto. If device_mesh is None, it is assumed to be a '
        'single device. Here are some examples: '
        'np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d mesh with 8 devices, '
        'np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is 2d matrix of 8 '
        'devices.')
    p.Define(
        'weight_split_dims_mapping', None,
        'Relevant only if device_mesh above is not None. If not None, it '
        'specifies how weight of this layer or those of the sublayers should '
        'be sharded over device mesh. ')
    p.Define(
        'activation_split_dims_mapping', None,
        'Relevant only if device_mesh above is not None. If not None, it '
        'specifies how activation of this layer or those of the sublayers '
        'should be sharded over device mesh. ')
    return p

  @property
  def params(self):
    """Returns the params upon which this layer is built."""
    return self._params

  def __init__(self, params):
    # Sub-classes should put some options common to many layers in __init__.
    self._params = params.Copy()

  ######################################################################
  # Layers to compose multiple layers.
  #
  # Sub-classes are discouraged to override these composition method.
  ######################################################################

  def _Rep(self, name, repeat, *subs):
    r"""Connects sub-layers sequentially and repeat multiple times.

    E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer with 6 layers
    sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have
    the same structure as the given sa, but sa1 and sa2 do not share the same
    weight.

    Args:
      name: The layer name.
      repeat: Repeat \*subs this many times in the compose layer.
      *subs: A list of sub-layers.

    Returns:
      The param for the composed layer.
    """
    iterations = []
    for i in range(repeat):
      # Copy() each sub-param so that repetitions do not share weights.
      iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs]))
    return self._Seq(name, *iterations)

  def _Seq(self, name, *subs):
    """Connects sub-layers sequentially."""
    return builder_layers.SequentialLayer.Params().Set(
        name=name, sub=list(subs))

  def _Graph(self, name, input_endpoints, output_endpoints,
             *signature_sub_param_list):
    """Connects sub-layers into a data flow graph."""
    return builder_layers.GraphLayer.Params().Set(
        name=name,
        input_endpoints=input_endpoints,
        output_endpoints=output_endpoints,
        sub=list(signature_sub_param_list))

  def _Id(self, name):
    """Identity. (t_1, ..., t_n) -> (t1, ..., t_n)."""
    # An empty sequential layer passes its inputs through unchanged.
    return self._Seq(name)

  def _Arg(self, name, index):
    """Picks index-th element. (t_1, ..., t_n) -> (t_{index},)."""
    return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index])

  def _Par(self, name, *subs):
    """y = (f1, f2, ..., fn)(x).

    We feed the input tuple to all sub-layers and concatenates their output
    tuples into one tuple.

    Args:
      name: The layer name.
      *subs: A list of sub-layers.

    Returns:
      The param for the composed layer.
    """

    def ConcatTuples(tuples):
      # tuples is a list of tuples.
      return tuple(functools.reduce(lambda x, y: x + list(y), tuples, []))

    def ConcatMeta(tuples):
      # Meta variant of ConcatTuples: flattens the per-sub-layer output
      # shape tuples into one tuple.
      return py_utils.NestedMap(
          flops=0,
          out_shapes=tuple(
              functools.reduce(lambda x, y: x + list(y), tuples, [])))

    return builder_layers.ParallelLayer.Params().Set(
        name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta)

  def _Fn(self, name, fn, fn_out=None, fn_flops=None):
    """y = fn(x).

    Applies a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to the input
    tuple. Typically, fn is a very simple python function. This layer can be
    used for prototyping but we advice to implement the logic as a sub-class of
    BaseLayer for all established layers as FnLayer can't be serialized.

    Args:
      name: The layer name.
      fn: A lambda tuple(Tensor) -> tuple(Tensor).
      fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape)
      fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn.
        If None, we assume flops == sum of elements in the inputs.

    Returns:
      The param for the composed layer.
    """

    def FnMeta(*shapes):
      """A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}."""
      if fn_out:
        out_shapes = fn_out(*shapes)
        # Normalize a single Shape into a one-element tuple.
        if isinstance(out_shapes, tshape.Shape):
          out_shapes = (out_shapes,)
      else:
        # Default: assume fn preserves the input shapes.
        out_shapes = shapes
      if fn_flops:
        flops = fn_flops(*shapes)
      else:
        flops = sum([s.size for s in shapes])
      return py_utils.NestedMap(flops=flops, out_shapes=out_shapes)

    return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta)

  def _Save(self, name):
    """Returns a layer from which the activation and gradient can be accessed."""
    return layers.FetchLayer.Params().Set(name=name)

  def _AddFetches(self, name, body, fetches):
    """Fetches saved activations in the body sub-layer.

    E.g.:
    _AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...),
    _Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...),
    _Output('output', ...)), ['layer1_out', 'layer2_out'])

    The layer returns the stack's final output together with intermediate
    activations from layer1_out and layer2_out.

    Args:
      name: This layer's name.
      body: The sub-layer.
      fetches: A list of fetch names inside the sub-layer body.

    Returns:
      A layer whose outputs correspond to the activations of fetch points
      in the sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM].
    """
    return builder_layers.BranchLayer.Params().Set(
        name=name, body=body, fetches=fetches)

  def _Rematerialize(self, name, body):
    """Forces rematerialization on FProp of the body layer."""
    return builder_layers.RematerializationLayer.Params().Set(
        name=name, body=body)

  def _BatchParallel(self, name, sub):
    """Splits the batch and compute the forward pass on multiple devices.

    Args:
      name: This layer's name.
      sub: The sub-layer.

    Returns:
      A BatchParallel layer which splits the batch and computes the forward pass
      on multiple devices.
    """
    return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub)

  def _PrintShape(self, name):
    """Print FProp input shape information."""
    return builder_layers.PrintShapeLayer.Params().Set(name=name)

  def _CreateNestedMap(self, name, keys):
    """Returns a NestedMap with keys from fprop args."""
    return builder_layers.CreateNestedMapLayer.Params().Set(
        name=name, keys=keys)

  ###########################################################################
  # Basic nn layers.
  #
  # The following method returns a layer param, whose FProp takes a single
  # Tensor and returns a single Tensor.
  #
  # These methods are designed to have minimal knobs. Sub-classes which needs to
  # be flexible can override these methods with different options. E.g., a
  # sub-class builder can override _BN() to tune the decay option.
  ###########################################################################

  def _BN(self, name, dims):
    """Batch norm."""
    # decay is fixed here by design; override _BN() in a sub-class to tune it.
    return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99)

  def _LN(self, name, dims, use_fused_layernorm=False):
    """Layer norm."""
    return layers.LayerNorm.Params().Set(
        name=name,
        input_dim=dims,
        use_fused_layernorm=use_fused_layernorm,
        fprop_dtype=self.params.fprop_dtype)

  def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None):
    """Returns a DropoutLayer Params."""
    if self.params.deterministic_dropout:
      return layers.DeterministicDropoutLayer.Params().Set(
          name=name,
          keep_prob=keep_prob,
          noise_shape_broadcast_dims=noise_shape_broadcast_dims)
    return layers.DropoutLayer.Params().Set(
        name=name,
        keep_prob=keep_prob,
        noise_shape_broadcast_dims=noise_shape_broadcast_dims,
        fprop_dtype=self.params.fprop_dtype)

  def _Linear(self,
              name,
              idims,
              odims,
              device_mesh=None,
              weight_split_dims_mapping=None,
              qdomain=None):
    """Linear layer. y = matmul([..., idims], [idims, odims])."""
    p = builder_layers.LinearLayer.Params()
    p.name = name
    p.input_dims = idims
    p.output_dims = odims
    p.fprop_dtype = self.params.fprop_dtype
    p.device_mesh = device_mesh
    p.weight_split_dims_mapping = weight_split_dims_mapping
    p.qdomain.default = qdomain
    return p

  def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None):
    """Bias layer. The bias is added to the last dimension of the input."""
    return builder_layers.BiasLayer.Params().Set(
        name=name,
        dims=dims,
        fprop_dtype=self.params.fprop_dtype,
        device_mesh=device_mesh,
        weight_split_dims_mapping=weight_split_dims_mapping)

  def _Activation(self, name, fn='RELU'):
    """Activation layer."""
    return activations.ActivationLayer.Params().Set(activation=fn, name=name)

  def _FC(self, name, idims, odims, act='RELU'):
    """Feed-forward fully connected. y = act(matmul(x, w) + b)."""
    # pyformat: disable
    return self._Seq(
        name,
        self._Linear('linear', idims, odims),
        self._Bias('bias', odims),
        self._Activation('act', fn=act))

  def _MLP(self, name, dims, act='RELU'):
    """Multiple layers of feed-forward fully connected.

    Args:
      name: The layer name.
      dims: A list of int. i-th layer has dims[i] as its input dimension, and
        dims[i+1] as its output dimensions.
      act: The activation function.

    Returns:
      The param for the composed layer.
    """
    l = []
    for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])):
      l += [self._FC('l%03d' % n, i, o, act)]
    return self._Seq(name, *l)

  def _Conv2D(self, name, filter_shape, filter_stride):
    """Conv2D layer."""
    return layers.Conv2DLayerNoPadding.Params().Set(
        name=name, filter_shape=filter_shape, filter_stride=filter_stride,
        fprop_dtype=self.params.fprop_dtype)

  def _Reshape(self, name, shape):
    """Reshape inputs to the shape provided."""
    return builder_layers.ReshapeLayer.Params().Set(name=name,
                                                    shape=shape)
| 36.827778
| 81
| 0.648439
| 1,787
| 13,258
| 4.701175
| 0.24958
| 0.021902
| 0.027854
| 0.036424
| 0.206761
| 0.163314
| 0.137484
| 0.110939
| 0.080229
| 0.080229
| 0
| 0.007764
| 0.222809
| 13,258
| 359
| 82
| 36.930362
| 0.80755
| 0.411374
| 0
| 0.133333
| 0
| 0.012121
| 0.150354
| 0.010608
| 0
| 0
| 0
| 0.002786
| 0
| 1
| 0.175758
| false
| 0
| 0.042424
| 0.012121
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5defe80f544d4d152b4eab27921e74e04e7e4df0
| 4,589
|
py
|
Python
|
instmakelib/instmake_toolnames.py
|
gilramir/instmake
|
7b083a5061be43e9b92bdcf0f3badda7c4107eef
|
[
"BSD-3-Clause"
] | null | null | null |
instmakelib/instmake_toolnames.py
|
gilramir/instmake
|
7b083a5061be43e9b92bdcf0f3badda7c4107eef
|
[
"BSD-3-Clause"
] | null | null | null |
instmakelib/instmake_toolnames.py
|
gilramir/instmake
|
7b083a5061be43e9b92bdcf0f3badda7c4107eef
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2010 by Cisco Systems, Inc.
"""
Manage the tool plugins and use them appropriately.
"""
import os
TOOLNAME_PLUGIN_PREFIX = "toolname"
class ToolNameManager:
    """ToolName plugins have to register with this manager
    the circumstances under which they wish to be called."""

    def __init__(self, plugins):
        """Load all toolname plugins and let each one register its callbacks.

        :param plugins: plugin loader providing LoadAllPlugins(prefix).
        """
        toolname_plugins = plugins.LoadAllPlugins(TOOLNAME_PLUGIN_PREFIX)

        # Each registry holds (matcher, callback) pairs appended by the
        # plugins' register() calls below.
        self.first_arg_matches = []
        self.first_arg_basename_matches = []
        self.first_arg_regexes = []
        self.first_arg_basename_regexes = []
        self.command_line_regexes = []

        for plugin in toolname_plugins:
            plugin.register(self)

    def RegisterFirstArgumentMatch(self, text, cb):
        """Call back parameters: first_arg, argv, cwd"""
        self.first_arg_matches.append((text, cb))

    def RegisterFirstArgumentRegex(self, regex, cb):
        """Call back parameters: first_arg, argv, cwd, regex_match"""
        self.first_arg_regexes.append((regex, cb))

    def RegisterFirstArgumentBasenameMatch(self, text, cb):
        """Call back parameters: basename, first_arg, argv, cwd"""
        self.first_arg_basename_matches.append((text, cb))

    def RegisterFirstArgumentBasenameRegex(self, regex, cb):
        """Call back parameters: basename, first_arg, argv, cwd, regex_match"""
        self.first_arg_basename_regexes.append((regex, cb))

    def RegisterCommandLineRegex(self, regex, cb):
        """Call back parameters: argv, cwd, regex_match"""
        self.command_line_regexes.append((regex, cb))

    def GetTool(self, cmdline_args, cwd):
        """Returns a single string representing the tool in this
        command-line. cmdline_args is an array of strings that will
        be concatenated with spaces to form a single command-line."""
        # It's done this way because of the way the command-line is
        # stored in the instmake log. The top-most process (which is
        # the first 'make' run, i.e., the last record in the instmake log)
        # has a cmdline_args with one true argv-item per item. However,
        # the instmakes that were called from 'make' have their entire
        # command-line existing as a single string (the first and only
        # item in cmdline_args).
        argv_joined = ' '.join(cmdline_args)
        argv = argv_joined.split()

        # Call _GetTool as many times as necessary to find
        # a non-changing answer. 'seen' breaks cycles; max_iterations
        # bounds the search even when answers never repeat.
        seen = {}
        max_iterations = 100
        i = 0
        while True:
            seen[argv_joined] = None
            new_argv = self._GetTool(argv, cwd)
            new_argv_joined = ' '.join(new_argv)

            if new_argv_joined == argv_joined:
                return new_argv[0]
            # Fixed: dict.has_key() was removed in Python 3; the 'in'
            # operator works identically on Python 2 and 3.
            elif new_argv_joined in seen:
                return new_argv[0]
            else:
                i += 1
                if i == max_iterations:
                    return new_argv[0]
                argv = new_argv
                argv_joined = new_argv_joined

    def _GetTool(self, argv, cwd):
        """Run one rewrite pass over argv, returning the (possibly new) argv.

        Callbacks are consulted in registration order; the first one
        returning a non-None value wins.
        """
        cmdline = ' '.join(argv)

        # Check the command-line
        for (regex, cb) in self.command_line_regexes:
            m = regex.search(cmdline)
            if m:
                retval = cb(argv, cwd, m)
                if retval is not None:
                    return retval

        # Get the first argument
        if len(argv) >= 1:
            first_arg = argv[0]
        else:
            # Empty command-line: nothing to match against.
            return argv

        # Check the first argument
        for (text, cb) in self.first_arg_matches:
            if first_arg == text:
                retval = cb(first_arg, argv, cwd)
                if retval is not None:
                    return retval

        for (regex, cb) in self.first_arg_regexes:
            m = regex.search(first_arg)
            if m:
                retval = cb(first_arg, argv, cwd, m)
                if retval is not None:
                    return retval

        # Check the basename of the first arg
        basename = os.path.basename(first_arg)
        for (text, cb) in self.first_arg_basename_matches:
            if basename == text:
                retval = cb(basename, first_arg, argv, cwd)
                if retval is not None:
                    return retval

        for (regex, cb) in self.first_arg_basename_regexes:
            m = regex.search(basename)
            if m:
                retval = cb(basename, first_arg, argv, cwd, m)
                if retval is not None:
                    return retval

        # Nothing matched. Return the default value.
        return argv
| 35.573643
| 78
| 0.58771
| 558
| 4,589
| 4.670251
| 0.267025
| 0.076746
| 0.055257
| 0.040292
| 0.350729
| 0.244052
| 0.189179
| 0.148887
| 0.079048
| 0.079048
| 0
| 0.004867
| 0.328394
| 4,589
| 128
| 79
| 35.851563
| 0.840688
| 0.271519
| 0
| 0.25641
| 0
| 0
| 0.003368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.012821
| 0
| 0.25641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5df1af1171ca12ddbf5a2ce6aeb42a6d24730f8d
| 12,991
|
py
|
Python
|
raiden/tests/integration/long_running/test_stress.py
|
tirkarthi/raiden
|
dbd03ddda039332b54ec0c02d81cbe1100bc8028
|
[
"MIT"
] | 2,101
|
2016-06-01T11:31:49.000Z
|
2022-03-27T20:13:19.000Z
|
raiden/tests/integration/long_running/test_stress.py
|
tirkarthi/raiden
|
dbd03ddda039332b54ec0c02d81cbe1100bc8028
|
[
"MIT"
] | 5,291
|
2016-06-01T18:14:04.000Z
|
2022-03-31T11:19:09.000Z
|
raiden/tests/integration/long_running/test_stress.py
|
tirkarthi/raiden
|
dbd03ddda039332b54ec0c02d81cbe1100bc8028
|
[
"MIT"
] | 484
|
2016-06-01T18:21:06.000Z
|
2022-03-22T10:29:45.000Z
|
import time
from http import HTTPStatus
from itertools import count
from typing import Sequence
import gevent
import grequests
import pytest
import structlog
from eth_utils import to_canonical_address
from flask import url_for
from raiden.api.python import RaidenAPI
from raiden.api.rest import APIServer, RestAPI
from raiden.constants import RoutingMode
from raiden.message_handler import MessageHandler
from raiden.network.transport import MatrixTransport
from raiden.raiden_event_handler import RaidenEventHandler
from raiden.raiden_service import RaidenService
from raiden.settings import RestApiConfig
from raiden.tests.integration.api.utils import wait_for_listening_port
from raiden.tests.integration.fixtures.raiden_network import RestartNode
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.protocol import HoldRaidenEventHandler
from raiden.tests.utils.transfer import (
assert_synced_channel_state,
wait_assert,
watch_for_unlock_failures,
)
from raiden.transfer import views
from raiden.ui.startup import RaidenBundle
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import (
Address,
BlockNumber,
Host,
Iterator,
List,
Port,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
Tuple,
)
log = structlog.get_logger(__name__)
def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None:
    """Wait on all greenlets and surface failures promptly.

    ``gevent.iwait`` yields greenlets in completion order, so calling
    ``get()`` on each re-raises an exception as soon as it occurs.
    """
    for finished in gevent.iwait(items):
        finished.get()
def _url_for(apiserver: APIServer, endpoint: str, **kwargs) -> str:
    """Resolve ``endpoint`` to a URL on the given API server.

    ``url_for()`` expects binary addresses, so any hex-string ("0x...")
    values are converted to canonical form first.
    """
    converted = {
        key: to_canonical_address(val)
        if isinstance(val, str) and val.startswith("0x")
        else val
        for key, val in kwargs.items()
    }
    with apiserver.flask_app.app_context():
        return url_for(f"v1_resources.{endpoint}", **converted)
def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer:
    """Start a REST API server for ``raiden_app`` and block until it listens."""
    raiden_api = RaidenAPI(raiden_app)
    rest_api = RestAPI(raiden_api)
    api_server = APIServer(
        rest_api, config=RestApiConfig(host=Host("localhost"), port=rest_api_port_number)
    )

    # required for url_for
    api_server.flask_app.config["SERVER_NAME"] = f"localhost:{rest_api_port_number}"

    api_server.start()

    wait_for_listening_port(rest_api_port_number)

    return api_server
def start_apiserver_for_network(
    raiden_network: List[RaidenService], port_generator: Iterator[Port]
) -> List[APIServer]:
    """Start one REST API server per node, each on a freshly drawn port."""
    servers = []
    for node in raiden_network:
        servers.append(start_apiserver(node, next(port_generator)))
    return servers
def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService:
    """Build and start a replacement RaidenService reusing ``app``'s config.

    A fresh Matrix transport is created, and events are routed through a
    HoldRaidenEventHandler so tests can intercept them. The new service is
    started via ``restart_node``.
    """
    new_transport = MatrixTransport(
        config=app.config.transport, environment=app.config.environment_type
    )
    raiden_event_handler = RaidenEventHandler()
    hold_handler = HoldRaidenEventHandler(raiden_event_handler)
    # Re-create the service from the old instance's config and chain proxies;
    # query_start_block 0 re-scans from genesis.
    app = RaidenService(
        config=app.config,
        rpc_client=app.rpc_client,
        proxy_manager=app.proxy_manager,
        query_start_block=BlockNumber(0),
        raiden_bundle=RaidenBundle(
            app.default_registry,
            app.default_secret_registry,
        ),
        services_bundle=app.default_services_bundle,
        transport=new_transport,
        raiden_event_handler=hold_handler,
        message_handler=MessageHandler(),
        routing_mode=RoutingMode.PRIVATE,
    )

    restart_node(app)

    return app
def restart_network(
    raiden_network: List[RaidenService], restart_node: RestartNode
) -> List[RaidenService]:
    """Stop every node and start a replacement for each one.

    Returns the new RaidenService instances in the same order as the input.
    """
    for app in raiden_network:
        app.stop()

    # Bug fix: the original code built a *generator* of greenlets, consumed
    # it with set() inside joinall(), and then iterated the exhausted
    # generator again — producing an empty result list. Materialize the
    # greenlets once in a list instead.
    greenlets = [
        gevent.spawn(restart_app, app, restart_node) for app in raiden_network
    ]
    gevent.joinall(greenlets, raise_error=True)

    return [greenlet.get() for greenlet in greenlets]
def restart_network_and_apiservers(
    raiden_network: List[RaidenService],
    restart_node: RestartNode,
    api_servers: List[APIServer],
    port_generator: Iterator[Port],
) -> Tuple[List[RaidenService], List[APIServer]]:
    """Stop an app and start it back"""
    for server in api_servers:
        server.stop()

    restarted_network = restart_network(raiden_network, restart_node)
    restarted_servers = start_apiserver_for_network(
        restarted_network, port_generator
    )

    return (restarted_network, restarted_servers)
def address_from_apiserver(apiserver: APIServer) -> Address:
    """Return the node address served by ``apiserver``."""
    rest_api = apiserver.rest_api
    return rest_api.raiden_api.address
def transfer_and_assert(
    server_from: APIServer,
    server_to: APIServer,
    token_address: TokenAddress,
    identifier: int,
    amount: TokenAmount,
) -> None:
    """POST a payment from ``server_from`` to ``server_to`` and assert it
    succeeded (no exception, HTTP 200, JSON body)."""
    url = _url_for(
        server_from,
        "token_target_paymentresource",
        token_address=to_checksum_address(token_address),
        target_address=to_checksum_address(address_from_apiserver(server_to)),
    )
    json = {"amount": amount, "identifier": identifier}

    log.debug("PAYMENT REQUEST", url=url, json=json)

    request = grequests.post(url, json=json)

    # Time the round-trip purely for the debug log below.
    start = time.monotonic()
    response = request.send().response
    duration = time.monotonic() - start

    log.debug("PAYMENT RESPONSE", url=url, json=json, response=response, duration=duration)

    assert getattr(request, "exception", None) is None
    assert response is not None
    assert response.status_code == HTTPStatus.OK, f"Payment failed, reason: {response.content}"
    assert response.headers["Content-Type"] == "application/json"
def sequential_transfers(
    server_from: APIServer,
    server_to: APIServer,
    number_of_transfers: int,
    token_address: TokenAddress,
    identifier_generator: Iterator[int],
) -> None:
    """Send ``number_of_transfers`` payments of one token, one at a time."""
    for _ in range(number_of_transfers):
        payment_id = next(identifier_generator)
        transfer_and_assert(
            server_from=server_from,
            server_to=server_to,
            token_address=token_address,
            identifier=payment_id,
            amount=TokenAmount(1),
        )
def stress_send_serial_transfers(
    rest_apis: List[APIServer],
    token_address: TokenAddress,
    identifier_generator: Iterator[int],
    deposit: TokenAmount,
) -> None:
    """Send `deposit` transfers of value `1` one at a time, without changing
    the initial capacity.
    """
    # Arrange the nodes in a ring: each node pays its successor.
    pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))

    # deplete the channels in one direction
    for server_from, server_to in pairs:
        sequential_transfers(
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )

    # deplete the channels in the backwards direction (2 * deposit, since the
    # forward round doubled the capacity available in this direction)
    for server_to, server_from in pairs:
        sequential_transfers(
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit * 2,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )

    # reset the balances by sending the "extra" deposit forward
    for server_from, server_to in pairs:
        sequential_transfers(
            server_from=server_from,
            server_to=server_to,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )
def stress_send_parallel_transfers(
    rest_apis: List[APIServer],
    token_address: TokenAddress,
    identifier_generator: Iterator[int],
    deposit: TokenAmount,
) -> None:
    """Send `deposit` transfers in parallel, without changing the initial capacity."""
    # Arrange the nodes in a ring: each node pays its successor.
    pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))

    # deplete the channels in one direction
    iwait_and_get(
        [
            gevent.spawn(
                sequential_transfers,
                server_from=server_from,
                server_to=server_to,
                number_of_transfers=deposit,
                token_address=token_address,
                identifier_generator=identifier_generator,
            )
            for server_from, server_to in pairs
        ]
    )

    # deplete the channels in the backwards direction (2 * deposit, since the
    # forward round doubled the capacity available in this direction)
    iwait_and_get(
        [
            gevent.spawn(
                sequential_transfers,
                server_from=server_from,
                server_to=server_to,
                number_of_transfers=deposit * 2,
                token_address=token_address,
                identifier_generator=identifier_generator,
            )
            for server_to, server_from in pairs
        ]
    )

    # reset the balances by sending the "extra" deposit forward
    iwait_and_get(
        [
            gevent.spawn(
                sequential_transfers,
                server_from=server_from,
                server_to=server_to,
                number_of_transfers=deposit,
                token_address=token_address,
                identifier_generator=identifier_generator,
            )
            for server_from, server_to in pairs
        ]
    )
def stress_send_and_receive_parallel_transfers(
    rest_apis: List[APIServer],
    token_address: TokenAddress,
    identifier_generator: Iterator[int],
    deposit: TokenAmount,
) -> None:
    """Run transfers of value one in both ring directions at the same time,
    leaving the channel capacities unchanged."""
    pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))

    def spawn_transfers(sender: APIServer, receiver: APIServer) -> gevent.Greenlet:
        # One greenlet performs `deposit` sequential payments sender -> receiver.
        return gevent.spawn(
            sequential_transfers,
            server_from=sender,
            server_to=receiver,
            number_of_transfers=deposit,
            token_address=token_address,
            identifier_generator=identifier_generator,
        )

    forward = [spawn_transfers(sender, receiver) for sender, receiver in pairs]
    backward = [spawn_transfers(sender, receiver) for receiver, sender in pairs]
    iwait_and_get(forward + backward)
def assert_channels(
    raiden_network: List[RaidenService],
    token_network_address: TokenNetworkAddress,
    deposit: TokenAmount,
) -> None:
    """Wait until every adjacent node pair in the ring agrees on a synced
    channel state with ``deposit`` balance and no pending locks."""
    successors = raiden_network[1:] + [raiden_network[0]]
    for left, right in zip(raiden_network, successors):
        wait_assert(
            assert_synced_channel_state,
            token_network_address,
            left,
            deposit,
            [],
            right,
            deposit,
            [],
        )
@pytest.mark.skip(reason="flaky, see https://github.com/raiden-network/raiden/issues/4803")
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [3])
@pytest.mark.parametrize("number_of_tokens", [1])
@pytest.mark.parametrize("channels_per_node", [2])
@pytest.mark.parametrize("deposit", [2])
@pytest.mark.parametrize("reveal_timeout", [15])
@pytest.mark.parametrize("settle_timeout", [120])
def test_stress(
    raiden_network: List[RaidenService],
    restart_node: RestartNode,
    deposit: TokenAmount,
    token_addresses: List[TokenAddress],
    port_generator: Iterator[Port],
) -> None:
    """Stress-test the network: serial, parallel, and bidirectional transfer
    rounds, restarting every node (and its REST server) between rounds."""
    token_address = token_addresses[0]
    rest_apis = start_apiserver_for_network(raiden_network, port_generator)
    identifier_generator = count(start=1)

    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_raiden(raiden_network[0]),
        raiden_network[0].default_registry.address,
        token_address,
    )
    assert token_network_address

    # Two full iterations so the second round also exercises state that was
    # recovered from the restarts performed during the first round.
    for _ in range(2):
        assert_channels(raiden_network, token_network_address, deposit)
        with watch_for_unlock_failures(*raiden_network):
            stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit)
        raiden_network, rest_apis = restart_network_and_apiservers(
            raiden_network, restart_node, rest_apis, port_generator
        )

        assert_channels(raiden_network, token_network_address, deposit)
        with watch_for_unlock_failures(*raiden_network):
            stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit)
        raiden_network, rest_apis = restart_network_and_apiservers(
            raiden_network, restart_node, rest_apis, port_generator
        )

        assert_channels(raiden_network, token_network_address, deposit)
        with watch_for_unlock_failures(*raiden_network):
            stress_send_and_receive_parallel_transfers(
                rest_apis, token_address, identifier_generator, deposit
            )
        raiden_network, rest_apis = restart_network_and_apiservers(
            raiden_network, restart_node, rest_apis, port_generator
        )

    # Final restart to verify all nodes come back up after the stress rounds.
    restart_network(raiden_network, restart_node)
| 31.531553
| 99
| 0.693557
| 1,484
| 12,991
| 5.762803
| 0.166442
| 0.047123
| 0.043031
| 0.029467
| 0.454397
| 0.420603
| 0.387746
| 0.352198
| 0.344481
| 0.344481
| 0
| 0.003293
| 0.228697
| 12,991
| 411
| 100
| 31.608273
| 0.8502
| 0.057655
| 0
| 0.38125
| 0
| 0
| 0.030924
| 0.006808
| 0
| 0
| 0
| 0
| 0.046875
| 1
| 0.046875
| false
| 0
| 0.084375
| 0.00625
| 0.153125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5df3d1e6a9c7a37c58251913284702c80bde4fc2
| 15,348
|
py
|
Python
|
dask/dataframe/io/hdf.py
|
TryTestspace/dask
|
86d4f7d8c6d48ec6c4b1de1b6cfd2d3f4e5a4c1b
|
[
"BSD-3-Clause"
] | 1
|
2017-10-06T05:59:15.000Z
|
2017-10-06T05:59:15.000Z
|
dask/dataframe/io/hdf.py
|
TryTestspace/dask
|
86d4f7d8c6d48ec6c4b1de1b6cfd2d3f4e5a4c1b
|
[
"BSD-3-Clause"
] | null | null | null |
dask/dataframe/io/hdf.py
|
TryTestspace/dask
|
86d4f7d8c6d48ec6c4b1de1b6cfd2d3f4e5a4c1b
|
[
"BSD-3-Clause"
] | 1
|
2021-03-28T04:50:43.000Z
|
2021-03-28T04:50:43.000Z
|
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from glob import glob
import os
import uuid
from warnings import warn
import pandas as pd
from toolz import merge
from .io import _link
from ...base import get_scheduler
from ..core import DataFrame, new_dd_object
from ... import config, multiprocessing
from ...base import tokenize, compute_as_if_collection
from ...bytes.utils import build_name_function
from ...compatibility import PY3
from ...delayed import Delayed, delayed
from ...utils import get_scheduler_lock
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
def to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None,
           name_function=None, compute=True, lock=None, dask_kwargs=None,
           **kwargs):
    """ Store Dask Dataframe to Hierarchical Data Format (HDF) files

    This is a parallel version of the Pandas function of the same name. Please
    see the Pandas docstring for more detailed information about shared keyword
    arguments.

    This function differs from the Pandas version by saving the many partitions
    of a Dask DataFrame in parallel, either to many files, or to many datasets
    within the same file. You may specify this parallelism with an asterisk
    ``*`` within the filename or datapath, and an optional ``name_function``.
    The asterisk will be replaced with an increasing sequence of integers
    starting from ``0`` or with the result of calling ``name_function`` on each
    of those integers.

    This function only supports the Pandas ``'table'`` format, not the more
    specialized ``'fixed'`` format.

    Parameters
    ----------
    path: string
        Path to a target filename. May contain a ``*`` to denote many filenames
    key: string
        Datapath within the files. May contain a ``*`` to denote many locations
    name_function: function
        A function to convert the ``*`` in the above options to a string.
        Should take in a number from 0 to the number of partitions and return a
        string. (see examples below)
    compute: bool
        Whether or not to execute immediately. If False then this returns a
        ``dask.Delayed`` value.
    lock: Lock, optional
        Lock to use to prevent concurrency issues. By default a
        ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``
        will be used depending on your scheduler if a lock is required. See
        dask.utils.get_scheduler_lock for more information about lock
        selection.
    **other:
        See pandas.to_hdf for more information

    Examples
    --------
    Save Data to a single file

    >>> df.to_hdf('output.hdf', '/data')            # doctest: +SKIP

    Save data to multiple datapaths within the same file:

    >>> df.to_hdf('output.hdf', '/data-*')          # doctest: +SKIP

    Save data to multiple files:

    >>> df.to_hdf('output-*.hdf', '/data')          # doctest: +SKIP

    Save data to multiple files, using the multiprocessing scheduler:

    >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP

    Specify custom naming scheme.  This writes files as
    '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc..

    >>> from datetime import date, timedelta
    >>> base = date(year=2000, month=1, day=1)
    >>> def name_function(i):
    ...     ''' Convert integer 0 to n to a string '''
    ...     return base + timedelta(days=i)

    >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP

    Returns
    -------
    None: if compute == True
    delayed value: if compute == False

    See Also
    --------
    read_hdf:
    to_parquet:
    """
    # Bug fix: ``dask_kwargs`` previously defaulted to a shared mutable ``{}``.
    if dask_kwargs is None:
        dask_kwargs = {}

    name = 'to-hdf-' + uuid.uuid1().hex

    pd_to_hdf = getattr(df._partition_type, 'to_hdf')

    single_file = True
    single_node = True

    # if path is string, format using i_name
    if isinstance(path, str):
        if path.count('*') + key.count('*') > 1:
            raise ValueError("A maximum of one asterisk is accepted in file "
                             "path and dataset key")

        fmt_obj = lambda path, i_name: path.replace('*', i_name)

        if '*' in path:
            single_file = False
    else:
        if key.count('*') > 1:
            raise ValueError("A maximum of one asterisk is accepted in "
                             "dataset key")

        fmt_obj = lambda path, _: path

    if '*' in key:
        single_node = False

    if 'format' in kwargs and kwargs['format'] not in ['t', 'table']:
        raise ValueError("Dask only support 'table' format in hdf files.")

    if mode not in ('a', 'w', 'r+'):
        raise ValueError("Mode must be one of 'a', 'w' or 'r+'")

    if name_function is None:
        name_function = build_name_function(df.npartitions - 1)

    # we guarantee partition order is preserved when its saved and read
    # so we enforce name_function to maintain the order of its input.
    if not (single_file and single_node):
        formatted_names = [name_function(i) for i in range(df.npartitions)]
        if formatted_names != sorted(formatted_names):
            warn("To preserve order between partitions name_function "
                 "must preserve the order of its input")

    # If user did not specify scheduler and write is sequential default to the
    # sequential scheduler. otherwise let the _get method choose the scheduler
    if (get is None and
            not config.get('get', None) and
            scheduler is None and
            not config.get('scheduler', None) and
            single_node and single_file):
        scheduler = 'single-threaded'

    # handle lock default based on whether we're writing to a single entity
    _actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler)
    if lock is None:
        if not single_node:
            lock = True
        elif not single_file and _actual_get is not multiprocessing.get:
            # if we're writing to multiple files with the multiprocessing
            # scheduler we don't need to lock
            lock = True
        else:
            lock = False
    if lock:
        lock = get_scheduler_lock(get, df, scheduler=scheduler)

    kwargs.update({'format': 'table', 'mode': mode, 'append': append})

    dsk = dict()

    # The first partition is written with the caller's mode/append settings.
    i_name = name_function(0)
    dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
                      [(df._name, 0), fmt_obj(path, i_name),
                       key.replace('*', i_name)], kwargs)

    # Subsequent partitions always append; when sharing one file they also
    # reuse mode 'a' so earlier writes are not truncated.
    kwargs2 = kwargs.copy()
    if single_file:
        kwargs2['mode'] = 'a'
    if single_node:
        kwargs2['append'] = True

    filenames = []
    for i in range(df.npartitions):
        i_name = name_function(i)
        filenames.append(fmt_obj(path, i_name))

    for i in range(1, df.npartitions):
        i_name = name_function(i)
        task = (_pd_to_hdf, pd_to_hdf, lock,
                [(df._name, i), fmt_obj(path, i_name),
                 key.replace('*', i_name)], kwargs2)
        if single_file:
            # Chain tasks so writes to a shared file happen in order.
            link_dep = i - 1 if single_node else 0
            task = (_link, (name, link_dep), task)
        dsk[(name, i)] = task

    dsk = merge(df.dask, dsk)
    if single_file and single_node:
        keys = [(name, df.npartitions - 1)]
    else:
        keys = [(name, i) for i in range(df.npartitions)]

    if compute:
        compute_as_if_collection(DataFrame, dsk, keys, get=get,
                                 scheduler=scheduler, **dask_kwargs)
        return filenames
    else:
        return delayed([Delayed(k, dsk) for k in keys])
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be use monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
                     chunksize=int(1e6), sorted_index=False, lock=None,
                     mode='a'):
    """
    Read a single hdf file into a dask.dataframe. Used for each file in
    read_hdf.
    """
    def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize):
        """
        Get the "keys" or group identifiers which match the given key, which
        can contain wildcards. This uses the hdf file identified by the
        given path. Also get the index of the last row of data for each matched
        key.
        """
        with pd.HDFStore(path, mode=mode) as hdf:
            keys = [k for k in hdf.keys() if fnmatch(k, key)]
            stops = []
            divisions = []
            for k in keys:
                storer = hdf.get_storer(k)
                # Only the 'table' format exposes row counts and column reads.
                if storer.format_type != 'table':
                    raise TypeError(dont_use_fixed_error_message)
                if stop is None:
                    stops.append(storer.nrows)
                elif stop > storer.nrows:
                    raise ValueError("Stop keyword exceeds dataset number "
                                     "of rows ({})".format(storer.nrows))
                else:
                    stops.append(stop)
                if sorted_index:
                    # One division boundary per chunk, read directly from the
                    # stored index column, plus the final index value.
                    division = [storer.read_column('index', start=start, stop=start + 1)[0]
                                for start in range(0, storer.nrows, chunksize)]
                    division_end = storer.read_column('index',
                                                      start=storer.nrows - 1,
                                                      stop=storer.nrows)[0]
                    division.append(division_end)
                    divisions.append(division)
                else:
                    divisions.append(None)

        return keys, stops, divisions

    def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
        """
        Get the data frame corresponding to one path and one key (which should
        not contain any wildcards).
        """
        # Read zero rows to capture the metadata (columns/name, dtypes).
        empty = pd.read_hdf(path, key, mode=mode, stop=0)
        if columns is not None:
            empty = empty[columns]

        # Include the file mtime in the token so the graph key changes when
        # the file changes on disk.
        token = tokenize((path, os.path.getmtime(path), key, start,
                          stop, empty, chunksize, division))
        name = 'read-hdf-' + token
        if empty.ndim == 1:
            base = {'name': empty.name, 'mode': mode}
        else:
            base = {'columns': empty.columns, 'mode': mode}

        if start >= stop:
            raise ValueError("Start row number ({}) is above or equal to stop "
                             "row number ({})".format(start, stop))

        def update(s):
            # Per-task kwargs: read rows [s, s + chunksize).
            new = base.copy()
            new.update({'start': s, 'stop': s + chunksize})
            return new

        dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
                                update(s)))
                   for i, s in enumerate(range(start, stop, chunksize)))

        if division:
            divisions = division
        else:
            divisions = [None] * (len(dsk) + 1)

        return new_dd_object(dsk, name, empty, divisions)

    keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize)
    if (start != 0 or stop is not None) and len(keys) > 1:
        raise NotImplementedError(read_hdf_error_msg)
    from ..multi import concat
    return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
                   for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
    """ Read one chunk from an hdf5 file, serialized by ``lock`` if given """
    if lock:
        lock.acquire()
    try:
        # ``finally`` releases the lock whether or not the read succeeds.
        return pd.read_hdf(path, key, **kwargs)
    finally:
        if lock:
            lock.release()
def read_hdf(pattern, key, start=0, stop=None, columns=None,
             chunksize=1000000, sorted_index=False, lock=True, mode='a'):
    """
    Read HDF files into a Dask DataFrame

    Read hdf files into a dask dataframe. This function is like
    ``pandas.read_hdf``, except it can read from a single large file, or from
    multiple files, or from multiple keys from the same file.

    Parameters
    ----------
    pattern : string, list
        File pattern (string), buffer to read from, or list of file
        paths. Can contain wildcards.
    key : group identifier in the store. Can contain wildcards
    start : optional, integer (defaults to 0), row number to start at
    stop : optional, integer (defaults to None, the last row), row number to
        stop at
    columns : list of columns, optional
        A list of columns that if not None, will limit the return
        columns (default is None)
    chunksize : positive integer, optional
        Maximal number of rows per partition (default is 1000000).
    sorted_index : boolean, optional
        Option to specify whether or not the input hdf files have a sorted
        index (default is False).
    lock : boolean, optional
        Option to use a lock to prevent concurrency issues (default is True).
    mode : {'a', 'r', 'r+'}, default 'a'. Mode to use when opening file(s).
        'r'
            Read-only; no data can be modified.
        'a'
            Append; an existing file is opened for reading and writing,
            and if the file does not exist it is created.
        'r+'
            It is similar to 'a', but the file must already exist.

    Returns
    -------
    dask.DataFrame

    Examples
    --------
    Load single file

    >>> dd.read_hdf('myfile.1.hdf5', '/x')  # doctest: +SKIP

    Load multiple files

    >>> dd.read_hdf('myfile.*.hdf5', '/x')  # doctest: +SKIP

    >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x')  # doctest: +SKIP

    Load multiple datasets

    >>> dd.read_hdf('myfile.1.hdf5', '/*')  # doctest: +SKIP
    """
    if lock is True:
        # Pick a lock implementation appropriate for the active scheduler.
        lock = get_scheduler_lock()

    # Normalize the key to an absolute datapath within the store.
    key = key if key.startswith('/') else '/' + key
    if isinstance(pattern, str):
        # Sort so partition order is deterministic across runs.
        paths = sorted(glob(pattern))
    else:
        paths = pattern
    if (start != 0 or stop is not None) and len(paths) > 1:
        raise NotImplementedError(read_hdf_error_msg)
    if chunksize <= 0:
        raise ValueError("Chunksize must be a positive integer")
    if (start != 0 or stop is not None) and sorted_index:
        raise ValueError("When assuming pre-partitioned data, data must be "
                         "read in its entirety using the same chunksizes")
    from ..multi import concat
    return concat([_read_single_hdf(path, key, start=start, stop=stop,
                                    columns=columns, chunksize=chunksize,
                                    sorted_index=sorted_index,
                                    lock=lock, mode=mode)
                   for path in paths])
# On Python 3, mirror the module-level ``to_hdf`` docstring onto the
# DataFrame method so help() shows the full documentation.
if PY3:
    from ..core import _Frame
    _Frame.to_hdf.__doc__ = to_hdf.__doc__
| 36.028169
| 95
| 0.601968
| 2,017
| 15,348
| 4.479425
| 0.188399
| 0.011068
| 0.006973
| 0.005755
| 0.190592
| 0.16104
| 0.110127
| 0.077809
| 0.063641
| 0.04881
| 0
| 0.00875
| 0.300039
| 15,348
| 425
| 96
| 36.112941
| 0.832263
| 0.348645
| 0
| 0.148837
| 0
| 0
| 0.121164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037209
| false
| 0
| 0.093023
| 0
| 0.172093
| 0.004651
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5df7763c501c1594868f6878a3ef39da6fe70cae
| 842
|
py
|
Python
|
tests/test_parsers.py
|
FlorisHoogenboom/BoxRec
|
c9cc5d149318f916facdf57d7dbe94e797d81582
|
[
"MIT"
] | 5
|
2018-04-20T11:47:43.000Z
|
2021-05-04T18:54:16.000Z
|
tests/test_parsers.py
|
FlorisHoogenboom/BoxRec
|
c9cc5d149318f916facdf57d7dbe94e797d81582
|
[
"MIT"
] | 1
|
2018-03-21T08:44:25.000Z
|
2018-03-22T12:08:17.000Z
|
tests/test_parsers.py
|
FlorisHoogenboom/BoxRec
|
c9cc5d149318f916facdf57d7dbe94e797d81582
|
[
"MIT"
] | 6
|
2018-03-16T14:05:55.000Z
|
2018-03-16T14:08:41.000Z
|
import unittest
from boxrec.parsers import FightParser
class MockResponse(object):
    """Minimal stand-in for a ``requests`` response object, carrying only
    the attributes the parsers read."""

    def __init__(self, content, encoding, url):
        self.content = content
        self.encoding = encoding
        self.url = url
class TestFightParser(unittest.TestCase):
    """Exercises ``FightParser`` against a saved BoxRec fight page."""

    def setUp(self):
        # Load the fixture page for a fight that ended in a draw.
        with open('mock_data/fights/draw.html', 'rb') as fixture:
            self.drawn_fight = fixture.read()
        self.parser = FightParser()

    def test_parses_draw(self):
        """The parser reports 'drawn' as the winner of a draw."""
        response = MockResponse(
            self.drawn_fight,
            'UTF-8',
            "http://boxrec.com/en/event/115689/202488"
        )
        result = self.parser.parse(response)
        self.assertEqual(result.winner, 'drawn', "Result should equal draw.")
class TestBoxerParser(unittest.TestCase):
    # TODO: placeholder suite — no boxer-parser tests have been written yet.
    pass
| 25.515152
| 77
| 0.63658
| 95
| 842
| 5.526316
| 0.578947
| 0.041905
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020734
| 0.255344
| 842
| 32
| 78
| 26.3125
| 0.816587
| 0.036817
| 0
| 0
| 0
| 0
| 0.12795
| 0.032298
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.136364
| false
| 0.045455
| 0.090909
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5df786c7bbc659882d2ccb4bb744e69c8b4ccbd8
| 4,868
|
py
|
Python
|
hyperdock/common/workqueue.py
|
ErikGartner/hyperdock
|
19510b4bf1e123576d7be067555d959cb8a7cf45
|
[
"Apache-2.0"
] | 8
|
2018-05-07T19:12:35.000Z
|
2021-12-21T01:30:48.000Z
|
hyperdock/common/workqueue.py
|
ErikGartner/hyperdock
|
19510b4bf1e123576d7be067555d959cb8a7cf45
|
[
"Apache-2.0"
] | 92
|
2018-05-15T14:57:48.000Z
|
2019-12-27T10:48:25.000Z
|
hyperdock/common/workqueue.py
|
ErikGartner/hyperdock
|
19510b4bf1e123576d7be067555d959cb8a7cf45
|
[
"Apache-2.0"
] | 2
|
2019-06-01T22:42:17.000Z
|
2019-12-25T12:48:36.000Z
|
from datetime import datetime, timedelta
from bson.objectid import ObjectId
# Seconds without a heartbeat (``last_update``) before a started job is
# considered dead and eligible for purging (see purge_dead_jobs).
WORK_TIMEOUT = 600
class WorkQueue:
    """
    A simple MongoDB priority work queue that handles the queue
    of experiments.

    Job documents use the sentinel value ``-1`` for the ``start_time``,
    ``end_time`` and ``last_update`` fields to mean "not yet".
    """

    def __init__(self, mongodb):
        super().__init__()
        self._mongodb = mongodb
        # Jobs are stored in the ``workqueue`` collection.
        self._collection = mongodb.workqueue

    def assign_next_job(self, worker_id):
        """
        Assigns the next free job to worker.
        Returns the object from the mongodb.

        The next job is the highest-priority, oldest document that has not
        been started (``start_time == -1``) and is not cancelled; it is
        atomically stamped with the worker id and start/heartbeat times.
        """
        t = datetime.utcnow()
        # NOTE(review): find_and_modify is deprecated in modern PyMongo in
        # favor of find_one_and_update — confirm the driver version in use.
        job = self._collection.find_and_modify(
            query={"start_time": -1, "cancelled": False},
            sort=[("priority", -1), ("created_on", 1)],
            update={"$set": {"start_time": t, "last_update": t, "worker": worker_id}},
            new=True,
        )
        return job

    def add_job(self, parameters, data, trial_id, trial_name, priority=0):
        """
        Adds new work to the workqueue.

        Returns the string id of the inserted document.
        """
        id = self._collection.insert(
            {
                "start_time": -1,
                "end_time": -1,
                "last_update": -1,
                "created_on": datetime.utcnow(),
                "priority": priority,
                "parameters": parameters,
                "data": data,
                "worker": None,
                "result": {},
                "trial": trial_id,
                "trial_name": trial_name,
                # Store the ObjectId as a string for ease of lookup.
                "_id": str(ObjectId()),
                "cancelled": False,
                "orphaned": False,
            }
        )
        return id

    def update_job(self, _id, update=None):
        """
        Marks the job as alive and post an update from the job.

        Refreshes ``last_update`` (the heartbeat checked by
        purge_dead_jobs) and stores the payload under ``update``.
        """
        t = datetime.utcnow()
        self._collection.update(
            {"_id": _id}, {"$set": {"last_update": t, "update": update}}
        )

    def is_job_cancelled(self, _id):
        """
        Checks if a certain job has been cancelled or all together removed.
        """
        return self._collection.find_one({"_id": _id, "cancelled": False}) is None

    def finish_job(self, _id, result):
        """
        Marks the job as finished and attach the result.
        """
        t = datetime.utcnow()
        self._collection.update_one(
            {"_id": _id}, {"$set": {"end_time": t, "last_update": t, "result": result}}
        )

    def purge_dead_jobs(self):
        """
        Returns jobs that have timed out due to worker death and cancel them.

        A job is dead when it has started, has not finished, and its last
        heartbeat is older than WORK_TIMEOUT seconds.
        """
        now = datetime.utcnow()
        deadline = now - timedelta(seconds=WORK_TIMEOUT)
        jobs = []
        # Claim dead jobs one at a time so each update is atomic.
        while True:
            job = self._collection.find_and_modify(
                query={
                    "start_time": {"$ne": -1},
                    "end_time": -1,
                    "last_update": {"$lt": deadline},
                },
                sort=[("priority", -1), ("last_update", 1)],
                update={
                    "$set": {
                        "cancelled": True,
                        "orphaned": True,
                        "end_time": now,
                        "result": {"state": "fail", "msg": "Timed out!"},
                    }
                },
                new=True,
            )
            if job is not None:
                jobs.append(job)
            else:
                return jobs

    def check_for_orphans(self, id_list):
        """
        Checks if a list of Docker container ids are marked as orphans.
        Returns a list of (Docker id, experiment id) tuples.
        """
        jobs = self._collection.find(
            {"orphaned": True, "update.container.long_id": {"$in": id_list}}
        )
        return [(j["update"]["container"]["long_id"], j["_id"]) for j in list(jobs)]

    def not_orphaned(self, _id):
        """
        Marks a job as not orphaned.

        Returns True when a matching document existed and was updated.
        """
        job = self._collection.find_and_modify(
            query={"_id": _id}, update={"$set": {"orphaned": False}}, new=True
        )
        return job is not None

    def cancel_invalid_jobs(self, trial_list):
        """
        Takes a list of all active (not finished, cancelled or removed) trial ids.
        Work that is not associated with any of these are cancelled.

        Returns the list of cancelled job documents.
        """
        now = datetime.utcnow()
        jobs = []
        # Claim abandoned jobs one at a time so each update is atomic.
        while True:
            job = self._collection.find_and_modify(
                query={"trial": {"$nin": trial_list}, "end_time": -1},
                update={
                    "$set": {
                        "cancelled": True,
                        "end_time": now,
                        "result": {"state": "fail", "msg": "Abandoned"},
                    }
                },
                new=True,
            )
            if job is not None:
                jobs.append(job)
            else:
                return jobs
| 31.205128
| 87
| 0.474528
| 494
| 4,868
| 4.493927
| 0.267206
| 0.063063
| 0.048649
| 0.037838
| 0.236937
| 0.203604
| 0.154955
| 0.139189
| 0.11036
| 0.086486
| 0
| 0.005146
| 0.401191
| 4,868
| 155
| 88
| 31.406452
| 0.756432
| 0.144823
| 0
| 0.326923
| 0
| 0
| 0.126534
| 0.006135
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.019231
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5df79191a02e9cdc36eab83fa9b24e2f2d9fe213
| 7,695
|
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/apache_libcloud-0.15.1-py2.7.egg/libcloud/test/test_connection.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1
|
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
libcloud/test/test_connection.py
|
elastacloud/libcloud
|
f3792b2dca835c548bdbce0da2eb71bfc9463b72
|
[
"Apache-2.0"
] | 1
|
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
libcloud/test/test_connection.py
|
elastacloud/libcloud
|
f3792b2dca835c548bdbce0da2eb71bfc9463b72
|
[
"Apache-2.0"
] | 2
|
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import ssl
from mock import Mock, call
from libcloud.test import unittest
from libcloud.common.base import Connection
from libcloud.common.base import LoggingConnection
class ConnectionClassTestCase(unittest.TestCase):
def setUp(self):
self.originalConnect = Connection.connect
self.originalResponseCls = Connection.responseCls
Connection.connect = Mock()
Connection.responseCls = Mock()
Connection.allow_insecure = True
def tearDown(self):
Connection.connect = self.originalConnect
Connection.responseCls = Connection.responseCls
Connection.allow_insecure = True
def test_dont_allow_insecure(self):
Connection.allow_insecure = True
Connection(secure=False)
Connection.allow_insecure = False
expected_msg = (r'Non https connections are not allowed \(use '
'secure=True\)')
self.assertRaisesRegexp(ValueError, expected_msg, Connection,
secure=False)
def test_content_length(self):
con = Connection()
con.connection = Mock()
# GET method
# No data, no content length should be present
con.request('/test', method='GET', data=None)
call_kwargs = con.connection.request.call_args[1]
self.assertTrue('Content-Length' not in call_kwargs['headers'])
# '' as data, no content length should be present
con.request('/test', method='GET', data='')
call_kwargs = con.connection.request.call_args[1]
self.assertTrue('Content-Length' not in call_kwargs['headers'])
# 'a' as data, content length should be present (data in GET is not
# correct, but anyways)
con.request('/test', method='GET', data='a')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '1')
# POST, PUT method
# No data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None)
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '0')
# '' as data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data='')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '0')
# No data, raw request, do not touch Content-Length if present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None,
headers={'Content-Length': '42'}, raw=True)
putheader_call_list = con.connection.putheader.call_args_list
self.assertIn(call('Content-Length', '42'), putheader_call_list)
# '' as data, raw request, do not touch Content-Length if present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data=None,
headers={'Content-Length': '42'}, raw=True)
putheader_call_list = con.connection.putheader.call_args_list
self.assertIn(call('Content-Length', '42'), putheader_call_list)
# 'a' as data, content length should be present
for method in ['POST', 'PUT', 'post', 'put']:
con.request('/test', method=method, data='a')
call_kwargs = con.connection.request.call_args[1]
self.assertEqual(call_kwargs['headers']['Content-Length'], '1')
def test_cache_busting(self):
params1 = {'foo1': 'bar1', 'foo2': 'bar2'}
params2 = [('foo1', 'bar1'), ('foo2', 'bar2')]
con = Connection()
con.connection = Mock()
con.pre_connect_hook = Mock()
con.pre_connect_hook.return_value = {}, {}
con.cache_busting = False
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params1)
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertFalse('cache-busting' in args[0])
self.assertEqual(args[0], params2)
con.cache_busting = True
con.request(action='/path', params=params1)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0])
con.request(action='/path', params=params2)
args, kwargs = con.pre_connect_hook.call_args
self.assertTrue('cache-busting' in args[0][len(params2)])
def test_context_is_reset_after_request_has_finished(self):
context = {'foo': 'bar'}
def responseCls(connection, response):
connection.called = True
self.assertEqual(connection.context, context)
con = Connection()
con.called = False
con.connection = Mock()
con.responseCls = responseCls
con.set_context(context)
self.assertEqual(con.context, context)
con.request('/')
# Context should have been reset
self.assertTrue(con.called)
self.assertEqual(con.context, {})
# Context should also be reset if a method inside request throws
con = Connection()
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.connection.request = Mock(side_effect=ssl.SSLError())
try:
con.request('/')
except ssl.SSLError:
pass
self.assertEqual(con.context, {})
con.connection = Mock()
con.set_context(context)
self.assertEqual(con.context, context)
con.responseCls = Mock(side_effect=ValueError())
try:
con.request('/')
except ValueError:
pass
self.assertEqual(con.context, {})
def test_log_curl(self):
    """_log_curl should emit '-X <METHOD>' for normal verbs and '--head'
    for HEAD requests."""
    url = '/test/path'
    body = None
    headers = {}

    conn = LoggingConnection()
    conn.protocol = 'http'
    conn.host = 'example.com'
    conn.port = 80

    expected_tmpl = 'curl -i -X %s --compress http://example.com:80/test/path'
    for method in ('GET', 'POST', 'PUT', 'DELETE'):
        cmd = conn._log_curl(method=method, url=url, body=body,
                             headers=headers)
        self.assertEqual(cmd, expected_tmpl % method)

    # Should use --head for head requests
    cmd = conn._log_curl(method='HEAD', url=url, body=body, headers=headers)
    self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path')
# Allow running this test file directly; the unittest result becomes the
# process exit status.
if __name__ == '__main__':
    sys.exit(unittest.main())
| 36.995192
| 94
| 0.624172
| 915
| 7,695
| 5.154098
| 0.224044
| 0.052375
| 0.023749
| 0.033927
| 0.553011
| 0.487489
| 0.472858
| 0.459288
| 0.451654
| 0.451654
| 0
| 0.009097
| 0.25718
| 7,695
| 207
| 95
| 37.173913
| 0.81578
| 0.178298
| 0
| 0.477273
| 0
| 0
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0.189394
| 1
| 0.060606
| false
| 0.015152
| 0.045455
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5df7daeb42f8803f9c7b7af1f59daf2cde2ea6c7
| 3,605
|
py
|
Python
|
igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | 360
|
2020-04-02T11:12:09.000Z
|
2022-03-24T21:46:58.000Z
|
igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | 169
|
2020-04-07T21:01:05.000Z
|
2022-03-31T10:07:39.000Z
|
igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py
|
mamadbiabon/iGibson
|
d416a470240eb7ad86e04fee475ae4bd67263a7c
|
[
"MIT"
] | 94
|
2020-04-09T23:22:17.000Z
|
2022-03-17T21:49:03.000Z
|
import os
import sys
import bpy
script_dir = os.path.dirname(os.path.abspath(__file__))
utils_dir = os.path.join(script_dir, "../../blender_utils")
sys.path.append(utils_dir)
from utils import bake_model, clean_unused, export_ig_object, import_obj_folder
#############################################
# Parse command line arguments
#############################################
def get_arg(argv, flag, default=None):
    """Return the value following *flag* in *argv*, or *default* when the
    flag is absent.  (If the flag is the last token, IndexError propagates,
    matching the original behaviour.)"""
    try:
        position = argv.index(flag)
    except ValueError:
        return default
    return argv[position + 1]
# --bake is a boolean flag; the remaining options take a value argument.
should_bake = "--bake" in sys.argv
axis = ["X", "Y", "Z", "-X", "-Y", "-Z"]
import_axis_up = get_arg(sys.argv, "--up", default="Z")
if import_axis_up not in axis:
    raise ValueError("Axis up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_up))
import_axis_forward = get_arg(sys.argv, "--forward", default="X")
if import_axis_forward not in axis:
    raise ValueError("Axis forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_forward))
source_dir = get_arg(sys.argv, "--source_dir")
if source_dir is None:
    raise ValueError("Source directory not specified.")
dest_dir = get_arg(sys.argv, "--dest_dir")
if dest_dir is None:
    raise ValueError("Destination directory not specified.")
os.makedirs(dest_dir, exist_ok=True)
# The model id is the basename of the source directory.
model_id = os.path.basename(source_dir)
#############################################
# Importing obj files from source dir
#############################################
# Start from an empty scene: remove every existing object, then purge
# orphaned data blocks.
for on in bpy.context.scene.objects.keys():
    obj = bpy.context.scene.objects[on]
    bpy.data.objects.remove(obj)
clean_unused()
import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward)
#############################################
# Optional UV Unwrapping
# This only needed if baking will be performed
#############################################
if should_bake:
    # Only unwrap if at least one imported mesh lacks a UV layer.
    uv_unwrapped = True
    for o in bpy.context.scene.objects:
        if not o.data.uv_layers:
            uv_unwrapped = False
    if not uv_unwrapped:
        bpy.ops.object.mode_set(mode="OBJECT")
        vl = bpy.context.view_layer
        bpy.ops.object.select_all(action="DESELECT")
        # Give every object a fresh UV layer and select it so the single
        # smart_project call below unwraps everything at once.
        for on in bpy.context.scene.objects.keys():
            obj = bpy.context.scene.objects[on]
            new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name="obj_uv")
            vl.objects.active = obj
            obj.select_set(True)
        bpy.ops.object.editmode_toggle()
        bpy.ops.mesh.select_all(action="SELECT")
        bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02)
        bpy.context.tool_settings.mesh_select_mode = (False, False, True)
        bpy.ops.object.mode_set(mode="OBJECT")
#############################################
# Export models
#############################################
# When baking, materials are replaced by the baked textures below, so the
# original materials are only saved when not baking.
export_ig_object(dest_dir, save_material=not should_bake)
#############################################
# Optional Texture Baking
#############################################
if should_bake:
    mat_dir = os.path.join(dest_dir, "material")
    os.makedirs(mat_dir, exist_ok=True)
    # bpy.ops.wm.open_mainfile(filepath=blend_path)
    # import_ig_object(model_root, import_mat=True)
    # Join all objects into one mesh so a single texture set is baked.
    for obj in bpy.context.scene.objects:
        obj.select_set(True)
        bpy.context.view_layer.objects.active = obj
    bpy.ops.object.select_all(action="SELECT")
    bpy.ops.object.join()
    # channel name -> (texture resolution, bake margin) — presumably the
    # second value is the bake margin in pixels; confirm against bake_model.
    channels = {
        "DIFFUSE": (2048, 32),
        "ROUGHNESS": (1024, 16),
        "METALLIC": (1024, 16),
        "NORMAL": (1024, 16),
    }
    bake_model(mat_dir, channels, overwrite=True)
bpy.ops.wm.quit_blender()
| 33.073394
| 115
| 0.603606
| 480
| 3,605
| 4.335417
| 0.2875
| 0.048054
| 0.050457
| 0.074003
| 0.296492
| 0.20519
| 0.130706
| 0.102835
| 0.102835
| 0.102835
| 0
| 0.009881
| 0.157836
| 3,605
| 108
| 116
| 33.37963
| 0.67556
| 0.072677
| 0
| 0.147059
| 0
| 0.029412
| 0.116158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.161765
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5df83448e7dd852878051c1b5e24915762ddad3f
| 3,057
|
py
|
Python
|
ceilometerclient/common/base.py
|
mail2nsrajesh/python-ceilometerclient
|
3b4e35abada626ce052f20d55c71fe12ab77052a
|
[
"Apache-2.0"
] | null | null | null |
ceilometerclient/common/base.py
|
mail2nsrajesh/python-ceilometerclient
|
3b4e35abada626ce052f20d55c71fe12ab77052a
|
[
"Apache-2.0"
] | null | null | null |
ceilometerclient/common/base.py
|
mail2nsrajesh/python-ceilometerclient
|
3b4e35abada626ce052f20d55c71fe12ab77052a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import copy
from ceilometerclient.apiclient import base
from ceilometerclient.apiclient import exceptions
from ceilometerclient import exc
def getid(obj):
    """Extracts object ID.
    Abstracts the common pattern of allowing both an object or an
    object's ID (UUID) as a parameter when dealing with relationships.
    """
    # Fall back to the object itself when it has no 'id' attribute.
    return getattr(obj, 'id', obj)
class Manager(object):
    """Managers interact with a particular type of API.
    It works with samples, meters, alarms, etc. and provide CRUD operations for
    them.
    """
    # Subclasses set this to the Resource subclass they manage.
    resource_class = None

    def __init__(self, api):
        # `api` is the HTTP client used for all requests.
        self.api = api

    @property
    def client(self):
        """Compatible with latest oslo-incubator.apiclient code."""
        return self.api

    def _create(self, url, body):
        # POST `body` as JSON; wrap a non-empty JSON response in the
        # resource class (returns None for an empty response body).
        body = self.api.post(url, json=body).json()
        if body:
            return self.resource_class(self, body)

    def _list(self, url, response_key=None, obj_class=None, body=None,
              expect_single=False):
        # GET `url` and return a list of resource objects.
        # :param response_key: optional key the payload is nested under;
        #     a missing key yields an empty list.
        # :param obj_class: resource class override (defaults to
        #     ``self.resource_class``).
        # :param expect_single: wrap a single-item payload in a list.
        # :raises exc.HTTPNotFound: on a 404 or an empty response body.
        try:
            resp = self.api.get(url)
        except exceptions.NotFound:
            raise exc.HTTPNotFound
        if not resp.content:
            # An empty body is treated the same as a 404.
            raise exc.HTTPNotFound
        body = resp.json()
        if obj_class is None:
            obj_class = self.resource_class
        if response_key:
            try:
                data = body[response_key]
            except KeyError:
                return []
        else:
            data = body
        if expect_single:
            data = [data]
        # Skip falsy entries (e.g. nulls in the payload).
        return [obj_class(self, res, loaded=True) for res in data if res]

    def _update(self, url, body, response_key=None):
        body = self.api.put(url, json=body).json()
        # PUT requests may not return a body
        if body:
            return self.resource_class(self, body)

    def _delete(self, url):
        # Fire-and-forget DELETE; no response processing.
        self.api.delete(url)
class Resource(base.Resource):
    """A resource represents a particular instance of an object.
    Resource might be tenant, user, etc.
    This is pretty much just a bag for attributes.
    :param manager: Manager object
    :param info: dictionary representing resource attributes
    :param loaded: prevent lazy-loading if set to True
    """

    def to_dict(self):
        # Deep-copy so callers can mutate the result without affecting
        # the resource's internal state.
        return copy.deepcopy(self._info)
| 28.570093
| 79
| 0.648021
| 400
| 3,057
| 4.8925
| 0.4375
| 0.025038
| 0.02606
| 0.016352
| 0.040879
| 0.040879
| 0.040879
| 0.040879
| 0.040879
| 0
| 0
| 0.003613
| 0.275761
| 3,057
| 106
| 80
| 28.839623
| 0.880307
| 0.435394
| 0
| 0.18
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.08
| 0.02
| 0.46
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dfa61d9200420a717e96bb426552082800e9861
| 11,020
|
py
|
Python
|
lib/charms/layer/azure.py
|
freyes/charm-azure-integrator
|
9c96eed30388e5e7ae2ff590574890e27e845b5c
|
[
"Apache-2.0"
] | null | null | null |
lib/charms/layer/azure.py
|
freyes/charm-azure-integrator
|
9c96eed30388e5e7ae2ff590574890e27e845b5c
|
[
"Apache-2.0"
] | null | null | null |
lib/charms/layer/azure.py
|
freyes/charm-azure-integrator
|
9c96eed30388e5e7ae2ff590574890e27e845b5c
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import re
import subprocess
from base64 import b64decode
from enum import Enum
from math import ceil, floor
from pathlib import Path
from urllib.error import HTTPError
from urllib.request import urlopen
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core.unitdata import kv
from charms.layer import status
# Prefix used to namespace kv() keys and Azure entities created by this charm.
ENTITY_PREFIX = 'charm.azure'
# Juju sets JUJU_MODEL_UUID in the environment of every hook invocation.
MODEL_UUID = os.environ['JUJU_MODEL_UUID']
# Maximum lengths for role / policy names (presumably Azure-imposed limits
# — confirm; see _elide for how over-long names would be shortened).
MAX_ROLE_NAME_LEN = 64
MAX_POLICY_NAME_LEN = 128
class StandardRole(Enum):
    # Azure built-in role definition IDs (GUIDs), one per access level this
    # charm can grant via _assign_role.
    NETWORK_MANAGER = '4d97b98b-1d4f-4787-a291-c67834d212e7'
    SECURITY_MANAGER = 'e3d13bf0-dd5a-482e-ba6b-9b8433878d10'
    DNS_MANAGER = 'befefa01-2a29-4197-83a8-272ff33ce314'
    OBJECT_STORE_READER = '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1'
    OBJECT_STORE_MANAGER = 'ba92f5b4-2d11-453d-a403-e96b0029c9fe'
# When debugging hooks, for some reason HOME is set to /home/ubuntu, whereas
# during normal hook execution, it's /root. Set it here to be consistent.
os.environ['HOME'] = '/root'
def log(msg, *args):
    """Log *msg* at INFO level, str.format-ed with *args*."""
    hookenv.log(msg.format(*args), hookenv.INFO)


def log_err(msg, *args):
    """Log *msg* at ERROR level, str.format-ed with *args*."""
    hookenv.log(msg.format(*args), hookenv.ERROR)
def get_credentials():
    """
    Get the credentials from either the config or the hook tool.
    Prefers the config so that it can be overridden.

    Returns True when credentials were found and the CLI login succeeded;
    otherwise sets a blocked status and returns False.
    """
    no_creds_msg = 'missing credentials; set credentials config'
    config = hookenv.config()
    # try to use Juju's trust feature
    try:
        result = subprocess.run(['credential-get'],
                                check=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        # safe_load: the credential document is plain YAML data, and
        # yaml.load() without an explicit Loader is deprecated (PyYAML >= 5.1)
        # and unsafe on untrusted input.
        creds = yaml.safe_load(result.stdout.decode('utf8'))
        creds_data = creds['credential']['attributes']
        login_cli(creds_data)
        return True
    except FileNotFoundError:
        pass  # juju trust not available
    except subprocess.CalledProcessError as e:
        if 'permission denied' not in e.stderr.decode('utf8'):
            raise
        no_creds_msg = 'missing credentials access; grant with: juju trust'
    # try credentials config
    if config['credentials']:
        try:
            # NOTE(review): this path hands login_cli a decoded *string*,
            # while the trust path hands it a dict — confirm login_cli
            # tolerates both (kept as-is to preserve behavior).
            creds_data = b64decode(config['credentials']).decode('utf8')
            login_cli(creds_data)
            return True
        except Exception:
            status.blocked('invalid value for credentials config')
            return False
    # no creds provided
    status.blocked(no_creds_msg)
    return False
def login_cli(creds_data):
    """
    Use the credentials to authenticate the Azure CLI.

    :param creds_data: mapping with 'application-id',
        'application-password', and 'subscription-id' keys.
    :raises AzureError: if the login fails (with credentials redacted
        from the error message).
    """
    app_id = creds_data['application-id']
    app_pass = creds_data['application-password']
    sub_id = creds_data['subscription-id']
    tenant_id = _get_tenant_id(sub_id)
    try:
        log('Forcing logout of Azure CLI')
        _azure('logout')
    except AzureError:
        pass  # not being logged in is fine
    try:
        log('Logging in to Azure CLI')
        _azure('login',
               '--service-principal',
               '-u', app_id,
               '-p', app_pass,
               '-t', tenant_id)
        # cache the subscription ID for use in roles
        kv().set('charm.azure.sub-id', sub_id)
    except AzureError as e:
        # Redact the credential info from the exception message.  Use plain
        # string replacement rather than re.sub(): the credentials are
        # literal values, and re.sub() would misinterpret regex
        # metacharacters in them — a password containing '(' would even
        # raise re.error and mask the real failure.
        stderr = e.args[0].replace(app_id, '<app-id>')
        stderr = stderr.replace(app_pass, '<app-pass>')
        stderr = stderr.replace(tenant_id, '<tenant-id>')
        # from None suppresses the previous exception from the stack trace
        raise AzureError(stderr) from None
def ensure_msi(request):
    """Ensure the requesting VM has a Managed Service Identity, enabling
    one via the CLI and caching it in kv() if not already known."""
    msi = _get_msi(request.vm_id)
    if not msi:
        log('Enabling Managed Service Identity')
        result = _azure('vm', 'identity', 'assign',
                        '--name', request.vm_name,
                        '--resource-group', request.resource_group)
        vm_identities = kv().get('charm.azure.vm-identities', {})
        msi = vm_identities[request.vm_id] = result['systemAssignedIdentity']
        # Persist the cache so later hooks can find the identity.
        kv().set('charm.azure.vm-identities', vm_identities)
    log('Instance MSI is: {}', msi)
def send_additional_metadata(request):
    """
    Get additional info about the requesting instance via the API that isn't
    available from the metadata server.
    """
    # Only the resource group location actually needs a query.
    res_grp = _azure('group', 'show', '--name', request.resource_group)
    # hard-code most of these because with Juju, they're always the same
    # and the queries required to look them up are a PITA
    request.send_additional_metadata(
        resource_group_location=res_grp['location'],
        vnet_name='juju-internal-network',
        vnet_resource_group=request.resource_group,
        subnet_name='juju-internal-subnet',
        security_group_name='juju-internal-nsg',
    )
def tag_instance(request):
    """
    Tag the given instance with the given tags.
    """
    log('Tagging instance with: {}', request.instance_tags)
    # Each tag becomes one 'tags.<key>=<value>' argument after --set.
    _azure('vm', 'update',
           '--name', request.vm_name,
           '--resource-group', request.resource_group,
           '--set', *['tags.{}={}'.format(tag, value)
                      for tag, value in request.instance_tags.items()])
def enable_instance_inspection(request):
    """
    Enable instance inspection access for the given application.
    """
    log('Enabling instance inspection')
    # 'vm-reader' is a custom role loaded from files/roles/ (see _get_role).
    _assign_role(request, _get_role('vm-reader'))


def enable_network_management(request):
    """
    Enable network management for the given application.
    """
    log('Enabling network management')
    _assign_role(request, StandardRole.NETWORK_MANAGER)


def enable_security_management(request):
    """
    Enable security management for the given application.
    """
    log('Enabling security management')
    _assign_role(request, StandardRole.SECURITY_MANAGER)


def enable_block_storage_management(request):
    """
    Enable block storage (disk) management for the given application.
    """
    log('Enabling block storage management')
    # 'disk-manager' is a custom role loaded from files/roles/ (see _get_role).
    _assign_role(request, _get_role('disk-manager'))


def enable_dns_management(request):
    """
    Enable DNS management for the given application.
    """
    log('Enabling DNS management')
    _assign_role(request, StandardRole.DNS_MANAGER)


def enable_object_storage_access(request):
    """
    Enable object storage read-only access for the given application.
    """
    log('Enabling object storage read')
    _assign_role(request, StandardRole.OBJECT_STORE_READER)


def enable_object_storage_management(request):
    """
    Enable object storage management for the given application.
    """
    log('Enabling object store management')
    _assign_role(request, StandardRole.OBJECT_STORE_MANAGER)


def cleanup():
    """
    Perform cleanup.
    """
    # No provider-side cleanup needed; kept for interface parity.
    pass
# Internal helpers
class AzureError(Exception):
    """
    Exception class representing an error returned from the azure-cli tool.
    """

    @classmethod
    def get(cls, message):
        """
        Factory method to create either an instance of this class or a
        meta-subclass for certain `message`s.
        """
        # Choose the more specific subclass when the CLI reports a
        # pre-existing entity.
        already_exists = 'already exists' in message
        return (AlreadyExistsAzureError(message) if already_exists
                else AzureError(message))


class AlreadyExistsAzureError(AzureError):
    """
    Meta-error subclass of AzureError representing something already existing.
    """
    pass
def _elide(s, max_len, ellipsis='...'):
"""
Elide s in the middle to ensure it is under max_len.
That is, shorten the string, inserting an ellipsis where the removed
characters were to show that they've been removed.
"""
if len(s) > max_len:
hl = (max_len - len(ellipsis)) / 2
headl, taill = floor(hl), ceil(hl)
s = s[:headl] + ellipsis + s[-taill:]
return s
def _get_tenant_id(subscription_id):
    """
    Translate the subscription ID into a tenant ID by making an unauthorized
    request to the API and extracting the tenant ID from the WWW-Authenticate
    header in the error response.

    Returns None (after logging an error) when the tenant ID cannot be
    determined.
    """
    url = ('https://management.azure.com/subscriptions/'
           '{}?api-version=2018-03-01-01.6.1'.format(subscription_id))
    try:
        urlopen(url)
        # Success is unexpected: the whole technique relies on the 401.
        log_err('Error getting tenant ID: did not get "unauthorized" response')
        return None
    except HTTPError as e:
        if 'WWW-Authenticate' not in e.headers:
            log_err('Error getting tenant ID: missing WWW-Authenticate header')
            return None
        www_auth = e.headers['WWW-Authenticate']
        # The tenant ID is the last path component of authorization_uri.
        match = re.search(r'authorization_uri="[^"]*/([^/"]*)"', www_auth)
        if not match:
            log_err('Error getting tenant ID: unable to find in {}', www_auth)
            return None
        return match.group(1)
def _azure(cmd, *args, return_stderr=False):
    """
    Call the azure-cli tool.

    :param cmd: az sub-command name; *args are passed through verbatim.
    :param return_stderr: when True, return the stderr text instead of stdout.
    :returns: parsed JSON from stdout (or '' when there was no output).
    :raises AzureError: when az exits non-zero (AzureError.get picks a more
        specific subclass where possible).
    """
    cmd = ['az', cmd]
    cmd.extend(args)
    result = subprocess.run(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout = result.stdout.decode('utf8').strip()
    stderr = result.stderr.decode('utf8').strip()
    if result.returncode != 0:
        raise AzureError.get(stderr)
    if return_stderr:
        return stderr
    if stdout:
        # az emits JSON on stdout for the commands used by this charm.
        stdout = json.loads(stdout)
    return stdout
def _get_msi(vm_id):
    """
    Get the Managed System Identity for the VM.

    Returns None when no identity has been cached for *vm_id*.
    """
    return kv().get('charm.azure.vm-identities', {}).get(vm_id)
def _get_role(role_name):
    """
    Translate short role name into a full role name and ensure that the
    custom role is loaded.
    The custom roles have to be applied to a specific subscription ID, but
    the subscription ID applies to the entire credential, so will almost
    certainly be reused, so there's not much danger in hitting the 2k
    custom role limit.
    """
    known_roles = kv().get('charm.azure.roles', {})
    if role_name in known_roles:
        return known_roles[role_name]
    sub_id = kv().get('charm.azure.sub-id')
    # Role definitions are JSON templates with '{}' placeholders for the
    # subscription ID in both the name and the assignable scope.
    role_file = Path('files/roles/{}.json'.format(role_name))
    role_data = json.loads(role_file.read_text())
    role_fullname = role_data['Name'].format(sub_id)
    scope = role_data['AssignableScopes'][0].format(sub_id)
    role_data['Name'] = role_fullname
    role_data['AssignableScopes'][0] = scope
    try:
        log('Ensuring role {}', role_fullname)
        _azure('role', 'definition', 'create',
               '--role-definition', json.dumps(role_data))
    except AzureError as e:
        # The role already existing counts as success.
        if 'already exists' not in e.args[0]:
            raise
    known_roles[role_name] = role_fullname
    # NOTE(review): the updated mapping is never written back with
    # kv().set('charm.azure.roles', ...), so this cache only lasts for the
    # current process — confirm whether persisting was intended.
    return role_fullname
def _assign_role(request, role):
    """Assign *role* (a StandardRole member or a role-name string) to the
    requesting VM's Managed Service Identity, scoped to its resource group."""
    if isinstance(role, StandardRole):
        # StandardRole members carry the role definition GUID as value.
        role = role.value
    msi = _get_msi(request.vm_id)
    try:
        _azure('role', 'assignment', 'create',
               '--assignee-object-id', msi,
               '--resource-group', request.resource_group,
               '--role', role)
    except AlreadyExistsAzureError:
        # The assignment already being in place counts as success.
        pass
| 30.955056
| 79
| 0.649909
| 1,368
| 11,020
| 5.09576
| 0.262427
| 0.012624
| 0.019509
| 0.022092
| 0.171855
| 0.12925
| 0.08937
| 0.03615
| 0.014345
| 0
| 0
| 0.016577
| 0.244555
| 11,020
| 355
| 80
| 31.042254
| 0.820781
| 0.205898
| 0
| 0.17619
| 0
| 0
| 0.205394
| 0.043442
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104762
| false
| 0.038095
| 0.066667
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dfb825aca8a665a7da3ab055c3e267e40f81b41
| 3,040
|
py
|
Python
|
research/utils/_check_pipelines.py
|
joaopfonseca/research
|
02659512218d077d9ef28d481178e62172ef18cd
|
[
"MIT"
] | 1
|
2021-01-25T00:09:32.000Z
|
2021-01-25T00:09:32.000Z
|
mlresearch/utils/_check_pipelines.py
|
joaopfonseca/research
|
ac4ad6fa05b5985050c63dc9e4e18cd00965e09b
|
[
"MIT"
] | null | null | null |
mlresearch/utils/_check_pipelines.py
|
joaopfonseca/research
|
ac4ad6fa05b5985050c63dc9e4e18cd00965e09b
|
[
"MIT"
] | null | null | null |
from itertools import product
from sklearn.base import clone
from sklearn.preprocessing import FunctionTransformer
from sklearn.model_selection import ParameterGrid
from imblearn.pipeline import Pipeline
from rlearn.utils import check_random_states
def check_pipelines(objects_list, random_state, n_runs):
    """Extract estimators and parameters grids."""
    # Create random states
    random_states = check_random_states(random_state, n_runs)
    pipelines = []
    param_grid = []
    # One iteration per (choice of object for each step) x (run's random state).
    for comb, rs in product(product(*objects_list), random_states):
        name = "|".join([i[0] for i in comb])
        # name, object, sub grid
        # A None object is replaced by an identity FunctionTransformer so the
        # Pipeline step is a no-op passthrough.
        comb = [
            (nm, ob, ParameterGrid(sg))
            if ob is not None
            else (nm, FunctionTransformer(), ParameterGrid(sg))
            for nm, ob, sg in comb
        ]
        # Create estimator
        if name not in [n[0] for n in pipelines]:
            est = Pipeline([(nm, ob) for nm, ob, _ in comb])
            pipelines.append((name, est))
        # NOTE(review): when `name` was already registered, `est` below still
        # refers to the pipeline built on an earlier iteration; this relies on
        # same-named combinations sharing parameter names — confirm.
        # Create intermediate parameter grids
        sub_grids = [
            [{f"{nm}__{k}": v for k, v in param_def.items()} for param_def in sg]
            for nm, obj, sg in comb
        ]
        # Create parameter grids
        for sub_grid in product(*sub_grids):
            # Single-step pipelines address params directly, without a prefix.
            param_prefix = "" if len(comb) == 1 else f"{name}__"
            grid = {"est_name": [name]}
            grid.update(
                {f"{param_prefix}{k}": [v] for d in sub_grid for k, v in d.items()}
            )
            # Pin every random_state parameter of the pipeline to this run's
            # state.  (Rebinding `random_states` here is harmless: the outer
            # product() already consumed the original list.)
            random_states = {
                f"{param_prefix}{param}": [rs]
                for param in est.get_params()
                if "random_state" in param
            }
            grid.update(random_states)
            # Avoid multiple runs over pipelines without random state
            if grid not in param_grid:
                param_grid.append(grid)
    return pipelines, param_grid
def check_pipelines_wrapper(
    objects_list, wrapper, random_state, n_runs, wrapped_only=False
):
    """Like check_pipelines, but additionally wrap every pipeline inside
    *wrapper* (a (label, estimator, param_grid) triplet) as its
    'classifier' parameter."""
    wrapper_label = wrapper[0]
    wrapper_obj = wrapper[1]
    wrapper_grid = wrapper[2]
    estimators, param_grids = check_pipelines(objects_list, random_state, n_runs)
    # Wrap each base pipeline; names get the '<label>|' prefix.
    wrapped_estimators = [
        (
            f"{wrapper_label}|{name}",
            clone(wrapper_obj).set_params(**{"classifier": pipeline}),
        )
        for name, pipeline in estimators
    ]
    # Re-key each grid so pipeline params are reached through the wrapper
    # ('<label>|<name>__classifier__<param>'), and append the wrapper's own
    # parameter grid under '<label>|<name>__<param>'.
    wrapped_param_grids = [
        {
            "est_name": [f'{wrapper_label}|{d["est_name"][0]}'],
            **{
                f'{wrapper_label}|{d["est_name"][0]}__classifier__{k}': v
                for k, v in d.items()
                if k != "est_name"
            },
            **{
                f'{wrapper_label}|{d["est_name"][0]}__{k}': v
                for k, v in wrapper_grid.items()
            },
        }
        for d in param_grids
    ]
    if wrapped_only:
        return wrapped_estimators, wrapped_param_grids
    else:
        return (estimators + wrapped_estimators, param_grids + wrapped_param_grids)
| 31.666667
| 83
| 0.575329
| 369
| 3,040
| 4.520325
| 0.222222
| 0.009592
| 0.028777
| 0.038369
| 0.142686
| 0.1247
| 0.097122
| 0.083933
| 0.034772
| 0
| 0
| 0.004358
| 0.320724
| 3,040
| 95
| 84
| 32
| 0.80339
| 0.071382
| 0
| 0.027397
| 0
| 0
| 0.088193
| 0.059388
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027397
| false
| 0
| 0.082192
| 0
| 0.150685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dfc18ba2772ffd25b6600bc97edfc21e288fb90
| 13,044
|
py
|
Python
|
libs/python-daemon-2.2.0/test/test_metadata.py
|
helion-security/helion
|
1e5f22da9808c4d67bb773b93c5295c72fcaf45a
|
[
"MIT"
] | 1
|
2021-10-10T20:05:07.000Z
|
2021-10-10T20:05:07.000Z
|
libs/python-daemon-2.2.0/test/test_metadata.py
|
helion-security/helion
|
1e5f22da9808c4d67bb773b93c5295c72fcaf45a
|
[
"MIT"
] | null | null | null |
libs/python-daemon-2.2.0/test/test_metadata.py
|
helion-security/helion
|
1e5f22da9808c4d67bb773b93c5295c72fcaf45a
|
[
"MIT"
] | 5
|
2020-02-02T14:41:30.000Z
|
2022-03-18T08:34:01.000Z
|
# -*- coding: utf-8 -*-
#
# test/test_metadata.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# This is free software, and you are welcome to redistribute it under
# certain conditions; see the end of this file for copyright
# information, grant of license, and disclaimer of warranty.
""" Unit test for ‘_metadata’ private module.
"""
from __future__ import (absolute_import, unicode_literals)
import collections
import errno
import functools
import json
import re
try:
# Python 3 standard library.
import urllib.parse as urlparse
except ImportError:
# Python 2 standard library.
import urlparse
import mock
import pkg_resources
import testtools.helpers
import testtools.matchers
from . import scaffold
from .scaffold import unicode
import daemon._metadata as metadata
class HasAttribute(testtools.matchers.Matcher):
    """ A matcher to assert an object has a named attribute. """

    def __init__(self, name):
        self.attribute_name = name

    def match(self, instance):
        """ Assert the object `instance` has an attribute named `name`. """
        # A None result means the match succeeded (testtools convention).
        if testtools.helpers.safe_hasattr(instance, self.attribute_name):
            return None
        return AttributeNotFoundMismatch(instance, self.attribute_name)
class AttributeNotFoundMismatch(testtools.matchers.Mismatch):
    """ The specified instance does not have the named attribute. """

    def __init__(self, instance, name):
        self.instance = instance
        self.attribute_name = name

    def describe(self):
        """ Emit a text description of this mismatch. """
        return "{instance!r} has no attribute named {name!r}".format(
                instance=self.instance, name=self.attribute_name)
class metadata_value_TestCase(scaffold.TestCaseWithScenarios):
    """ Test cases for metadata module values. """

    # Metadata attributes expected to exist (and, mostly, to be strings).
    expected_str_attributes = set([
            'version_installed',
            'author',
            'copyright',
            'license',
            'url',
            ])

    # One scenario per expected attribute.
    scenarios = [
            (name, {'attribute_name': name})
            for name in expected_str_attributes]
    for (name, params) in scenarios:
        if name == 'version_installed':
            # No duck typing, this attribute might be None.
            params['ducktype_attribute_name'] = NotImplemented
            continue
        # Expect an attribute of ‘str’ to test this value.
        params['ducktype_attribute_name'] = 'isdigit'

    def test_module_has_attribute(self):
        """ Metadata should have expected value as a module attribute. """
        self.assertThat(
                metadata, HasAttribute(self.attribute_name))

    def test_module_attribute_has_duck_type(self):
        """ Metadata value should have expected duck-typing attribute. """
        if self.ducktype_attribute_name == NotImplemented:
            self.skipTest("Can't assert this attribute's type")
        instance = getattr(metadata, self.attribute_name)
        self.assertThat(
                instance, HasAttribute(self.ducktype_attribute_name))
class YearRange_TestCase(scaffold.TestCaseWithScenarios):
    """ Test cases for ‘YearRange’ class. """

    # A single year and an open-ended range both render as just the
    # begin year; a closed range uses an en-dash.
    scenarios = [
            ('simple', {
                'begin_year': 1970,
                'end_year': 1979,
                'expected_text': "1970–1979",
                }),
            ('same year', {
                'begin_year': 1970,
                'end_year': 1970,
                'expected_text': "1970",
                }),
            ('no end year', {
                'begin_year': 1970,
                'end_year': None,
                'expected_text': "1970",
                }),
            ]

    def setUp(self):
        """ Set up test fixtures. """
        super(YearRange_TestCase, self).setUp()
        self.test_instance = metadata.YearRange(
                self.begin_year, self.end_year)

    def test_text_representation_as_expected(self):
        """ Text representation should be as expected. """
        result = unicode(self.test_instance)
        self.assertEqual(result, self.expected_text)
# Lightweight value-comparable stand-in for metadata.YearRange.
FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end'])


@mock.patch.object(metadata, 'YearRange', new=FakeYearRange)
class make_year_range_TestCase(scaffold.TestCaseWithScenarios):
    """ Test cases for ‘make_year_range’ function. """

    # 'UNKNOWN'/'FUTURE' end-date tokens and a missing end date all map
    # to an open-ended range (end=None).
    scenarios = [
            ('simple', {
                'begin_year': "1970",
                'end_date': "1979-01-01",
                'expected_range': FakeYearRange(begin=1970, end=1979),
                }),
            ('same year', {
                'begin_year': "1970",
                'end_date': "1970-01-01",
                'expected_range': FakeYearRange(begin=1970, end=1970),
                }),
            ('no end year', {
                'begin_year': "1970",
                'end_date': None,
                'expected_range': FakeYearRange(begin=1970, end=None),
                }),
            ('end date UNKNOWN token', {
                'begin_year': "1970",
                'end_date': "UNKNOWN",
                'expected_range': FakeYearRange(begin=1970, end=None),
                }),
            ('end date FUTURE token', {
                'begin_year': "1970",
                'end_date': "FUTURE",
                'expected_range': FakeYearRange(begin=1970, end=None),
                }),
            ]

    def test_result_matches_expected_range(self):
        """ Result should match expected YearRange. """
        result = metadata.make_year_range(self.begin_year, self.end_date)
        self.assertEqual(result, self.expected_range)
class metadata_content_TestCase(scaffold.TestCase):
    """ Test cases for content of metadata. """

    def test_copyright_formatted_correctly(self):
        """ Copyright statement should be formatted correctly. """
        # Raw strings for the regex fragments: '\d' inside a non-raw
        # literal is an invalid escape sequence (DeprecationWarning, and a
        # SyntaxWarning from Python 3.12).
        regex_pattern = (
                "Copyright © "
                r"\d{4}"  # Four-digit year.
                r"(?:–\d{4})?"  # Optional range dash and four-digit year.
                )
        regex_flags = re.UNICODE
        self.assertThat(
                metadata.copyright,
                testtools.matchers.MatchesRegex(regex_pattern, regex_flags))

    def test_author_formatted_correctly(self):
        """ Author information should be formatted correctly. """
        regex_pattern = (
                ".+ "  # Name.
                "<[^>]+>"  # Email address, in angle brackets.
                )
        regex_flags = re.UNICODE
        self.assertThat(
                metadata.author,
                testtools.matchers.MatchesRegex(regex_pattern, regex_flags))

    def test_copyright_contains_author(self):
        """ Copyright information should contain author information. """
        self.assertThat(
                metadata.copyright,
                testtools.matchers.Contains(metadata.author))

    def test_url_parses_correctly(self):
        """ Homepage URL should parse correctly. """
        result = urlparse.urlparse(metadata.url)
        self.assertIsInstance(
                result, urlparse.ParseResult,
                "URL value {url!r} did not parse correctly".format(
                    url=metadata.url))
try:
    FileNotFoundError
except NameError:
    # Python 2 uses IOError.
    FileNotFoundError = functools.partial(IOError, errno.ENOENT)

# Name of the metadata resource that carries version information.
version_info_filename = "version_info.json"
def fake_func_has_metadata(testcase, resource_name):
    """ Fake the behaviour of ‘pkg_resources.Distribution.has_metadata’. """
    # Metadata "exists" only for the expected resource name, and only when
    # the test case actually defines version info content.
    return (
            resource_name == testcase.version_info_filename
            and hasattr(testcase, 'test_version_info'))
def fake_func_get_metadata(testcase, resource_name):
    """ Fake the behaviour of ‘pkg_resources.Distribution.get_metadata’. """
    # Mirror the real API: a missing resource raises FileNotFoundError.
    if not fake_func_has_metadata(testcase, resource_name):
        raise FileNotFoundError(resource_name)
    return testcase.test_version_info
def fake_func_get_distribution(testcase, distribution_name):
    """ Fake the behaviour of ‘pkg_resources.get_distribution’. """
    if distribution_name != metadata.distribution_name:
        raise pkg_resources.DistributionNotFound
    # Simulate a configured lookup failure (e.g. distribution not installed).
    if hasattr(testcase, 'get_distribution_error'):
        raise testcase.get_distribution_error
    # Wire the mock distribution's metadata accessors to the fakes above.
    mock_distribution = testcase.mock_distribution
    mock_distribution.has_metadata.side_effect = functools.partial(
            fake_func_has_metadata, testcase)
    mock_distribution.get_metadata.side_effect = functools.partial(
            fake_func_get_metadata, testcase)
    return mock_distribution
@mock.patch.object(metadata, 'distribution_name', new="mock-dist")
class get_distribution_version_info_TestCase(scaffold.TestCaseWithScenarios):
    """ Test cases for ‘get_distribution_version_info’ function. """

    # Fallback values expected whenever version info cannot be read.
    default_version_info = {
            'release_date': "UNKNOWN",
            'version': "UNKNOWN",
            'maintainer': "UNKNOWN",
            }

    scenarios = [
            ('version 0.0', {
                'test_version_info': json.dumps({
                    'version': "0.0",
                    }),
                'expected_version_info': {'version': "0.0"},
                }),
            ('version 1.0', {
                'test_version_info': json.dumps({
                    'version': "1.0",
                    }),
                'expected_version_info': {'version': "1.0"},
                }),
            ('file lorem_ipsum.json', {
                'test_filename': "lorem_ipsum.json",
                'version_info_filename': "lorem_ipsum.json",
                'test_version_info': json.dumps({
                    'version': "1.0",
                    }),
                'expected_resource_name': "lorem_ipsum.json",
                'expected_version_info': {'version': "1.0"},
                }),
            ('not installed', {
                'get_distribution_error': pkg_resources.DistributionNotFound(),
                'expected_version_info': default_version_info,
                }),
            ('no version_info', {
                'expected_version_info': default_version_info,
                }),
            # Requesting a filename that the fake metadata does not serve
            # must fall back to the defaults.
            ('wrong filename', {
                'test_filename': "lorem_ipsum.json",
                'test_version_info': json.dumps({
                    'version': "1.0",
                    }),
                'expected_resource_name': "lorem_ipsum.json",
                'expected_version_info': default_version_info,
                }),
            ]

    def setUp(self):
        """ Set up test fixtures. """
        super(get_distribution_version_info_TestCase, self).setUp()
        self.test_args = {}
        if hasattr(self, 'test_filename'):
            self.test_args['filename'] = self.test_filename
        if not hasattr(self, 'version_info_filename'):
            self.version_info_filename = version_info_filename
        if not hasattr(self, 'expected_resource_name'):
            self.expected_resource_name = version_info_filename
        self.mock_distribution = mock.MagicMock()
        # Route pkg_resources.get_distribution through the module-level
        # fake for the duration of the test.
        func_patcher_get_distribution = mock.patch.object(
                pkg_resources, 'get_distribution')
        func_patcher_get_distribution.start()
        self.addCleanup(func_patcher_get_distribution.stop)
        pkg_resources.get_distribution.side_effect = functools.partial(
                fake_func_get_distribution, self)

    def test_requests_installed_distribution(self):
        """ The package distribution should be retrieved. """
        expected_distribution_name = metadata.distribution_name
        metadata.get_distribution_version_info(**self.test_args)
        pkg_resources.get_distribution.assert_called_with(
                expected_distribution_name)

    def test_requests_specified_filename(self):
        """ The specified metadata resource name should be requested. """
        if hasattr(self, 'get_distribution_error'):
            self.skipTest("No access to distribution")
        metadata.get_distribution_version_info(**self.test_args)
        self.mock_distribution.has_metadata.assert_called_with(
                self.expected_resource_name)

    def test_result_matches_expected_items(self):
        """ The result should match the expected items. """
        version_info = metadata.get_distribution_version_info(**self.test_args)
        self.assertEqual(self.expected_version_info, version_info)
# Copyright © 2008–2018 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 3 of that license or any later version.
# No warranty expressed or implied. See the file ‘LICENSE.GPL-3’ for details.
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
| 35.835165
| 79
| 0.611546
| 1,348
| 13,044
| 5.689169
| 0.198813
| 0.048768
| 0.013561
| 0.016691
| 0.350111
| 0.256357
| 0.180988
| 0.126092
| 0.092711
| 0.046421
| 0
| 0.015065
| 0.287565
| 13,044
| 363
| 80
| 35.933884
| 0.809642
| 0.174026
| 0
| 0.310757
| 0
| 0
| 0.144665
| 0.032672
| 0
| 0
| 0
| 0
| 0.047809
| 1
| 0.079681
| false
| 0
| 0.063745
| 0
| 0.219124
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dfe1873a422b9d98cb23a45aa91a24e21973cf8
| 1,725
|
py
|
Python
|
text_preprocessing/normalizer.py
|
cyberpunk317/inverted_index
|
f49ae3ca4f0255928986c1610c5ff8ee38c5f1ff
|
[
"MIT"
] | 9
|
2021-09-03T10:02:16.000Z
|
2021-12-22T14:19:33.000Z
|
text_preprocessing/normalizer.py
|
cyberpunk317/inverted_index
|
f49ae3ca4f0255928986c1610c5ff8ee38c5f1ff
|
[
"MIT"
] | 3
|
2021-04-19T17:13:57.000Z
|
2022-03-18T15:11:53.000Z
|
text_preprocessing/normalizer.py
|
cyberpunk317/inverted_index
|
f49ae3ca4f0255928986c1610c5ff8ee38c5f1ff
|
[
"MIT"
] | 1
|
2021-12-11T09:47:46.000Z
|
2021-12-11T09:47:46.000Z
|
import re
from typing import Union, List
import nltk
from bs4 import BeautifulSoup
class Normalizer:
    """Text normalizer pipeline: stop-word removal, denoising, lemmatization.

    Accepts either a raw string or a pre-tokenized list of words.
    """

    def __init__(self):
        self.lemmatizer = nltk.stem.WordNetLemmatizer()

    def normalize(self, x: Union[list, str]) -> str:
        """
        Accepts text (possibly tokenized) and makes it suitable for machine processing.

        Returns a single normalized string (fixed annotation: the pipeline
        ends with ``_denoise``/``_lemmatize``, both of which return ``str``).
        """
        x = self._remove_stop_words(x)
        x = self._denoise(x)
        x = self._lemmatize(x)
        return x

    def _remove_stop_words(self, x: Union[list, str]) -> List[str]:
        """
        Removes English stop words; returns the remaining tokens as a list.
        """
        if isinstance(x, str):
            x = x.split(' ')
        stop_words = set(nltk.corpus.stopwords.words('english'))
        return [w for w in x if w not in stop_words]

    def _lemmatize(self, x: Union[list, str]) -> str:
        """
        Lemmatizes the text and returns it as a single string.

        NOTE(review): the whole joined string is passed to
        ``WordNetLemmatizer.lemmatize`` as one token, so multi-word input is
        effectively lemmatized only at its tail — confirm this is intended.
        """
        if isinstance(x, list):
            x = ' '.join(x)
        return self.lemmatizer.lemmatize(x)

    def _denoise(self, x: Union[list, str]) -> str:
        """Strips punctuation, HTML markup, bracketed spans, URLs and ratings."""
        if isinstance(x, list):
            x = ' '.join(x)

        def strip_html(text):
            # Drop all HTML tags, keeping only the visible text.
            soup = BeautifulSoup(text, "html.parser")
            return soup.get_text()

        def remove_between_square_brackets(text):
            # Raw strings fix the invalid-escape-sequence warnings from
            # the original '\[' / '\S' patterns.
            text = re.sub(r'\[[^]]*\]', '', text)
            text = re.sub(r'http\S+', '', text)
            return text

        def remove_rating(text):
            # e.g. " 7/10" style review ratings.
            return re.sub(r'\W\d/\d+\S*', '', text)

        x = x.lower()
        x = re.sub(r',|\.|!|\?', '', x)
        x = strip_html(x)
        x = remove_between_square_brackets(x)
        x = remove_rating(x)
        return x
| 27.822581
| 86
| 0.506667
| 213
| 1,725
| 3.976526
| 0.305164
| 0.025974
| 0.047226
| 0.066116
| 0.336482
| 0.224321
| 0.155844
| 0.0732
| 0
| 0
| 0
| 0.000901
| 0.356522
| 1,725
| 62
| 87
| 27.822581
| 0.762162
| 0.078261
| 0
| 0.219512
| 0
| 0
| 0.037475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.195122
| false
| 0
| 0.097561
| 0.02439
| 0.487805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dfe4e27d16878f382ef6d6119132647294b2b99
| 1,874
|
py
|
Python
|
env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py
|
MarcoMancha/BreastCancerDetector
|
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
|
[
"Apache-2.0"
] | 2
|
2020-09-30T00:11:09.000Z
|
2021-10-04T13:00:38.000Z
|
env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py
|
MarcoMancha/BreastCancerDetector
|
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
|
[
"Apache-2.0"
] | 9
|
2020-08-11T15:19:55.000Z
|
2022-03-12T00:11:12.000Z
|
env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py
|
MarcoMancha/BreastCancerDetector
|
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
|
[
"Apache-2.0"
] | 2
|
2020-08-03T13:02:06.000Z
|
2020-11-04T03:15:44.000Z
|
"""
For backwards-compatibility. keep this file.
(Many people are going to have key bindings that rely on this file.)
"""
from __future__ import unicode_literals
from .app import *
__all__ = [
# Old names.
'HasArg',
'HasCompletions',
'HasFocus',
'HasSelection',
'HasValidationError',
'IsDone',
'IsReadOnly',
'IsMultiline',
'RendererHeightIsKnown',
'InEditingMode',
'InPasteMode',
'ViMode',
'ViNavigationMode',
'ViInsertMode',
'ViInsertMultipleMode',
'ViReplaceMode',
'ViSelectionMode',
'ViWaitingForTextObjectMode',
'ViDigraphMode',
'EmacsMode',
'EmacsInsertMode',
'EmacsSelectionMode',
'IsSearching',
'HasSearch',
'ControlIsSearchable',
]
# Keep the original classnames for backwards compatibility.
HasValidationError = lambda: has_validation_error
HasArg = lambda: has_arg
IsDone = lambda: is_done
RendererHeightIsKnown = lambda: renderer_height_is_known
ViNavigationMode = lambda: vi_navigation_mode
InPasteMode = lambda: in_paste_mode
EmacsMode = lambda: emacs_mode
EmacsInsertMode = lambda: emacs_insert_mode
ViMode = lambda: vi_mode
IsSearching = lambda: is_searching
HasSearch = lambda: is_searching
ControlIsSearchable = lambda: control_is_searchable
EmacsSelectionMode = lambda: emacs_selection_mode
ViDigraphMode = lambda: vi_digraph_mode
ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode
ViSelectionMode = lambda: vi_selection_mode
ViReplaceMode = lambda: vi_replace_mode
ViInsertMultipleMode = lambda: vi_insert_multiple_mode
ViInsertMode = lambda: vi_insert_mode
HasSelection = lambda: has_selection
HasCompletions = lambda: has_completions
IsReadOnly = lambda: is_read_only
IsMultiline = lambda: is_multiline
HasFocus = has_focus # No lambda here! (Has_focus is callable that returns a callable.)
InEditingMode = in_editing_mode
| 27.558824
| 88
| 0.766275
| 191
| 1,874
| 7.230366
| 0.481675
| 0.046343
| 0.036206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154216
| 1,874
| 67
| 89
| 27.970149
| 0.871293
| 0.132337
| 0
| 0
| 0
| 0
| 0.205446
| 0.029084
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dfec5e4fee06a96072b5a9530a2216e08d3cbd3
| 1,988
|
py
|
Python
|
genetic/spaces.py
|
shilpasayura/bk
|
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
|
[
"MIT"
] | 4
|
2018-09-08T10:30:27.000Z
|
2021-07-23T07:59:24.000Z
|
genetic/spaces.py
|
shilpasayura/bk
|
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
|
[
"MIT"
] | null | null | null |
genetic/spaces.py
|
shilpasayura/bk
|
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
|
[
"MIT"
] | 6
|
2018-09-07T05:54:17.000Z
|
2021-07-23T07:59:25.000Z
|
#spaces.py
'''
AlgoHack Genetic Algorithm for University Semaster Planning
Version 0.03 2018
Niranjan Meegammana Shilpasayura.org
'''
import xdb
def crt_spaces_table(cursor, drop=False):
    # Create the `spaces` table if it does not exist, optionally dropping
    # any previous table first.  Returns the success flag from xdb.
    if drop:
        success, count = xdb.runSQL(cursor, "DROP TABLE IF EXISTS spaces;")
    create_sql = '''CREATE TABLE IF NOT EXISTS spaces (
    spid INTEGER PRIMARY KEY AUTOINCREMENT,
    name varchar(30),
    sptype INTEGER,
    fitness INTEGER,
    gid INTEGER DEFAULT 0,
    semid INTEGER DEFAULT 0)
    '''
    success, count = xdb.runSQL(cursor, create_sql)
    return success
def insert_spaces(cursor, nlect, nlabs, gid, semid, delay):
    """Seed the `spaces` table with lecture halls and labs.

    nlect -- number of lecture halls (sptype 1)
    nlabs -- number of labs (sptype 2)
    gid/semid -- 0 means shared by all groups/semesters, else dedicated
    delay -- per-statement delay passed through to xdb.runSQL_stmts

    Returns (success, count); (False, 0) if records already exist.
    """
    sql = "SELECT * FROM spaces LIMIT 1"
    success, count = xdb.runSQL(cursor, sql)
    if count > 0:
        print("spaces table: Records exist")
        return False, 0
    fitness = 1
    sqls = ""
    # One pass per room kind replaces the two copy-pasted loops of the
    # original: (name prefix, space type, how many rooms).
    # NOTE(review): values are interpolated straight into the SQL text;
    # fine for these generated names, but not safe for untrusted input.
    for prefix, sptype, numrooms in (("Lect Hall ", 1, nlect), ("Lab ", 2, nlabs)):
        for i in range(numrooms):
            name = prefix + str(i + 1)
            sqls = sqls + 'INSERT INTO spaces (name,sptype,fitness,gid,semid) VALUES (' + '"{}",{}, {},{},{}'.format(name, sptype, fitness, gid, semid) + ');'
    success, count = xdb.runSQL_stmts(cursor, sqls, delay)
    return success, count
if __name__ == "__main__":
delay=0.05
conn=xdb.opendb('genetic56.db')
cursor =conn.cursor() # create a cursor object
success=crt_spaces_table(cursor, True) # create spaces table
#dedicated lecture hall, lab for group and semaster
success, count =insert_spaces(cursor,1,1,1,1,delay) # generate records
xdb.commit(conn)
xdb.closedb(conn)
| 32.064516
| 147
| 0.628773
| 262
| 1,988
| 4.71374
| 0.374046
| 0.0583
| 0.048583
| 0.068016
| 0.212146
| 0.212146
| 0.139271
| 0.139271
| 0.139271
| 0.139271
| 0
| 0.020067
| 0.247988
| 1,988
| 61
| 148
| 32.590164
| 0.80602
| 0.195674
| 0
| 0.121951
| 0
| 0
| 0.312746
| 0.040736
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.02439
| 0
| 0.146341
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dff31a15c326fed56b2875daa3e36cda971efde
| 2,062
|
py
|
Python
|
threaded_remote_pi_camera.py
|
hyansuper/flask-video-streaming
|
a6ba19519b9ba5470e59e535552b3e8c448d57ae
|
[
"MIT"
] | 7
|
2020-01-03T17:35:29.000Z
|
2021-11-24T14:29:50.000Z
|
threaded_remote_pi_camera.py
|
hyansuper/flask-video-streaming
|
a6ba19519b9ba5470e59e535552b3e8c448d57ae
|
[
"MIT"
] | null | null | null |
threaded_remote_pi_camera.py
|
hyansuper/flask-video-streaming
|
a6ba19519b9ba5470e59e535552b3e8c448d57ae
|
[
"MIT"
] | 4
|
2020-04-30T15:41:25.000Z
|
2021-08-07T17:05:54.000Z
|
import urllib.request
import cv2
import numpy as np
import time
import threading
class ThreadedRemotePiCamera:
    """Read an MJPEG stream from a remote Pi camera over HTTP on a
    background thread, handing out decoded (optionally flipped) frames.

    Can be used as a context manager and iterated for successive frames.
    """

    def __init__(self, pi_address, resolution=(320,240), framerate=10, hflip=False, vflip=False):
        # cv2.flip code: -1 = both axes, 0 = h-flip arg, 1 = v-flip arg,
        # None = no flip.
        if hflip and vflip:
            self.flip = -1
        elif hflip:
            self.flip = 0
        elif vflip:
            self.flip = 1
        else:
            self.flip = None
        self.stream = urllib.request.urlopen('http://%s:5000/video_feed?w=%d&h=%d&fps=%d' % ((pi_address,)+resolution+(framerate,)))
        self.total_bytes = b''
        self.ev = threading.Event()
        self.th = threading.Thread(target=self.run, daemon=True)
        self.running = True
        self.frame = None
        self.th.start()

    def run(self):
        # Producer loop: keep decoding frames until release() clears the flag.
        while self.running:
            self.frame = self.get_frame()
            self.ev.set()
        self.stream.close()

    def read(self):
        # Block until the producer publishes a new frame, then return it.
        self.ev.wait()
        self.ev.clear()
        return self.frame

    def get_frame(self):
        # Accumulate stream bytes until a complete JPEG (FFD8..FFD9) arrives.
        while True:
            self.total_bytes += self.stream.read(1024)
            end = self.total_bytes.find(b'\xff\xd9')  # JPEG end marker
            if end != -1:  # fixed: `if not end == -1`
                start = self.total_bytes.find(b'\xff\xd8')  # JPEG start marker
                # np.frombuffer replaces np.fromstring, which is deprecated
                # and removed for binary input in modern NumPy.
                jpg = cv2.imdecode(np.frombuffer(self.total_bytes[start: end+2], dtype=np.uint8), cv2.IMREAD_COLOR)
                if self.flip is not None:
                    jpg = cv2.flip(jpg, self.flip)
                # Keep any bytes already received past this frame.
                self.total_bytes = self.total_bytes[end+2:]
                return jpg

    def release(self):
        # Stop the producer thread and wait for it to close the stream.
        self.running = False
        self.th.join()

    def frames(self):
        while True:
            yield self.read()

    def __iter__(self):
        return self.frames()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.release()

    def __del__(self):
        self.release()
| 31.242424
| 132
| 0.541707
| 256
| 2,062
| 4.234375
| 0.363281
| 0.058118
| 0.090406
| 0.02583
| 0.04059
| 0.04059
| 0
| 0
| 0
| 0
| 0
| 0.022075
| 0.340931
| 2,062
| 65
| 133
| 31.723077
| 0.77557
| 0.051406
| 0
| 0.071429
| 0
| 0
| 0.030542
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.089286
| 0.035714
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5dff826ca431e889e0cef41a0054e1a64431e876
| 22,520
|
py
|
Python
|
scheduler/misc/Ec2SpotCustomScheduler_jan19.py
|
jalawala/custom-kubernetes-scheduler
|
07ccba57610048185a245257a1501f6273399d80
|
[
"Apache-2.0"
] | 4
|
2021-02-24T23:42:17.000Z
|
2021-03-10T06:31:35.000Z
|
misc-folder-ignore/scheduler/misc/Ec2SpotCustomScheduler_jan19.py
|
ABottleofWater7/custom-kubernetes-scheduler
|
f179a45c85291ba8d34d37e11a33396c94fd5bac
|
[
"Apache-2.0"
] | null | null | null |
misc-folder-ignore/scheduler/misc/Ec2SpotCustomScheduler_jan19.py
|
ABottleofWater7/custom-kubernetes-scheduler
|
f179a45c85291ba8d34d37e11a33396c94fd5bac
|
[
"Apache-2.0"
] | 2
|
2021-09-27T09:08:37.000Z
|
2022-03-21T04:20:07.000Z
|
#! /usr/bin/python3
import time
import random
import json
import os
from pprint import pprint
from kubernetes.client.rest import ApiException
from pint import UnitRegistry
from collections import defaultdict
from kubernetes import client, config, watch
from timeloop import Timeloop
from datetime import timedelta
# Load kubeconfig from the local environment (running outside the cluster);
# switch to the in-cluster loader when deployed as a pod.
config.load_kube_config()
#config.load_incluster_config()

core_api = client.CoreV1Api()   # pods / nodes / bindings
apis_api = client.AppsV1Api()   # deployments

# Sysdig metric definitions — not used by the scheduling loop below.
sysdig_metric = "net.http.request.time"
metrics = [{ "id": sysdig_metric, "aggregations": { "time": "timeAvg", "group": "avg" } }]

# Pods must set spec.schedulerName to this value to be handled here.
CustomSchedulerName ='K8SCustomScheduler'

# Unit registry for parsing Kubernetes resource quantities ("500m", "1Gi", ...).
ureg = UnitRegistry()
ureg.load_definitions('kubernetes_units.txt')

# Module-global scratch state, reset on every scheduling pass.
pendingPodsList = []
failedPodsList = []
runningPodsList =[]
nodesListPerNodeLabel = {}

Q_ = ureg.Quantity
def scheduler(name, node, namespace):
    """Bind pod *name* in *namespace* onto *node* via the Binding API."""
    binding = client.V1Binding(
            metadata=client.V1ObjectMeta(name=name),
            target=client.V1ObjectReference(
                    api_version='v1', kind="Node", name=node))
    return core_api.create_namespaced_binding(namespace, binding, _preload_content=False)
#tl = Timeloop()
#@tl.job(interval=timedelta(seconds=10))
def RunEc2SpotCustomScheduler():
    """One full scheduling pass: gather custom-scheduled deployments for
    every namespace and reconcile each namespace that has any."""
    cluster_data = get_custom_deployments()
    pprint("CustomKubeSchedulingClusterDeploymentData={}".format(cluster_data))
    for namespace, per_namespace_data in cluster_data.items():
        print("namespace={} deploymentCustomSchedulingData={}".format(namespace, per_namespace_data))
        if per_namespace_data != {}:
            CustomSchedulePerNamespace(namespace, per_namespace_data)
def CustomSchedulePerNamespace(namespace, deploymentCustomSchedulingData):
    """Reconcile every custom-scheduled deployment in *namespace*.

    For each deployment, refresh the module-global pod lists and per-label
    node data, then for each node label schedule missing pods or delete
    surplus ones so the running count matches the desired replica count.

    Bug fixes vs. the original: removed debug prints that referenced
    undefined names (`lifecycle`, `NodesList`, `n`), used the scenario's
    `numOfReplicas` instead of the undefined `NumOfPodsToBeRunning`,
    passed the correct per-label node list to schedulePods, and reset
    `failedPodsList` (not the scalar `NumOfPodsFailed`) at the end.
    """
    global runningPodsList
    global pendingPodsList
    global failedPodsList
    global nodesListPerNodeLabel
    print("namespace={} deploymentCustomSchedulingData={}".format(namespace, deploymentCustomSchedulingData))
    for deploymentName, CustomSchedulingData in deploymentCustomSchedulingData.items():
        print("deploymentName={} CustomSchedulingData={}".format(deploymentName, CustomSchedulingData))
        # Refresh the module-global pod state for this deployment.
        runningPodsList = []
        pendingPodsList = []
        failedPodsList = []
        getPodsListForDeployment(namespace, deploymentName)
        # Refresh per-label node capacity data.
        get_node_available_nodes_list(CustomSchedulingData)
        for i, p in enumerate(runningPodsList):
            pprint("i={} running pod_name={} node_name={}".format(i, p['name'], p['node_name']))
        for i, p in enumerate(pendingPodsList):
            pprint("i={} pending pod_name={} node_name={}".format(i, p['name'], p.get('node_name')))
        for i, p in enumerate(failedPodsList):
            pprint("i={} failed pod_name={} node_name={}".format(i, p['name'], p.get('node_name')))
        for nodeLabel, numOfReplicas in CustomSchedulingData.items():
            print("Scheduling numOfReplicas={} on nodeLabel={}".format(numOfReplicas, nodeLabel))
            nodesList = nodesListPerNodeLabel[nodeLabel]
            # Pods of this deployment already running on nodes carrying the label.
            podsAlreadyRunningOnNodeLabelList = [
                    pod for pod in runningPodsList
                    if pod['node_name'] in nodesList.keys()]
            NumOfAlreadyRunningPods = len(podsAlreadyRunningOnNodeLabelList)
            for i, p in enumerate(podsAlreadyRunningOnNodeLabelList):
                pprint("running pod i={} nodeLabel={} node_name={} name={}".format(i, nodeLabel, p['node_name'], p['name']))
            if NumOfAlreadyRunningPods == numOfReplicas:
                print("NumOfAlreadyRunningPods == NumOfPodsToBeRunning = {}. So no need to Schedule".format(NumOfAlreadyRunningPods))
            elif NumOfAlreadyRunningPods < numOfReplicas:
                # Too few: bind pending pods onto this label's nodes.
                try:
                    schedulePods(numOfReplicas - NumOfAlreadyRunningPods, nodesList)
                except Exception as e:
                    pprint(e)
            else:
                # Too many: remove the surplus from this label's nodes.
                try:
                    deletePods(NumOfAlreadyRunningPods - numOfReplicas, podsAlreadyRunningOnNodeLabelList)
                except Exception as e:
                    pprint(e)
        pendingPodsList = []
        failedPodsList = []
def deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList):
    """Delete the first *NumOfPodsToDeleted* pods from the given list."""
    namespace = 'default'
    grace_period_seconds = 30
    for index in range(NumOfPodsToDeleted):
        victim = podsAlreadyRunningOnNodeLabelList[index]
        pprint("deletePods i={} pod={} NumOfPodsToDeleted={}".format(index, victim['name'], NumOfPodsToDeleted))
        body = client.V1DeleteOptions()
        response = core_api.delete_namespaced_pod(
                name=victim['name'], namespace=namespace,
                grace_period_seconds=grace_period_seconds, body=body)
        pprint(response)
def schedulePods(NumOfPodsToBeScheduled, NodesList):
    """Bind up to *NumOfPodsToBeScheduled* pending pods using first-fit on
    free CPU and memory over the nodes in *NodesList*.

    Mutates the module-global ``pendingPodsList`` in place (each bound pod
    is removed from the head) and decrements the chosen node's tracked
    free resources so later pods see the updated capacity.
    """
    global pendingPodsList
    global failedPodsList
    namespace = 'default'
    # Guard: never attempt to schedule more pods than are pending.
    if NumOfPodsToBeScheduled > len(pendingPodsList):
        pprint("schedulePods NumOfPodsToBeScheduled={} is greater than number of pending pods={}. So skipping schedulePods".format(NumOfPodsToBeScheduled, len(pendingPodsList)))
        return
    for i in range(NumOfPodsToBeScheduled):
        # Always take the current head; it is removed once bound.
        pod = pendingPodsList[0]
        print("schedulePods Trying to schedule i={} NumOfPodsToBeScheduled={} pod={} with cpu_req={} mem_req={}".format(i, NumOfPodsToBeScheduled, pod['name'], pod['cpu_req'], pod['mem_req']))
        for node, stats in NodesList.items():
            print("schedulePods Checking for free resources on node={} with cpu_free={} mem_free={}".format(node, stats['cpu_free'], stats['mem_free']))
            # First node with enough free CPU and memory wins.
            if pod['cpu_req'] <= stats['cpu_free'] and pod['mem_req'] <= stats['mem_free']:
                print("schedulePods scheduling pod={} onto the node={}".format(pod['name'], node))
                res = scheduler(pod['name'], node, namespace)
                pprint(res)
                # Book-keep the consumed resources for subsequent pods.
                stats['cpu_free'] = stats['cpu_free'] - pod['cpu_req']
                stats['mem_free'] = stats['mem_free'] - pod['mem_req']
                pendingPodsList.remove(pod)
                break
def getPodsListForDeployment(namespace, deploymentName):
    """Classify this deployment's pods (those owned by our scheduler) into
    the module-global ``pendingPodsList`` / ``runningPodsList`` /
    ``failedPodsList``.

    The lists are appended to, not cleared — the caller resets them first.
    """
    # Only pods that name us as their scheduler.
    field_selector = ("spec.schedulerName=" + CustomSchedulerName)
    pods = core_api.list_namespaced_pod(namespace=namespace, field_selector=field_selector).to_dict()
    for pod in pods['items']:
        stats = {}
        cpureqs,cpulmts,memreqs,memlmts = [], [], [], []
        # Belt-and-braces: re-check deployment membership and scheduler name.
        if deploymentName in pod['metadata']['name'] and pod['spec']['scheduler_name'] == CustomSchedulerName:
            # Aggregate resource requests/limits across all containers.
            for container in pod['spec']['containers']:
                res = container['resources']
                reqs = defaultdict(lambda: 0, res['requests'] or {})
                lmts = defaultdict(lambda: 0, res['limits'] or {})
                cpureqs.append(Q_(reqs["cpu"]))
                memreqs.append(Q_(reqs["memory"]))
                cpulmts.append(Q_(lmts["cpu"]))
                memlmts.append(Q_(lmts["memory"]))
            stats["cpu_req"] = sum(cpureqs)
            stats["cpu_lmt"] = sum(cpulmts)
            stats["mem_req"] = sum(memreqs)
            stats["mem_lmt"] = sum(memlmts)
            stats["name"] = pod['metadata']['name']
            stats["status"] = pod['status']['phase']
            # Bucket by phase; only running pods carry a node assignment.
            if stats["status"] == 'Pending':
                pendingPodsList.append(stats)
            elif stats["status"] == 'Running':
                stats["node_name"] = pod['spec']['node_name']
                runningPodsList.append(stats)
            elif stats["status"] == 'Failed':
                failedPodsList.append(stats)
def get_custom_deployments():
    """Return {namespace: per-namespace custom-scheduling data} for every
    namespace in the cluster."""
    cluster_data = {}
    for namespaceData in core_api.list_namespace().to_dict()['items']:
        ns_name = namespaceData['metadata']['name']
        cluster_data[ns_name] = get_custom_deployments_per_namespace(ns_name)
    print("CustomKubeSchedulingClusterDeploymentData={}".format(cluster_data))
    return cluster_data
def get_custom_deployments_per_namespace(namespace):
    """Return {deployment name: {node label: replica count}} for every
    deployment in *namespace* annotated with ``UseCustomKubeScheduler: 'true'``.
    """
    CustomKubeSchedulingDeploymentData = {}
    name = '1'
    # NOTE(review): annotation-based field selectors are not applied below —
    # the listing is unfiltered and each deployment is checked in Python.
    field_selector = ("metadata.annotations.OnDemandBase=" + name)
    resp = apis_api.list_namespaced_deployment(namespace=namespace)
    for deployment in resp.items:
        deploymentData = {}
        CustomPodScheduleStrategy = {}
        annotations = deployment.metadata.annotations
        if 'UseCustomKubeScheduler' in annotations.keys():
            if annotations['UseCustomKubeScheduler'] == 'true':
                deploymentName = deployment.metadata.name
                numOfReplicas = deployment.spec.replicas
                # Parse the strategy annotation into {label: replicas}.
                Strategy = annotations['CustomPodScheduleStrategy']
                CustomKubeSchedulingDeploymentData[deploymentName] = get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas)
    return CustomKubeSchedulingDeploymentData
def get_pods_custom_pod_schedule_strategy(Strategy, numOfReplicas):
    """Split *numOfReplicas* replicas across node labels per a strategy
    string such as ``"label=a,base=2,weight=1:label=b,weight=3"``.

    The (at most one) non-zero ``base`` is satisfied out of the replica
    budget first; the remainder is divided by ``weight``, with the final
    label absorbing any rounding remainder.

    Returns {node label: replica count}.

    Raises ValueError if more than one node strategy carries a non-zero
    base.  (Bug fix: the old check fired on *any* second ``base`` key,
    even ``base=0``, and killed the whole process with ``exit(1)``.)
    """
    print("Strategy={} numOfReplicas={}".format(Strategy, numOfReplicas))
    CustomPodScheduleStrategy = {}
    nodeLabelToWights = {}
    totalWeight = 0
    numOfBaseValues = 0
    for nodeStrategy in Strategy.split(':'):
        print("nodeStrategy: {}".format(nodeStrategy))
        base = 0
        weight = 0
        nodeLabel = ''
        for nodeStrategyPart in nodeStrategy.split(','):
            nodeStrategySubPartList = nodeStrategyPart.split('=')
            if nodeStrategySubPartList[0] == 'base':
                base = int(nodeStrategySubPartList[1])
                if base != 0:
                    if numOfBaseValues != 0:
                        raise ValueError(
                                "base value cannot be non-zero for more "
                                "than one node strategy")
                    numOfBaseValues += 1
                # The base is served out of the replica budget first and
                # is capped by whatever budget remains.
                if base <= numOfReplicas:
                    numOfReplicas -= base
                else:
                    base = numOfReplicas
                    numOfReplicas = 0
            elif nodeStrategySubPartList[0] == 'weight':
                weight = int(nodeStrategySubPartList[1])
                totalWeight += weight
            else:
                # Anything that is not base/weight is the node label itself.
                nodeLabel = nodeStrategyPart
        nodeLabelToWights[nodeLabel] = weight
        CustomPodScheduleStrategy[nodeLabel] = base
    # Distribute the remaining replicas by weight; last label gets the rest.
    totalNumOfLables = len(CustomPodScheduleStrategy)
    labelNum = 0
    for key, replicas in CustomPodScheduleStrategy.items():
        weight = nodeLabelToWights[key]
        if labelNum == totalNumOfLables - 1:
            weightReplicas = numOfReplicas
        else:
            # NOTE(review): shares are computed against the *shrinking*
            # pool but the full totalWeight — kept from the original;
            # confirm this rounding behaviour is intended.
            weightReplicas = int(numOfReplicas * (weight / totalWeight))
        replicas = replicas + weightReplicas
        labelNum += 1
        numOfReplicas -= weightReplicas
        CustomPodScheduleStrategy[key] = replicas
    print("CustomPodScheduleStrategy = {}".format(CustomPodScheduleStrategy))
    return CustomPodScheduleStrategy
__all__ = ["get_node_available_nodes_list"]
def get_node_available_nodes_list(CustomSchedulingData):
    """For every node label in *CustomSchedulingData*, collect per-node
    resource stats (allocatable, requested, limits, free) into the
    module-global ``nodesListPerNodeLabel`` keyed by label then node name.

    Returns ``nodesListPerNodeLabel``.  (Bug fix: the previous version
    ended with ``return data`` although every assignment to ``data`` was
    commented out, which raised NameError at runtime.)
    """
    global nodesListPerNodeLabel
    for nodeLabel in CustomSchedulingData.keys():
        nodesListPerNodeLabel[nodeLabel] = {}
        # Labels are "key=value" strings.
        nodeLabelParts = nodeLabel.split('=')
        nodeLabelKey = nodeLabelParts[0]
        nodeLabelValue = nodeLabelParts[1]
        availableNodesData = {}
        for node in core_api.list_node().to_dict()['items']:
            node_labels = node['metadata']['labels']
            if nodeLabelKey in node_labels.keys():
                if node_labels[nodeLabelKey] == nodeLabelValue:
                    stats = {}
                    node_name = node['metadata']['name']
                    allocatable = node['status']['allocatable']
                    # Listing cap: 1.5x the allocatable pod count is a safe bound.
                    max_pods = int(int(allocatable["pods"]) * 1.5)
                    field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
                                      "spec.nodeName=" + node_name)
                    stats["cpu_alloc"] = Q_(allocatable["cpu"])
                    stats["mem_alloc"] = Q_(allocatable["memory"])
                    pods = core_api.list_pod_for_all_namespaces(limit=max_pods,
                            field_selector=field_selector).to_dict()['items']
                    # Sum resources requested/limited by every pod on this node.
                    cpureqs, cpulmts, memreqs, memlmts = [], [], [], []
                    for pod in pods:
                        for container in pod['spec']['containers']:
                            res = container['resources']
                            reqs = defaultdict(lambda: 0, res['requests'] or {})
                            lmts = defaultdict(lambda: 0, res['limits'] or {})
                            cpureqs.append(Q_(reqs["cpu"]))
                            memreqs.append(Q_(reqs["memory"]))
                            cpulmts.append(Q_(lmts["cpu"]))
                            memlmts.append(Q_(lmts["memory"]))
                    stats["cpu_req"] = sum(cpureqs)
                    stats["cpu_lmt"] = sum(cpulmts)
                    stats["cpu_req_per"] = (stats["cpu_req"] / stats["cpu_alloc"] * 100)
                    stats["cpu_lmt_per"] = (stats["cpu_lmt"] / stats["cpu_alloc"] * 100)
                    stats["mem_req"] = sum(memreqs)
                    stats["mem_lmt"] = sum(memlmts)
                    stats["mem_req_per"] = (stats["mem_req"] / stats["mem_alloc"] * 100)
                    stats["mem_lmt_per"] = (stats["mem_lmt"] / stats["mem_alloc"] * 100)
                    stats["cpu_free"] = stats["cpu_alloc"] - stats["cpu_req"]
                    stats["mem_free"] = stats["mem_alloc"] - stats["mem_req"]
                    availableNodesData[node_name] = stats
        nodesListPerNodeLabel[nodeLabel] = availableNodesData
    return nodesListPerNodeLabel
if __name__ == '__main__':
    # NOTE(review): the many commented-out calls that used to live here were
    # one-off manual test invocations; only the live statements are kept.
    node='ip-10-0-3-253.ec2.internal'
    # Poll loop: run one scheduling pass every 10 seconds.
    while True:
        RunEc2SpotCustomScheduler()
        time.sleep(10)
| 42.330827
| 281
| 0.607948
| 1,769
| 22,520
| 7.608253
| 0.17524
| 0.010699
| 0.007133
| 0.009362
| 0.188127
| 0.16502
| 0.128464
| 0.11613
| 0.065161
| 0.065161
| 0
| 0.007172
| 0.28175
| 22,520
| 531
| 282
| 42.410546
| 0.824915
| 0.203774
| 0
| 0.225
| 0
| 0
| 0.139737
| 0.031832
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.039286
| 0
| 0.096429
| 0.132143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b900fe014c618b5968bd75cca2f986adc96f1a10
| 13,806
|
py
|
Python
|
src/models/nn/adaptive_softmax.py
|
dumpmemory/state-spaces
|
2a85503cb3e9e86cc05753950d4a249df9a0fffb
|
[
"Apache-2.0"
] | 513
|
2021-11-03T23:08:23.000Z
|
2022-03-31T16:29:18.000Z
|
src/models/nn/adaptive_softmax.py
|
dumpmemory/state-spaces
|
2a85503cb3e9e86cc05753950d4a249df9a0fffb
|
[
"Apache-2.0"
] | 18
|
2021-11-05T12:42:59.000Z
|
2022-03-27T19:49:55.000Z
|
src/models/nn/adaptive_softmax.py
|
MikeOwino/state-spaces
|
b6672bca994b6a36347f414faa59761e42b1e2b1
|
[
"Apache-2.0"
] | 47
|
2021-11-04T01:32:54.000Z
|
2022-03-30T18:24:26.000Z
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
    """ParameterList whose repr tolerates None entries by simply skipping them."""

    def extra_repr(self):
        lines = []
        for name, param in self._parameters.items():
            if param is None:
                # None placeholders (e.g. tied projections) carry no state to show.
                continue
            dims = 'x'.join(str(d) for d in param.size())
            gpu_tag = ' (GPU {})'.format(param.get_device()) if param.is_cuda else ''
            desc = 'Parameter containing: [{} of size {}{}]'.format(
                torch.typename(param), dims, gpu_tag)
            lines.append(' (' + str(name) + '): ' + desc)
        return '\n'.join(lines)
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive log-softmax with optional input projections (Transformer-XL style).

    The vocabulary is split at `cutoffs` into a frequent "head" shortlist plus
    tail clusters. With div_val > 1 each successive tail cluster uses a smaller
    embedding width; a tail token's log-probability is its cluster's head
    log-probability plus its in-cluster log-probability.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 tie_projs=None, out_layers_weights=None, out_projs=None,
                 keep_order=False,
                 bias_scale=0.0,
                 dropout=0.0,
                 ):
        """
        n_token: vocabulary size.
        d_embed: embedding width of the (first) output layer.
        d_proj: width of the incoming hidden states; projected when != d_embed.
        cutoffs: ascending vocabulary split points (n_token appended internally).
        div_val: width divisor per tail cluster (1 = single shared output layer).
        tie_projs: None / bool / list — which projections are shared with the
            input embedding (shared ones come from `out_projs`).
        out_layers_weights: optionally reuse externally-owned output weights.
        bias_scale: uniform init range scale for output biases.
        """
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        # cutoffs gains a final n_token entry; cutoff_ends brackets each cluster.
        self.cutoffs = list(cutoffs) + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # Head predicts shortlist tokens plus one pseudo-token per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters
        # [21-09-15 AG]: bake the first False into the definition, just as [0] is built into the cutoffs
        if tie_projs is None: tie_projs = []
        elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
        else: tie_projs = list(tie_projs)
        tie_projs = [False] + tie_projs
        self.tie_projs = tie_projs

        if self.n_clusters > 0:
            # Extra rows/entries appended to the head for the cluster pseudo-tokens.
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        if not out_layers_weights:
            self.out_layers_weights = nn.ParameterList()
        else:
            self.out_layers_weights = out_layers_weights

        self.out_layers_biases = nn.ParameterList()
        self.shared_out_projs = out_projs
        self.out_projs = OptionalParameterList()

        self.dropout = dropout
        self.drop = nn.Dropout(dropout)

        if div_val == 1:
            # One shared output layer over the whole vocab; per-cluster
            # projections only needed when hidden width differs from d_embed.
            if d_proj != d_embed:
                for i in range(len(self.cutoffs)):
                    if tie_projs[i]:
                        self.out_projs.append(None)
                    else:
                        self.out_projs.append(
                            nn.Parameter(torch.zeros(d_proj, d_embed))
                        )
            else:
                # self.out_projs = [None] * len(self.cutoffs)
                self.out_projs.append(None)
            self.out_layers_biases.append(
                nn.Parameter(torch.zeros(n_token))
            )
            if not out_layers_weights:
                self.out_layers_weights.append(
                    nn.Parameter(torch.zeros(n_token, d_embed))
                )
        else:
            # Separate (narrower) output layer per cluster.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = d_embed // (div_val ** i)
                if tie_projs[i]:
                    self.out_projs.append(None)
                else:
                    self.out_projs.append(
                        nn.Parameter(torch.zeros(d_proj, d_emb_i))
                    )
                self.out_layers_biases.append(
                    nn.Parameter(torch.zeros(r_idx - l_idx))
                )
                if not out_layers_weights:
                    self.out_layers_weights.append(
                        nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
                    )

        for bias in self.out_layers_biases:
            bound = bias_scale * d_proj ** -.5
            nn.init.uniform_(bias, -bound, bound)

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Return logits for `hidden`, optionally going through projection `proj`."""
        if proj is None:
            logit = F.linear(hidden, weight, bias=bias)
        else:
            if self.dropout > 0.0:
                # Materialize the projected hidden state so dropout can be applied.
                logit = hidden @ proj
                logit = self.drop(logit)
                logit = logit @ weight.t()
            else:
                # Single fused contraction when no dropout is needed.
                logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            if bias is not None:
                logit = logit + bias
        return logit

    def get_out_proj(self, i):
        """Return projection i: shared (tied) projection when configured, else own."""
        if self.tie_projs[i]:
            if len(self.shared_out_projs) == 0:
                return None
            elif len(self.shared_out_projs) == 1:
                return self.shared_out_projs[0]
            else:
                return self.shared_out_projs[i]
        else:
            return self.out_projs[i]

    def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
        # [21-09-15 AG]: TODO may need to handle key_padding_mask
        '''
        hidden :: [len*bsz x d_proj]
        target :: [len*bsz]

        Returns the mean negative log-likelihood over all positions.
        '''
        hidden = hidden.reshape(-1, hidden.size(-1))
        target = target.reshape(-1)
        if hidden.size(0) != target.size(0):
            print(hidden.shape, target.shape)
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

        if self.n_clusters == 0:
            # No tail clusters: plain full softmax over the vocabulary.
            logit = self._compute_logit(hidden, self.out_layers_weights[0],
                                        self.out_layers_biases[0], self.get_out_proj(0))
            nll = -F.log_softmax(logit, dim=-1) \
                .gather(1, target.unsqueeze(1)).squeeze(1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    # Slice the shared layer to this cluster's token range.
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers_weights[0][l_idx:r_idx]
                    bias_i = self.out_layers_biases[0][l_idx:r_idx]
                else:
                    weight_i = self.out_layers_weights[i]
                    bias_i = self.out_layers_biases[i]

                if i == 0:
                    # Head additionally predicts the cluster pseudo-tokens,
                    # appended as the LAST n_clusters rows/entries.
                    weight_i = torch.cat(
                        [weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat(
                        [bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)

            nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                # Process only the positions whose target falls in cluster i.
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                mask_i = (target >= l_idx) & (target < r_idx)
                indices_i = mask_i.nonzero(as_tuple=False).squeeze()

                if indices_i.numel() == 0:
                    continue

                target_i = target.index_select(0, indices_i) - l_idx
                head_logprob_i = head_logprob.index_select(0, indices_i)

                if i == 0:
                    # Shortlist token: head log-prob is the final log-prob.
                    logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)

                    hidden_i = hidden.index_select(0, indices_i)

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                    # Cluster log-prob is taken by negative indexing from the
                    # end of the head output (cluster columns are last).
                    # NOTE(review): segment i uses column -i, mirroring the
                    # original Transformer-XL code — the per-cluster rows are
                    # learned, so the mapping only needs to be consistent.
                    logprob_i = head_logprob_i[:, -i] \
                        + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)

                if self.keep_order or keep_order:
                    # Write each position's nll back to its original index.
                    nll.index_copy_(0, indices_i, -logprob_i)
                else:
                    # Pack results contiguously (cluster order, not input order).
                    nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
                offset += logprob_i.size(0)

        return nll.mean()  # TODO maybe cases for length or padding_mask
class AdaptiveEmbedding(nn.Module):
    """ Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation

    Initialization has been fixed for the case when d_proj = d_embed

    Tokens below cutoffs[0] get a full-width embedding; each later cluster uses
    a width divided by div_val**i and is projected back up to d_proj.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.cutoffs = list(cutoffs) + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
        # Output is rescaled by sqrt(d_proj) in forward().
        self.emb_scale = d_proj ** 0.5
        self.cutoff_ends = [0] + self.cutoffs

        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            # Single embedding table; sparse grads only when sampled softmax is on.
            self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
            _init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
            # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
            if d_proj != d_embed: # TODO
                # self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
                _init_proj(self.emb_projs[-1], d_proj, init_scale)
        else:
            # One (narrower) embedding table + projection per cluster.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val ** i)
                self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
                # torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
                _init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
                self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                # torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
                _init_proj(self.emb_projs[-1], d_proj, init_scale)

    def forward(self, inp, *args, **kwargs):
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            embed = self.drop(embed)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            param = next(self.parameters())
            inp_flat = inp.view(-1)

            # Changes
            # emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
            # Instead of scattering rows into a preallocated buffer (index_copy_,
            # which was the fp16-problematic path), collect per-cluster embeddings
            # and remember, per input position, which concatenated row is its own.
            embeddings = []
            indices = torch.zeros_like(inp_flat)  # empty should work as long as cutoffs[-1] > max token
            _total_tokens = 0

            # emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]

                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
                _tokens = indices_i.numel()
                if _tokens == 0:
                    continue

                inp_i = inp_flat.index_select(0, indices_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = self.drop(emb_i)
                emb_i = F.linear(emb_i, self.emb_projs[i])

                # Changes
                embeddings.append(emb_i)
                # Record where each of this cluster's positions will land in the
                # concatenated `embeddings` tensor.
                indices.index_put_(
                    (indices_i,),
                    torch.arange(_tokens, device=inp.device) + _total_tokens
                )
                _total_tokens += _tokens

                # emb_flat.index_copy_(0, indices_i, emb_i)

            embeddings = torch.cat(embeddings, dim=0)
            # Gather back into input order via the recorded row indices.
            emb_flat = embeddings[indices]

            embed_shape = inp.size() + (self.d_proj,)
            embed = emb_flat.view(embed_shape)

        embed.mul_(self.emb_scale)
        # embed.div_(self.emb_scale)

        return embed
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
# Fallback stds used when no init_scale is supplied: embeddings default to
# std=0.02 and projections to std=0.01 (see _init_weight above).
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
### Just for this codebase, we need to squeeze the last dimension because inputs are always given as (B, L, D) instead of (B, L)
import src.models.nn.utils as U
# AdaptiveEmbedding = U.Squeeze(AdaptiveEmbedding)
| 39.786744
| 132
| 0.563378
| 1,817
| 13,806
| 4.033572
| 0.156852
| 0.022923
| 0.024833
| 0.027016
| 0.392823
| 0.328012
| 0.266749
| 0.219129
| 0.196753
| 0.165916
| 0
| 0.014843
| 0.33145
| 13,806
| 346
| 133
| 39.901734
| 0.779198
| 0.143633
| 0
| 0.27572
| 0
| 0
| 0.011506
| 0
| 0
| 0
| 0
| 0.008671
| 0.004115
| 1
| 0.032922
| false
| 0
| 0.024691
| 0
| 0.102881
| 0.004115
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9014ad1cdd3760612e00e54f9b058e7af94d104
| 11,770
|
py
|
Python
|
the_el/cli.py
|
CityOfPhiladelphia/the-el
|
e3a97afc55d41f2e5fd76cef60ad9393dfa23547
|
[
"MIT"
] | 11
|
2017-04-19T18:44:51.000Z
|
2022-03-07T22:36:47.000Z
|
the_el/cli.py
|
CityOfPhiladelphia/the-el
|
e3a97afc55d41f2e5fd76cef60ad9393dfa23547
|
[
"MIT"
] | 9
|
2017-04-19T18:43:13.000Z
|
2017-12-08T16:42:38.000Z
|
the_el/cli.py
|
CityOfPhiladelphia/the-el
|
e3a97afc55d41f2e5fd76cef60ad9393dfa23547
|
[
"MIT"
] | 3
|
2017-12-08T15:09:03.000Z
|
2018-08-14T02:42:01.000Z
|
import json
import csv
import sys
import os
import re
import codecs
import logging
from logging.config import dictConfig
import click
import yaml
from sqlalchemy import create_engine
from jsontableschema_sql import Storage
from smart_open import smart_open
from . import postgres
from . import carto
csv.field_size_limit(sys.maxsize)
def get_logger(logging_config):
    """Build the 'the_el' logger from a YAML dictConfig file.

    Falls back to a basic stderr configuration when the file is missing or
    invalid, and installs sys.excepthook so uncaught exceptions are logged.

    Fixes: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    `yaml.load` without a Loader is unsafe (arbitrary object construction)
    and deprecated — a logging config only needs plain mappings, so
    `yaml.safe_load` is the right call.
    """
    try:
        with open(logging_config) as file:
            config = yaml.safe_load(file)
        dictConfig(config)
    except Exception:
        # Bad or missing config: fall back to a sane stderr format.
        FORMAT = '[%(asctime)-15s] %(levelname)s [%(name)s] %(message)s'
        logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stderr)

    logger = logging.getLogger('the_el')

    def exception_handler(type, value, tb):
        logger.exception("Uncaught exception: {}".format(str(value)), exc_info=(type, value, tb))

    sys.excepthook = exception_handler

    return logger
@click.group()
def main():
    # Root click command group; subcommands attach themselves via @main.command().
    pass
def get_connection_string(connection_string):
    """Resolve the DB connection string, letting CONNECTION_STRING env override.

    Raises:
        Exception: when neither the env var nor the argument is provided.

    Fixes: `== None` comparison replaced with the idiomatic identity check.
    """
    connection_string = os.getenv('CONNECTION_STRING', connection_string)
    if connection_string is None:
        raise Exception('`CONNECTION_STRING` environment variable or `--connection-string` option required')
    return connection_string
def create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=None, to_srid=None):
    # Build a SQLAlchemy engine plus a jsontableschema-sql Storage bound to it.
    # views=True lets the storage adaptor see database views as well as tables.
    engine = create_engine(connection_string)
    storage = Storage(engine, dbschema=db_schema, geometry_support=geometry_support, from_srid=from_srid, to_srid=to_srid, views=True)
    return engine, storage
def fopen(file, mode='r'):
    """Open `file` via smart_open; a None path means stdin ('r') / stdout ('w').

    NOTE: when file is None and mode is neither 'r' nor 'w', None is returned
    (now explicit; the original fell off the end of the function).

    Fixes: `== None` comparison replaced with the idiomatic identity check.
    """
    if file is None:
        if mode == 'r':
            return sys.stdin
        if mode == 'w':
            return sys.stdout
        return None
    return smart_open(file, mode=mode)
def get_table_schema(table_schema_path):
    """Load a JSON table schema from a path (or stdin when the path is None)."""
    with fopen(table_schema_path) as schema_file:
        raw = schema_file.read()
        # smart_open may hand back bytes depending on the source; normalize.
        if not isinstance(raw, str):
            raw = raw.decode('utf-8')
        return json.loads(raw)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
def describe_table(table_name, connection_string, output_file, db_schema, geometry_support):
    # Write the JSON table-schema descriptor of an existing table to
    # output_file (or stdout when no output file is given).
    connection_string = get_connection_string(connection_string)
    engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
    descriptor = storage.describe(table_name)

    with fopen(output_file, mode='w') as file:
        json.dump(descriptor, file)
@main.command()
@click.argument('table_name')
@click.argument('table_schema_path')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--indexes-fields')
@click.option('--geometry-support')
@click.option('--if-not-exists', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def create_table(table_name,
                 table_schema_path,
                 connection_string,
                 db_schema,
                 indexes_fields,
                 geometry_support,
                 if_not_exists,
                 logging_config):
    # Create a table from a JSON table schema — via the Carto API when the
    # connection string matches the Carto pattern, otherwise via SQLAlchemy.
    logger = get_logger(logging_config)
    table_schema = get_table_schema(table_schema_path)

    # indexes-fields arrives as one comma-separated CLI string.
    if indexes_fields != None:
        indexes_fields = indexes_fields.split(',')

    if re.match(carto.carto_connection_string_regex, connection_string) != None:
        load_postgis = geometry_support == 'postgis'
        logger.info('{} - Creating table using Carto'.format(table_name))
        return carto.create_table(logger, table_name, load_postgis, table_schema, if_not_exists, indexes_fields, connection_string)

    connection_string = get_connection_string(connection_string)
    engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support)
    logger.info('{} - Creating table using SQLAlchemy'.format(table_name))
    storage.create(table_name, table_schema, indexes_fields=indexes_fields)
@main.command()
@click.argument('table_name')
@click.option('--table-schema-path')
@click.option('--connection-string')
@click.option('-f','--input-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--skip-headers', is_flag=True)
@click.option('--indexes-fields')
@click.option('--upsert', is_flag=True)
@click.option('--truncate/--no-truncate', is_flag=True, default=False)
@click.option('--logging-config', default='logging_config.conf')
def write(table_name,
          table_schema_path,
          connection_string,
          input_file,
          db_schema,
          geometry_support,
          from_srid,
          skip_headers,
          indexes_fields,
          upsert,
          truncate,
          logging_config):
    # Stream CSV rows from input_file (or stdin) into the target table, routed
    # to Carto or to SQLAlchemy/psycopg2 depending on the connection string.
    logger = get_logger(logging_config)
    table_schema = get_table_schema(table_schema_path)

    ## TODO: csv settings? use Frictionless Data csv standard?
    ## TODO: support line delimited json?
    with fopen(input_file) as file:
        rows = csv.reader(file)
        if skip_headers:
            # Consume the header row so it is not loaded as data.
            next(rows)

        if re.match(carto.carto_connection_string_regex, connection_string) != None:
            load_postgis = geometry_support == 'postgis'
            if indexes_fields != None:
                indexes_fields = indexes_fields.split(',')
            logger.info('{} - Writing to table using Carto'.format(table_name))
            carto.load(logger,
                       db_schema,
                       table_name,
                       load_postgis,
                       table_schema,
                       connection_string,
                       rows,
                       indexes_fields,
                       truncate)
        else:
            connection_string = get_connection_string(connection_string)
            engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid)

            ## TODO: truncate? carto does. Makes this idempotent
            logger.info('{} - Writing to table using SQLAlchemy'.format(table_name))
            # Register the schema with the storage adaptor before writing.
            if table_schema_path != None:
                table_schema = get_table_schema(table_schema_path)
                storage.describe(table_name, descriptor=table_schema)
            else:
                storage.describe(table_name)

            if upsert:
                postgres.upsert(engine, db_schema, table_name, table_schema, rows)
            elif geometry_support == None and engine.dialect.driver == 'psycopg2':
                # Fast path: postgres COPY when no geometry handling is needed.
                postgres.copy_from(engine, table_name, table_schema, rows)
            else:
                storage.write(table_name, rows)
@main.command()
@click.argument('table_name')
@click.option('--connection-string')
@click.option('-o','--output-file')
@click.option('--db-schema')
@click.option('--geometry-support')
@click.option('--from-srid')
@click.option('--to-srid')
@click.option('--logging-config', default='logging_config.conf')
def read(table_name, connection_string, output_file, db_schema, geometry_support, from_srid, to_srid, logging_config):
    # Dump a table to CSV on output_file (or stdout). Uses the fast psycopg2
    # COPY path when possible, otherwise iterates rows via the storage adaptor.
    logger = get_logger(logging_config)
    connection_string = get_connection_string(connection_string)
    engine, storage = create_storage_adaptor(connection_string, db_schema, geometry_support, from_srid=from_srid, to_srid=to_srid)

    ## TODO: csv settings? use Frictionless Data csv standard?
    ## TODO: support line delimited json?
    with fopen(output_file, mode='w') as file:
        writer = csv.writer(file)
        descriptor = storage.describe(table_name)
        fields = map(lambda x: x['name'], descriptor['fields'])
        writer.writerow(fields)
        if geometry_support == None and engine.dialect.driver == 'psycopg2':
            postgres.copy_to(engine, table_name, file)
        else:
            for row in storage.iter(table_name):
                row_out = []
                # JSON-encode nested structures so they fit in a single CSV cell.
                for field in row:
                    if isinstance(field, dict) or isinstance(field, list):
                        field = json.dumps(field)
                    row_out.append(field)
                writer.writerow(row_out)
@main.command()
@click.argument('new_table_name')
@click.argument('old_table_name')
@click.option('--connection-string')
@click.option('--db-schema')
@click.option('--select-users', help='Users to grant SELECT on updated table')
@click.option('--logging-config', default='logging_config.conf')
def swap_table(new_table_name, old_table_name, connection_string, db_schema, select_users, logging_config):
    # Replace old_table_name with new_table_name (rename-rename-drop).
    # Supports Carto, psycopg2 (transactional DDL) and cx_Oracle (manual
    # rollback, since Oracle DDL cannot run inside a transaction).
    logger = get_logger(logging_config)

    if re.match(carto.carto_connection_string_regex, connection_string) != None:
        if select_users != None:
            select_users = select_users.split(',')
        else:
            select_users = []
        logger.info('Swapping tables using Carto: {} - {}'.format(new_table_name, old_table_name))
        return carto.swap_table(logger, db_schema, new_table_name, old_table_name, select_users, connection_string)

    connection_string = get_connection_string(connection_string)
    engine = create_engine(connection_string)

    if engine.dialect.driver == 'psycopg2':
        # Postgres DDL is transactional: all three statements commit or roll
        # back together.
        logger.info('Swapping tables using psycopg2: {} - {}'.format(new_table_name, old_table_name))
        conn = engine.raw_connection()
        try:
            with conn.cursor() as cur:
                sql = 'ALTER TABLE "{}" RENAME TO "{}_old";'.format(old_table_name, old_table_name) +\
                      'ALTER TABLE "{}" RENAME TO "{}";'.format(new_table_name, old_table_name) +\
                      'DROP TABLE "{}_old";'.format(old_table_name)
                cur.execute(sql)
            conn.commit()
        except:
            conn.rollback()
            raise
        conn.close()
    elif engine.dialect.driver == 'cx_oracle':
        logger.info('Swapping tables using cx_Oracle: {} - {}'.format(new_table_name, old_table_name))
        conn = engine.connect()

        if select_users != None:
            select_users = select_users.split(',')
        else:
            select_users = []

        grants_sql = []
        for user in select_users:
            grants_sql.append('GRANT SELECT ON {} TO {}'.format(old_table_name, user.strip()))

        # Oracle does not allow table modification within a transaction, so make individual transactions:
        sql1 = 'ALTER TABLE {} RENAME TO {}_old'.format(old_table_name, old_table_name)
        sql2 = 'ALTER TABLE {} RENAME TO {}'.format(new_table_name, old_table_name)
        sql3 = 'DROP TABLE {}_old'.format(old_table_name)

        try:
            conn.execute(sql1)
        except:
            logger.error("Could not rename {} table. Does it exist?".format(old_table_name))
            raise
        try:
            conn.execute(sql2)
        except:
            # Undo the first rename so the database is left as we found it.
            logger.error("Could not rename {} table. Does it exist?".format(new_table_name))
            rb_sql = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
            conn.execute(rb_sql)
            raise
        try:
            conn.execute(sql3)
        except:
            # Undo both renames: drop the freshly-swapped-in table, restore _old.
            logger.error("Could not drop {}_old table. Do you have permission?".format(old_table_name))
            rb_sql1 = 'DROP TABLE {}'.format(old_table_name)
            conn.execute(rb_sql1)
            rb_sql2 = 'ALTER TABLE {}_old RENAME TO {}'.format(old_table_name, old_table_name)
            conn.execute(rb_sql2)
            raise
        try:
            for sql in grants_sql:
                conn.execute(sql)
        except:
            logger.error("Could not grant all permissions to {}.".format(old_table_name))
            raise
    else:
        raise Exception('`{}` not supported by swap_table'.format(engine.dialect.driver))
| 37.603834
| 134
| 0.651572
| 1,401
| 11,770
| 5.232691
| 0.157744
| 0.068749
| 0.037648
| 0.025508
| 0.601009
| 0.53567
| 0.478789
| 0.433365
| 0.412495
| 0.364752
| 0
| 0.001888
| 0.235004
| 11,770
| 312
| 135
| 37.724359
| 0.812306
| 0.027698
| 0
| 0.418605
| 0
| 0
| 0.144769
| 0.003936
| 0
| 0
| 0
| 0.003205
| 0
| 1
| 0.046512
| false
| 0.003876
| 0.05814
| 0
| 0.139535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b90258212d799fd07af2bd908c88516410b648a2
| 6,182
|
py
|
Python
|
examples/asr/experimental/speech_to_text_sclite.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | 2
|
2021-06-23T19:16:59.000Z
|
2022-02-23T18:49:07.000Z
|
examples/asr/experimental/speech_to_text_sclite.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
examples/asr/experimental/speech_to_text_sclite.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | 12
|
2021-06-20T08:56:10.000Z
|
2022-03-16T19:07:10.000Z
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is based on speech_to_text_infer.py and allows you to score the hypotheses
with sclite. A local installation from https://github.com/usnistgov/SCTK is required.
Hypotheses and references are first saved in trn format and are scored after applying a glm
file (if provided).
"""
import errno
import json
import os
import subprocess
from argparse import ArgumentParser
import torch
from nemo.collections.asr.metrics.wer import WER
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
try:
    from torch.cuda.amp import autocast
except ImportError:
    # Older torch without AMP: provide a no-op stand-in so the
    # `with autocast():` blocks below still work.
    from contextlib import contextmanager

    @contextmanager
    def autocast(enabled=None):
        yield
def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=""):
sclite_path = os.path.join(sctk_dir, "bin", "sclite")
if not os.path.exists(sclite_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path)
# apply glm
if os.path.exists(glm):
rfilter_path = os.path.join(sctk_dir, "bin", "rfilter1")
if not os.path.exists(rfilter_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path)
hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + ".glm"
rfilt_cmd = [rfilter_path] + [glm]
with open(hypglm, "w") as hypf, open(hyp_fname, "r") as hyp_in:
subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf)
refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + ".glm"
with open(refglm, "w") as reff, open(ref_fname, "r") as ref_in:
subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff)
else:
refglm = ref_fname
hypglm = hyp_fname
_ = subprocess.check_output(f"{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all", shell=True)
# Checked once at import time; used below to move the model and batches to GPU.
can_gpu = torch.cuda.is_available()
def get_utt_info(manifest_path):
    """Parse a manifest (one JSON object per line) into a list of dicts."""
    with open(manifest_path, "r") as manifest_file:
        return [json.loads(line) for line in manifest_file]
def main():
    # Transcribe the dataset with an ASR model, write hyp/ref .trn files to
    # out_dir, and (optionally) score them with a local SCTK installation.
    parser = ArgumentParser()
    parser.add_argument(
        "--asr_model", type=str, default="QuartzNet15x5Base-En", required=False, help="Pass: 'QuartzNet15x5Base-En'",
    )
    parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
    parser.add_argument("--batch_size", type=int, default=4)
    parser.add_argument(
        "--dont_normalize_text",
        default=False,
        action='store_true',
        help="Turn off trasnscript normalization. Recommended for non-English.",
    )
    parser.add_argument("--out_dir", type=str, required=True, help="Destination dir for output files")
    parser.add_argument("--sctk_dir", type=str, required=False, default="", help="Path to sctk root dir")
    parser.add_argument("--glm", type=str, required=False, default="", help="Path to glm file")
    args = parser.parse_args()
    # Inference only: no gradients needed anywhere below.
    torch.set_grad_enabled(False)

    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)

    # sclite scoring is skipped entirely when no sctk dir is present.
    use_sctk = os.path.exists(args.sctk_dir)

    if args.asr_model.endswith('.nemo'):
        logging.info(f"Using local ASR model from {args.asr_model}")
        asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model)
    else:
        logging.info(f"Using NGC cloud ASR model {args.asr_model}")
        asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model)

    asr_model.setup_test_data(
        test_data_config={
            'sample_rate': 16000,
            'manifest_filepath': args.dataset,
            'labels': asr_model.decoder.vocabulary,
            'batch_size': args.batch_size,
            'normalize_transcripts': not args.dont_normalize_text,
        }
    )
    if can_gpu:
        asr_model = asr_model.cuda()
    asr_model.eval()
    # Map vocabulary index -> character for decoding the reference targets.
    labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))])
    wer = WER(vocabulary=asr_model.decoder.vocabulary)
    hypotheses = []
    references = []
    all_log_probs = []
    for test_batch in asr_model.test_dataloader():
        if can_gpu:
            test_batch = [x.cuda() for x in test_batch]
        with autocast():
            log_probs, encoded_len, greedy_predictions = asr_model(
                input_signal=test_batch[0], input_signal_length=test_batch[1]
            )
        for r in log_probs.cpu().numpy():
            all_log_probs.append(r)
        hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions)
        # Rebuild the reference transcript from the target token ids.
        for batch_ind in range(greedy_predictions.shape[0]):
            reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()])
            references.append(reference)
        del test_batch

    # Manifest order matches dataloader order, so utterance i lines up with
    # hypotheses[i] / references[i].
    info_list = get_utt_info(args.dataset)
    hypfile = os.path.join(args.out_dir, "hyp.trn")
    reffile = os.path.join(args.out_dir, "ref.trn")
    with open(hypfile, "w") as hyp_f, open(reffile, "w") as ref_f:
        for i in range(len(hypotheses)):
            utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0]
            # rfilter in sctk likes each transcript to have a space at the beginning
            hyp_f.write(" " + hypotheses[i] + " (" + utt_id + ")" + "\n")
            ref_f.write(" " + references[i] + " (" + utt_id + ")" + "\n")

    if use_sctk:
        score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm)
# Script entry point.
if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| 38.880503
| 117
| 0.674054
| 868
| 6,182
| 4.619816
| 0.319124
| 0.0399
| 0.029676
| 0.01596
| 0.151372
| 0.115212
| 0.074314
| 0.062344
| 0.028928
| 0
| 0
| 0.005318
| 0.209156
| 6,182
| 158
| 118
| 39.126582
| 0.814891
| 0.160789
| 0
| 0.053097
| 0
| 0
| 0.112488
| 0.012391
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035398
| false
| 0.00885
| 0.106195
| 0
| 0.150442
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9034036dd7c92efb32754807bdeb44d6dc9be42
| 1,335
|
py
|
Python
|
accalib/utils.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | null | null | null |
accalib/utils.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | 3
|
2020-04-16T09:24:48.000Z
|
2021-03-27T19:27:48.000Z
|
accalib/utils.py
|
pj0620/acca-video-series
|
1b09548014cc899ded5a8fdd1293f7fc121a98bc
|
[
"MIT"
] | 1
|
2020-09-01T05:32:04.000Z
|
2020-09-01T05:32:04.000Z
|
from manimlib.imports import *
from manimlib.utils import bezier
import numpy as np
class VectorInterpolator:
    """Piecewise-linear interpolation along a closed polyline of points.

    The cumulative-distance table includes the wrap-around segment from the
    last point back to the first, so alpha in [0, 1] walks the full loop.
    """

    def __init__(self, points):
        """points: sequence of same-shaped numpy vectors forming the loop."""
        self.points = points
        self.n = len(self.points)
        # dists[i] = arc length from points[0] to vertex i; dists[n] is the
        # total perimeter (closing segment included).
        self.dists = [0]
        for i in range(len(self.points)):
            self.dists += [np.linalg.norm(
                self.points[i] -
                self.points[(i+1) % self.n]
            )+self.dists[i]]

    def interpolate(self, alpha):
        """Return the point at fraction `alpha` of the total perimeter."""
        dist = alpha*self.dists[-1]
        idx = self.interpolate_index(dist)
        mult = (dist - self.dists[idx])/np.linalg.norm(self.points[(idx+1)%self.n]-self.points[idx])
        return self.points[idx] + \
            mult*(self.points[(idx+1)%self.n]-self.points[idx])

    def interpolate_index(self, dist):
        """Find idx with dists[idx] <= dist <= dists[idx+1] via binary search."""
        def is_solution(idx):
            if idx == self.n-1:
                return self.dists[idx] <= dist
            # BUG FIX: the original tested the closure variable `cur` here
            # instead of the `idx` argument; it only worked because the sole
            # caller always passed `cur`. Now the predicate uses its argument.
            return ((self.dists[idx] <= dist) and
                    (self.dists[(idx+1) % self.n] >= dist))

        # binary search (step halves each iteration, never below 1)
        step_size = int(self.n / 4)
        cur = int(self.n / 2)
        while not is_solution(cur):
            if self.dists[cur] > dist:
                cur -= step_size
            else:
                cur += step_size
            step_size = max(int(step_size/2), 1)
        return cur
| 31.785714
| 100
| 0.526592
| 172
| 1,335
| 4.011628
| 0.284884
| 0.15942
| 0.094203
| 0.043478
| 0.205797
| 0.092754
| 0.092754
| 0.092754
| 0.092754
| 0
| 0
| 0.012486
| 0.340075
| 1,335
| 42
| 101
| 31.785714
| 0.770715
| 0.009738
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.085714
| 0
| 0.342857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9044d615f386c353b51176e0cfb09ae8fe5c1b6
| 5,834
|
py
|
Python
|
dodo.py
|
enerqi/bridge-bidding-systems
|
30ea2bf6f8bc0b786df4de8571063509d971236f
|
[
"MIT"
] | 2
|
2020-05-24T17:30:55.000Z
|
2020-11-22T15:27:56.000Z
|
dodo.py
|
enerqi/bridge-bidding-systems
|
30ea2bf6f8bc0b786df4de8571063509d971236f
|
[
"MIT"
] | null | null | null |
dodo.py
|
enerqi/bridge-bidding-systems
|
30ea2bf6f8bc0b786df4de8571063509d971236f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/doit -f
# https://pydoit.org
# `pip install [--user] doit` adds `doit.exe` to the PATH
# - Note `doit auto`, the file watcher only works on Linux/Mac
# - All commands are relative to dodo.py (doit runs in the working dir of dodo.py
# even if ran from a different directory `doit -f path/to/dodo.py`)
from glob import glob
import json
from os import environ
from os.path import abspath, basename, dirname, exists, expanduser, join, splitext
from shutil import copyfile
from typing import Iterator, List, NewType, Optional
from doit.tools import title_with_actions
# Lightweight alias so signatures distinguish filesystem paths from plain strings.
Path = NewType("Path", str)

home = Path(expanduser("~"))
# Location of the bml tooling; override with the BML_TOOLS_DIRECTORY env var.
bml_tools_dir = Path(environ.get("BML_TOOLS_DIRECTORY", join(home, "dev/bml")))
# On-disk cache mapping each .bml file to the files it #INCLUDEs (see tasks below).
bml_includes_cache_file = ".include-deps.json"
def bml_include_dependencies(bml_path: Path) -> List[Path]:
    """Return the unique files #INCLUDEd by `bml_path` (excluding itself)."""
    # Include directives can appear anywhere, so every save triggers a rescan
    # of the whole file for #INCLUDE lines.
    def scan(handle) -> Iterator[Path]:
        for raw_line in handle.readlines():
            stripped = raw_line.strip()
            if not stripped.startswith("#INCLUDE"):
                continue
            tokens = stripped.split(maxsplit=1)
            if len(tokens) > 1:
                # The file name is an unquoted, free-form path string.
                yield Path(tokens[1].strip())

    with open(bml_path, encoding='utf-8') as f:
        deps = {dep for dep in scan(f) if dep != bml_path}
    return list(deps)
def read_bml_includes_cache(bml_path: Path) -> Optional[List[Path]]:
    """Return cached include deps for `bml_path`, or None when unknown."""
    if not exists(bml_includes_cache_file):
        return None

    with open(bml_includes_cache_file, encoding='utf-8') as f:
        try:
            cached = json.load(f)
        except Exception:
            # Manually edited / corrupted JSON: treat as a cache miss.
            return None

    # A missing key likely means manual edits (assuming task ordering is right).
    return cached.get(bml_path)
def update_bml_includes_cache(bml_path: Path, bml_deps: List[Path]):
    """Write *bml_deps* for *bml_path* into the shared include cache file.

    Existing entries for other files are preserved; a corrupt cache file is
    silently treated as empty and rewritten.
    """
    cache = {}
    if exists(bml_includes_cache_file):
        with open(bml_includes_cache_file, encoding='utf-8') as handle:
            try:
                cache = json.load(handle)
            except Exception:
                pass  # unreadable cache: start over from scratch
    cache[bml_path] = bml_deps
    with open(bml_includes_cache_file, "w", encoding='utf-8') as handle:
        json.dump(cache, handle, indent=4)
def task_bml_include_cache():
    """Populate the bml include cache."""
    bml_paths = glob("*.bml")

    def refresh_cache(file_dep) -> None:
        # Recompute this file's include list and persist it.
        path = Path(file_dep)
        update_bml_includes_cache(path, bml_include_dependencies(path))

    for bml_path in bml_paths:
        # No 'targets' entry on purpose: doit cannot express several input
        # files all feeding one shared output, and we keep a single cache
        # file rather than one cache per input. Consequently the ordering of
        # tasks in this dodo file matters — this task must update the cache
        # before the html task reads it to compute changed file deps. The
        # html task also must not declare the cache file as a file_dep, since
        # unrelated bml edits rewrite it. (doit's custom "uptodate" checks
        # could model this more precisely if it ever becomes worth it.)
        yield {
            'name': basename(bml_path),
            'actions': [(refresh_cache, [bml_path])],
            'file_dep': [bml_path],
            'title': title_with_actions
        }
def task_bml2html():
    """Create html file from bridge bidding markup language file."""
    converter = Path(join(bml_tools_dir, "bml2html.py"))

    def to_html_path(bml_path: Path) -> Path:
        # foo.bml -> foo.html
        root, _ = splitext(bml_path)
        return Path(root + ".html")

    for bml_path in glob("*.bml"):
        # Prefer the cached include list; fall back to a fresh scan (and
        # prime the cache) when the cache has no answer.
        deps = read_bml_includes_cache(bml_path)
        if deps is None:
            deps = bml_include_dependencies(bml_path)
            update_bml_includes_cache(bml_path, deps)
        yield {
            'name': basename(bml_path),
            'actions': [f"python {converter} {bml_path}"],
            'file_dep': [bml_path] + deps,
            'targets': [to_html_path(bml_path)],
            'title': title_with_actions
        }
def task_bmlcss():
    """Copy the bml CSS style sheet to this directory."""
    css_basename = "bml.css"
    source_css = Path(join(bml_tools_dir, css_basename))

    def fetch_css() -> None:
        # copyfile is OS neutral, unlike shelling out to cp/copy.
        copyfile(source_css, css_basename)

    return {
        'actions': [fetch_css],
        'file_dep': [source_css],
        'targets': [css_basename],
        'title': title_with_actions
    }
def task_publish_main_bidding():
    """Copy the main bidding html and css document to the web server root."""
    def copy_file(dependencies, targets) -> None:
        # doit injects 'dependencies' and 'targets' by parameter name.
        copyfile(dependencies[0], targets[0])

    for src in ("bidding-system.html", "bml.css"):
        dst = f"W:/{src}"
        yield {
            'name': basename(src),
            'actions': [copy_file],
            'file_dep': [src],
            'targets': [dst],
            'title': title_with_actions
        }
| 37.159236
| 118
| 0.652383
| 816
| 5,834
| 4.449755
| 0.275735
| 0.048196
| 0.048472
| 0.033049
| 0.279262
| 0.215918
| 0.134949
| 0.120077
| 0.084825
| 0.084825
| 0
| 0.00345
| 0.254714
| 5,834
| 156
| 119
| 37.397436
| 0.831647
| 0.263456
| 0
| 0.294118
| 0
| 0
| 0.072586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0.009804
| 0.068627
| 0.009804
| 0.254902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9058a9a6aeb7e495abc710b44e918cfdd30a156
| 1,288
|
py
|
Python
|
plugins/crumbling_in.py
|
jimconner/digital_sky
|
9427cd19dbd9fb1c82ca12fa8f962532d700c67f
|
[
"MIT"
] | 2
|
2019-03-04T20:38:44.000Z
|
2019-03-15T22:34:25.000Z
|
plugins/crumbling_in.py
|
jimconner/digital_sky
|
9427cd19dbd9fb1c82ca12fa8f962532d700c67f
|
[
"MIT"
] | null | null | null |
plugins/crumbling_in.py
|
jimconner/digital_sky
|
9427cd19dbd9fb1c82ca12fa8f962532d700c67f
|
[
"MIT"
] | null | null | null |
# Crumbling In
# Like randomised coloured dots and then they
# increase on both sides getting closer and closer into the middle.
import sys, traceback, random
from numpy import array,full
class animation():
    """Crumbling In: random coloured dots fill the strip from both ends
    toward the middle, then black sweeps back out, and the cycle repeats.
    """

    def __init__(self, datastore):
        self.max_led = datastore.LED_COUNT
        self.pos = 0          # current distance from each end of the strip
        self.direction = 0    # 0 = filling inwards, 1 = clearing outwards
        # RGBW palette to pick random dot colours from.
        self.cols = [
            [255, 0, 0, 0],
            [0, 255, 0, 0],
            [0, 0, 255, 0],
            [0, 0, 0, 255],
            [255, 255, 0, 0],
            [255, 0, 255, 0],
            [0, 255, 255, 0],
            [0, 0, 255, 64],
        ]
        self.row = full((self.max_led, 4), 0)

    def emit_row(self):
        """Advance one animation step and return the full LED row."""
        try:
            # Flip direction at the midpoint (start clearing) and at the
            # ends (start filling again).
            if self.pos >= self.max_led / 2:
                self.direction = 1
            if self.pos <= 0:
                self.direction = 0
            # randint is always consumed so the RNG stream stays identical
            # whether we are filling or clearing.
            colour = self.cols[random.randint(0, 7)]
            if self.direction == 1:
                colour = [0, 0, 0, 0]
            # Mirror the write onto both ends of the strip.
            self.row[self.pos] = colour
            self.row[(self.max_led - 1) - self.pos] = colour
            self.pos += 1 if self.direction == 0 else -1
            return self.row
        except Exception as err:
            print(err)
            traceback.print_exc(file=sys.stdout)
| 28.622222
| 67
| 0.470497
| 167
| 1,288
| 3.562874
| 0.365269
| 0.053782
| 0.045378
| 0.040336
| 0.114286
| 0.114286
| 0.040336
| 0.040336
| 0.040336
| 0.040336
| 0
| 0.096354
| 0.403727
| 1,288
| 44
| 68
| 29.272727
| 0.678385
| 0.094721
| 0
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.162162
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b906c6820493a72163f757fe7ce4006f0287b820
| 821
|
py
|
Python
|
code/7/collections/namedtupe_example.py
|
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
|
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
|
[
"MIT"
] | 65
|
2017-11-01T01:57:21.000Z
|
2022-02-08T13:36:25.000Z
|
code/7/collections/namedtupe_example.py
|
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
|
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
|
[
"MIT"
] | 9
|
2017-11-03T15:05:30.000Z
|
2018-05-17T03:18:36.000Z
|
code/7/collections/namedtupe_example.py
|
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
|
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
|
[
"MIT"
] | 64
|
2017-11-01T01:57:23.000Z
|
2022-01-19T03:52:12.000Z
|
from collections import namedtuple

# --- Basic namedtuple example: positional, keyword, unpacking, attribute access.
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0] + p[1])
x, y = p
print(x, y)
print(p.x + p.y)
print(Point(x=11, y=22))

from collections import namedtuple
import csv

# --- CSV example: load user rows, then wrap each row in a Student namedtuple.
# Fix: the original opened users.csv without ever closing the handle; a
# context manager guarantees the file is closed.
student_list = []
with open("users.csv", "r") as f:
    next(f)  # skip the header row
    reader = csv.reader(f)
    for row in reader:
        student_list.append(row)
        print(row)
print(student_list)

columns = ["user_id", "integration_id", "login_id", "password", "first_name",
           "last_name", "full_name", "sortable_name", "short_name",
           "email", "status"]
Student = namedtuple('Student', columns)
student_namedtupe_list = []
for row in student_list:
    student = Student(*row)
    student_namedtupe_list.append(student)
print(student_namedtupe_list[0])
print(student_namedtupe_list[0].full_name)
| 24.147059
| 77
| 0.685749
| 122
| 821
| 4.442623
| 0.352459
| 0.081181
| 0.147601
| 0.114391
| 0.095941
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017316
| 0.155907
| 821
| 33
| 78
| 24.878788
| 0.764791
| 0.015834
| 0
| 0.071429
| 0
| 0
| 0.152605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.035714
| 0.107143
| 0
| 0.107143
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9078d0e4d15cf11492a86d93eb5a61b04a92b6f
| 1,439
|
py
|
Python
|
test/helper_tools/benchtool.py
|
dotnes/mitmproxy
|
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
|
[
"MIT"
] | 4
|
2018-03-14T03:47:22.000Z
|
2018-06-28T08:00:39.000Z
|
test/helper_tools/benchtool.py
|
dotnes/mitmproxy
|
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
|
[
"MIT"
] | 1
|
2021-05-09T11:18:14.000Z
|
2021-05-09T11:18:14.000Z
|
test/helper_tools/benchtool.py
|
dotnes/mitmproxy
|
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
|
[
"MIT"
] | 1
|
2018-04-22T15:43:46.000Z
|
2018-04-22T15:43:46.000Z
|
# Profile mitmdump with apachebench and
# yappi (https://code.google.com/p/yappi/)
#
# Requirements:
# - Apache Bench "ab" binary
# - pip install click yappi
from mitmproxy.main import mitmdump
from os import system
from threading import Thread
import time
import yappi
import click
class ApacheBenchThread(Thread):
    """Background thread that fires apachebench at the local proxy."""

    def __init__(self, concurrency):
        # Number of concurrent ab connections to use.
        self.concurrency = concurrency
        super().__init__()

    def run(self):
        # Give mitmdump a moment to start listening before benchmarking.
        time.sleep(2)
        command = "ab -n 1024 -c {} -X 127.0.0.1:8080 http://example.com/".format(self.concurrency)
        system(command)
@click.command()
@click.option('--profiler', default="none", type=click.Choice(['none', 'yappi']))
@click.option('--clock-type', default="cpu", type=click.Choice(['wall', 'cpu']))
@click.option('--concurrency', default=1, type=click.INT)
def main(profiler, clock_type, concurrency):
    """Run mitmdump under apachebench load, optionally profiled with yappi.

    When --profiler yappi is given, a callgrind-format profile is written to
    the working directory after mitmdump exits.
    """
    outfile = "callgrind.mitmdump-{}-c{}".format(clock_type, concurrency)
    bench = ApacheBenchThread(concurrency)
    bench.start()

    profiling = (profiler == "yappi")
    if profiling:
        yappi.set_clock_type(clock_type)
        yappi.start(addons=True)

    print("Start mitmdump...")
    mitmdump(["-k", "-q", "-S", "1024example"])
    print("mitmdump stopped.")

    print("Save profile information...")
    if profiling:
        yappi.stop()
        yappi.get_func_stats().save(outfile, type='callgrind')
    print("Done.")


if __name__ == '__main__':
    main()
| 25.245614
| 94
| 0.649062
| 174
| 1,439
| 5.235632
| 0.471264
| 0.049396
| 0.032931
| 0.043908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017109
| 0.18763
| 1,439
| 56
| 95
| 25.696429
| 0.76219
| 0.100764
| 0
| 0.055556
| 0
| 0.027778
| 0.191919
| 0.019425
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.277778
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b907c416aa083b16df70a844cea0da2fdc9f29d9
| 8,922
|
py
|
Python
|
pivpy/graphics.py
|
alexliberzonlab/pivpy
|
c1c984cd669fce6f5c0b6a602d6a51ed3fec5954
|
[
"BSD-3-Clause"
] | 1
|
2018-07-15T07:17:30.000Z
|
2018-07-15T07:17:30.000Z
|
pivpy/graphics.py
|
alexliberzonlab/pivpy
|
c1c984cd669fce6f5c0b6a602d6a51ed3fec5954
|
[
"BSD-3-Clause"
] | 4
|
2018-06-14T14:02:45.000Z
|
2018-07-15T00:19:01.000Z
|
pivpy/graphics.py
|
alexliberzonlab/pivpy
|
c1c984cd669fce6f5c0b6a602d6a51ed3fec5954
|
[
"BSD-3-Clause"
] | 1
|
2019-07-18T15:25:02.000Z
|
2019-07-18T15:25:02.000Z
|
# -*- coding: utf-8 -*-
"""
Various plots
"""
import os

import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from matplotlib import colors as mcolors
from matplotlib.animation import FuncAnimation, FFMpegWriter
def quiver(data, arrScale = 25.0, threshold = None, nthArr = 1,
           contourLevels = None, colbar = True, logscale = False,
           aspectratio='equal', colbar_orient = 'vertical', units = None):
    """
    Generates a quiver plot of a 'data' xarray DataArray object (single frame
    from a dataset)
    Inputs:
        data - xarray DataArray of the type defined in pivpy, one of the
            frames in the Dataset selected by default using .isel(t=0)
        threshold - values above the threshold will be set equal to threshold
        arrScale - use to change arrow scales
        nthArr - use to plot only every nth arrow from the array
        contourLevels - use to specify the maximum value (abs) of contour plots
        colbar - True/False whether to generate a colorbar or not
        logscale - if true then colorbar is on log scale
        aspectratio - set auto or equal for the plot's appearance
        colbar_orient - 'horizontal' or 'vertical' orientation of the
            colorbar (if colbar is True)
        units - optional length/velocity unit strings, e.g.
            ['m', 'm', 'mm/s', 'mm/s']
    Outputs:
        fig, ax - the matplotlib Figure and Axes used
    Usage:
        graphics.quiver(data, arrScale = 0.2, threshold = Inf, n)
    """
    data = dataset_to_array(data)
    # BUG FIX: clip BEFORE extracting u/v. The original clipped data['u'/'v']
    # after u and v had already been pulled out, so the threshold never
    # affected the plotted arrows or magnitude.
    if threshold is not None:
        data['u'] = xr.where(data['u'] > threshold, threshold, data['u'])
        data['v'] = xr.where(data['v'] > threshold, threshold, data['v'])
    x = data.x
    y = data.y
    u = data.u
    v = data.v
    if units is not None:
        lUnits = units[0]  # ['m' 'm' 'mm/s' 'mm/s']
        velUnits = units[2]
        tUnits = velUnits.split('/')[1]  # make it 's' or 'dt'
    else:
        lUnits, velUnits, tUnits = '', '', ''
    S = np.array(np.sqrt(u**2 + v**2))  # velocity magnitude
    open_figs = plt.get_fignums()
    if len(open_figs) == 0:  # if no figure is open
        fig, ax = plt.subplots()  # open a new figure
    else:
        # BUG FIX: the original returned the get_fignums() list as `fig`
        # when a figure was already open; return the current Figure instead.
        fig = plt.gcf()
        ax = plt.gca()
    if contourLevels is None:
        # default contour levels up to max of S
        levels = np.linspace(0, np.max(S.flatten()), 30)
    else:
        levels = np.linspace(0, contourLevels, 30)
    if logscale:
        # BUG FIX: plt.colors does not exist (pyplot has no `colors`
        # attribute); LogNorm lives in matplotlib.colors.
        c = ax.contourf(x, y, S, alpha=0.8,
                        cmap=plt.get_cmap("Blues"),
                        levels=levels, norm=mcolors.LogNorm())
    else:
        c = ax.contourf(x, y, S, alpha=0.8,
                        cmap=plt.get_cmap("Blues"),
                        levels=levels)
    if colbar:
        cbar = plt.colorbar(c, orientation=colbar_orient)
        # raw strings so the LaTeX backslashes are not treated as escapes
        cbar.set_label(r'$\left| \, V \, \right|$ [' + lUnits + r' $\cdot$ ' + tUnits + r'$^{-1}$]')
    ax.quiver(x[::nthArr], y[::nthArr],
              u[::nthArr, ::nthArr], v[::nthArr, ::nthArr], units='width',
              scale=np.max(S * arrScale), headwidth=2)
    ax.set_xlabel('x (' + lUnits + ')')
    ax.set_ylabel('y (' + lUnits + ')')
    ax.set_aspect(aspectratio)
    return fig, ax
def histogram(data, normed = False):
    """
    Plot histograms of the u and v velocity components.
    Input:
        data : xarray DataSet with ['u','v'] and attrs['units']
        normed : (optional) default is False; True normalizes the histogram
    Output:
        f, ax : matplotlib Figure and array of Axes
    """
    u = np.asarray(data.u).flatten()
    v = np.asarray(data.v).flatten()
    units = data.attrs['units']
    f, ax = plt.subplots(2)
    # BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int.
    # BUG FIX: the v histogram used sqrt(len(v)*0.5) while u used
    # sqrt(len(u))*0.5; use the u formula consistently for both.
    u_bins = int(np.sqrt(len(u)) * 0.5)
    v_bins = int(np.sqrt(len(v)) * 0.5)
    ax[0].hist(u, bins=u_bins, density=normed)
    ax[0].set_xlabel('u ['+units[2]+']')
    ax[1] = plt.subplot2grid((2, 1), (1, 0))
    ax[1].hist(v, bins=v_bins, density=normed)
    ax[1].set_xlabel('v ['+units[2]+']')
    plt.tight_layout()
    return f, ax
def contour_plot(data, threshold = None, contourLevels = None,
                 colbar = True, logscale = False, aspectration='equal', units=None):
    """ contourf adjusted for the xarray PIV dataset, creates a
    contour map for the data['w'] property.
    Input:
        data : xarray PIV DataArray, converted automatically using .isel(t=0)
        threshold : a threshold value, default is None (no data clipping)
        contourLevels : number of contour levels, default is None
        colbar : boolean (default is True) show/hide colorbar
        logscale : boolean (True is default) create in linear/log scale
        aspectration : string, 'equal' is the default
        units : optional unit strings; only units[0] (length) is used here
    Output:
        f, ax : matplotlib Figure and Axes
    """
    data = dataset_to_array(data)
    if units is not None:
        lUnits = units[0]  # ['m' 'm' 'mm/s' 'mm/s']
    else:
        lUnits = ''
    f, ax = plt.subplots()
    if threshold is not None:
        data['w'] = xr.where(data['w'] > threshold, threshold, data['w'])
    m = np.amax(abs(data['w']))  # symmetric limits around zero
    if contourLevels is None:  # FIX: identity comparison with None
        levels = np.linspace(-m, m, 30)
    else:
        levels = np.linspace(-contourLevels, contourLevels, 30)
    if logscale:
        # BUG FIX: plt.colors does not exist (pyplot has no `colors`
        # attribute); LogNorm lives in matplotlib.colors.
        c = ax.contourf(data.x, data.y, np.abs(data['w']), levels=levels,
                        cmap=plt.get_cmap('RdYlBu'), norm=mcolors.LogNorm())
    else:
        c = ax.contourf(data.x, data.y, data['w'], levels=levels,
                        cmap=plt.get_cmap('RdYlBu'))
    plt.xlabel('x [' + lUnits + ']')
    plt.ylabel('y [' + lUnits + ']')
    if colbar:
        cbar = plt.colorbar(c)
        cbar.set_label(r'$\omega$ [s$^{-1}$]')
    ax.set_aspect(aspectration)
    return f, ax
def showf(data, variables=None, units=None, fig=None):
    """
    showf(data, var, units)
    Draw an animated quiver of every frame in the dataset, coloured by
    squared velocity magnitude.
    Arguments:
        data : xarray.DataSet that contains dimensions of t,x,y
               and variables u,v and maybe w (scalar)
    """
    if variables is None:
        xlabel, ylabel = ' ', ' '
    else:
        xlabel, ylabel = variables[0], variables[1]
    if units is not None:
        xlabel += ' ' + units[0]
        ylabel += ' ' + units[1]

    fig = plt.figure(None if fig is None else fig.number)
    # NOTE(review): iterating data['t'] yields coordinate *values*, which are
    # then handed to isel (positional indexing) — confirm the t coordinate is
    # 0..N-1 or this selects the wrong frames.
    for t in data['t']:
        d = data.isel(t=t)
        plt.quiver(d['x'], d['y'], d['u'], d['v'], d['u']**2 + d['v']**2)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.draw()
        plt.pause(0.1)
    plt.show()
def showscal(data, property='ken'):
    """
    showscal(data, property)
    Convert the vector dataset to a scalar field and contour-plot it.
    Arguments:
        data : xarray.DataSet that contains dimensions of t,x,y
               and a variable w (scalar)
        property : scalar property name passed to piv.vec2scal
    """
    scalar_data = data.piv.vec2scal(property=property)
    contour_plot(scalar_data)
def animate(data, arrowscale=1, savepath=None):
    """ animates the quiver plot for the dataset (multiple frames)
    Input:
        data : xarray PIV type of DataSet
        arrowscale : [optional] integer, default is 1
        savepath : [optional] path to save the MP4 animation, default is None
    Output:
        if savepath is None, then only an image display of the animation
        if savepath is an existing path, a file named im.mp4 is saved
    """
    # assumes data.u / data.v are indexable as [:, :, frame] — TODO confirm
    # the dimension order against the pivpy dataset definition.
    X, Y = data.x, data.y
    U, V = data.u[:,:,0], data.v[:,:,0] # first frame
    fig, ax = plt.subplots(1,1)
    M = np.sqrt(U**2 + V**2)
    # Arrows are decimated to every 3rd point in both directions.
    Q = ax.quiver(X[::3,::3], Y[::3,::3],
                  U[::3,::3], V[::3,::3], M[::3,::3],
                  units='inches', scale=arrowscale)
    cb = plt.colorbar(Q)
    units = data.attrs['units']
    cb.ax.set_ylabel('velocity (' + units[2] + ')')
    # Frame counter text, e.g. "1/120", placed just above the axes.
    text = ax.text(0.2,1.05, '1/'+str(len(data.t)), ha='center', va='center',
                   transform=ax.transAxes)
    def update_quiver(num,Q,data,text):
        # Per-frame update callback for FuncAnimation.
        # NOTE(review): U and V here are full resolution while M is decimated
        # with [::3,::3], and the quiver itself was built on decimated data —
        # set_UVC(U, V, M) therefore mixes array sizes; confirm this renders
        # correctly (looks like it should be U[::3,::3], V[::3,::3]).
        U,V = data.u[:,:,num],data.v[:,:,num]
        M = np.sqrt(U[::3,::3]**2 + V[::3,::3]**2)
        Q.set_UVC(U,V,M)
        text.set_text(str(num+1)+'/'+str(len(data.t)))
        return Q
    anim = FuncAnimation(fig, update_quiver, fargs=(Q,data,text),
                         frames = len(data.t), blit=False)
    mywriter = FFMpegWriter()
    if savepath:
        # Temporarily chdir so anim.save writes im.mp4 into savepath.
        p = os.getcwd()
        os.chdir(savepath)
        anim.save('im.mp4', writer=mywriter)
        os.chdir(p)
    else: anim.save('im.mp4', writer=mywriter)
def dataset_to_array(data, N=0):
    """Reduce a Dataset to a single frame: select frame N when a time
    dimension is present, otherwise return the data unchanged."""
    if 't' not in data.dims:
        return data
    print('Warning: function for a single frame, using first frame, supply data.isel(t=N)')
    return data.isel(t=N)
| 32.922509
| 99
| 0.553015
| 1,221
| 8,922
| 4.015561
| 0.216216
| 0.014685
| 0.012237
| 0.011422
| 0.261473
| 0.229247
| 0.170916
| 0.134815
| 0.122782
| 0.107689
| 0
| 0.016344
| 0.300493
| 8,922
| 271
| 100
| 32.922509
| 0.769268
| 0.313046
| 0
| 0.2
| 0
| 0.006897
| 0.05019
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055172
| false
| 0
| 0.034483
| 0
| 0.124138
| 0.006897
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9081ad94fb9a0b4f6e0a49043c2a08a7969c6fc
| 1,212
|
py
|
Python
|
configs/my_config/vit_base_aspp.py
|
BostonCrayfish/mmsegmentation
|
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
|
[
"Apache-2.0"
] | null | null | null |
configs/my_config/vit_base_aspp.py
|
BostonCrayfish/mmsegmentation
|
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
|
[
"Apache-2.0"
] | null | null | null |
configs/my_config/vit_base_aspp.py
|
BostonCrayfish/mmsegmentation
|
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
|
[
"Apache-2.0"
] | null | null | null |
# model settings
# Normalization config shared by decode head layers (BatchNorm, trainable).
norm_cfg = dict(type='BN', requires_grad=True)
# Encoder-decoder segmentor: ViT-Base backbone + ASPP decode head.
model = dict(
    type='EncoderDecoder',
    # ImageNet-pretrained ViT-Base/16 weights loaded from a local file.
    pretrained='pretrain/vit_base_patch16_224.pth',
    backbone=dict(
        type='VisionTransformer',
        img_size=(224, 224),
        patch_size=16,
        in_channels=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        # out_indices=(2, 5, 8, 11),
        qkv_bias=True,
        # All dropout disabled.
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        with_cls_token=True,
        # Transformer blocks use LayerNorm, not the BN from norm_cfg above.
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        interpolate_mode='bicubic'),
    neck=None,
    decode_head=dict(
        type='ASPPHead',
        in_channels=768,
        # in_index=3,
        channels=512,
        # Atrous rates for the ASPP branches.
        dilations=(1, 6, 12, 18),
        dropout_ratio=0.1,
        # 21 classes — presumably PASCAL VOC (20 classes + background); verify
        # against the dataset config this file is paired with.
        num_classes=21,
        contrast=True,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=None,
    # model training and testing settings
    train_cfg=dict(),
    # Whole-image inference (as opposed to sliding-window).
    test_cfg=dict(mode='whole'))  # yapf: disable
| 28.857143
| 74
| 0.587459
| 158
| 1,212
| 4.265823
| 0.594937
| 0.083086
| 0.048961
| 0.04451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062572
| 0.287954
| 1,212
| 42
| 75
| 28.857143
| 0.718424
| 0.084984
| 0
| 0
| 0
| 0
| 0.097826
| 0.029891
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9083abf7ea4269348156a83680d8a60f00f6033
| 69,300
|
py
|
Python
|
tripleo_ansible/ansible_plugins/modules/podman_container.py
|
smolar/tripleo-ansible
|
7bd37f019870c032bea71f22b305832932d81424
|
[
"Apache-2.0"
] | null | null | null |
tripleo_ansible/ansible_plugins/modules/podman_container.py
|
smolar/tripleo-ansible
|
7bd37f019870c032bea71f22b305832932d81424
|
[
"Apache-2.0"
] | null | null | null |
tripleo_ansible/ansible_plugins/modules/podman_container.py
|
smolar/tripleo-ansible
|
7bd37f019870c032bea71f22b305832932d81424
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2019 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# flake8: noqa: E501
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
from distutils.version import LooseVersion
import yaml
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
# Ansible module metadata: schema version and support level for this module.
ANSIBLE_METADATA = {
    'metadata_version': '1.0',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = """
module: podman_container
author:
- "Sagi Shnaidman (@sshnaidm)"
version_added: '2.9'
short_description: Manage podman containers
notes: []
description:
- Start, stop, restart and manage Podman containers
requirements:
- "Podman installed on host"
options:
name:
description:
- Name of the container
required: True
type: str
executable:
description:
- Path to C(podman) executable if it is not in the C($PATH) on the
machine running C(podman)
default: 'podman'
type: str
state:
description:
- I(absent) - A container matching the specified name will be stopped and
removed.
- I(present) - Asserts the existence of a container matching the name and
any provided configuration parameters. If no container matches the
name, a container will be created. If a container matches the name but
the provided configuration does not match, the container will be
updated, if it can be. If it cannot be updated, it will be removed and
re-created with the requested config. Image version will be taken into
account when comparing configuration. Use the recreate option to force
the re-creation of the matching container.
- I(started) - Asserts there is a running container matching the name and
any provided configuration. If no container matches the name, a
container will be created and started. Use recreate to always re-create
a matching container, even if it is running. Use force_restart to force
a matching container to be stopped and restarted.
- I(stopped) - Asserts that the container is first I(present), and then
if the container is running moves it to a stopped state.
type: str
default: started
choices:
- absent
- present
- stopped
- started
image:
description:
- Repository path (or image name) and tag used to create the container.
If an image is not found, the image will be pulled from the registry.
If no tag is included, C(latest) will be used.
- Can also be an image ID. If this is the case, the image is assumed to
be available locally.
type: str
annotation:
description:
- Add an annotation to the container. The format is key value, multiple
times.
type: dict
authfile:
description:
- Path of the authentication file. Default is
``${XDG_RUNTIME_DIR}/containers/auth.json``
(Not available for remote commands) You can also override the default
path of the authentication file by setting the ``REGISTRY_AUTH_FILE``
environment variable. ``export REGISTRY_AUTH_FILE=path``
type: path
blkio_weight:
description:
- Block IO weight (relative weight) accepts a weight value between 10 and
1000
type: int
blkio_weight_device:
description:
- Block IO weight (relative device weight, format DEVICE_NAME[:]WEIGHT).
type: dict
cap_add:
description:
- List of capabilities to add to the container.
type: list
elements: str
cap_drop:
description:
- List of capabilities to drop from the container.
type: list
elements: str
cgroup_parent:
description:
- Path to cgroups under which the cgroup for the container will be
created.
If the path is not absolute, the path is considered to be relative to
the cgroups path of the init process. Cgroups will be created if they
do not already exist.
type: path
cgroupns:
description:
- Path to cgroups under which the cgroup for the container will be
created.
type: str
cgroups:
description:
- Determines whether the container will create CGroups.
Valid values are enabled and disabled, which the default being enabled.
The disabled option will force the container to not create CGroups,
and thus conflicts with CGroup options cgroupns and cgroup-parent.
type: str
choices:
- default
- disabled
cidfile:
description:
- Write the container ID to the file
type: path
cmd_args:
description:
- Any additionl command options you want to pass to podman command,
cmd_args - ['--other-param', 'value']
Be aware module doesn't support idempotency if this is set.
type: list
elements: str
conmon_pidfile:
description:
- Write the pid of the conmon process to a file.
conmon runs in a separate process than Podman,
so this is necessary when using systemd to restart Podman containers.
type: path
command:
description:
- Override command of container. Can be a string or a list.
type: raw
cpu_period:
description:
- Limit the CPU real-time period in microseconds
type: int
cpu_rt_period:
description:
- Limit the CPU real-time period in microseconds.
Limit the container's Real Time CPU usage. This flag tell the kernel to
restrict the container's Real Time CPU usage to the period you specify.
type: int
cpu_rt_runtime:
description:
- Limit the CPU real-time runtime in microseconds.
This flag tells the kernel to limit the amount of time in a given CPU
period Real Time tasks may consume.
type: int
cpu_shares:
description:
- CPU shares (relative weight)
type: int
cpus:
description:
- Number of CPUs. The default is 0.0 which means no limit.
type: str
cpuset_cpus:
description:
- CPUs in which to allow execution (0-3, 0,1)
type: str
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
effective on NUMA systems.
type: str
detach:
description:
- Run container in detach mode
type: bool
default: True
debug:
description:
- Return additional information which can be helpful for investigations.
type: bool
default: False
detach_keys:
description:
- Override the key sequence for detaching a container. Format is a single
character or ctrl-value
type: str
device:
description:
- Add a host device to the container.
The format is <device-on-host>[:<device-on-container>][:<permissions>]
(e.g. device /dev/sdc:/dev/xvdc:rwm)
type: list
elements: str
device_read_bps:
description:
- Limit read rate (bytes per second) from a device
(e.g. device-read-bps /dev/sda:1mb)
type: list
device_read_iops:
description:
- Limit read rate (IO per second) from a device
(e.g. device-read-iops /dev/sda:1000)
type: list
device_write_bps:
description:
- Limit write rate (bytes per second) to a device
(e.g. device-write-bps /dev/sda:1mb)
type: list
device_write_iops:
description:
- Limit write rate (IO per second) to a device
(e.g. device-write-iops /dev/sda:1000)
type: list
dns:
description:
- Set custom DNS servers
type: list
elements: str
dns_option:
description:
- Set custom DNS options
type: str
dns_search:
description:
- Set custom DNS search domains (Use dns_search with '' if you don't wish
to set the search domain)
type: str
entrypoint:
description:
- Overwrite the default ENTRYPOINT of the image
type: str
env:
description:
- Set environment variables.
This option allows you to specify arbitrary environment variables that
are available for the process that will be launched inside of the
container.
type: dict
env_file:
description:
- Read in a line delimited file of environment variables
type: path
env_host:
description:
- Use all current host environment variables in container.
Defaults to false.
type: bool
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the
dictionary. Each host name will be added to the container's
``/etc/hosts`` file.
type: dict
aliases:
- add_hosts
expose:
description:
- Expose a port, or a range of ports (e.g. expose "3300-3310") to set up
port redirection on the host system.
type: list
elements: str
aliases:
- exposed
- exposed_ports
force_restart:
description:
- Force restart of container.
type: bool
default: False
aliases:
- restart
gidmap:
description:
- Run the container in a new user namespace using the supplied mapping.
type: str
group_add:
description:
- Add additional groups to run as
type: list
healthcheck:
description:
- Set or alter a healthcheck command for a container.
type: str
healthcheck_interval:
description:
- Set an interval for the healthchecks
(a value of disable results in no automatic timer setup)
(default "30s")
type: str
healthcheck_retries:
description:
- The number of retries allowed before a healthcheck is considered to be
unhealthy. The default value is 3.
type: int
healthcheck_start_period:
description:
- The initialization time needed for a container to bootstrap.
The value can be expressed in time format like 2m3s. The default value
is 0s
type: str
healthcheck_timeout:
description:
- The maximum time allowed to complete the healthcheck before an interval
is considered failed. Like start-period, the value can be expressed in
a time format such as 1m22s. The default value is 30s
type: str
hostname:
description:
- Container host name. Sets the container host name that is available
inside the container.
type: str
http_proxy:
description:
- By default proxy environment variables are passed into the container if
set for the podman process. This can be disabled by setting the
http_proxy option to false. The environment variables passed in
include http_proxy, https_proxy, ftp_proxy, no_proxy, and also the
upper case versions of those.
Defaults to true
type: bool
image_volume:
description:
- Tells podman how to handle the builtin image volumes.
The options are bind, tmpfs, or ignore (default bind)
type: str
choices:
- 'bind'
- 'tmpfs'
- 'ignore'
image_strict:
description:
- Whether to compare images in idempotency by taking into account a full
name with registry and namespaces.
type: bool
default: False
init:
description:
- Run an init inside the container that forwards signals and reaps
processes.
type: str
init_path:
description:
- Path to the container-init binary.
type: str
interactive:
description:
- Keep STDIN open even if not attached. The default is false.
When set to true, keep stdin open even if not attached.
The default is false.
type: bool
ip:
description:
- Specify a static IP address for the container, for example
'10.88.64.128'.
Can only be used if no additional CNI networks to join were specified
via 'network:', and if the container is not joining another container's
network namespace via 'network container:<name|id>'.
The address must be within the default CNI network's pool
(default 10.88.0.0/16).
type: str
ipc:
description:
- Default is to create a private IPC namespace (POSIX SysV IPC) for the
container
type: str
kernel_memory:
description:
- Kernel memory limit
(format <number>[<unit>], where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
label:
description:
- Add metadata to a container, pass dictionary of label names and values
type: dict
label_file:
description:
- Read in a line delimited file of labels
type: str
log_driver:
description:
- Logging driver. Used to set the log driver for the container.
For example log_driver "k8s-file".
type: str
choices:
- k8s-file
- journald
- json-file
log_opt:
description:
- Logging driver specific options. Used to set the path to the container
log file. For example log_opt
"path=/var/log/container/mycontainer.json"
type: str
aliases:
- log_options
memory:
description:
- Memory limit (format 10k, where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
memory_reservation:
description:
- Memory soft limit (format 100m, where unit = b, k, m or g)
Note - idempotency is supported for integers only.
type: str
memory_swap:
description:
- A limit value equal to memory plus swap. Must be used with the -m
(--memory) flag.
The swap LIMIT should always be larger than -m (--memory) value.
By default, the swap LIMIT will be set to double the value of --memory
Note - idempotency is supported for integers only.
type: str
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer
between 0 and 100.
type: int
mount:
description:
- Attach a filesystem mount to the container. bind or tmpfs
For example mount
"type=bind,source=/path/on/host,destination=/path/in/container"
type: str
network:
description:
- Set the Network mode for the container
* bridge create a network stack on the default bridge
* none no networking
* container:<name|id> reuse another container's network stack
* host use the podman host network stack.
* <network-name>|<network-id> connect to a user-defined network
* ns:<path> path to a network namespace to join
* slirp4netns use slirp4netns to create a user network stack.
This is the default for rootless containers
type: list
elements: str
aliases:
- net
no_hosts:
description:
- Do not create /etc/hosts for the container
Default is false.
type: bool
oom_kill_disable:
description:
- Whether to disable OOM Killer for the container or not.
Default is false.
type: bool
oom_score_adj:
description:
- Tune the host's OOM preferences for containers (accepts -1000 to 1000)
type: int
pid:
description:
- Set the PID mode for the container
type: str
pids_limit:
description:
- Tune the container's pids limit. Set -1 to have unlimited pids for the
container.
type: str
pod:
description:
- Run container in an existing pod.
      If you want podman to make the pod for you, prefix the pod name
with "new:"
type: str
privileged:
description:
- Give extended privileges to this container. The default is false.
type: bool
publish:
description:
- Publish a container's port, or range of ports, to the host.
Format - ip:hostPort:containerPort | ip::containerPort |
hostPort:containerPort | containerPort
type: list
elements: str
aliases:
- ports
- published
- published_ports
publish_all:
description:
- Publish all exposed ports to random ports on the host interfaces. The
default is false.
type: bool
read_only:
description:
- Mount the container's root filesystem as read only. Default is false
type: bool
read_only_tmpfs:
description:
- If container is running in --read-only mode, then mount a read-write
tmpfs on /run, /tmp, and /var/tmp. The default is true
type: bool
recreate:
description:
- Use with present and started states to force the re-creation of an
existing container.
type: bool
default: False
restart_policy:
description:
- Restart policy to follow when containers exit.
Restart policy will not take effect if a container is stopped via the
podman kill or podman stop commands. Valid values are
* no - Do not restart containers on exit
* on-failure[:max_retries] - Restart containers when they exit with a
non-0 exit code, retrying indefinitely
or until the optional max_retries count is hit
* always - Restart containers when they exit, regardless of status,
retrying indefinitely
type: str
rm:
description:
- Automatically remove the container when it exits. The default is false.
type: bool
aliases:
- remove
rootfs:
description:
- If true, the first argument refers to an exploded container on the file
        system. The default is false.
type: bool
security_opt:
description:
- Security Options. For example security_opt "seccomp=unconfined"
type: list
elements: str
shm_size:
description:
- Size of /dev/shm. The format is <number><unit>. number must be greater
than 0.
Unit is optional and can be b (bytes), k (kilobytes), m(megabytes), or
g (gigabytes).
If you omit the unit, the system uses bytes. If you omit the size
entirely, the system uses 64m
type: str
sig_proxy:
description:
- Proxy signals sent to the podman run command to the container process.
SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is true.
type: bool
stop_signal:
description:
- Signal to stop a container. Default is SIGTERM.
type: int
stop_timeout:
description:
- Timeout (in seconds) to stop a container. Default is 10.
type: int
subgidname:
description:
- Run the container in a new user namespace using the map with 'name' in
the /etc/subgid file.
type: str
subuidname:
description:
- Run the container in a new user namespace using the map with 'name' in
the /etc/subuid file.
type: str
sysctl:
description:
- Configure namespaced kernel parameters at runtime
type: dict
systemd:
description:
- Run container in systemd mode. The default is true.
type: bool
tmpfs:
description:
- Create a tmpfs mount. For example tmpfs
"/tmp" "rw,size=787448k,mode=1777"
type: dict
tty:
description:
- Allocate a pseudo-TTY. The default is false.
type: bool
uidmap:
description:
- Run the container in a new user namespace using the supplied mapping.
type: list
ulimit:
description:
- Ulimit options
type: list
user:
description:
- Sets the username or UID used and optionally the groupname or GID for
the specified command.
type: str
userns:
description:
- Set the user namespace mode for the container.
It defaults to the PODMAN_USERNS environment variable.
An empty value means user namespaces are disabled.
type: str
uts:
description:
- Set the UTS mode for the container
type: str
volume:
description:
- Create a bind mount. If you specify, volume /HOST-DIR:/CONTAINER-DIR,
podman bind mounts /HOST-DIR in the host to /CONTAINER-DIR in the
podman container.
type: list
elements: str
aliases:
- volumes
volumes_from:
description:
- Mount volumes from the specified container(s).
type: list
elements: str
workdir:
description:
- Working directory inside the container.
The default working directory for running binaries within a container
is the root directory (/).
type: str
"""
EXAMPLES = """
- name: Run container
podman_container:
name: container
image: quay.io/bitnami/wildfly
state: started
- name: Create a data container
podman_container:
name: mydata
image: busybox
volume:
- /tmp/data
- name: Re-create a redis container
podman_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
expose:
- 6379
volumes_from:
- mydata
- name: Restart a container
podman_container:
name: myapplication
image: redis
state: started
restart: yes
etc_hosts:
other: "127.0.0.1"
restart_policy: "no"
device: "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: "ssssh"
BOOLEAN_KEY: "yes"
- name: Container present
podman_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: "sleep 1d"
- name: Stop a container
podman_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
podman_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
podman_container:
name: ohno
state: absent
- name: Writing output
podman_container:
name: myservice
image: busybox
log_options: path=/var/log/container/mycontainer.json
log_driver: k8s-file
"""
RETURN = """
container:
description:
- Facts representing the current state of the container. Matches the
podman inspection output.
- Note that facts are part of the registered vars since Ansible 2.8. For
compatibility reasons, the facts
are also accessible directly as C(podman_container). Note that the
returned fact will be removed in Ansible 2.12.
- Empty if C(state) is I(absent).
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [
"sh"
],
"BoundingCaps": [
"CAP_CHOWN",
...
],
"Config": {
"Annotations": {
"io.kubernetes.cri-o.ContainerType": "sandbox",
"io.kubernetes.cri-o.TTY": "false"
},
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"sh"
],
"Domainname": "",
"Entrypoint": "",
"Env": [
"PATH=/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
"HOSTNAME=",
"container=podman"
],
"Hostname": "",
"Image": "docker.io/library/busybox:latest",
"Labels": null,
"OpenStdin": false,
"StdinOnce": false,
"StopSignal": 15,
"Tty": false,
"User": {
"gid": 0,
"uid": 0
},
"Volumes": null,
"WorkingDir": "/"
},
"ConmonPidFile": "...",
"Created": "2019-06-17T19:13:09.873858307+03:00",
"Dependencies": [],
"Driver": "overlay",
"EffectiveCaps": [
"CAP_CHOWN",
...
],
"ExecIDs": [],
"ExitCommand": [
"/usr/bin/podman",
"--root",
...
],
"GraphDriver": {
...
},
"HostConfig": {
...
},
"HostnamePath": "...",
"HostsPath": "...",
"ID": "...",
"Image": "...",
"ImageName": "docker.io/library/busybox:latest",
"IsInfra": false,
"LogPath": "/tmp/container/mycontainer.json",
"MountLabel": "system_u:object_r:container_file_t:s0:c282,c782",
"Mounts": [
...
],
"Name": "myservice",
"Namespace": "",
"NetworkSettings": {
"Bridge": "",
...
},
"Path": "sh",
"ProcessLabel": "system_u:system_r:container_t:s0:c282,c782",
"ResolvConfPath": "...",
"RestartCount": 0,
"Rootfs": "",
"State": {
"Dead": false,
"Error": "",
"ExitCode": 0,
"FinishedAt": "2019-06-17T19:13:10.157518963+03:00",
"Healthcheck": {
"FailingStreak": 0,
"Log": null,
"Status": ""
},
"OOMKilled": false,
"OciVersion": "1.0.1-dev",
"Paused": false,
"Pid": 4083,
"Restarting": false,
"Running": false,
"StartedAt": "2019-06-17T19:13:10.152479729+03:00",
"Status": "exited"
},
"StaticDir": "..."
...
}'
"""
class PodmanModuleParams:
    """Creates list of arguments for podman CLI command.

    Arguments:
        action {str} -- action type from 'run', 'stop', 'create', 'delete',
                        'start'
        params {dict} -- dictionary of module parameters
        podman_version {str} -- version string of the podman binary in use
        module {AnsibleModule} -- module object, used for fail_json() calls
    """
    def __init__(self, action, params, podman_version, module):
        self.params = params
        self.action = action
        self.podman_version = podman_version
        self.module = module
    def construct_command_from_params(self):
        """Create a podman command from given module parameters.

        Returns:
            list -- list of byte strings for Popen command
        """
        if self.action in ['start', 'stop', 'delete']:
            return self.start_stop_delete()
        if self.action in ['create', 'run']:
            cmd = [self.action, '--name', self.params['name']]
            # Discover all addparam_* handlers on this class and apply each
            # one whose corresponding module parameter was actually set.
            all_param_methods = [func for func in dir(self)
                                 if callable(getattr(self, func))
                                 and func.startswith("addparam")]
            params_set = (i for i in self.params if self.params[i] is not None)
            for param in params_set:
                func_name = "_".join(["addparam", param])
                if func_name in all_param_methods:
                    cmd = getattr(self, func_name)(cmd)
            # Image name goes after all options, then the container command.
            cmd.append(self.params['image'])
            if self.params['command']:
                if isinstance(self.params['command'], list):
                    cmd += self.params['command']
                else:
                    cmd += self.params['command'].split()
            return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
    def start_stop_delete(self):
        """Build the command for the start/stop/delete actions.

        Returns:
            list -- list of byte strings for Popen command
        """
        if self.action in ['stop', 'start']:
            cmd = [self.action, self.params['name']]
            return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
        if self.action == 'delete':
            cmd = ['rm', '-f', self.params['name']]
            return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
    def check_version(self, param, minv=None, maxv=None):
        """Fail the module if the running podman version does not support
        the given CLI parameter.

        Arguments:
            param {str} -- CLI flag being checked (for the error message)
            minv {str} -- minimal podman version supporting the parameter
            maxv {str} -- maximal podman version supporting the parameter
        """
        if minv and LooseVersion(minv) > LooseVersion(
                self.podman_version):
            self.module.fail_json(msg="Parameter %s is supported from podman "
                                  "version %s only! Current version is %s" % (
                                      param, minv, self.podman_version))
        if maxv and LooseVersion(maxv) < LooseVersion(
                self.podman_version):
            # BUGFIX: the message must report maxv here, not minv.
            self.module.fail_json(msg="Parameter %s is supported till podman "
                                  "version %s only! Current version is %s" % (
                                      param, maxv, self.podman_version))
    # Each addparam_<name> method below appends the CLI flag(s) for the
    # module parameter <name> to the command list 'c' and returns it.
    def addparam_annotation(self, c):
        for annotate in self.params['annotation'].items():
            c += ['--annotation', '='.join(annotate)]
        return c
    def addparam_authfile(self, c):
        return c + ['--authfile', self.params['authfile']]
    def addparam_blkio_weight(self, c):
        return c + ['--blkio-weight', self.params['blkio_weight']]
    def addparam_blkio_weight_device(self, c):
        for blkio in self.params['blkio_weight_device'].items():
            c += ['--blkio-weight-device', ':'.join(blkio)]
        return c
    def addparam_cap_add(self, c):
        for cap_add in self.params['cap_add']:
            c += ['--cap-add', cap_add]
        return c
    def addparam_cap_drop(self, c):
        for cap_drop in self.params['cap_drop']:
            c += ['--cap-drop', cap_drop]
        return c
    def addparam_cgroups(self, c):
        self.check_version('--cgroups', minv='1.6.0')
        return c + ['--cgroups=%s' % self.params['cgroups']]
    def addparam_cgroupns(self, c):
        self.check_version('--cgroupns', minv='1.6.2')
        return c + ['--cgroupns=%s' % self.params['cgroupns']]
    def addparam_cgroup_parent(self, c):
        return c + ['--cgroup-parent', self.params['cgroup_parent']]
    def addparam_cidfile(self, c):
        return c + ['--cidfile', self.params['cidfile']]
    def addparam_conmon_pidfile(self, c):
        return c + ['--conmon-pidfile', self.params['conmon_pidfile']]
    def addparam_cpu_period(self, c):
        return c + ['--cpu-period', self.params['cpu_period']]
    def addparam_cpu_rt_period(self, c):
        return c + ['--cpu-rt-period', self.params['cpu_rt_period']]
    def addparam_cpu_rt_runtime(self, c):
        return c + ['--cpu-rt-runtime', self.params['cpu_rt_runtime']]
    def addparam_cpu_shares(self, c):
        return c + ['--cpu-shares', self.params['cpu_shares']]
    def addparam_cpus(self, c):
        return c + ['--cpus', self.params['cpus']]
    def addparam_cpuset_cpus(self, c):
        return c + ['--cpuset-cpus', self.params['cpuset_cpus']]
    def addparam_cpuset_mems(self, c):
        return c + ['--cpuset-mems', self.params['cpuset_mems']]
    def addparam_detach(self, c):
        return c + ['--detach=%s' % self.params['detach']]
    def addparam_detach_keys(self, c):
        return c + ['--detach-keys', self.params['detach_keys']]
    def addparam_device(self, c):
        for dev in self.params['device']:
            c += ['--device', dev]
        return c
    def addparam_device_read_bps(self, c):
        for dev in self.params['device_read_bps']:
            c += ['--device-read-bps', dev]
        return c
    def addparam_device_read_iops(self, c):
        for dev in self.params['device_read_iops']:
            c += ['--device-read-iops', dev]
        return c
    def addparam_device_write_bps(self, c):
        for dev in self.params['device_write_bps']:
            c += ['--device-write-bps', dev]
        return c
    def addparam_device_write_iops(self, c):
        for dev in self.params['device_write_iops']:
            c += ['--device-write-iops', dev]
        return c
    def addparam_dns(self, c):
        return c + ['--dns', ','.join(self.params['dns'])]
    def addparam_dns_option(self, c):
        return c + ['--dns-option', self.params['dns_option']]
    def addparam_dns_search(self, c):
        return c + ['--dns-search', self.params['dns_search']]
    def addparam_entrypoint(self, c):
        return c + ['--entrypoint', self.params['entrypoint']]
    def addparam_env(self, c):
        for env_value in self.params['env'].items():
            c += ['--env',
                  b"=".join([to_bytes(k, errors='surrogate_or_strict')
                             for k in env_value])]
        return c
    def addparam_env_file(self, c):
        return c + ['--env-file', self.params['env_file']]
    def addparam_env_host(self, c):
        self.check_version('--env-host', minv='1.5.0')
        return c + ['--env-host=%s' % self.params['env_host']]
    def addparam_etc_hosts(self, c):
        for host_ip in self.params['etc_hosts'].items():
            c += ['--add-host', ':'.join(host_ip)]
        return c
    def addparam_expose(self, c):
        for exp in self.params['expose']:
            c += ['--expose', exp]
        return c
    def addparam_gidmap(self, c):
        return c + ['--gidmap', self.params['gidmap']]
    def addparam_group_add(self, c):
        for g in self.params['group_add']:
            c += ['--group-add', g]
        return c
    def addparam_healthcheck(self, c):
        return c + ['--healthcheck-command', self.params['healthcheck']]
    def addparam_healthcheck_interval(self, c):
        return c + ['--healthcheck-interval',
                    self.params['healthcheck_interval']]
    def addparam_healthcheck_retries(self, c):
        return c + ['--healthcheck-retries',
                    self.params['healthcheck_retries']]
    def addparam_healthcheck_start_period(self, c):
        return c + ['--healthcheck-start-period',
                    self.params['healthcheck_start_period']]
    def addparam_healthcheck_timeout(self, c):
        return c + ['--healthcheck-timeout',
                    self.params['healthcheck_timeout']]
    def addparam_hostname(self, c):
        return c + ['--hostname', self.params['hostname']]
    def addparam_http_proxy(self, c):
        return c + ['--http-proxy=%s' % self.params['http_proxy']]
    def addparam_image_volume(self, c):
        return c + ['--image-volume', self.params['image_volume']]
    def addparam_init(self, c):
        return c + ['--init', self.params['init']]
    def addparam_init_path(self, c):
        return c + ['--init-path', self.params['init_path']]
    def addparam_interactive(self, c):
        return c + ['--interactive=%s' % self.params['interactive']]
    def addparam_ip(self, c):
        return c + ['--ip', self.params['ip']]
    def addparam_ipc(self, c):
        return c + ['--ipc', self.params['ipc']]
    def addparam_kernel_memory(self, c):
        return c + ['--kernel-memory', self.params['kernel_memory']]
    def addparam_label(self, c):
        for label in self.params['label'].items():
            c += ['--label',
                  b'='.join([to_bytes(item, errors='surrogate_or_strict')
                             for item in label])]
        return c
    def addparam_label_file(self, c):
        return c + ['--label-file', self.params['label_file']]
    def addparam_log_driver(self, c):
        return c + ['--log-driver', self.params['log_driver']]
    def addparam_log_opt(self, c):
        return c + ['--log-opt', self.params['log_opt']]
    def addparam_memory(self, c):
        return c + ['--memory', self.params['memory']]
    def addparam_memory_reservation(self, c):
        return c + ['--memory-reservation', self.params['memory_reservation']]
    def addparam_memory_swap(self, c):
        return c + ['--memory-swap', self.params['memory_swap']]
    def addparam_memory_swappiness(self, c):
        return c + ['--memory-swappiness', self.params['memory_swappiness']]
    def addparam_mount(self, c):
        return c + ['--mount', self.params['mount']]
    def addparam_network(self, c):
        return c + ['--network', ",".join(self.params['network'])]
    def addparam_no_hosts(self, c):
        return c + ['--no-hosts=%s' % self.params['no_hosts']]
    def addparam_oom_kill_disable(self, c):
        return c + ['--oom-kill-disable=%s' % self.params['oom_kill_disable']]
    def addparam_oom_score_adj(self, c):
        return c + ['--oom-score-adj', self.params['oom_score_adj']]
    def addparam_pid(self, c):
        return c + ['--pid', self.params['pid']]
    def addparam_pids_limit(self, c):
        return c + ['--pids-limit', self.params['pids_limit']]
    def addparam_pod(self, c):
        return c + ['--pod', self.params['pod']]
    def addparam_privileged(self, c):
        return c + ['--privileged=%s' % self.params['privileged']]
    def addparam_publish(self, c):
        for pub in self.params['publish']:
            c += ['--publish', pub]
        return c
    def addparam_publish_all(self, c):
        return c + ['--publish-all=%s' % self.params['publish_all']]
    def addparam_read_only(self, c):
        return c + ['--read-only=%s' % self.params['read_only']]
    def addparam_read_only_tmpfs(self, c):
        return c + ['--read-only-tmpfs=%s' % self.params['read_only_tmpfs']]
    def addparam_restart_policy(self, c):
        return c + ['--restart=%s' % self.params['restart_policy']]
    def addparam_rm(self, c):
        # Boolean flag: only append when requested.
        if self.params['rm']:
            c += ['--rm']
        return c
    def addparam_rootfs(self, c):
        return c + ['--rootfs=%s' % self.params['rootfs']]
    def addparam_security_opt(self, c):
        for secopt in self.params['security_opt']:
            c += ['--security-opt', secopt]
        return c
    def addparam_shm_size(self, c):
        return c + ['--shm-size', self.params['shm_size']]
    def addparam_sig_proxy(self, c):
        return c + ['--sig-proxy=%s' % self.params['sig_proxy']]
    def addparam_stop_signal(self, c):
        return c + ['--stop-signal', self.params['stop_signal']]
    def addparam_stop_timeout(self, c):
        return c + ['--stop-timeout', self.params['stop_timeout']]
    def addparam_subgidname(self, c):
        return c + ['--subgidname', self.params['subgidname']]
    def addparam_subuidname(self, c):
        return c + ['--subuidname', self.params['subuidname']]
    def addparam_sysctl(self, c):
        for sysctl in self.params['sysctl'].items():
            c += ['--sysctl',
                  b"=".join([to_bytes(k, errors='surrogate_or_strict')
                             for k in sysctl])]
        return c
    def addparam_systemd(self, c):
        return c + ['--systemd=%s' % self.params['systemd']]
    def addparam_tmpfs(self, c):
        for tmpfs in self.params['tmpfs'].items():
            c += ['--tmpfs', ':'.join(tmpfs)]
        return c
    def addparam_tty(self, c):
        return c + ['--tty=%s' % self.params['tty']]
    def addparam_uidmap(self, c):
        for uidmap in self.params['uidmap']:
            c += ['--uidmap', uidmap]
        return c
    def addparam_ulimit(self, c):
        for u in self.params['ulimit']:
            c += ['--ulimit', u]
        return c
    def addparam_user(self, c):
        return c + ['--user', self.params['user']]
    def addparam_userns(self, c):
        return c + ['--userns', self.params['userns']]
    def addparam_uts(self, c):
        return c + ['--uts', self.params['uts']]
    def addparam_volume(self, c):
        for vol in self.params['volume']:
            if vol:
                c += ['--volume', vol]
        return c
    def addparam_volumes_from(self, c):
        for vol in self.params['volumes_from']:
            c += ['--volumes-from', vol]
        return c
    def addparam_workdir(self, c):
        return c + ['--workdir', self.params['workdir']]
    # Add your own args for podman command
    def addparam_cmd_args(self, c):
        return c + self.params['cmd_args']
class PodmanDefaults:
    """Default values podman applies to unset container parameters.

    PodmanContainerDiff substitutes these defaults for module parameters
    the user did not set, so the idempotency comparison against the
    "podman inspect" output is meaningful.

    Arguments:
        module {AnsibleModule} -- module object (kept for future use)
        podman_version {str} -- version of the podman binary in use
    """
    def __init__(self, module, podman_version):
        self.module = module
        self.version = podman_version
        self.defaults = {
            "blkio_weight": 0,
            "cgroups": "default",
            "cgroup_parent": "",
            "cidfile": "",
            "cpus": 0.0,
            "cpu_shares": 0,
            "cpu_quota": 0,
            "cpu_period": 0,
            "cpu_rt_runtime": 0,
            "cpu_rt_period": 0,
            "cpuset_cpus": "",
            "cpuset_mems": "",
            "detach": True,
            "device": [],
            "env_host": False,
            "etc_hosts": {},
            "group_add": [],
            "healthcheck": "",
            "ipc": "",
            # BUGFIX: key must match the module parameter name
            # 'kernel_memory' (was 'kernelmemory', which never matched
            # any parameter in defaultize() and so was dead).
            "kernel_memory": "0",
            "log_driver": "k8s-file",
            "memory": "0",
            "memory_swap": "0",
            "memory_reservation": "0",
            # "memory_swappiness": -1,
            "no_hosts": False,
            # libpod issue with networks in inspection
            "network": ["default"],
            "oom_score_adj": 0,
            "pid": "",
            "privileged": False,
            "rm": False,
            "security_opt": [],
            "stop_signal": 15,
            "tty": False,
            "user": "",
            "uts": "",
            "volume": [],
            "workdir": "/",
        }
    def default_dict(self):
        # make here any changes to self.defaults related to podman version
        return self.defaults
class PodmanContainerDiff:
    """Compares desired module parameters with the existing container state.

    Each diffparam_* method compares one parameter: 'before' is the value
    taken from "podman inspect" output, 'after' is the desired value from
    module parameters (with defaults substituted). Any mismatch is recorded
    in self.diff and signals that the container must be recreated.
    """
    def __init__(self, module, info, podman_version):
        self.module = module
        self.version = podman_version
        self.default_dict = None
        # Lowercase the whole inspect JSON so all key/value comparisons
        # below are case-insensitive.
        self.info = yaml.safe_load(json.dumps(info).lower())
        self.params = self.defaultize()
        self.diff = {'before': {}, 'after': {}}
        # Parameters that cannot be compared reliably and are skipped.
        self.non_idempotent = {
            'env_file',
            'env_host',
            "ulimit",  # Defaults depend on user and platform, impossible to guess
        }
    def defaultize(self):
        """Return module params with podman defaults filled in for unset ones."""
        params_with_defaults = {}
        self.default_dict = PodmanDefaults(
            self.module, self.version).default_dict()
        for p in self.module.params:
            if self.module.params[p] is None and p in self.default_dict:
                params_with_defaults[p] = self.default_dict[p]
            else:
                params_with_defaults[p] = self.module.params[p]
        return params_with_defaults
    def _diff_update_and_compare(self, param_name, before, after):
        """Record the diff for param_name; return True if values differ."""
        if before != after:
            self.diff['before'].update({param_name: before})
            self.diff['after'].update({param_name: after})
            return True
        return False
    def diffparam_annotation(self):
        before = self.info['config']['annotations'] or {}
        after = before.copy()
        if self.module.params['annotation'] is not None:
            after.update(self.params['annotation'])
        return self._diff_update_and_compare('annotation', before, after)
    def diffparam_env_host(self):
        # It's impossible to get from inspect, recreate it if not default
        before = False
        after = self.params['env_host']
        return self._diff_update_and_compare('env_host', before, after)
    def diffparam_blkio_weight(self):
        before = self.info['hostconfig']['blkioweight']
        after = self.params['blkio_weight']
        return self._diff_update_and_compare('blkio_weight', before, after)
    def diffparam_blkio_weight_device(self):
        before = self.info['hostconfig']['blkioweightdevice']
        if before == [] and self.module.params['blkio_weight_device'] is None:
            after = []
        else:
            after = self.params['blkio_weight_device']
        return self._diff_update_and_compare('blkio_weight_device', before, after)
    def diffparam_cap_add(self):
        # Compare against effective capabilities; requested caps are
        # normalized to the lowercase "cap_*" form used by inspect output.
        before = self.info['effectivecaps'] or []
        after = []
        if self.module.params['cap_add'] is not None:
            after += ["cap_" + i.lower()
                      for i in self.module.params['cap_add']]
        after += before
        before, after = sorted(list(set(before))), sorted(list(set(after)))
        return self._diff_update_and_compare('cap_add', before, after)
    def diffparam_cap_drop(self):
        # Dropped caps are removed from the effective set before comparing.
        before = self.info['effectivecaps'] or []
        after = before[:]
        if self.module.params['cap_drop'] is not None:
            for c in ["cap_" + i.lower() for i in self.module.params['cap_drop']]:
                if c in after:
                    after.remove(c)
        before, after = sorted(list(set(before))), sorted(list(set(after)))
        return self._diff_update_and_compare('cap_drop', before, after)
    def diffparam_cgroup_parent(self):
        before = self.info['hostconfig']['cgroupparent']
        after = self.params['cgroup_parent']
        return self._diff_update_and_compare('cgroup_parent', before, after)
    def diffparam_cgroups(self):
        # Cgroups output is not supported in all versions
        if 'cgroups' in self.info['hostconfig']:
            before = self.info['hostconfig']['cgroups']
            after = self.params['cgroups']
            return self._diff_update_and_compare('cgroups', before, after)
        return False
    def diffparam_cidfile(self):
        before = self.info['hostconfig']['containeridfile']
        after = self.params['cidfile']
        return self._diff_update_and_compare('cidfile', before, after)
    def diffparam_command(self):
        # TODO(sshnaidm): to inspect image to get the default command
        if self.module.params['command'] is not None:
            before = self.info['config']['cmd']
            after = self.params['command']
            # Inspect output is lowercased (see __init__), so the desired
            # command is lowercased too before comparison.
            if isinstance(after, str):
                after = [i.lower() for i in after.split()]
            elif isinstance(after, list):
                after = [i.lower() for i in after]
            return self._diff_update_and_compare('command', before, after)
        return False
    def diffparam_conmon_pidfile(self):
        before = self.info['conmonpidfile']
        if self.module.params['conmon_pidfile'] is None:
            after = before
        else:
            after = self.params['conmon_pidfile']
        return self._diff_update_and_compare('conmon_pidfile', before, after)
    def diffparam_cpu_period(self):
        before = self.info['hostconfig']['cpuperiod']
        after = self.params['cpu_period']
        return self._diff_update_and_compare('cpu_period', before, after)
    def diffparam_cpu_rt_period(self):
        before = self.info['hostconfig']['cpurealtimeperiod']
        after = self.params['cpu_rt_period']
        return self._diff_update_and_compare('cpu_rt_period', before, after)
    def diffparam_cpu_rt_runtime(self):
        before = self.info['hostconfig']['cpurealtimeruntime']
        after = self.params['cpu_rt_runtime']
        return self._diff_update_and_compare('cpu_rt_runtime', before, after)
    def diffparam_cpu_shares(self):
        before = self.info['hostconfig']['cpushares']
        after = self.params['cpu_shares']
        return self._diff_update_and_compare('cpu_shares', before, after)
    def diffparam_cpus(self):
        # Inspect reports NanoCpus; convert back to the fractional CPU count.
        before = int(self.info['hostconfig']['nanocpus']) / 1000000000
        after = self.params['cpus']
        return self._diff_update_and_compare('cpus', before, after)
    def diffparam_cpuset_cpus(self):
        before = self.info['hostconfig']['cpusetcpus']
        after = self.params['cpuset_cpus']
        return self._diff_update_and_compare('cpuset_cpus', before, after)
    def diffparam_cpuset_mems(self):
        before = self.info['hostconfig']['cpusetmems']
        after = self.params['cpuset_mems']
        return self._diff_update_and_compare('cpuset_mems', before, after)
    def diffparam_device(self):
        # Compare only host:container path pairs; permissions (":rwm" etc.)
        # are stripped from the desired value.
        before = [":".join([i['pathonhost'], i['pathincontainer']])
                  for i in self.info['hostconfig']['devices']]
        after = [":".join(i.split(":")[:2]) for i in self.params['device']]
        before, after = sorted(list(set(before))), sorted(list(set(after)))
        return self._diff_update_and_compare('devices', before, after)
    def diffparam_device_read_bps(self):
        before = self.info['hostconfig']['blkiodevicereadbps'] or []
        before = ["%s:%s" % (i['path'], i['rate']) for i in before]
        after = self.params['device_read_bps'] or []
        before, after = sorted(list(set(before))), sorted(list(set(after)))
        return self._diff_update_and_compare('device_read_bps', before, after)
    def diffparam_device_read_iops(self):
        before = self.info['hostconfig']['blkiodevicereadiops'] or []
        before = ["%s:%s" % (i['path'], i['rate']) for i in before]
        after = self.params['device_read_iops'] or []
        before, after = sorted(list(set(before))), sorted(list(set(after)))
        return self._diff_update_and_compare('device_read_iops', before, after)
    def diffparam_device_write_bps(self):
        before = self.info['hostconfig']['blkiodevicewritebps'] or []
        before = ["%s:%s" % (i['path'], i['rate']) for i in before]
        after = self.params['device_write_bps'] or []
        before, after = sorted(list(set(before))), sorted(list(set(after)))
        return self._diff_update_and_compare('device_write_bps', before, after)
    def diffparam_device_write_iops(self):
        before = self.info['hostconfig']['blkiodevicewriteiops'] or []
        before = ["%s:%s" % (i['path'], i['rate']) for i in before]
        after = self.params['device_write_iops'] or []
        before, after = sorted(list(set(before))), sorted(list(set(after)))
        return self._diff_update_and_compare('device_write_iops', before, after)
    # Limited idempotency, it can't guess default values
    def diffparam_env(self):
        # NOTE(review): splitting on "=" keeps only the first value segment,
        # so env values containing "=" are compared partially.
        env_before = self.info['config']['env'] or {}
        before = {i.split("=")[0]: i.split("=")[1] for i in env_before}
        after = before.copy()
        if self.params['env']:
            after.update({
                str(k).lower(): str(v).lower()
                for k, v in self.params['env'].items()
            })
        return self._diff_update_and_compare('env', before, after)
    def diffparam_etc_hosts(self):
        if self.info['hostconfig']['extrahosts']:
            before = dict([i.split(":") for i in self.info['hostconfig']['extrahosts']])
        else:
            before = {}
        after = self.params['etc_hosts']
        return self._diff_update_and_compare('etc_hosts', before, after)
    def diffparam_group_add(self):
        before = self.info['hostconfig']['groupadd']
        after = self.params['group_add']
        return self._diff_update_and_compare('group_add', before, after)
    # Healthcheck is only defined in container config if a healthcheck
    # was configured; otherwise the config key isn't part of the config.
    def diffparam_healthcheck(self):
        if 'healthcheck' in self.info['config']:
            # the "test" key is a list of 2 items where the first one is
            # "CMD-SHELL" and the second one is the actual healthcheck command.
            before = self.info['config']['healthcheck']['test'][1]
        else:
            before = ''
        after = self.params['healthcheck'] or before
        return self._diff_update_and_compare('healthcheck', before, after)
    # Because the hostname is randomly generated, this parameter has partial
    # idempotency only.
    def diffparam_hostname(self):
        before = self.info['config']['hostname']
        after = self.params['hostname'] or before
        return self._diff_update_and_compare('hostname', before, after)
    def diffparam_image(self):
        # TODO(sshnaidm): for strict image compare mode use SHAs
        before = self.info['config']['image']
        after = self.params['image']
        mode = self.params['image_strict']
        if mode is None or not mode:
            # In a idempotency 'lite mode' assume all images from different registries are the same
            before = before.replace(":latest", "")
            after = after.replace(":latest", "")
            before = before.split("/")[-1]
            after = after.split("/")[-1]
        return self._diff_update_and_compare('image', before, after)
    def diffparam_ipc(self):
        before = self.info['hostconfig']['ipcmode']
        after = self.params['ipc']
        return self._diff_update_and_compare('ipc', before, after)
    def diffparam_label(self):
        before = self.info['config']['labels'] or {}
        after = before.copy()
        if self.params['label']:
            after.update({
                str(k).lower(): str(v).lower()
                for k, v in self.params['label'].items()
            })
        return self._diff_update_and_compare('label', before, after)
    def diffparam_log_driver(self):
        before = self.info['hostconfig']['logconfig']['type']
        after = self.params['log_driver']
        return self._diff_update_and_compare('log_driver', before, after)
    # Parameter has limited idempotency, unable to guess the default log_path
    def diffparam_log_opt(self):
        before = self.info['logpath']
        if self.module.params['log_opt'] in [None, '']:
            after = before
        else:
            # Only the value part of "path=<logpath>" is compared.
            after = self.params['log_opt'].split("=")[1]
        return self._diff_update_and_compare('log_opt', before, after)
    def diffparam_memory(self):
        before = str(self.info['hostconfig']['memory'])
        after = self.params['memory']
        return self._diff_update_and_compare('memory', before, after)
    def diffparam_memory_swap(self):
        # By default it's twice memory parameter
        before = str(self.info['hostconfig']['memoryswap'])
        after = self.params['memory_swap']
        if (self.module.params['memory_swap'] is None
                and self.params['memory'] != 0
                and self.params['memory'].isdigit()):
            after = str(int(self.params['memory']) * 2)
        return self._diff_update_and_compare('memory_swap', before, after)
    def diffparam_memory_reservation(self):
        before = str(self.info['hostconfig']['memoryreservation'])
        after = self.params['memory_reservation']
        return self._diff_update_and_compare('memory_reservation', before, after)
    def diffparam_network(self):
        before = [self.info['hostconfig']['networkmode']]
        after = self.params['network']
        return self._diff_update_and_compare('network', before, after)
    def diffparam_no_hosts(self):
        # An empty HostsPath in inspect output means /etc/hosts was not
        # created for the container.
        before = not bool(self.info['hostspath'])
        after = self.params['no_hosts']
        if self.params['network'] == ['none']:
            after = True
        return self._diff_update_and_compare('no_hosts', before, after)
    def diffparam_oom_score_adj(self):
        before = self.info['hostconfig']['oomscoreadj']
        after = self.params['oom_score_adj']
        return self._diff_update_and_compare('oom_score_adj', before, after)
    def diffparam_privileged(self):
        before = self.info['hostconfig']['privileged']
        after = self.params['privileged']
        return self._diff_update_and_compare('privileged', before, after)
    def diffparam_pid(self):
        before = self.info['hostconfig']['pidmode']
        after = self.params['pid']
        return self._diff_update_and_compare('pid', before, after)
def diffparam_rm(self):
before = self.info['hostconfig']['autoremove']
after = self.params['rm']
return self._diff_update_and_compare('rm', before, after)
def diffparam_security_opt(self):
before = self.info['hostconfig']['securityopt']
after = self.params['security_opt']
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('security_opt', before, after)
def diffparam_stop_signal(self):
before = self.info['config']['stopsignal']
after = self.params['stop_signal']
return self._diff_update_and_compare('stop_signal', before, after)
def diffparam_tty(self):
before = self.info['config']['tty']
after = self.params['tty']
return self._diff_update_and_compare('tty', before, after)
def diffparam_user(self):
before = self.info['config']['user']
if self.module.params['user'] is None and before:
after = before
else:
after = self.params['user']
return self._diff_update_and_compare('user', before, after)
def diffparam_uts(self):
before = self.info['hostconfig']['utsmode']
after = self.params['uts']
return self._diff_update_and_compare('uts', before, after)
def diffparam_volume(self):
before = self.info['mounts']
if before:
volumes = []
for m in before:
if m['type'] == 'volume':
volumes.append([m['name'], m['destination']])
else:
volumes.append([m['source'], m['destination']])
before = [":".join(v) for v in volumes]
# Ignore volumes option for idempotency
after = [":".join(v.split(":")[:2]) for v in self.params['volume']]
before, after = sorted(list(set(before))), sorted(list(set(after)))
return self._diff_update_and_compare('volume', before, after)
def diffparam_volumes_from(self):
before = self.info['hostconfig']['volumesfrom'] or []
after = self.params['volumes_from'] or []
return self._diff_update_and_compare('volumes_from', before, after)
def diffparam_workdir(self):
before = self.info['config']['workingdir']
after = self.params['workdir']
return self._diff_update_and_compare('workdir', before, after)
def is_different(self):
diff_func_list = [func for func in dir(self)
if callable(getattr(self, func)) and func.startswith(
"diffparam")]
fail_fast = not bool(self.module._diff)
different = False
for func_name in diff_func_list:
dff_func = getattr(self, func_name)
if dff_func():
if fail_fast:
return True
else:
different = True
# Check non idempotent parameters
for p in self.non_idempotent:
if self.module.params[p] is not None and self.module.params[p] not in [{}, [], '']:
different = True
return different
def ensure_image_exists(module, image):
    """If image is passed, ensure it exists, if not - pull it or fail.

    Arguments:
        module {obj} -- ansible module object
        image {str} -- name of image

    Returns:
        list -- list of image actions - if it pulled or nothing was done
    """
    if not image:
        return []
    executable = module.params['executable']
    rc, out, err = module.run_command([executable, 'image', 'exists', image])
    if rc == 0:
        # Image already present locally; nothing to do.
        return []
    rc, out, err = module.run_command([executable, 'image', 'pull', image])
    if rc != 0:
        module.fail_json(msg="Can't pull image %s" % image, stdout=out,
                         stderr=err)
    return ["pulled image %s" % image]
class PodmanContainer:
    """Perform container tasks.

    Manages a podman container: inspects it and checks its current state.
    """

    def __init__(self, module, name):
        """Initialize PodmanContainer class.

        Arguments:
            module {obj} -- ansible module object
            name {str} -- name of container
        """
        super(PodmanContainer, self).__init__()
        self.module = module
        self.name = name
        # Output of the last executed podman command.
        self.stdout, self.stderr = '', ''
        # Inspect data; an empty dict means the container does not exist.
        self.info = self.get_info()
        self.version = self._get_podman_version()
        # Rendered before/after text for Ansible's --diff mode.
        self.diff = {}
        # Full podman command lines executed (reported for debugging).
        self.actions = []

    @property
    def exists(self):
        """Check if container exists."""
        return bool(self.info != {})

    @property
    def different(self):
        """Check if container is different from the requested parameters."""
        diffcheck = PodmanContainerDiff(self.module, self.info, self.version)
        is_different = diffcheck.is_different()
        diffs = diffcheck.diff
        # Render the collected per-parameter diffs as sorted "key - value"
        # lines, only when running with --diff and a change was detected.
        if self.module._diff and is_different and diffs['before'] and diffs['after']:
            self.diff['before'] = "\n".join(
                ["%s - %s" % (k, v) for k, v in sorted(
                    diffs['before'].items())]) + "\n"
            self.diff['after'] = "\n".join(
                ["%s - %s" % (k, v) for k, v in sorted(
                    diffs['after'].items())]) + "\n"
        return is_different

    @property
    def running(self):
        """Return True if container is running now."""
        return self.exists and self.info['State']['Running']

    @property
    def stopped(self):
        """Return True if container exists and is not running now."""
        return self.exists and not self.info['State']['Running']

    def get_info(self):
        """Inspect container and gather info about it.

        Returns:
            dict -- first element of 'podman container inspect' output,
                or an empty dict when the inspect command fails.
        """
        rc, out, err = self.module.run_command(
            [self.module.params['executable'], b'container', b'inspect', self.name])
        return json.loads(out)[0] if rc == 0 else {}

    def _get_podman_version(self):
        # Parse the version number out of "podman version X.Y.Z" output;
        # fail the module when the binary can't be executed.
        rc, out, err = self.module.run_command(
            [self.module.params['executable'], b'--version'])
        if rc != 0 or not out or "version" not in out:
            self.module.fail_json(msg="%s run failed!" % self.module.params['executable'])
        return out.split("version")[1].strip()

    def _perform_action(self, action):
        """Perform action with container.

        Arguments:
            action {str} -- action to perform - start, create, stop, run,
                            delete
        """
        # PodmanModuleParams (defined elsewhere in this module) translates
        # the Ansible parameters into a podman CLI argument list.
        b_command = PodmanModuleParams(action,
                                       self.module.params,
                                       self.version,
                                       self.module,
                                       ).construct_command_from_params()
        full_cmd = " ".join([self.module.params['executable']]
                            + [to_native(i) for i in b_command])
        self.module.log("PODMAN-CONTAINER-DEBUG: %s" % full_cmd)
        self.actions.append(full_cmd)
        # In check mode only record the command; do not execute it.
        if not self.module.check_mode:
            rc, out, err = self.module.run_command(
                [self.module.params['executable'], b'container'] + b_command,
                expand_user_and_vars=False)
            self.stdout = out
            self.stderr = err
            if rc != 0:
                self.module.fail_json(
                    msg="Can't %s container %s" % (action, self.name),
                    stdout=out, stderr=err)

    def run(self):
        """Run the container."""
        self._perform_action('run')

    def delete(self):
        """Delete the container."""
        self._perform_action('delete')

    def stop(self):
        """Stop the container."""
        self._perform_action('stop')

    def start(self):
        """Start the container."""
        self._perform_action('start')

    def create(self):
        """Create the container."""
        self._perform_action('create')

    def recreate(self):
        """Recreate the container: delete it, then run it again."""
        self.delete()
        self.run()

    def restart(self):
        """Restart the container: stop it, then run it again."""
        self.stop()
        self.run()
class PodmanManager:
    """Module manager class.

    Defines according to parameters what actions should be applied to
    the container.
    """

    def __init__(self, module):
        """Initialize PodmanManager class.

        Arguments:
            module {obj} -- ansible module object
        """
        super(PodmanManager, self).__init__()
        self.module = module
        self.results = {
            'changed': False,
            'actions': [],
            'container': {},
        }
        self.name = self.module.params['name']
        self.executable = \
            self.module.get_bin_path(self.module.params['executable'],
                                     required=True)
        self.image = self.module.params['image']
        # Pull the image up front when it's missing; record what was done.
        image_actions = ensure_image_exists(self.module, self.image)
        self.results['actions'] += image_actions
        self.state = self.module.params['state']
        self.restart = self.module.params['force_restart']
        self.recreate = self.module.params['recreate']
        self.container = PodmanContainer(self.module, self.name)

    def update_container_result(self, changed=True):
        """Inspect the current container, update results with last info, exit.

        NOTE: this method never returns -- module.exit_json() terminates
        the module process.

        Keyword Arguments:
            changed {bool} -- whether any action was performed
                              (default: {True})
        """
        # Re-inspect only when an action was actually performed.
        facts = self.container.get_info() if changed else self.container.info
        out, err = self.container.stdout, self.container.stderr
        self.results.update({'changed': changed, 'container': facts,
                             'podman_actions': self.container.actions},
                            stdout=out, stderr=err)
        if self.container.diff:
            self.results.update({'diff': self.container.diff})
        if self.module.params['debug']:
            self.results.update({'podman_version': self.container.version})
        self.module.exit_json(**self.results)

    def make_started(self):
        """Run actions if desired state is 'started'."""
        # Every branch ends with update_container_result(), which exits.
        if self.container.running and \
                (self.container.different or self.recreate):
            self.container.recreate()
            self.results['actions'].append('recreated %s' %
                                           self.container.name)
            self.update_container_result()
        elif self.container.running and not self.container.different:
            if self.restart:
                self.container.restart()
                self.results['actions'].append('restarted %s' %
                                               self.container.name)
                # Exits here when a forced restart was performed.
                self.update_container_result()
            self.update_container_result(changed=False)
        elif not self.container.exists:
            self.container.run()
            self.results['actions'].append('started %s' % self.container.name)
            self.update_container_result()
        elif self.container.stopped and self.container.different:
            self.container.recreate()
            self.results['actions'].append('recreated %s' %
                                           self.container.name)
            self.update_container_result()
        elif self.container.stopped and not self.container.different:
            self.container.start()
            self.results['actions'].append('started %s' % self.container.name)
            self.update_container_result()

    def make_stopped(self):
        """Run actions if desired state is 'stopped'."""
        if not self.container.exists and not self.image:
            self.module.fail_json(msg='Cannot create container when image'
                                      ' is not specified!')
        if not self.container.exists:
            self.container.create()
            self.results['actions'].append('created %s' % self.container.name)
            self.update_container_result()
        if self.container.stopped:
            self.update_container_result(changed=False)
        elif self.container.running:
            self.container.stop()
            self.results['actions'].append('stopped %s' % self.container.name)
            self.update_container_result()

    def make_absent(self):
        """Run actions if desired state is 'absent'."""
        if not self.container.exists:
            self.results.update({'changed': False})
        elif self.container.exists:
            self.container.delete()
            self.results['actions'].append('deleted %s' % self.container.name)
            self.results.update({'changed': True})
        self.results.update({'container': {},
                             'podman_actions': self.container.actions})
        self.module.exit_json(**self.results)

    def execute(self):
        """Execute the desired action according to map of actions & states."""
        states_map = {
            'present': self.make_started,
            'started': self.make_started,
            'absent': self.make_absent,
            'stopped': self.make_stopped
        }
        process_action = states_map[self.state]
        process_action()
        # Safety net: every handler above exits the module via exit_json();
        # reaching this line means a logic error.
        self.module.fail_json(msg="Unexpected logic error happened, "
                                  "please contact maintainers ASAP!")
def main():
    """Module entry point: parse arguments, validate, run the manager."""
    module = AnsibleModule(
        argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
        mutually_exclusive=(
            ['no_hosts', 'etc_hosts'],
        ),
        supports_check_mode=True,
    )
    # An image is mandatory when the container must end up running/present.
    state = module.params['state']
    if state in ('started', 'present') and not module.params['image']:
        module.fail_json(msg="State '%s' required image to be configured!"
                             % state)
    PodmanManager(module).execute()


if __name__ == '__main__':
    main()
| 34.65
| 99
| 0.601198
| 8,359
| 69,300
| 4.864936
| 0.112813
| 0.039837
| 0.018394
| 0.020066
| 0.299759
| 0.209733
| 0.148847
| 0.115723
| 0.101903
| 0.091821
| 0
| 0.006567
| 0.285859
| 69,300
| 1,999
| 100
| 34.667334
| 0.815134
| 0.052006
| 0
| 0.256395
| 0
| 0.001785
| 0.469397
| 0.019309
| 0
| 0
| 0
| 0.0005
| 0.001785
| 1
| 0.105294
| false
| 0.00238
| 0.003569
| 0.041047
| 0.209994
| 0.000595
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b90a7ababb1e0f6301fc1099880a560c64176ef6
| 4,209
|
bzl
|
Python
|
samples/workload/XNNPACK/toolchain/emscripten_toolchain_config.bzl
|
utsavm9/wasm-micro-runtime
|
0960e82db2be30b741f5c83e7a57ea9056b2ab59
|
[
"Apache-2.0"
] | 2
|
2020-08-27T03:48:31.000Z
|
2020-09-17T03:02:53.000Z
|
samples/workload/XNNPACK/toolchain/emscripten_toolchain_config.bzl
|
utsavm9/wasm-micro-runtime
|
0960e82db2be30b741f5c83e7a57ea9056b2ab59
|
[
"Apache-2.0"
] | 3
|
2020-09-11T04:03:00.000Z
|
2020-09-23T06:16:43.000Z
|
samples/workload/XNNPACK/toolchain/emscripten_toolchain_config.bzl
|
utsavm9/wasm-micro-runtime
|
0960e82db2be30b741f5c83e7a57ea9056b2ab59
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
)
# C/C++ compile actions the toolchain's compile flags are attached to.
all_compile_actions = [
    ACTION_NAMES.c_compile,
    ACTION_NAMES.cpp_compile,
]

# Link actions the toolchain's linker flags are attached to.
all_link_actions = [
    ACTION_NAMES.cpp_link_executable,
    ACTION_NAMES.cpp_link_dynamic_library,
    ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
def _impl(ctx):
    """Build the CcToolchainConfigInfo for the emsdk wasm32 toolchain."""

    # emcc/em++/emar do the real work; tools with no wasm equivalent are
    # stubbed out with /bin/false.
    tool_paths = [
        tool_path(name = tool, path = binary)
        for tool, binary in [
            ("gcc", "/opt/emsdk/upstream/emscripten/emcc"),
            ("ld", "/opt/emsdk/upstream/emscripten/emcc"),
            ("ar", "/opt/emsdk/upstream/emscripten/emar"),
            ("cpp", "/opt/emsdk/upstream/emscripten/em++"),
            ("gcov", "/bin/false"),
            ("nm", "/bin/false"),
            ("objdump", "/bin/false"),
            ("strip", "/bin/false"),
        ]
    ]

    # Flags shared by compilation and linking.
    common_flags = [
        "-O3",
        "-msimd128",
        "-s",
        "USE_PTHREADS=0",
        "-s",
        "ERROR_ON_UNDEFINED_SYMBOLS=0",
        "-s",
        "STANDALONE_WASM=1",
    ]

    # Linking additionally exports heap/data symbols for the embedder.
    link_flags = common_flags + [
        "-Wl,--export=__heap_base",
        "-Wl,--export=__data_end",
    ]

    features = [  # NEW
        feature(
            name = "default_compile_flags",
            enabled = True,
            flag_sets = [
                flag_set(
                    actions = all_compile_actions,
                    flag_groups = [flag_group(flags = common_flags)],
                ),
            ],
        ),
        feature(
            name = "default_linker_flags",
            enabled = True,
            flag_sets = [
                flag_set(
                    actions = all_link_actions,
                    flag_groups = [flag_group(flags = link_flags)],
                ),
            ],
        ),
    ]

    return cc_common.create_cc_toolchain_config_info(
        ctx = ctx,
        features = features,  # NEW
        cxx_builtin_include_directories = [
            "/opt/emsdk/upstream/emscripten/system/include/libcxx",
            "/opt/emsdk/upstream/emscripten/system/lib/libcxxabi/include",
            "/opt/emsdk/upstream/emscripten/system/include",
            "/opt/emsdk/upstream/emscripten/system/include/libc",
            "/opt/emsdk/upstream/emscripten/system/lib/libc/musl/arch/emscripten",
            "/opt/emsdk/upstream/lib/clang/12.0.0/include/",
        ],
        toolchain_identifier = "wasm-emsdk",
        host_system_name = "i686-unknown-linux-gnu",
        target_system_name = "wasm32-unknown-emscripten",
        target_cpu = "wasm32",
        target_libc = "unknown",
        compiler = "emsdk",
        abi_version = "unknown",
        abi_libc_version = "unknown",
        tool_paths = tool_paths,
    )
# Rule that produces the CcToolchainConfigInfo consumed by a cc_toolchain
# target in the BUILD file; it takes no attributes of its own.
emsdk_toolchain_config = rule(
    implementation = _impl,
    attrs = {},
    provides = [CcToolchainConfigInfo],
)
| 30.5
| 82
| 0.434545
| 342
| 4,209
| 5.067251
| 0.359649
| 0.046163
| 0.092325
| 0.135026
| 0.398153
| 0.363531
| 0.259088
| 0.21004
| 0.21004
| 0.109636
| 0
| 0.013508
| 0.45474
| 4,209
| 137
| 83
| 30.722628
| 0.741612
| 0.028986
| 0
| 0.507813
| 0
| 0
| 0.245958
| 0.178834
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007813
| false
| 0
| 0
| 0
| 0.015625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b90aa19934d5d7330ff2185f5e9e641a32b1df92
| 8,781
|
py
|
Python
|
cloud_storages/gdrive/gdrive.py
|
toplenboren/safezone
|
eafad765ed7cd6f6b7607ac07e75fd843d32ee07
|
[
"MIT"
] | null | null | null |
cloud_storages/gdrive/gdrive.py
|
toplenboren/safezone
|
eafad765ed7cd6f6b7607ac07e75fd843d32ee07
|
[
"MIT"
] | null | null | null |
cloud_storages/gdrive/gdrive.py
|
toplenboren/safezone
|
eafad765ed7cd6f6b7607ac07e75fd843d32ee07
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import json
from typing import List
from functools import lru_cache
from cloud_storages.http_shortcuts import *
from database.database import Database
from models.models import StorageMetaInfo, Resource, Size
from cloud_storages.storage import Storage
from cloud_storages.gdrive.client_config import GOOGLE_DRIVE_CONFIG, SCOPES
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
GOOGLE_DRIVE_DB_KEY = 'google'
class GDriveStorage(Storage):
    """Google Drive implementation of the Storage interface.

    NOTE(review): create_path, save_resource_to_path and download_resource
    below call cloud-api.yandex.net endpoints and mention "YD" in errors --
    they appear copy-pasted from a Yandex.Disk implementation and almost
    certainly do not work against Google Drive. Flagged inline; verify.
    """

    def __init__(self, token):
        # OAuth access token used by the *_with_OAuth HTTP helpers.
        self.token = token

    # NOTE(review): lru_cache on an instance method keeps every instance
    # alive for the lifetime of the cache (flake8-bugbear B019).
    @lru_cache(maxsize=None)
    def _get_folder_id_by_name(self, name: str) -> str:
        """
        Google drive has a quirk - you can't really use normal os-like paths - first you need to get an ID of the folder
        This function searches for folders with specified name
        """
        response = get_with_OAuth(
            f"https://www.googleapis.com/drive/v3/files",
            params={
                'fields': '*',
                # Drive query language: match folders by exact name.
                'q': f"name = '{name}' and mimeType = 'application/vnd.google-apps.folder'"
            },
            token=self.token
        )
        if response.status_code == 200:
            response_as_json = response.json()
            try:
                # Take the first match; duplicates are silently ignored.
                result = response_as_json['files'][0]['id']
                return result
            except IndexError as e:
                raise ValueError(f"Something went wrong with GD: Error: {e}")
        else:
            raise ValueError(f"Something went wrong with GD: Response: "
                             f"{str(response.status_code)} — {response.json()}")

    @classmethod
    # todo (toplenboren) remove database argument dependency :(
    def auth(cls, db: Database):
        """Run the OAuth flow (or refresh stored creds) and persist the token."""
        creds = None
        creds_from_db = db.get(GOOGLE_DRIVE_DB_KEY)
        if creds_from_db:
            creds = Credentials.from_authorized_user_info(json.loads(creds_from_db), SCOPES)
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                # No (valid) stored credentials: run the interactive flow.
                flow = InstalledAppFlow.from_client_config(GOOGLE_DRIVE_CONFIG, SCOPES)
                creds = flow.run_local_server(port=0)
            # Persisted only when new/refreshed credentials were obtained.
            db.set(GOOGLE_DRIVE_DB_KEY, creds.token)

    @classmethod
    # NOTE(review): parameter 'json' shadows the module-level json import.
    def _deserialize_resource(cls, json: dict) -> Resource or None:
        """
        Tries to parse Resource from YD to Resource object
        :param json: raw file metadata dict from the Drive API
        :return: Resource, or None when mandatory keys are missing
        """
        try:
            is_file = True
            if 'folder' in json['mimeType']:
                is_file = False
            # You don't have pathes in google drive, instead -- you have an id
            path = json['id']
        except KeyError:
            return None
        res = Resource(is_file, path)
        res.size = Size(json.get('size'), 'b') if json.get('size') else None
        res.name = json.get('name')
        res.url = json.get('webContentLink')
        res.updated = json.get('modifiedTime')
        res.md5 = json.get('md5Checksum')
        return res

    def list_resources_on_path(self, remote_path: str) -> List[Resource]:
        """
        List all items in directory
        :param remote_path: folder name to list (resolved to a Drive id)
        """
        folder_id = self._get_folder_id_by_name(remote_path)
        response = get_with_OAuth(
            f"https://www.googleapis.com/drive/v3/files",
            params={
                'fields': '*',
                'q': f"'{folder_id}' in parents"
            },
            token=self.token
        )
        if response.status_code == 200:
            result = []
            response_as_json = response.json()
            files = response_as_json['files']
            for resource in files:
                # Skip entries that can't be deserialized.
                res: Resource or None = self._deserialize_resource(resource)
                if res is not None:
                    result.append(res)
            return result
        else:
            # NOTE(review): message says "YD" but this is the GD client.
            raise ValueError(f"Something went wrong with YD: Response: "
                             f"{str(response.status_code)} — {response.json()['message']}")

    def get_meta_info(self) -> StorageMetaInfo:
        """Return used/total space from the Drive 'about' endpoint."""
        response = get_with_OAuth('https://www.googleapis.com/drive/v3/about?fields=*', token=self.token)
        if response.status_code == 200:
            response_read = response.json()
            used_space = response_read.get('storageQuota', {}).get('usage')
            total_space = response_read.get('storageQuota', {}).get('limit')
            return StorageMetaInfo(int(used_space), int(total_space))
        else:
            raise ValueError(f"Something went wrong with GD: Response: "
                             f"{str(response.status_code)} — {response.json()['message']}")

    def create_path(self, remote_path: List[str]) -> None:
        """
        Creates the remote path on yandex disk
        NOTE(review): hits cloud-api.yandex.net -- not Google Drive.
        """
        print(f'[{__name__}] Trying to create directory {"/".join(remote_path)} on remote...')
        dir_to_create = []
        # Create each path segment in turn: a, a/b, a/b/c, ...
        for dir in remote_path:
            dir_to_create.append(dir)
            path_to_create = '/'.join(dir_to_create)
            response = put_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={path_to_create}',
                                      token=self.token)
            if 199 < response.status_code < 401:
                print(f'[{__name__}] Created directory {path_to_create}')
                continue
            # 409 with this message means the directory already exists
            # ('уже существует' is Russian for 'already exists').
            elif response.status_code == 409 and 'уже существует' in response.json().get('message', ''):
                continue
            # Any other failure aborts silently.
            return

    def save_resource_to_path(self, resource: Resource, remote_path: str, overwrite: bool, _rec_call:bool = False) -> Resource or None:
        """
        Put an Item to the directory
        NOTE(review): hits cloud-api.yandex.net -- not Google Drive.
        :param resource: resource on the local fs
        :param remote_path: string, path to resource on remote fs
        :param _rec_call: bool, a system parameter, whether or not this function was called as a recursive call
        :return: saved resource or raises exception
        """
        upload_successful_flag = False
        response = get_with_OAuth(
            f'https://cloud-api.yandex.net/v1/disk/resources/upload?path={remote_path}&overwrite=${overwrite}',
            token=self.token
        )
        if response.status_code == 200:
            response_read = response.json()
            upload_link = response_read['href']
            with open(resource.path, 'rb') as f:
                files = f
                response = put_with_OAuth(upload_link, data=files)
                if 199 < response.status_code < 401:
                    upload_successful_flag = True
            # Re-inspect the uploaded resource to return its metadata.
            response = get_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={remote_path}',
                                      token=self.token)
            resource_metainfo = self._deserialize_resource(response.json())
            if 199 < response.status_code < 401:
                return resource_metainfo
            elif upload_successful_flag:
                # Upload worked but metadata fetch failed; return the input.
                return resource
        # This dir is not present in the storage
        # We use _rec_call to tell that the next call was made as recursive call, so we don't cause SO
        elif response.status_code == 409 and not _rec_call:
            # We don't need to create a folder with the name equal to the filename, so we do [:-1]
            self.create_path(remote_path.split('/')[:-1])
            return self.save_resource_to_path(resource, remote_path, overwrite, _rec_call=True)
        raise ValueError(f"Something went wrong with YD: Response: "
                         f"{str(response.status_code)} — {response.json().get('message', '')}")

    def download_resource(self, remote_path, local_path) -> str:
        # NOTE(review): hits cloud-api.yandex.net -- not Google Drive.
        response = get_with_OAuth(
            f'https://cloud-api.yandex.net/v1/disk/resources/download?path={remote_path}',
            token=self.token
        )
        if response.status_code == 200:
            response_read = response.json()
            dl_url = response_read.get('href')
        else:
            raise ValueError(f"[{__name__}] Something went wrong with YD: Response: "
                             f"{str(response.status_code)} — {response.json()['message']}")
        # NOTE(review): file handle is never closed; prefer a 'with' block.
        file = requests.get(dl_url)
        open(local_path, 'wb').write(file.content)
        return local_path
def main():
    """Ad-hoc smoke test: authenticate and list the 'savezone' folder."""
    storage = GDriveStorage(None)
    db = Database('../storage.db')
    storage.auth(db)
    token = json.loads(db.get(GOOGLE_DRIVE_DB_KEY))['token']
    result = GDriveStorage(token).list_resources_on_path('savezone')
    print(result)


if __name__ == '__main__':
    main()
| 38.853982
| 135
| 0.59959
| 1,060
| 8,781
| 4.775472
| 0.233019
| 0.041486
| 0.053339
| 0.023706
| 0.29593
| 0.280522
| 0.218886
| 0.218886
| 0.202884
| 0.193994
| 0
| 0.008605
| 0.298599
| 8,781
| 226
| 136
| 38.853982
| 0.81247
| 0.109213
| 0
| 0.30625
| 0
| 0.0125
| 0.182092
| 0.04045
| 0
| 0
| 0
| 0.004425
| 0
| 1
| 0.0625
| false
| 0
| 0.075
| 0
| 0.20625
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b90b0ec76c39d933c89c13f5c997460e2300453d
| 677
|
py
|
Python
|
index/urls.py
|
darkestmidnight/fedcodeathon2018
|
2cac972b6eaebd7bfc47c02aade36b0f4a6869ab
|
[
"MIT"
] | 1
|
2019-02-08T02:15:52.000Z
|
2019-02-08T02:15:52.000Z
|
index/urls.py
|
darkestmidnight/fedcodeathon2018
|
2cac972b6eaebd7bfc47c02aade36b0f4a6869ab
|
[
"MIT"
] | null | null | null |
index/urls.py
|
darkestmidnight/fedcodeathon2018
|
2cac972b6eaebd7bfc47c02aade36b0f4a6869ab
|
[
"MIT"
] | 1
|
2018-10-23T21:52:39.000Z
|
2018-10-23T21:52:39.000Z
|
from django.urls import re_path, include
from . import views
# Namespace for reversing these routes, e.g. reverse('logged:logged_count').
app_name='logged'
# url mappings for the webapp.
urlpatterns = [
    re_path(r'^$', views.logged_count, name="logged_count"),
    re_path(r'^loggedusers/', views.logged, name="logged_users"),
    re_path(r'^settings/', views.user_settings, name="update_info"),
    re_path(r'^administrators/', views.post_alert, name="post_alert"),
    re_path(r'^alerts/$', views.list_alert, name="list_alert"),
    # Slug-based detail view for a single alert.
    re_path(r'^alerts/(?P<slug>[\w-]+)/$', views.view_alert, name="view_alert"),
    re_path(r'^display/', views.display, name="display"),
    # NOTE(review): "door_selecttion" looks like a typo for "door_selection";
    # renaming would break existing reverse() lookups, so only flagged here.
    re_path(r'^doorselection/', views.doors_election, name="door_selecttion")
]
| 42.3125
| 80
| 0.698671
| 98
| 677
| 4.591837
| 0.408163
| 0.12
| 0.124444
| 0.08
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113737
| 677
| 16
| 81
| 42.3125
| 0.75
| 0.041359
| 0
| 0
| 0
| 0
| 0.29784
| 0.040123
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b90cb0cd96548302814d62e2805216240024b671
| 3,202
|
py
|
Python
|
scout/dao/item.py
|
uw-it-aca/scout
|
be787378c216f1fb172d68914a550a91c62bc264
|
[
"Apache-2.0"
] | 7
|
2017-01-29T09:51:22.000Z
|
2022-02-24T16:40:55.000Z
|
scout/dao/item.py
|
uw-it-aca/scout
|
be787378c216f1fb172d68914a550a91c62bc264
|
[
"Apache-2.0"
] | 338
|
2016-03-21T19:55:04.000Z
|
2022-03-30T21:12:28.000Z
|
scout/dao/item.py
|
uw-it-aca/scout
|
be787378c216f1fb172d68914a550a91c62bc264
|
[
"Apache-2.0"
] | 4
|
2016-03-02T01:19:01.000Z
|
2016-12-13T14:48:31.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from scout.dao.space import get_spots_by_filter, _get_spot_filters, \
_get_extended_info_by_key
import copy
def get_item_by_id(item_id):
    """Fetch the 'tech' spot containing item_id and attach the item to it."""
    matches = get_spots_by_filter([
        ('item:id', item_id),
        ('extended_info:app_type', 'tech')
    ])
    if not matches:
        return matches
    return _filter_spot_items(item_id, matches[0])
def _filter_spot_items(item_id, spot):
for item in spot.items:
if item.item_id == item_id:
spot.item = item
return spot
def add_item_info(spot):
    """Populate convenience attributes on each of the spot's items.

    Each attribute is read from the item's extended info via
    _get_extended_info_by_key. Replaces the original twelve copy-pasted
    assignments with one data-driven loop.

    Arguments:
        spot -- spot object with an .items sequence; mutated in place.
    Returns:
        The same spot object, for chaining.
    """
    # (attribute name, extended-info key) pairs.
    fields = (
        ('model', 'i_model'),
        ('brand', 'i_brand'),
        ('checkout_period', 'i_checkout_period'),
        ('reservation_notes', 'i_reservation_notes'),
        ('is_active', 'i_is_active'),
        ('quantity', 'i_quantity'),
        ('description', 'i_description'),
        ('reserve_url', 'i_reserve_url'),
        ('manual_url', 'i_manual_url'),
        ('owner', 'i_owner'),
        ('is_stf', 'i_is_stf'),
        # Note: this key deliberately has no 'i_' prefix in the original.
        ('cte_type_id', 'cte_type_id'),
    )
    for item in spot.items:
        for attr, key in fields:
            setattr(item, attr,
                    _get_extended_info_by_key(key, item.extended_info))
    return spot
def get_filtered_items(spots, request):
    """Return deep copies of spots keeping only items matching the filters."""
    brands = []
    subcategories = []
    require_active = False
    # Collect item-level filters from the request.
    for param in _get_spot_filters(request):
        key, value = param[0], param[1]
        if key == "item:extended_info:i_brand":
            brands.append(value)
        elif key == "item:subcategory":
            subcategories.append(value)
        elif key == "item:extended_info:i_is_active":
            require_active = True
    filtered_spots = []
    for spot in spots:
        clone = copy.deepcopy(spot)
        clone.items = [
            item for item in spot.items
            if (not require_active or item.is_active)
            and (not subcategories or item.subcategory in subcategories)
            and (not brands or item.brand in brands)
        ]
        filtered_spots.append(clone)
    return filtered_spots
def get_item_count(spots):
    """Return the total number of items across all spots."""
    return sum(len(spot.items) for spot in spots)
| 28.846847
| 76
| 0.584635
| 397
| 3,202
| 4.312343
| 0.188917
| 0.196262
| 0.130841
| 0.129089
| 0.333528
| 0.257593
| 0.08528
| 0
| 0
| 0
| 0
| 0.007062
| 0.336665
| 3,202
| 110
| 77
| 29.109091
| 0.798964
| 0.025609
| 0
| 0.244681
| 0
| 0
| 0.076997
| 0.025024
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053191
| false
| 0
| 0.021277
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b90cef65b59792b28b4c92088d99214713e0be27
| 458
|
py
|
Python
|
juriscraper/opinions/united_states/state/minnctapp.py
|
umeboshi2/juriscraper
|
16abceb3747947593841b1c2708de84dcc85c59d
|
[
"BSD-2-Clause"
] | null | null | null |
juriscraper/opinions/united_states/state/minnctapp.py
|
umeboshi2/juriscraper
|
16abceb3747947593841b1c2708de84dcc85c59d
|
[
"BSD-2-Clause"
] | null | null | null |
juriscraper/opinions/united_states/state/minnctapp.py
|
umeboshi2/juriscraper
|
16abceb3747947593841b1c2708de84dcc85c59d
|
[
"BSD-2-Clause"
] | 1
|
2021-03-03T00:03:16.000Z
|
2021-03-03T00:03:16.000Z
|
#Scraper for Minnesota Court of Appeals Published Opinions
#CourtID: minnctapp
#Court Short Name: MN
#Author: mlr
#Date: 2016-06-03
from juriscraper.opinions.united_states.state import minn
class Site(minn.Site):
    """MN Court of Appeals scraper; subclasses minn only for _download."""

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        # Restrict results to published/unpublished appellate opinions.
        self.court_filters = ['/ctapun/', '/ctappub/']
        self.court_id = self.__module__
| 26.941176
| 58
| 0.703057
| 60
| 458
| 5.1
| 0.733333
| 0.065359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.179039
| 458
| 16
| 59
| 28.625
| 0.792553
| 0.368996
| 0
| 0
| 0
| 0
| 0.060071
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b90d416b48352a6528abbda811ab137b9f58c6c2
| 1,223
|
py
|
Python
|
monty/os/__init__.py
|
JosephMontoya-TRI/monty
|
facef1776c7d05c941191a32a0b93f986a9761dd
|
[
"MIT"
] | null | null | null |
monty/os/__init__.py
|
JosephMontoya-TRI/monty
|
facef1776c7d05c941191a32a0b93f986a9761dd
|
[
"MIT"
] | null | null | null |
monty/os/__init__.py
|
JosephMontoya-TRI/monty
|
facef1776c7d05c941191a32a0b93f986a9761dd
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import os
import errno
from contextlib import contextmanager
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '1/24/14'
@contextmanager
def cd(path):
    """
    A Fabric-inspired cd context manager: temporarily switch the working
    directory to *path* for the body of the ``with`` block, restoring the
    previous working directory afterwards — even if the body raises. E.g.,
        with cd("/my/path/"):
            do_something()
    Args:
        path: Path to cd to.
    """
    previous_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always return to where we started, exception or not.
        os.chdir(previous_dir)
def makedirs_p(path, **kwargs):
    """
    Wrapper for os.makedirs that does not raise an exception if the directory
    already exists, in the fashion of the "mkdir -p" command. The check is
    performed in a thread-safe way (attempt first, inspect the error after).
    Args:
        path: path of the directory to create
        kwargs: standard kwargs for os.makedirs
    """
    try:
        os.makedirs(path, **kwargs)
    except OSError as exc:
        # Re-raise unless the failure means "already exists as a directory".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
| 23.075472
| 111
| 0.6435
| 161
| 1,223
| 4.695652
| 0.590062
| 0.039683
| 0.031746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012318
| 0.269828
| 1,223
| 53
| 112
| 23.075472
| 0.834267
| 0.409648
| 0
| 0.076923
| 0
| 0
| 0.136503
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0.038462
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b90fbfa2a7bb6e18e5af7e82345d7b5cf393db62
| 2,347
|
py
|
Python
|
backend/app.py
|
alexespejo/project-argus
|
53a6a8b1790906044bffbd2db156322938b62da9
|
[
"MIT"
] | 1
|
2022-03-21T02:13:25.000Z
|
2022-03-21T02:13:25.000Z
|
backend/app.py
|
alexespejo/project-argus
|
53a6a8b1790906044bffbd2db156322938b62da9
|
[
"MIT"
] | null | null | null |
backend/app.py
|
alexespejo/project-argus
|
53a6a8b1790906044bffbd2db156322938b62da9
|
[
"MIT"
] | null | null | null |
import face_recognition
from flask import Flask, request, redirect, Response
import camera
import firestore as db
# You can change this to any folder on your system
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app = Flask(__name__)
def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def detect_faces_in_image(name, access, file_stream):
    """Encode the first face in *file_stream* and store it as a new member.

    Returns an empty 204 response tuple for Flask.
    """
    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image.
    # NOTE(review): indexing [0] raises IndexError when no face is detected —
    # confirm callers guarantee a face, or guard this lookup.
    unknown_face_encodings = face_recognition.face_encodings(img)[0].tolist()
    db.add_member(name, access, unknown_face_encodings)
    return ('', 204)
@app.route('/')
def root():
    """Health-check endpoint: respond with 204 No Content."""
    return ('', 204)
@app.route('/upload', methods=['GET', 'POST'])
def upload_image():
    """Register a new member from an uploaded face photo.

    Reads ``name`` and ``access`` from the form; on POST with a valid image
    file, stores the face encoding via detect_faces_in_image (204 response).
    Otherwise redirects back to the form or to /video_feed.
    """
    db.encoding.update()
    name = request.form.get("name")
    access = request.form.get("access")
    if request.method == 'POST':
        # Bug fix: coerce ``access`` only on POST. Previously int(access) ran
        # unconditionally, so a GET request (empty form, access is None)
        # raised TypeError and returned HTTP 500.
        access = int(access)
        if 'file' not in request.files:
            return redirect(request.url)
        upload = request.files['file']
        if upload.filename == '':
            return redirect(request.url)
        if upload and allowed_file(upload.filename):
            return detect_faces_in_image(name, access, upload)
    return redirect('/video_feed')
@app.route('/update', methods=['GET', 'POST'])
def update():
    """Update an existing member's name and/or access level from the form."""
    db.encoding.update()
    member = request.form.get("updateMember")
    changeName = request.form.get("changeName")
    changeAccess = request.form.get("changeAccess")
    # Idiom fix: compare to None with ``is``, not ``==``.
    # A missing access field is passed through as "" (meaning "no change").
    if changeAccess is None:
        changeAccess = ""
    db.update_member(member, changeName, changeAccess)
    return ('', 204)
@app.route('/configuration', methods=['GET', 'POST'])
def config():
    """Set the camera capture interval from the submitted form value."""
    duration = int(request.form.get('cameraDuration'))
    db.config_camera_interval(duration)
    return ('', 204)
@app.route('/members')
def members():
    """Return the known member names as a stringified list."""
    # Removed a leftover debug print of the names' type.
    return str(db.encoding.get_names())
@app.route('/video_feed')
def video_feed():
    """Stream camera frames as an MJPEG multipart response."""
    print('CAMERA RUN')
    return Response(camera.gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/recent_person')
def recent_person():
    """Return the most recently seen member from the history log."""
    return db.history_log.get_most_recent_member()
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader; combined
    # with binding 0.0.0.0 this is unsafe outside development — confirm.
    app.run(host='0.0.0.0', port=5001, debug=True)
| 26.370787
| 94
| 0.672348
| 302
| 2,347
| 5.056291
| 0.354305
| 0.036673
| 0.05501
| 0.044532
| 0.041912
| 0.041912
| 0.041912
| 0
| 0
| 0
| 0
| 0.011979
| 0.181934
| 2,347
| 88
| 95
| 26.670455
| 0.783333
| 0.058372
| 0
| 0.135593
| 0
| 0
| 0.109701
| 0.011786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152542
| false
| 0
| 0.067797
| 0.050847
| 0.40678
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8d49b043794456e8669c31d21ba4a68846ab71c
| 5,088
|
py
|
Python
|
SVassembly/plot_bcs_across_bkpts.py
|
AV321/SVPackage
|
c9c625af7f5047ddb43ae79f8beb2ce9aadf7697
|
[
"MIT"
] | null | null | null |
SVassembly/plot_bcs_across_bkpts.py
|
AV321/SVPackage
|
c9c625af7f5047ddb43ae79f8beb2ce9aadf7697
|
[
"MIT"
] | null | null | null |
SVassembly/plot_bcs_across_bkpts.py
|
AV321/SVPackage
|
c9c625af7f5047ddb43ae79f8beb2ce9aadf7697
|
[
"MIT"
] | 1
|
2019-01-22T19:16:24.000Z
|
2019-01-22T19:16:24.000Z
|
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors
import csv
from scipy.stats import mode
import math as m
import os
import collections
#set working directory
#os.chdir("/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
#bkpt_name = "1"
#example: plot_bcs_bkpt("1", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
def plot_bcs_bkpt(bkpt_name, infolder, outfolder):
    """Plot SV-specific barcode positions on both sides of a breakpoint.

    Reads ``<bkpt_name>_1.bc_windows.txt``, ``<bkpt_name>_2.bc_windows.txt``
    and ``<bkpt_name>_hap_bcs.txt`` from *infolder*, assigns each barcode a
    haplotype, and saves a two-panel scatter plot to
    ``<outfolder>bcs_bkpt_map``.

    NOTE(review): *outfolder* is not normalized with a trailing '/' the way
    *infolder* is; a value without a trailing slash writes a sibling file.
    NOTE(review): ``.iteritems()`` on pandas objects was removed in pandas
    2.0 — this function requires an older pandas.
    """
    if infolder[-1] != '/':
        infolder = infolder + '/'
    file_1 = infolder + bkpt_name + "_1.bc_windows.txt"
    file_2 = infolder + bkpt_name + "_2.bc_windows.txt"
    file_hap = infolder + bkpt_name + "_hap_bcs.txt"
    df_1 = pd.read_table(file_1)
    df_2 = pd.read_table(file_2)
    hap_bcs = pd.read_table(file_hap)
    # bkpt_name = "1"
    # file_1 = bkpt_name + "_1.bc_windows.txt"
    # file_2 = bkpt_name + "_2.bc_windows.txt"
    # file_hap = bkpt_name + "_hap_bcs.txt"
    # #sort barcodes by where they map (lowest coordinate to highest)
    # #read in data frames
    # df_1 = pd.read_table(file_1)
    # df_2 = pd.read_table(file_2)
    # hap_bcs = pd.read_table(file_hap)
    hap_bcs = hap_bcs.transpose()
    # Default every barcode (column names minus the "_x" suffix) to
    # 'unassigned'; the haplotype file below overrides known ones.
    bcs_hap_dict = {}
    for key in df_1.keys():
        if key != "chrom" and key != "window_start" and key != "window_end":
            key = key[:-2]
            bcs_hap_dict[key] = 'unassigned'
    for key, values in hap_bcs.iteritems():
        if values[0] != 'bcs':
            hap = values[1]
            bcs_hap_dict[values[0]] = hap
    df_1 = df_1.sort_values('window_start')
    df_2 = df_2.sort_values('window_start')
    # NOTE(review): .at[0, ...] selects by index LABEL 0, not the first row
    # after sorting — confirm that is intended.
    chrom_1 = df_1.at[0, 'chrom']
    chrom_2 = df_2.at[0, 'chrom']
    # Scatter coordinates, split by side of the breakpoint (1/2) and
    # haplotype (1, 2, unassigned).
    x_values_1_1 = []
    x_values_1_2 = []
    x_values_1_unassigned = []
    y_values_1_1 = []
    y_values_1_2 = []
    y_values_1_unassigned = []
    x_values_2_1 = []
    x_values_2_2 = []
    x_values_2_unassigned = []
    y_values_2_1 = []
    y_values_2_2 = []
    y_values_2_unassigned = []
    i1 = 0
    window_start_arr1 = df_1['window_start']
    for name, values in df_1.iteritems(): #go through columns (so each barcode)
        if name != "chrom" and name != "window_start" and name != "window_end":
            i1 += 1
            name = name[:-2]
            hap = bcs_hap_dict[name]
            #print type(hap) int
            for indx, window in values.iteritems():
                if window != 0:
                    if hap == 1:
                        y_values_1_1.append(i1)
                        x_values_1_1.append(window_start_arr1[indx])
                    elif hap == 2:
                        y_values_1_2.append(i1)
                        x_values_1_2.append(window_start_arr1[indx])
                    else:
                        y_values_1_unassigned.append(i1)
                        x_values_1_unassigned.append(window_start_arr1[indx])
    i2 = 0
    window_start_arr2 = df_2['window_start']
    for name, values in df_2.iteritems():
        if name != "chrom" and name != "window_start" and name != "window_end":
            i2 += 1
            name = name[:-2]
            hap = bcs_hap_dict[name]
            for indx, window in values.iteritems():
                if window != 0:
                    if hap == 1:
                        y_values_2_1.append(i2)
                        x_values_2_1.append(window_start_arr2[indx])
                    elif hap == 2:
                        y_values_2_2.append(i2)
                        x_values_2_2.append(window_start_arr2[indx])
                    # NOTE(review): this branch is ``elif hap == 'unassigned'``
                    # while the loop above uses a bare ``else`` — any other hap
                    # value is silently dropped here but plotted above. Confirm
                    # which behavior is intended.
                    elif hap == 'unassigned':
                        y_values_2_unassigned.append(i2)
                        x_values_2_unassigned.append(window_start_arr2[indx])
    fig = plt.figure()
    figL = fig.add_subplot(121)
    figL.scatter(x_values_1_1, y_values_1_1, s=0.2, color='b') #this doesn't seem to contain anything
    figL.scatter(x_values_1_2, y_values_1_2, s=0.2, color='r') #same
    figL.scatter(x_values_1_unassigned, y_values_1_unassigned, s=0.2, color='g')
    figL.set_title("")
    figL.set_xlabel("chr %d (Mb)" %chrom_1)
    figL.set_ylabel("SV-specific barcode")
    figR = fig.add_subplot(122)
    figR.scatter(x_values_2_1, y_values_2_1, s=0.2, color='b') #same
    figR.scatter(x_values_2_2, y_values_2_2, s=0.2, color='r') #same
    figR.scatter(x_values_2_unassigned, y_values_2_unassigned, s=0.2, color='g')
    figR.set_title("")
    figR.set_xlabel("chr %d (Mb)" %chrom_2)
    figR.set_ylabel("")
    # Vertical line at the midpoint of each side's covered window range.
    brkpt1 = min(df_1['window_start']) + ((max(df_1['window_end']) - min(df_1['window_start']))/2)
    brkpt2 = min(df_2['window_start']) + ((max(df_2['window_end']) - min(df_2['window_start']))/2)
    figL.axvline(x=brkpt1, linewidth=1, color = 'black')
    figR.axvline(x=brkpt2, linewidth=1, color = 'black')
    path = outfolder + 'bcs_bkpt_map'
    plt.savefig(path)
| 36.085106
| 228
| 0.605149
| 759
| 5,088
| 3.715415
| 0.188406
| 0.074113
| 0.025532
| 0.031915
| 0.567731
| 0.434043
| 0.344681
| 0.255319
| 0.217021
| 0.197872
| 0
| 0.073184
| 0.272209
| 5,088
| 140
| 229
| 36.342857
| 0.688361
| 0.162539
| 0
| 0.135922
| 0
| 0
| 0.082567
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009709
| false
| 0
| 0.097087
| 0
| 0.106796
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8d75cfce0f3dc1a5df25624c4dcbf0a3624f6c0
| 2,917
|
py
|
Python
|
language-detection-webapp/blueprints/langid.py
|
derlin/SwigSpot_Schwyzertuutsch-Spotting
|
f38c8243ff34c6e512cadab5e4f51b08dacc16c6
|
[
"Apache-2.0"
] | 6
|
2018-06-17T07:14:32.000Z
|
2020-03-02T15:28:25.000Z
|
language-detection-webapp/blueprints/langid.py
|
derlin/SwigSpot_Schwyzertuutsch-Spotting
|
f38c8243ff34c6e512cadab5e4f51b08dacc16c6
|
[
"Apache-2.0"
] | 1
|
2021-03-31T18:42:26.000Z
|
2021-03-31T18:42:26.000Z
|
language-detection-webapp/blueprints/langid.py
|
derlin/SwigSpot_Schwyzertuutsch-Spotting
|
f38c8243ff34c6e512cadab5e4f51b08dacc16c6
|
[
"Apache-2.0"
] | 1
|
2019-04-16T09:18:08.000Z
|
2019-04-16T09:18:08.000Z
|
import logging
from flask import Blueprint
from flask import Flask, render_template, request, flash
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectField, BooleanField
from wtforms.fields.html5 import IntegerRangeField
from wtforms.widgets import TextArea
import langid
from utils.utils import templated
blueprint_langid = Blueprint('langid', __name__)
class UrlForm(FlaskForm):
    """Form requesting a URL plus extraction/model options for language ID."""
    url = StringField(
        'URL',
        validators=[validators.DataRequired(), validators.URL(message='Sorry, this is not a valid URL,')])
    # Minimum words a sentence must have to be kept (slider, 1..20).
    wMin = IntegerRangeField(
        'Min. words',
        default=5,
        validators=[validators.DataRequired(), validators.NumberRange(min=1, max=20)])
    # Text-extraction backend; choices come from langid.EXTRACTORS.
    extractor_class = SelectField(
        'Extractor',
        default=langid.EXTRACTORS[0],
        choices=[(i, i) for i in langid.EXTRACTORS],
        validators=[validators.DataRequired()])
    # Prediction model; choices come from langid.MODELS.
    model_class = SelectField(
        'Model',
        default=langid.MODELS[0],
        choices=[(i, i) for i in langid.MODELS],
        validators=[validators.DataRequired()])
    # When set, raw sentences are returned alongside the predictions.
    return_raw = BooleanField(
        'Display raw sentences',
        default=False
    )
class TextForm(FlaskForm):
    """Form accepting free text and a model choice for language ID."""
    text = StringField(
        'Text',
        widget=TextArea(),
        validators=[validators.DataRequired()])
    # Prediction model; choices come from langid.MODELS.
    model_class = SelectField(
        'Model',
        default=langid.MODELS[0],
        choices=[(i, i) for i in langid.MODELS],
        validators=[validators.DataRequired()])
@blueprint_langid.route('/', methods=['GET', 'POST'])
@templated('index.html')
def crawl():
    """Render the URL form; on POST, scrape the URL and detect languages.

    Returns a template context dict (handled by the @templated decorator).
    """
    form = UrlForm(request.form)
    if request.method == 'GET':
        return dict(form=form)
    elif not form.validate():
        # Surface each field's validation errors as flash messages.
        for f, errs in form.errors.items():
            flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
        return dict(form=form)
    try:
        results = langid.mixed_sentences_from_urls(
            form.url.data.strip(), extractor_name=form.extractor_class.data, model=form.model_class.data,
            with_proba=True, min_words=form.wMin.data, return_raw=form.return_raw.data)
    except Exception as e:
        # Broad catch is deliberate at this request boundary: report the
        # failure to the user and log the traceback instead of a 500 page.
        flash('Something went wrong %s' % e, 'danger')
        logging.exception(e)
        return dict(form=form)
    return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
@blueprint_langid.route('/text', methods=['GET', 'POST'])
@templated('langid.html')
def predict_text():
    """Render the free-text form; on POST, predict the language of the text."""
    form = TextForm(request.form)
    if request.method == 'GET':
        return dict(form=form)
    elif not form.validate():
        # Surface each field's validation errors as flash messages.
        for f, errs in form.errors.items():
            flash("%s: %s" % (f, "<br>".join(errs)), 'danger')
        return dict(form=form)
    # Each prediction is wrapped in a singleton list — presumably to match the
    # results shape the template shares with the URL endpoint; confirm.
    results = [[r] for r in langid.lang_of_text(
        form.text.data, model=form.model_class.data, with_proba=True)]
    return dict(form=form, results=results, labels=langid.DEFAULT_LABELS)
| 29.765306
| 106
| 0.65204
| 347
| 2,917
| 5.394813
| 0.302594
| 0.037393
| 0.05235
| 0.067308
| 0.407585
| 0.403846
| 0.403846
| 0.403846
| 0.392094
| 0.349359
| 0
| 0.003501
| 0.216661
| 2,917
| 97
| 107
| 30.072165
| 0.815755
| 0
| 0
| 0.364865
| 0
| 0
| 0.069249
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.121622
| 0
| 0.364865
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8d9062a56a02a0e0c258c3b8d23088b9caa04a9
| 11,421
|
py
|
Python
|
sandbox/lib/jumpscale/Jumpscale/core/BASECLASSES/JSConfigsBCDB.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/jumpscale/Jumpscale/core/BASECLASSES/JSConfigsBCDB.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/jumpscale/Jumpscale/core/BASECLASSES/JSConfigsBCDB.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END
from Jumpscale import j
from .JSConfigBCDBBase import JSConfigBCDBBase
class JSConfigsBCDB(JSConfigBCDBBase):
    """
    Collection ("factory") of JSConfig children persisted through BCDB.
    Children are cached in ``self._children`` and stored/queried via
    ``self._model``; subclasses declare the child type in ``_CHILDCLASS``.
    """

    def _childclass_selector(self, jsxobject, **kwargs):
        """
        allow custom implementation of which child class to use
        :return: the class to instantiate for a child (default: _CHILDCLASS)
        """
        return self.__class__._CHILDCLASS

    def new(self, name, jsxobject=None, autosave=True, **kwargs):
        """
        create a new named config object; raises if it already exists
        :param name:
        :param jsxobject:
        :param autosave: sets the autosave argument on the data and also saves the object before the function returns. If set to False, you need to explicitly save the object.
        :param kwargs:
        :return: the new child object
        """
        if self.exists(name=name):
            raise j.exceptions.Base("cannot do new object, exists")
        jsconfig = self._new(name=name, jsxobject=jsxobject, autosave=autosave, **kwargs)
        self._check(jsconfig)
        return jsconfig

    def _check_children(self):
        # When caching is disabled the in-memory child cache must stay empty.
        if not self._cache_use:
            assert self._children == {}

    def _check(self, jsconfig):
        """Sanity-check a child: positive id, matching mother_id and schema."""
        if jsconfig._id is None:
            # model has never been saved no check required yet
            return
        # lets do some tests (maybe in future can be removed, but for now the safe bet)
        assert jsconfig._id > 0
        mother_id = jsconfig._mother_id_get()
        if mother_id:
            assert jsconfig.mother_id == mother_id
        assert jsconfig._model.schema._md5 == self._model.schema._md5

    def _new(self, name, jsxobject=None, autosave=True, **kwargs):
        """
        :param name: for the CONFIG item (is a unique name for the service, client, ...)
        :param jsxobject: you can right away specify the jsxobject
        :param kwargs: the data elements which will be given to JSXObject underneith (given to constructor)
        :return: the service
        """
        kwargs_to_class = {}
        if not jsxobject:
            if kwargs:
                kwargs_to_obj_new = {}
                props = [i.name for i in self._model.schema.properties]
                # Split kwargs: schema properties feed the data object, the
                # rest go to the child class constructor.
                for key, val in kwargs.items():
                    if key in props:
                        kwargs_to_obj_new[key] = val
                    else:
                        kwargs_to_class[key] = val
                jsxobject = self._model.new(data=kwargs_to_obj_new)
            else:
                jsxobject = self._model.new()
        jsxobject.name = name
        # means we need to remember the parent id
        mother_id = self._mother_id_get()
        if mother_id:
            if jsxobject.mother_id != mother_id:
                jsxobject.mother_id = mother_id
        jsconfig_klass = self._childclass_selector(jsxobject=jsxobject)
        jsconfig = jsconfig_klass(parent=self, jsxobject=jsxobject, **kwargs_to_class)
        jsconfig._triggers_call(jsconfig, "new")
        jsconfig._autosave = autosave
        self._children[name] = jsconfig
        if autosave:
            self._children[name].save()
        jsxobject._autosave = autosave
        return self._children[name]

    def get(self, name="main", id=None, needexist=False, autosave=True, reload=False, **kwargs):
        """
        get (or lazily create) the named child object
        :param name: of the object
        """
        # will reload if needed (not in self._children)
        rc, jsconfig = self._get(name=name, id=id, die=needexist, reload=reload)
        if not jsconfig:
            self._log_debug("NEW OBJ:%s:%s" % (name, self._classname))
            jsconfig = self._new(name=name, autosave=autosave, **kwargs)
        else:
            # check that the stored values correspond with kwargs given
            # means comes from the database
            if not jsconfig._data._model.schema._md5 == jsconfig._model.schema._md5:
                # means data came from DB and schema is not same as config mgmt class
                j.shell()
            changed = False
            jsconfig._data._autosave = False
            for key, val in kwargs.items():
                if not getattr(jsconfig, key) == val:
                    changed = True
                    setattr(jsconfig, key, val)
            if changed and autosave:
                try:
                    jsconfig.save()
                except Exception as e:
                    # NOTE(review): leftover debug path — prints and drops into
                    # an interactive shell instead of propagating the error.
                    print("CHECK WHY ERROR")
                    j.shell()
            jsconfig._autosave = autosave
        # lets do some tests (maybe in future can be removed, but for now the safe bet)
        self._check(jsconfig)
        jsconfig._triggers_call(jsconfig, "get")
        return jsconfig

    def _get(self, name="main", id=None, die=True, reload=False, autosave=True):
        # Returns (status, child): 1 = by id or from cache, 2 = found via
        # index, 3 = not found (only when die is False).
        if id:
            obj = self._model.get(id)
            name = obj.name
            return 1, self._new(name, obj)
        obj = self._validate_child(name)
        if obj:
            if reload:
                obj.load()
            return 1, obj
        self._log_debug("get child:'%s'from '%s'" % (name, self._classname))
        # new = False
        res = self.find(name=name)
        if len(res) < 1:
            if not die:
                return 3, None
            raise j.exceptions.Base(
                "Did not find instance for:%s, name searched for:%s" % (self.__class__._location, name)
            )
        elif len(res) > 1:
            raise j.exceptions.Base(
                "Found more than 1 service for :%s, name searched for:%s" % (self.__class__._location, name)
            )
        else:
            jsxconfig = res[0]
        jsxconfig._autosave = autosave
        return 2, jsxconfig

    def reset(self):
        """
        will destroy all data in the DB, be carefull
        :return:
        """
        self._log_debug("reset all data")
        for item in self.find():
            try:
                item.delete()
            except Exception as e:
                # NOTE(review): delete failures drop into an interactive shell
                # instead of being logged/re-raised — confirm this is intended.
                j.shell()
        if not self._mother_id_get():
            self._model.index.destroy()

    def _children_names_get(self, filter=None):
        # Build an index query: optional mother_id scoping plus an optional
        # name-prefix filter ("*" disables filtering).
        condition = False
        Item = self._model.index.sql
        mother_id = self._mother_id_get()
        if mother_id:
            condition = Item.mother_id == mother_id
        # NOTE(review): Python ``and`` on query-expression objects does not
        # build a SQL AND — with both conditions present this evaluates to
        # ``condition`` alone, dropping the name filter. Probably ``&`` was
        # intended; confirm.
        if filter and filter != "*":
            condition = Item.name.startswith(filter) and condition if condition else Item.name.startswith(filter)
        if condition:
            res = [i.name for i in Item.select().where(condition)]
        else:
            res = [i.name for i in Item.select()]
        if len(res) > 50:
            # Too many names to be useful (e.g. for completion) — return none.
            return []
        return res

    def find(self, reload=False, **kwargs):
        """
        :param kwargs: e.g. color="red",...
        :return: list of the config objects
        """
        res = []
        ids_done = []
        # First match against the in-memory children (cheap), remembering ids
        # so the index pass below does not duplicate them.
        for key, item in list(self._children.items()):
            match = True
            # NOTE(review): this loop variable shadows the outer ``key``;
            # harmless today but fragile.
            for key, val in kwargs.items():
                if item._hasattr(key):
                    if val != getattr(item, key):
                        match = False
                else:
                    match = False
            if match:
                if reload:
                    item.load()
                res.append(item)
                if item.id not in ids_done:
                    ids_done.append(item.id)
        kwargs = self._kwargs_update(kwargs)
        # this is more efficient no need to go to backend stor if the objects are already in mem
        ids = self._model.find_ids(**kwargs)
        for id in ids:
            if id not in ids_done:
                item = self.get(id=id, reload=reload, autosave=False)
                res.append(item)
        return res

    def _kwargs_update(self, kwargs):
        # Scope queries to this factory's mother object when one exists.
        mother_id = self._mother_id_get()
        if mother_id:
            kwargs["mother_id"] = mother_id
        return kwargs

    def count(self, **kwargs):
        """
        :param kwargs: e.g. color="red",...
        :return: number of matching config objects
        """
        kwargs = self._kwargs_update(kwargs)
        # TODO do proper count query
        return len(list(self._model.find_ids(**kwargs)))

    def _findData(self, **kwargs):
        """
        :param kwargs: e.g. color="red",...
        :return: list of the data objects (the data of the model)
        """
        kwargs = self._kwargs_update(kwargs)
        return self._model.find(**kwargs)

    def save(self):
        # Persist every child that supports saving.
        for item in self._children_get():
            if item._hasattr("save"):
                item.save()

    def delete(self, name=None):
        """
        :param name: delete the named child; when None, reset the collection
        :return:
        """
        self._delete(name=name)

    def _delete(self, name=None):
        if name:
            _, child = self._get(name=name, die=False)
            if child:
                return child.delete()
        else:
            return self.reset()
        # NOTE(review): the block below requires ``not name``, but the
        # ``else`` branch above already returned in that case — this code
        # looks unreachable; confirm before relying on it.
        if not name and self._parent:
            if self._classname in self._parent._children:
                if not isinstance(self._parent, j.baseclasses.factory):
                    # only delete when not a factory means is a custom class we're building
                    del self._parent._children[self._data.name]

    def exists(self, name="main"):
        """
        :param name: of the object
        :return: True when a child with this name is cached or indexed
        """
        obj = self._validate_child(name)
        if obj:
            return True
        # will only use the index
        return self.count(name=name) == 1

    def _children_get(self, filter=None):
        """
        :param filter: is '' then will show all, if None will ignore _
            when * at end it will be considered a prefix
            when * at start it will be considered a end of line filter (endswith)
            when R as first char its considered to be a regex
            everything else is a full match
        :return:
        """
        # TODO implement filter properly
        x = []
        for _, item in self._children.items():
            x.append(item)
        x = self._filter(filter=filter, llist=x, nameonly=False)
        # be smarter in how we use the index
        for item in self.find():
            if item not in x:
                x.append(item)
        return x

    def __str__(self):
        return "jsxconfigobj:collection:%s" % self._model.schema.url
| 34.609091
| 175
| 0.571228
| 1,409
| 11,421
| 4.506033
| 0.226402
| 0.028981
| 0.00945
| 0.0126
| 0.210584
| 0.134194
| 0.122066
| 0.094031
| 0.08647
| 0.056702
| 0
| 0.003583
| 0.340163
| 11,421
| 329
| 176
| 34.714286
| 0.838907
| 0.268803
| 0
| 0.265625
| 0
| 0
| 0.032352
| 0.003286
| 0
| 0
| 0
| 0.006079
| 0.020833
| 1
| 0.098958
| false
| 0
| 0.010417
| 0.005208
| 0.223958
| 0.005208
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e0235b8205933db406d18f8b9437b0dca33a40
| 1,810
|
py
|
Python
|
TRANSFORM/Resources/python/2006LUT_to_SDF.py
|
greenwoodms/TRANSFORM-Library
|
dc152d4f0298d3f18385f2ea33645d87d7812915
|
[
"Apache-2.0"
] | 29
|
2018-04-24T17:06:19.000Z
|
2021-11-21T05:17:28.000Z
|
TRANSFORM/Resources/python/2006LUT_to_SDF.py
|
greenwoodms/TRANSFORM-Library
|
dc152d4f0298d3f18385f2ea33645d87d7812915
|
[
"Apache-2.0"
] | 13
|
2018-04-05T08:34:27.000Z
|
2021-10-04T14:24:41.000Z
|
TRANSFORM/Resources/python/2006LUT_to_SDF.py
|
greenwoodms/TRANSFORM-Library
|
dc152d4f0298d3f18385f2ea33645d87d7812915
|
[
"Apache-2.0"
] | 17
|
2018-08-06T22:18:01.000Z
|
2022-01-29T21:38:17.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 03 11:06:37 2018
@author: vmg
"""
import sdf
import numpy as np
# Load 2006 LUT for interpolation
# 2006 Groeneveld Look-Up Table as presented in
# "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 190-1922.
# This file requires the file 2006LUTdata.txt
# Pressure range [MPa] from 2006 LUT, convert to [Pa]
P = np.array((0.10,0.30,0.50,1.0,2.0,3.0,5.0,7.0,10.0,12.0,14.0,16.0,18.0,20.0,21.0))*1e6
# Mass Flux range [kg/m^2-s] from 2006 .LUT.
G = np.array((0.,50.,100.,300.,500.,750.,1000.,1500.,2000.,2500.,3000.,3500.,4000.,4500.,5000.,5500.,6000.,6500.,7000.,7500.,8000.))
# Quality range from 2006 LUT
x = np.array((-0.50,-0.40,-0.30,-0.20,-0.15,-0.10,-0.05,0.00,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.60,0.70,0.80,0.90,1.00))
# Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2]
q_raw=np.loadtxt('../Data/2006LUTdata.txt')*1e3
# Convert the imported array into a (MxNxQ) where:
# M is number of mass flux divisions
# N is number of quality divisions
# Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG,lenx,lenP))
# Bug fix: the original loops used Python-2-only ``xrange``, which raises
# NameError under Python 3; ``range`` behaves identically for this use.
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
            # Rows of the raw table are grouped by pressure block (k),
            # then mass flux (i); columns are quality (j).
            q[i,j,k] = q_raw[i + k*lenG,j]
# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G,ds_x,ds_P])
# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G,ds_x,ds_P,ds_q])
sdf.save('../Data/2006LUT.sdf', g)
| 36.2
| 137
| 0.651381
| 371
| 1,810
| 3.126685
| 0.412399
| 0.030172
| 0.037931
| 0.046552
| 0.106897
| 0.015517
| 0
| 0
| 0
| 0
| 0
| 0.169151
| 0.147514
| 1,810
| 50
| 138
| 36.2
| 0.582631
| 0.373481
| 0
| 0
| 0
| 0
| 0.088949
| 0.020665
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e07bde7c24919fc5325f0451f8753ee945632d
| 2,836
|
py
|
Python
|
test/asserting/policy.py
|
tmsanrinsha/vint
|
8c34196252b43d7361d0f58cb78cf2d3e4e4fbd0
|
[
"MIT"
] | 2
|
2021-06-15T15:07:28.000Z
|
2021-10-05T12:23:23.000Z
|
test/asserting/policy.py
|
tmsanrinsha/vint
|
8c34196252b43d7361d0f58cb78cf2d3e4e4fbd0
|
[
"MIT"
] | null | null | null |
test/asserting/policy.py
|
tmsanrinsha/vint
|
8c34196252b43d7361d0f58cb78cf2d3e4e4fbd0
|
[
"MIT"
] | null | null | null |
import unittest
from pathlib import Path
from pprint import pprint
from vint.compat.itertools import zip_longest
from vint.linting.linter import Linter
from vint.linting.config.config_default_source import ConfigDefaultSource
class PolicyAssertion(unittest.TestCase):
    """TestCase base class with helpers to lint a fixture with one policy."""

    class StubPolicySet(object):
        # Minimal PolicySet stand-in that always yields the given policies.
        def __init__(self, *policies):
            self._policies = policies

        def get_enabled_policies(self):
            return self._policies

        def update_by_config(self, policy_enabling_map):
            # Config-driven enabling is irrelevant here: the set is fixed.
            pass

    class StubConfigContainer(object):
        # Config stand-in: start from the default config with every policy
        # disabled, then re-enable only the requested ones.
        def __init__(self, policy_names_to_enable):
            default_config_dict = ConfigDefaultSource(None).get_config_dict()
            policy_options = default_config_dict.get('policies', {})
            for policy, options in policy_options.items():
                options['enabled'] = False
            # NOTE(review): callers pass a single policy-name STRING, so this
            # loop iterates its characters. It appears harmless because
            # StubPolicySet ignores the config — confirm the intent.
            for policy in policy_names_to_enable:
                options = policy_options.setdefault(policy, {})
                options['enabled'] = True
            self._config_dict = {
                'policies': policy_options,
            }

        def append_config_source(self, config_source):
            # Ignore a comment config source
            pass

        def get_config_dict(self):
            return self._config_dict

    def assertFoundNoViolations(self, path, Policy, policy_options=None):
        """Assert linting *path* with *Policy* produces no violations."""
        self.assertFoundViolationsEqual(path, Policy, [], policy_options)

    def assertFoundViolationsEqual(self, path, Policy, expected_violations, policy_options=None):
        """Lint *path* with only *Policy* enabled (optionally configured via
        *policy_options*) and assert the violations equal the expected ones."""
        policy_to_test = Policy()
        policy_name = Policy.__name__
        policy_set = PolicyAssertion.StubPolicySet(policy_to_test)
        config = PolicyAssertion.StubConfigContainer(policy_name)
        if policy_options is not None:
            config.get_config_dict()['policies'][policy_name].update(policy_options)
        linter = Linter(policy_set, config.get_config_dict())
        violations = linter.lint_file(path)
        pprint(violations)
        assert len(violations) == len(expected_violations)
        for violation, expected_violation in zip_longest(violations, expected_violations):
            self.assertViolation(violation, expected_violation)

    def assertViolation(self, actual_violation, expected_violation):
        """Assert one violation matches the expected name, position and level."""
        self.assertIsNot(actual_violation, None)
        self.assertIsNot(expected_violation, None)
        pprint(actual_violation)
        assert actual_violation['name'] == expected_violation['name']
        assert actual_violation['position'] == expected_violation['position']
        assert actual_violation['level'] == expected_violation['level']
        self.assertIsInstance(actual_violation['description'], str)
def get_fixture_path(*filename):
    """Build the path of a policy test fixture under test/fixture/policy."""
    fixture_root = Path('test', 'fixture', 'policy')
    return fixture_root.joinpath(*filename)
| 31.865169
| 97
| 0.685825
| 295
| 2,836
| 6.298305
| 0.261017
| 0.076964
| 0.027987
| 0.018299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230606
| 2,836
| 88
| 98
| 32.227273
| 0.851512
| 0.010578
| 0
| 0.036364
| 0
| 0
| 0.035663
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.181818
| false
| 0.036364
| 0.109091
| 0.054545
| 0.4
| 0.054545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e0ad168c40024827eba4f57a5381ccd338e24b
| 39,902
|
py
|
Python
|
dataprofiler/labelers/character_level_cnn_model.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | null | null | null |
dataprofiler/labelers/character_level_cnn_model.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | 1
|
2021-11-20T01:08:12.000Z
|
2021-11-20T01:08:12.000Z
|
dataprofiler/labelers/character_level_cnn_model.py
|
gliptak/DataProfiler
|
37ffbf43652246ef27e070df7ff0d9f1b9529162
|
[
"Apache-2.0"
] | null | null | null |
import copy
import json
import logging
import os
import sys
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
from sklearn import decomposition
from .. import dp_logging
from . import labeler_utils
from .base_model import AutoSubRegistrationMeta, BaseModel, BaseTrainableModel
# Absolute directory of this module — presumably used to resolve files
# relative to the package; not referenced in this excerpt.
_file_dir = os.path.dirname(os.path.abspath(__file__))
# Module logger routed through the DataProfiler logging hierarchy.
logger = dp_logging.get_child_logger(__name__)
class NoV1ResourceMessageFilter(logging.Filter):
    """Removes TF2 warning for using TF1 model which has resources."""

    def filter(self, record):
        # Drop any record whose formatted message contains the V1-resource
        # warning; keep everything else.
        unwanted = (
            'is a problem, consider rebuilding the SavedModel after '
            'running tf.compat.v1.enable_resource_variables()'
        )
        return unwanted not in record.getMessage()
# Install the filter on TensorFlow's logger so the V1-resource warning is
# suppressed process-wide.
tf_logger = logging.getLogger('tensorflow')
tf_logger.addFilter(NoV1ResourceMessageFilter())
@tf.keras.utils.register_keras_serializable()
class FBetaScore(tf.keras.metrics.Metric):
    r"""Computes F-Beta score.

    Adapted and slightly modified from https://github.com/tensorflow/addons/blob/v0.12.0/tensorflow_addons/metrics/f_scores.py#L211-L283

    # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    # https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    # ==============================================================================

    It is the weighted harmonic mean of precision
    and recall. Output range is `[0, 1]`. Works for
    both multi-class and multi-label classification.

    $$
    F_{\beta} = (1 + \beta^2) * \frac{\textrm{precision} * \textrm{recall}}{(\beta^2 \cdot \textrm{precision}) + \textrm{recall}}
    $$

    Args:
        num_classes: Number of unique classes in the dataset.
        average: Type of averaging to be performed on data.
            Acceptable values are `None`, `micro`, `macro` and
            `weighted`. Default value is None.
        beta: Determines the weight of precision and recall
            in harmonic mean. Determines the weight given to the
            precision and recall. Default value is 1.
        threshold: Elements of `y_pred` greater than threshold are
            converted to be 1, and the rest 0. If threshold is
            None, the argmax is converted to 1, and the rest 0.
        name: (Optional) String name of the metric instance.
        dtype: (Optional) Data type of the metric result.
    Returns:
        F-Beta Score: float.
    """

    # Modification: remove the run-time type checking for functions
    def __init__(self, num_classes, average=None, beta=1.0, threshold=None,
                 name="fbeta_score", dtype=None, **kwargs):
        super().__init__(name=name, dtype=dtype)

        if average not in (None, "micro", "macro", "weighted"):
            raise ValueError(
                "Unknown average type. Acceptable values "
                "are: [None, 'micro', 'macro', 'weighted']"
            )

        if not isinstance(beta, float):
            raise TypeError("The value of beta should be a python float")

        if beta <= 0.0:
            raise ValueError("beta value should be greater than zero")

        if threshold is not None:
            if not isinstance(threshold, float):
                raise TypeError("The value of threshold should be a python float")
            if threshold > 1.0 or threshold <= 0.0:
                raise ValueError("threshold should be between 0 and 1")

        self.num_classes = num_classes
        self.average = average
        self.beta = beta
        self.threshold = threshold
        self.axis = None
        self.init_shape = []

        # Micro averaging accumulates scalar counts; every other mode keeps
        # one accumulator slot per class (reduced along axis 0 on update).
        if self.average != "micro":
            self.axis = 0
            self.init_shape = [self.num_classes]

        def _zero_wt_init(name):
            # Zero-initialized accumulator variable of the proper shape.
            return self.add_weight(
                name, shape=self.init_shape, initializer="zeros", dtype=self.dtype
            )

        self.true_positives = _zero_wt_init("true_positives")
        self.false_positives = _zero_wt_init("false_positives")
        self.false_negatives = _zero_wt_init("false_negatives")
        self.weights_intermediate = _zero_wt_init("weights_intermediate")

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate TP/FP/FN counts (and per-class support) for one batch."""
        if self.threshold is None:
            threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
            # make sure [0, 0, 0] doesn't become [1, 1, 1]
            # Use abs(x) > eps, instead of x != 0 to check for zero
            y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-12)
        else:
            y_pred = y_pred > self.threshold

        y_true = tf.cast(y_true, self.dtype)
        y_pred = tf.cast(y_pred, self.dtype)

        def _weighted_sum(val, sample_weight):
            # Optionally weight each sample, then reduce to accumulator shape.
            if sample_weight is not None:
                val = tf.math.multiply(val, tf.expand_dims(sample_weight, 1))
            return tf.reduce_sum(val, axis=self.axis)

        self.true_positives.assign_add(_weighted_sum(y_pred * y_true, sample_weight))
        self.false_positives.assign_add(
            _weighted_sum(y_pred * (1 - y_true), sample_weight)
        )
        self.false_negatives.assign_add(
            _weighted_sum((1 - y_pred) * y_true, sample_weight)
        )
        self.weights_intermediate.assign_add(_weighted_sum(y_true, sample_weight))

    def result(self):
        """Compute the F-beta score from the accumulated counts."""
        precision = tf.math.divide_no_nan(
            self.true_positives, self.true_positives + self.false_positives
        )
        recall = tf.math.divide_no_nan(
            self.true_positives, self.true_positives + self.false_negatives
        )

        mul_value = precision * recall
        add_value = (tf.math.square(self.beta) * precision) + recall
        mean = tf.math.divide_no_nan(mul_value, add_value)
        f1_score = mean * (1 + tf.math.square(self.beta))

        if self.average == "weighted":
            # Weight each class score by its support (count of true labels).
            weights = tf.math.divide_no_nan(
                self.weights_intermediate, tf.reduce_sum(self.weights_intermediate)
            )
            f1_score = tf.reduce_sum(f1_score * weights)

        elif self.average is not None:  # [micro, macro]
            f1_score = tf.reduce_mean(f1_score)

        return f1_score

    def get_config(self):
        """Returns the serializable config of the metric."""
        config = {
            "num_classes": self.num_classes,
            "average": self.average,
            "beta": self.beta,
            "threshold": self.threshold,
        }

        base_config = super().get_config()
        return {**base_config, **config}

    def reset_states(self):
        # Zero every accumulator so the metric instance can be reused.
        # NOTE(review): Keras renamed this hook to ``reset_state`` in newer
        # TF releases — confirm the pinned TF version still dispatches here.
        reset_value = tf.zeros(self.init_shape, dtype=self.dtype)
        tf.keras.backend.batch_set_value([(v, reset_value) for v in self.variables])
@tf.keras.utils.register_keras_serializable()
class F1Score(FBetaScore):
    r"""Computes F-1 Score.

    # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    # https://github.com/tensorflow/addons/blob/v0.12.0/LICENSE
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    # ==============================================================================

    It is the harmonic mean of precision and recall.
    Output range is `[0, 1]`. Works for both multi-class
    and multi-label classification.

    $$
    F_1 = 2 \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{\textrm{precision} + \textrm{recall}}
    $$

    Args:
        num_classes: Number of unique classes in the dataset.
        average: Type of averaging to be performed on data.
            Acceptable values are `None`, `micro`, `macro`
            and `weighted`. Default value is None.
        threshold: Elements of `y_pred` above threshold are
            considered to be 1, and the rest 0. If threshold is
            None, the argmax is converted to 1, and the rest 0.
        name: (Optional) String name of the metric instance.
        dtype: (Optional) Data type of the metric result.
    Returns:
        F-1 Score: float.
    """

    # Modification: remove the run-time type checking for functions
    def __init__(self, num_classes, average=None, threshold=None,
                 name="f1_score", dtype=None):
        """F1 is F-beta with beta pinned to 1.0."""
        super().__init__(
            num_classes,
            average=average,
            beta=1.0,
            threshold=threshold,
            name=name,
            dtype=dtype,
        )

    def get_config(self):
        """Return the serializable config; beta is fixed, so it is omitted."""
        config = super().get_config()
        config.pop("beta")
        return config
def build_embd_dictionary(filename):
    """
    Returns a numpy embedding dictionary from embed file with GloVe-like format

    Each line is expected to look like ``<token> <v1> <v2> ...``. Blank or
    whitespace-only lines (e.g. a trailing newline, common in GloVe dumps)
    are skipped instead of raising IndexError. Vector components are kept in
    their on-disk string form (as before); callers convert to float.

    :param filename: Path to the embed file for loading
    :type filename: str
    :return: mapping of token -> numpy array of its vector components
    :rtype: dict
    """
    embd_table = dict()
    # Explicit encoding so loading does not depend on the platform locale.
    with open(filename, 'r', encoding='utf-8') as embds:
        for line in embds:
            parts = line.strip().split()
            if not parts:
                # Skip empty lines rather than crash on parts[0].
                continue
            embd_table[parts[0]] = np.asarray(parts[1:])
    return embd_table
def create_glove_char(n_dims, source_file=None):
    """
    Embeds GloVe chars embeddings from source file to n_dims principal
    components in a new file

    :param n_dims: Final number of principal component dims of the embeddings
    :type n_dims: int
    :param source_file: Location of original embeddings to factor down
    :type source_file: str
    """
    if source_file is None:
        source_file = os.path.join(_file_dir,
                                   "embeddings/glove.840B.300d-char.txt")

    # Load the embedding table, then separate tokens from their vectors
    # (dict insertion order keeps the two sequences aligned).
    embd_table = build_embd_dictionary(source_file)
    embd_words = list(embd_table.keys())
    embd_matrix = np.asarray(list(embd_table.values()))

    # Project the embeddings down to n_dims principal components.
    pca = decomposition.PCA(n_components=n_dims)
    reduced_embds = pca.fit_transform(embd_matrix)

    # Persist the reduced embeddings next to the source file.
    dir_name = os.path.dirname(source_file)
    embd_file_name = os.path.join(dir_name,
                                  'glove-reduced-{}D.txt'.format(n_dims))
    with open(embd_file_name, 'w') as out_file:
        for word, vector in zip(embd_words, reduced_embds):
            out_file.write(word + " " + ' '.join(str(num) for num in vector) + "\n")
class CharacterLevelCnnModel(BaseTrainableModel,
                             metaclass=AutoSubRegistrationMeta):
    """Character-level CNN data-labeler model built on TensorFlow/Keras."""

    # boolean if the label mapping requires the mapping for index 0 reserved
    requires_zero_mapping = True

    def __init__(self, label_mapping=None, parameters=None):
        """
        CNN Model Initializer. initialize epoch_id
        :param label_mapping: maps labels to their encoded integers
        :type label_mapping: dict
        :param parameters: Contains all the appropriate parameters for the
            model. Must contain num_labels. Other possible parameters are:
            max_length, max_char_encoding_id, dim_embed, size_fc
            dropout, size_conv, num_fil, optimizer, default_label
        :type parameters: dict
        :return: None
        """
        # parameter initialization
        if not parameters:
            parameters = {}
        parameters.setdefault('max_length', 3400)
        parameters.setdefault('max_char_encoding_id', 127)
        parameters.setdefault('dim_embed', 64)
        parameters.setdefault('size_fc', [96, 96])
        parameters.setdefault('dropout', 0.073)
        parameters.setdefault('size_conv', 13)
        parameters.setdefault('default_label', "UNKNOWN")
        parameters.setdefault('num_fil', [48 for _ in range(4)])
        # 'PAD' is always forced; any caller-supplied value is overwritten.
        parameters['pad_label'] = 'PAD'
        self._epoch_id = 0

        # reconstruct flags for model
        self._model_num_labels = 0
        self._model_default_ind = -1

        BaseModel.__init__(self, label_mapping, parameters)

    def __eq__(self, other):
        """
        Checks if two models are equal with one another, may only check
        important variables, i.e. may not check model itself.
        :param self: a model
        :param other: a model
        :type self: BaseModel
        :type other: BaseModel
        :return: Whether or not self and other are equal
        :rtype: bool
        """
        # Equality is parameters + label mapping only; trained weights are
        # deliberately not compared.
        if self._parameters != other._parameters \
                or self._label_mapping != other._label_mapping:
            return False
        return True

    def _validate_parameters(self, parameters):
        """
        Validate the parameters sent in. Raise error if invalid parameters are
        present.
        :param parameters: parameter dict containing the following parameters:
            max_length: Maximum char length in a sample
            max_char_encoding_id: Maximum integer value for encoding the input
            dim_embed: Number of embedded dimensions
            size_fc: Size of each fully connected layers
            dropout: Ratio of dropout in the model
            size_conv: Convolution kernel size
            default_label: Key for label_mapping that is the default label
            pad_label: Key for entities_dict that is the pad label
            num_fil: Number of filters in each convolution layer
        :type parameters: dict
        :return: None
        :raises ValueError: if any parameter is invalid or unrecognized;
            all problems are collected and reported in one message.
        """
        errors = []
        list_of_necessary_params = ['max_length', 'max_char_encoding_id',
                                    'dim_embed', 'size_fc', 'dropout',
                                    'size_conv', 'default_label', 'pad_label',
                                    'num_fil']
        # Make sure the necessary parameters are present and valid.
        for param in parameters:
            if param in ['max_length', 'max_char_encoding_id', 'dim_embed',
                         'size_conv']:
                if not isinstance(parameters[param], (int, float)) \
                        or parameters[param] < 0:
                    errors.append(param + " must be a valid integer or float "
                                          "greater than 0.")
            elif param == 'dropout':
                if not isinstance(parameters[param], (int, float)) \
                        or parameters[param] < 0 or parameters[param] > 1:
                    errors.append(param + " must be a valid integer or float "
                                          "from 0 to 1.")
            elif param == 'size_fc' or param == 'num_fil':
                if not isinstance(parameters[param], list) \
                        or len(parameters[param]) == 0:
                    errors.append(param + " must be a non-empty list of "
                                          "integers.")
                else:
                    for item in parameters[param]:
                        if not isinstance(item, int):
                            errors.append(param + " must be a non-empty "
                                                  "list of integers.")
                            break
            elif param == 'default_label':
                if not isinstance(parameters[param], str):
                    error = str(param) + " must be a string."
                    errors.append(error)

        # Error if there are extra parameters thrown in
        for param in parameters:
            if param not in list_of_necessary_params:
                errors.append(param + " is not an accepted parameter.")
        if errors:
            raise ValueError('\n'.join(errors))

    def set_label_mapping(self, label_mapping):
        """
        Sets the labels for the model
        :param label_mapping: label mapping of the model
        :type label_mapping: dict
        :return: None
        :raises TypeError: if label_mapping is not a list or dict
        :raises ValueError: if 'PAD' does not map to index 0, or the
            configured default_label is missing from the mapping
        """
        if not isinstance(label_mapping, (list, dict)):
            raise TypeError("Labels must either be a non-empty encoding dict "
                            "which maps labels to index encodings or a list.")

        # Deep-copied so the caller's mapping is never mutated.
        label_mapping = copy.deepcopy(label_mapping)
        if 'PAD' not in label_mapping:
            if isinstance(label_mapping, list):  # if list missing PAD
                label_mapping = ['PAD'] + label_mapping
            elif 0 not in label_mapping.values():  # if dict missing PAD and 0
                label_mapping.update({'PAD': 0})
        if (isinstance(label_mapping, dict)
                and label_mapping.get('PAD', None) != 0):  # dict with bad PAD
            raise ValueError("`PAD` must map to index zero.")
        if self._parameters['default_label'] not in label_mapping:
            raise ValueError("The `default_label` of {} must exist in the "
                             "label mapping.".format(
                                 self._parameters['default_label']))
        super().set_label_mapping(label_mapping)

    def _need_to_reconstruct_model(self):
        """
        Determines whether or not the model needs to be reconstructed.
        :return: bool of whether or not the model needs to reconstruct.
        """
        # No model yet -> nothing to reconstruct (construction handles that).
        if not self._model:
            return False
        default_ind = self.label_mapping[self._parameters['default_label']]
        # Reconstruct when the label count or default index drifted from the
        # values the current model was built with.
        return self.num_labels != self._model_num_labels or \
            default_ind != self._model_default_ind

    def save_to_disk(self, dirpath):
        """
        Saves whole model to disk with weights
        :param dirpath: directory path where you want to save the model to
        :type dirpath: str
        :return: None
        """
        if not self._model:
            self._construct_model()
        elif self._need_to_reconstruct_model():
            self._reconstruct_model()

        # Parameters and label mapping are stored as sidecar JSON files.
        model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
        with open(model_param_dirpath, 'w') as fp:
            json.dump(self._parameters, fp)
        labels_dirpath = os.path.join(dirpath, "label_mapping.json")
        with open(labels_dirpath, 'w') as fp:
            json.dump(self.label_mapping, fp)
        # NOTE(review): os.path.join(dirpath) with a single argument is a
        # no-op; presumably just dirpath was intended.
        self._model.save(os.path.join(dirpath))

    @classmethod
    def load_from_disk(cls, dirpath):
        """
        Loads whole model from disk with weights
        :param dirpath: directory path where you want to load the model from
        :type dirpath: str
        :return: None
        """
        # load parameters
        model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
        with open(model_param_dirpath, 'r') as fp:
            parameters = json.load(fp)

        # load label_mapping
        labels_dirpath = os.path.join(dirpath, "label_mapping.json")
        with open(labels_dirpath, 'r') as fp:
            label_mapping = json.load(fp)

        # use f1 score metric
        custom_objects = {
            "F1Score": F1Score(
                num_classes=max(label_mapping.values()) + 1,
                average='micro'),
            "CharacterLevelCnnModel": cls,
        }
        with tf.keras.utils.custom_object_scope(custom_objects):
            tf_model = tf.keras.models.load_model(dirpath)

        loaded_model = cls(label_mapping, parameters)
        loaded_model._model = tf_model

        # Tensorflow v1 Model weights need to be transferred.
        if not callable(tf_model):
            # NOTE(review): _construct_model() is invoked twice in this
            # branch — the first call looks redundant; confirm intent.
            loaded_model._construct_model()
            tf1_weights = []
            for var in tf_model.variables:
                if 'training' not in var.name:
                    tf1_weights.append(var.value())
            loaded_model._construct_model()
            tf1_weights.append(loaded_model._model.weights[-1].value())
            loaded_model._model.set_weights(tf1_weights)

        # load self
        loaded_model._model_num_labels = loaded_model.num_labels
        loaded_model._model_default_ind = loaded_model.label_mapping[
            loaded_model._parameters['default_label']
        ]
        return loaded_model

    @staticmethod
    def _char_encoding_layer(input_str_tensor, max_char_encoding_id, max_len):
        """
        Character encoding for the list of sentences
        :param input_str_tensor: input list of sentences converted to tensor
        :type input_str_tensor: tf.tensor
        :param max_char_encoding_id: Maximum integer value for encoding the
            input
        :type max_char_encoding_id: int
        :param max_len: Maximum char length in a sample
        :type max_len: int
        :return : tensor containing encoded list of input sentences
        :rtype: tf.Tensor
        """
        # convert characters to indices
        input_str_flatten = tf.reshape(input_str_tensor, [-1])
        sentences_encode = tf.strings.unicode_decode(input_str_flatten,
                                                     input_encoding='UTF-8')
        # Shift codepoints by +1 so index 0 stays reserved for padding.
        sentences_encode = tf.add(tf.cast(1, tf.int32), sentences_encode)
        # Clamp out-of-vocabulary codepoints to the single OOV bucket.
        sentences_encode = tf.math.minimum(sentences_encode,
                                           max_char_encoding_id + 1)

        # padding
        sentences_encode_pad = sentences_encode.to_tensor(shape=[None, max_len])
        return sentences_encode_pad

    @staticmethod
    def _argmax_threshold_layer(num_labels, threshold=0.0, default_ind=1):
        """
        Adds an argmax threshold layer to the model. This layer's output will be
        the argmax value if the confidence for that argmax meets the threshold
        for its label, otherwise it will be the default label index.
        :param num_labels: number of entities
        :type num_labels: int
        :param threshold: default set to 0 so all confidences pass.
        :type threshold: float
        :param default_ind: default index
        :type default_ind: int
        :return: final argmax threshold layer for the model
        """
        # Initialize the thresholds vector variable and create the threshold
        # matrix.
        class ThreshArgMaxLayer(tf.keras.layers.Layer):
            def __init__(self, threshold_, num_labels_):
                super(ThreshArgMaxLayer, self).__init__()
                thresh_init = tf.constant_initializer(threshold_)
                # Non-trainable per-label thresholds, all set to threshold_.
                self.thresh_vec = tf.Variable(
                    name='ThreshVec',
                    initial_value=thresh_init(shape=[num_labels_]),
                    trainable=False)

            def call(self, argmax_layer, confidence_layer):
                # Threshold applicable to each sample's argmax label.
                threshold_at_argmax = tf.gather(self.thresh_vec, argmax_layer)

                confidence_max_layer = tf.keras.backend.max(confidence_layer,
                                                            axis=2)

                # Check if the confidences meet the threshold minimum.
                argmax_mask = tf.keras.backend.cast(
                    tf.keras.backend.greater_equal(confidence_max_layer,
                                                   threshold_at_argmax),
                    dtype=argmax_layer.dtype)

                # Create a vector the same size as the batch_size which
                # represents the background label
                bg_label_tf = tf.keras.backend.constant(
                    default_ind, dtype=argmax_layer.dtype)

                # Generate the final predicted output using the function:
                # default + (argmax - default) * mask
                # i.e. argmax where the mask passes, default elsewhere.
                final_predicted_layer = tf.add(
                    bg_label_tf,
                    tf.multiply(
                        tf.subtract(argmax_layer, bg_label_tf),
                        argmax_mask
                    ), name='ThreshArgMax'
                )

                return final_predicted_layer

        return ThreshArgMaxLayer(threshold, num_labels)

    def _construct_model(self):
        """
        Model constructor for the data labeler. This also serves as a weight
        reset.
        :return: None
        """
        num_labels = self.num_labels
        default_ind = self.label_mapping[self._parameters['default_label']]

        # Reset model
        tf.keras.backend.clear_session()

        # generate glove embedding
        create_glove_char(self._parameters['dim_embed'])

        # generate model
        self._model = tf.keras.models.Sequential()

        # default parameters
        max_length = self._parameters['max_length']
        max_char_encoding_id = self._parameters['max_char_encoding_id']

        # Encoding layer
        def encoding_function(input_str):
            # Wraps the static encoder so the Lambda layer captures the
            # current max_char_encoding_id / max_length values.
            char_in_vector = CharacterLevelCnnModel._char_encoding_layer(
                input_str, max_char_encoding_id, max_length)
            return char_in_vector

        self._model.add(tf.keras.layers.Input(shape=(None,), dtype=tf.string))

        self._model.add(
            tf.keras.layers.Lambda(encoding_function,
                                   output_shape=tuple([max_length])))

        # Create a pre-trained weight matrix
        # character encoding indices range from 0 to max_char_encoding_id,
        # we add one extra index for out-of-vocabulary character
        embed_file = os.path.join(
            _file_dir, "embeddings/glove-reduced-{}D.txt".format(
                self._parameters['dim_embed']))
        embedding_matrix = np.zeros((max_char_encoding_id + 2,
                                     self._parameters['dim_embed']))
        embedding_dict = build_embd_dictionary(embed_file)

        input_shape = tuple([max_length])
        # Fill in the weight matrix: let pad and space be 0s
        for ascii_num in range(max_char_encoding_id):
            if chr(ascii_num) in embedding_dict:
                embedding_matrix[ascii_num + 1] = embedding_dict[chr(ascii_num)]

        self._model.add(tf.keras.layers.Embedding(
            max_char_encoding_id + 2,
            self._parameters['dim_embed'],
            weights=[embedding_matrix],
            input_length=input_shape[0],
            trainable=True))

        # Add the convolutional layers
        for fil in self._parameters['num_fil']:
            self._model.add(tf.keras.layers.Conv1D(
                filters=fil, kernel_size=self._parameters['size_conv'],
                activation='relu', padding='same'))
            if self._parameters['dropout']:
                self._model.add(
                    tf.keras.layers.Dropout(self._parameters['dropout']))
            # Add batch normalization, set fused = True for compactness
            self._model.add(
                tf.keras.layers.BatchNormalization(fused=False, scale=True))

        # Add the fully connected layers
        for size in self._parameters['size_fc']:
            self._model.add(
                tf.keras.layers.Dense(units=size, activation='relu'))
            if self._parameters['dropout']:
                self._model.add(
                    tf.keras.layers.Dropout(self._parameters['dropout']))

        # Add the final Softmax layer
        self._model.add(
            tf.keras.layers.Dense(num_labels, activation='softmax'))

        # Output the model into a .pb file for TensorFlow
        argmax_layer = tf.keras.backend.argmax(self._model.output)

        # Create confidence layers
        final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
            num_labels, threshold=0.0, default_ind=default_ind)

        argmax_outputs = self._model.outputs + \
            [argmax_layer,
             final_predicted_layer(argmax_layer, self._model.output)]
        self._model = tf.keras.Model(self._model.inputs, argmax_outputs)

        # Compile the model
        softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
        losses = {softmax_output_layer_name: "categorical_crossentropy"}

        # use f1 score metric
        f1_score_training = F1Score(num_classes=num_labels, average='micro')
        metrics = {softmax_output_layer_name: ['acc', f1_score_training]}

        self._model.compile(loss=losses,
                            optimizer="adam",
                            metrics=metrics)

        self._epoch_id = 0
        self._model_num_labels = num_labels
        self._model_default_ind = default_ind

    def reset_weights(self):
        """
        Reset the weights of the model.
        :return: None
        """
        # Rebuilding the model from scratch doubles as a weight reset.
        self._construct_model()

    def _reconstruct_model(self):
        """
        Reconstruct the appropriate layers if the number of number of labels is
        altered
        :return: None
        """
        # Reset model
        tf.keras.backend.clear_session()

        num_labels = self.num_labels
        default_ind = self.label_mapping[self._parameters['default_label']]

        # Remove the 3 output layers (dense_2', 'tf_op_layer_ArgMax',
        # 'thresh_arg_max_layer')
        for _ in range(3):
            self._model.layers.pop()

        # Add the final Softmax layer to the previous spot
        final_softmax_layer = tf.keras.layers.Dense(
            num_labels, activation='softmax', name="dense_2")(
            self._model.layers[-4].output)

        # Output the model into a .pb file for TensorFlow
        argmax_layer = tf.keras.backend.argmax(final_softmax_layer)

        # Create confidence layers
        final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
            num_labels, threshold=0.0, default_ind=default_ind)

        argmax_outputs = [final_softmax_layer] + \
            [argmax_layer,
             final_predicted_layer(argmax_layer,
                                   final_softmax_layer)]
        self._model = tf.keras.Model(self._model.inputs, argmax_outputs)

        # Compile the model
        softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
        losses = {softmax_output_layer_name: "categorical_crossentropy"}

        # use f1 score metric
        f1_score_training = F1Score(num_classes=num_labels, average='micro')
        metrics = {softmax_output_layer_name: ['acc', f1_score_training]}

        self._model.compile(loss=losses,
                            optimizer="adam",
                            metrics=metrics)

        self._epoch_id = 0
        self._model_num_labels = num_labels
        self._model_default_ind = default_ind

    def fit(self, train_data, val_data=None, batch_size=32, label_mapping=None,
            reset_weights=False, verbose=True):
        """
        Train the current model with the training data and validation data
        :param train_data: Training data used to train model
        :type train_data: Union[list, np.ndarray]
        :param val_data: Validation data used to validate the training
        :type val_data: Union[list, np.ndarray]
        :param batch_size: Used to determine number of samples in each batch
        :type batch_size: int
        :param label_mapping: maps labels to their encoded integers
        :type label_mapping: Union[dict, None]
        :param reset_weights: Flag to determine whether to reset the weights or
            not
        :type reset_weights: bool
        :param verbose: Flag to determine whether to print status or not
        :type verbose: bool
        :return: None
        """
        if label_mapping is not None:
            self.set_label_mapping(label_mapping)

        if not self._model:
            self._construct_model()
        else:
            if self._need_to_reconstruct_model():
                self._reconstruct_model()
            if reset_weights:
                self.reset_weights()

        # NOTE(review): defaultdict() with no default factory behaves like a
        # plain dict; presumably dict() was intended — confirm.
        history = defaultdict()
        f1 = None
        f1_report = []

        self._model.reset_metrics()
        softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]

        start_time = time.time()
        batch_id = 0
        for x_train, y_train in train_data:
            model_results = self._model.train_on_batch(
                x_train, {softmax_output_layer_name: y_train})
            sys.stdout.flush()
            if verbose:
                sys.stdout.write(
                    "\rEPOCH %d, batch_id %d: loss: %f - acc: %f - "
                    "f1_score %f" %
                    (self._epoch_id, batch_id, *model_results[1:]))
            batch_id += 1

        # History reflects the metrics of the LAST batch of the epoch.
        for i, metric_label in enumerate(self._model.metrics_names):
            history[metric_label] = model_results[i]

        if val_data:
            f1, f1_report = self._validate_training(val_data)
            history['f1_report'] = f1_report

            # NOTE(review): np.NAN was removed in NumPy 2.0; prefer np.nan —
            # confirm the numpy version pin before upgrading.
            val_f1 = f1_report['weighted avg']['f1-score'] \
                if f1_report else np.NAN
            val_precision = f1_report['weighted avg']['precision'] \
                if f1_report else np.NAN
            val_recall = f1_report['weighted avg']['recall'] \
                if f1_report else np.NAN
            epoch_time = time.time() - start_time

            logger.info("\rEPOCH %d (%ds), loss: %f - acc: %f - f1_score %f -- "
                        "val_f1: %f - val_precision: %f - val_recall %f" %
                        (self._epoch_id, epoch_time, *model_results[1:],
                         val_f1, val_precision, val_recall))

        self._epoch_id += 1
        return history, f1, f1_report

    def _validate_training(self, val_data, batch_size_test=32,
                           verbose_log=True, verbose_keras=False):
        """
        Validate the model on the test set and return the evaluation metrics.
        :param val_data: data generator for the validation
        :type val_data: iterator
        :param batch_size_test: Number of samples to process in testing
        :type batch_size_test: int
        :param verbose_log: whether or not to print out scores for training,
            etc.
        :type verbose_log: bool
        :param verbose_keras: whether or not to print out scores for training,
            from keras.
        :type verbose_keras: bool
        return (f1-score, f1 report).
        """
        f1 = None
        f1_report = None

        if val_data is None:
            return f1, f1_report

        # Predict on the test set
        batch_id = 0
        y_val_pred = []
        y_val_test = []
        for x_val, y_val in val_data:
            # predict(...)[1] is the argmax output of the multi-output model.
            y_val_pred.append(self._model.predict(
                x_val, batch_size=batch_size_test, verbose=verbose_keras)[1])
            y_val_test.append(np.argmax(y_val, axis=-1))
            batch_id += 1
            sys.stdout.flush()
            if verbose_log:
                sys.stdout.write("\rEPOCH %g, validation_batch_id %d" %
                                 (self._epoch_id, batch_id))

        tf.keras.backend.set_floatx('float32')
        # Clean the predicted entities and the actual entities
        f1, f1_report = labeler_utils.evaluate_accuracy(
            np.concatenate(y_val_pred, axis=0),
            np.concatenate(y_val_test, axis=0),
            self.num_labels,
            self.reverse_label_mapping,
            verbose=verbose_keras)

        return f1, f1_report

    def predict(self, data, batch_size=32, show_confidences=False,
                verbose=True):
        """
        Run model and get predictions
        :param data: text input
        :type data: Union[list, numpy.ndarray]
        :param batch_size: number of samples in the batch of data
        :type batch_size: int
        :param show_confidences: whether user wants prediction confidences
        :type show_confidences:
        :param verbose: Flag to determine whether to print status or not
        :type verbose: bool
        :return: char level predictions and confidences
        :rtype: dict
        """
        if not self._model:
            raise ValueError("You are trying to predict without a model. "
                             "Construct/Load a model before predicting.")
        elif self._need_to_reconstruct_model():
            raise RuntimeError("The model label mapping definitions have been "
                               "altered without additional training. Please "
                               "train the model or reset the label mapping to "
                               "predict.")
        # Pre-allocate space for predictions
        confidences = []
        sentence_lengths = np.zeros((batch_size,), dtype=int)
        predictions = np.zeros((batch_size, self._parameters['max_length']))
        if show_confidences:
            confidences = np.zeros((batch_size,
                                    self._parameters['max_length'],
                                    self.num_labels))

        # Run model with batching
        allocation_index = 0
        for batch_id, batch_data in enumerate(data):
            model_output = self._model(
                tf.convert_to_tensor(batch_data)
            )
            # Count number of samples in batch to prevent array mismatch
            num_samples_in_batch = len(batch_data)
            allocation_index = batch_id * batch_size

            # Double array size
            # (grow-by-doubling so pre-allocated arrays can hold all batches)
            if len(predictions) <= allocation_index:
                predictions = np.pad(predictions, ((0, len(predictions)),
                                                   (0, 0)), mode='constant')
                sentence_lengths = np.pad(
                    sentence_lengths, pad_width=((0, len(sentence_lengths)),),
                    mode='constant')
                if show_confidences:
                    confidences = np.pad(confidences,
                                         ((0, len(predictions)),
                                          (0, 0), (0, 0)), mode='constant')

            # model_output[0] is the softmax confidences, model_output[1] the
            # (thresholded) argmax predictions of the multi-output model.
            if show_confidences:
                confidences[allocation_index:allocation_index + num_samples_in_batch] = model_output[0].numpy()
            predictions[allocation_index:allocation_index + num_samples_in_batch] = model_output[1].numpy()
            sentence_lengths[allocation_index:allocation_index + num_samples_in_batch] = list(map(lambda x: len(x[0]), batch_data))

            allocation_index += num_samples_in_batch

        # Convert predictions, confidences to lists from numpy
        predictions_list = [i for i in range(0, allocation_index)]
        confidences_list = None
        if show_confidences:
            confidences_list = [i for i in range(0, allocation_index)]

        # Append slices of predictions to return prediction & confidence matrices
        for index, sentence_length \
                in enumerate(sentence_lengths[:allocation_index]):
            predictions_list[index] = list(predictions[index][:sentence_length])
            if show_confidences:
                confidences_list[index] = list(confidences[index][:sentence_length])

        if show_confidences:
            return {'pred': predictions_list, 'conf': confidences_list}
        return {'pred': predictions_list}

    def details(self):
        """
        Prints the relevant details of the model (summary, parameters, label
        mapping)
        """
        print("\n###### Model Details ######\n")
        self._model.summary()
        print("\nModel Parameters:")
        for key, value in self._parameters.items():
            print("{}: {}".format(key, value))
        print("\nModel Label Mapping:")
        for key, value in self.label_mapping.items():
            print("{}: {}".format(key, value))
| 40.38664
| 136
| 0.602276
| 4,744
| 39,902
| 4.854132
| 0.137858
| 0.026576
| 0.010422
| 0.011812
| 0.364904
| 0.316962
| 0.282005
| 0.254864
| 0.238492
| 0.222555
| 0
| 0.009365
| 0.309609
| 39,902
| 987
| 137
| 40.427558
| 0.826557
| 0.271064
| 0
| 0.19666
| 0
| 0.001855
| 0.086511
| 0.008724
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057514
| false
| 0
| 0.024119
| 0.001855
| 0.131725
| 0.009276
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e1bca5e78231c74ae6a4100aeb7480c5e84ad6
| 6,031
|
py
|
Python
|
airflow/contrib/plugins/metastore_browser/main.py
|
Nipica/airflow
|
211a71f8a6b9d808bd03af84bd77bf8ff0ef247f
|
[
"Apache-2.0"
] | null | null | null |
airflow/contrib/plugins/metastore_browser/main.py
|
Nipica/airflow
|
211a71f8a6b9d808bd03af84bd77bf8ff0ef247f
|
[
"Apache-2.0"
] | 1
|
2019-01-14T17:12:47.000Z
|
2019-01-14T17:12:47.000Z
|
airflow/contrib/plugins/metastore_browser/main.py
|
shubhamod/airflow
|
04f4622656656d4c55b69d460bbd2ed1379810c4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
from airflow.www.decorators import gzipped
# Airflow connection IDs used to reach the metastore and query engines;
# these must exist as configured connections in the deployment.
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
# Optional allow/deny lists of database names used by the /objects/ endpoint.
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
# Maximum number of tables returned by the /objects/ table selector.
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
    """Flask-Admin view for read-only browsing of a Hive metastore.

    Database and table metadata is read via SQL against the metastore
    MySQL database; sample data comes from Presto and DDL from Hive CLI.
    """

    @expose('/')
    def index(self):
        """Render the landing page: one row per database with object counts."""
        sql = """
        SELECT
            a.name as db, db_location_uri as location,
            count(1) as object_count, a.desc as description
        FROM DBS a
        JOIN TBLS b ON a.DB_ID = b.DB_ID
        GROUP BY a.name, db_location_uri, a.desc
        """.format(**locals())
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        df = h.get_pandas_df(sql)
        # Turn each database name into a link to its table listing page.
        df.db = (
            '<a href="/admin/metastorebrowserview/db/?db=' +
            df.db + '">' + df.db + '</a>')
        table = df.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            escape=False,
            na_rep='',)
        return self.render(
            "metastore_browser/dbs.html", table=table)

    @expose('/table/')
    def table(self):
        """Show metastore metadata for a single table (?table=<name>)."""
        table_name = request.args.get("table")
        m = HiveMetastoreHook(METASTORE_CONN_ID)
        table = m.get_table(table_name)
        return self.render(
            "metastore_browser/table.html",
            table=table, table_name=table_name, datetime=datetime, int=int)

    @expose('/db/')
    def db(self):
        """List the tables of one database (?db=<name>), sorted by name."""
        db = request.args.get("db")
        m = HiveMetastoreHook(METASTORE_CONN_ID)
        tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
        return self.render(
            "metastore_browser/db.html", tables=tables, db=db)

    @gzipped
    @expose('/partitions/')
    def partitions(self):
        """Render an HTML table of partitions for ?table=<schema>.<table>."""
        schema, table = request.args.get("table").split('.')
        # NOTE(review): schema/table come from the query string and are
        # interpolated into SQL via format() — relies on admin-only access.
        sql = """
        SELECT
            a.PART_NAME,
            a.CREATE_TIME,
            c.LOCATION,
            c.IS_COMPRESSED,
            c.INPUT_FORMAT,
            c.OUTPUT_FORMAT
        FROM PARTITIONS a
        JOIN TBLS b ON a.TBL_ID = b.TBL_ID
        JOIN DBS d ON b.DB_ID = d.DB_ID
        JOIN SDS c ON a.SD_ID = c.SD_ID
        WHERE
            b.TBL_NAME like '{table}' AND
            d.NAME like '{schema}'
        ORDER BY PART_NAME DESC
        """.format(**locals())
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        df = h.get_pandas_df(sql)
        return df.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            na_rep='',)

    @gzipped
    @expose('/objects/')
    def objects(self):
        """Return a JSON list of {id, text} entries for the table selector."""
        where_clause = ''
        if DB_WHITELIST:
            dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
            where_clause = "AND b.name IN ({})".format(dbs)
        if DB_BLACKLIST:
            # NOTE(review): a non-empty blacklist REPLACES (does not combine
            # with) the whitelist clause built above — confirm intended.
            dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
            where_clause = "AND b.name NOT IN ({})".format(dbs)
        sql = """
        SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
        FROM TBLS a
        JOIN DBS b ON a.DB_ID = b.DB_ID
        WHERE
            a.TBL_NAME NOT LIKE '%tmp%' AND
            a.TBL_NAME NOT LIKE '%temp%' AND
            b.NAME NOT LIKE '%tmp%' AND
            b.NAME NOT LIKE '%temp%'
        {where_clause}
        LIMIT {LIMIT};
        """.format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
        h = MySqlHook(METASTORE_MYSQL_CONN_ID)
        d = [
            {'id': row[0], 'text': row[0]}
            for row in h.get_records(sql)]
        return json.dumps(d)

    @gzipped
    @expose('/data/')
    def data(self):
        """Preview up to 1000 rows of a table via Presto (?table=<name>)."""
        table = request.args.get("table")
        # NOTE(review): table name is formatted into the query unescaped.
        sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
        h = PrestoHook(PRESTO_CONN_ID)
        df = h.get_pandas_df(sql)
        return df.to_html(
            classes="table table-striped table-bordered table-hover",
            index=False,
            na_rep='',)

    @expose('/ddl/')
    def ddl(self):
        """Return the CREATE TABLE statement for ?table=<name> via Hive CLI."""
        table = request.args.get("table")
        sql = "SHOW CREATE TABLE {table};".format(table=table)
        h = HiveCliHook(HIVE_CLI_CONN_ID)
        return h.run_cli(sql)
# Instantiate the view so it appears under the "Plugins" admin menu.
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
    "metastore_browser", __name__,
    template_folder='templates',
    static_folder='static',
    static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
    """Airflow plugin registering the blueprint and admin view above."""
    name = "metastore_browser"
    flask_blueprints = [bp]
    admin_views = [v]
| 33.320442
| 75
| 0.62842
| 792
| 6,031
| 4.636364
| 0.282828
| 0.017974
| 0.019063
| 0.021786
| 0.227397
| 0.147603
| 0.133442
| 0.106754
| 0.099673
| 0.099673
| 0
| 0.003824
| 0.262809
| 6,031
| 180
| 76
| 33.505556
| 0.822087
| 0.157354
| 0
| 0.258993
| 0
| 0
| 0.334453
| 0.028662
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05036
| false
| 0
| 0.079137
| 0
| 0.215827
| 0.021583
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e296b5bc6bda6288119a1eb8117102f686848c
| 12,255
|
py
|
Python
|
app/lib/manage.py
|
AaronDewes/compose-nonfree
|
82ef3e58019ee03d163dea7aff4d7ed18d884238
|
[
"MIT"
] | 5
|
2021-09-26T18:02:27.000Z
|
2022-03-30T10:16:03.000Z
|
app/lib/manage.py
|
AaronDewes/compose-nonfree
|
82ef3e58019ee03d163dea7aff4d7ed18d884238
|
[
"MIT"
] | 5
|
2021-09-23T18:57:00.000Z
|
2021-11-02T06:47:05.000Z
|
app/lib/manage.py
|
AaronDewes/compose-nonfree
|
82ef3e58019ee03d163dea7aff4d7ed18d884238
|
[
"MIT"
] | 3
|
2021-10-01T15:14:09.000Z
|
2022-03-30T10:16:06.000Z
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2021 Aaron Dewes <aaron.dewes@protonmail.com>
#
# SPDX-License-Identifier: MIT
import stat
import tempfile
import threading
from typing import List
from sys import argv
import os
import requests
import shutil
import json
import yaml
import subprocess
from lib.composegenerator.v0.generate import createComposeConfigFromV0
from lib.composegenerator.v1.generate import createComposeConfigFromV1
from lib.appymlgenerator import convertComposeYMLToAppYML
from lib.validate import findAndValidateApps
from lib.metadata import getAppRegistry, getSimpleAppRegistry
from lib.entropy import deriveEntropy
# For an array of threads, join them and wait for them to finish
def joinThreads(threads: List[threading.Thread]):
    """Block until every thread in *threads* has terminated."""
    for worker in threads:
        worker.join()
# The directory with this script
scriptDir = os.path.dirname(os.path.realpath(__file__))
# Repository root (two levels up) and well-known paths inside it.
nodeRoot = os.path.join(scriptDir, "..", "..")
appsDir = os.path.join(nodeRoot, "apps")
appSystemDir = os.path.join(nodeRoot, "app-system")
sourcesList = os.path.join(appSystemDir, "sources.list")
appDataDir = os.path.join(nodeRoot, "app-data")
userFile = os.path.join(nodeRoot, "db", "user.json")
legacyScript = os.path.join(nodeRoot, "scripts", "app")
def runCompose(app: str, args: str):
    """Thin wrapper around compose(); used as a thread target elsewhere."""
    compose(app, args)
# Returns every argument after the second one in sys.argv joined into a
# string by spaces
def getArguments():
    """Concatenate argv[3:] into a single string, each followed by a space."""
    return "".join(arg + " " for arg in argv[3:])
def getAppYml(name):
    """Fetch the upstream app.yml for *name*; return its text, or False on
    any non-200 response."""
    url = 'https://raw.githubusercontent.com/runcitadel/compose-nonfree/main/apps/' + \
        name + '/' + 'app.yml'
    response = requests.get(url)
    if response.status_code == 200:
        return response.text
    else:
        return False
def getAppYmlPath(app):
    """Return the path of *app*'s app.yml manifest inside appsDir."""
    return os.path.join(appsDir, app, 'app.yml')
def composeToAppYml(app):
    """Convert *app*'s docker-compose.yml back into an app.yml manifest."""
    composeFile = os.path.join(appsDir, app, "docker-compose.yml")
    appYml = os.path.join(appsDir, app, "app.yml")
    # Read the compose file and parse it
    with open(composeFile, "r") as f:
        compose = yaml.safe_load(f)
    registry = os.path.join(appsDir, "registry.json")
    # Load the registry
    with open(registry, "r") as f:
        registryData = json.load(f)
    converted = convertComposeYMLToAppYML(compose, app, registryData)
    # Put converted into the app.yml after encoding it as YAML
    with open(appYml, "w") as f:
        f.write(yaml.dump(converted, sort_keys=False))
def update(verbose: bool = False):
    """Regenerate registries and docker-compose files for all valid apps.

    Writes apps/registry.json and app-system/apps.json, then generates a
    docker-compose.yml for every validated app from its app.yml.
    """
    apps = findAndValidateApps(appsDir)
    # The compose generation process updates the registry, so we need to get
    # it set up with the basics before that
    registry = getAppRegistry(apps, appsDir)
    with open(os.path.join(appsDir, "registry.json"), "w") as f:
        json.dump(registry, f, indent=4, sort_keys=True)
    print("Wrote registry to registry.json")
    simpleRegistry = getSimpleAppRegistry(apps, appsDir)
    with open(os.path.join(appSystemDir, "apps.json"), "w") as f:
        json.dump(simpleRegistry, f, indent=4, sort_keys=True)
    print("Wrote version information to apps.json")
    # Loop through the apps and generate valid compose files from them,
    # then put these into the app dir
    for app in apps:
        composeFile = os.path.join(appsDir, app, "docker-compose.yml")
        appYml = os.path.join(appsDir, app, "app.yml")
        # NOTE(review): the compose file is opened (truncated) even when
        # getApp() returns nothing to write — confirm intended.
        with open(composeFile, "w") as f:
            appCompose = getApp(appYml, app)
            if(appCompose):
                f.write(yaml.dump(appCompose, sort_keys=False))
                if verbose:
                    print("Wrote " + app + " to " + composeFile)
    print("Generated configuration successfully")
def download(app: str = None):
    """Download app.yml manifests from the upstream repository.

    With no argument, every validated app is refreshed; otherwise only the
    named app. A failed download is reported but does not abort the loop.
    """
    # The single-app and all-apps paths were duplicated verbatim; fold them
    # into one loop over either the validated app list or the one given app.
    apps = findAndValidateApps(appsDir) if app is None else [app]
    for current in apps:
        data = getAppYml(current)
        if data:
            with open(getAppYmlPath(current), 'w') as f:
                f.write(data)
        else:
            print("Warning: Could not download " + current)
def getUserData():
    """Return the parsed contents of user.json, or {} if the file is absent."""
    if not os.path.isfile(userFile):
        return {}
    with open(userFile, "r") as f:
        return json.load(f)
def startInstalled():
    """Start every app listed in user.json's installedApps, concurrently."""
    # If the user file doesn't exist, just do nothing.
    userData = {}
    if os.path.isfile(userFile):
        with open(userFile, "r") as f:
            userData = json.load(f)
    threads = []
    # Fix: use .get() so an absent user file (userData == {}) or a file
    # without "installedApps" starts nothing instead of raising KeyError,
    # matching the "just do nothing" intent above.
    for app in userData.get("installedApps", []):
        print("Starting app {}...".format(app))
        # Run each "up --detach" in its own thread, then wait for all.
        thread = threading.Thread(target=runCompose, args=(app, "up --detach"))
        thread.start()
        threads.append(thread)
    joinThreads(threads)
def stopInstalled():
    """Stop every app listed in user.json's installedApps, concurrently."""
    # If the user file doesn't exist, just do nothing.
    userData = {}
    if os.path.isfile(userFile):
        with open(userFile, "r") as f:
            userData = json.load(f)
    threads = []
    # Fix: use .get() so an absent user file or missing "installedApps" key
    # stops nothing instead of raising KeyError.
    for app in userData.get("installedApps", []):
        print("Stopping app {}...".format(app))
        # Run each "rm --force --stop" in its own thread, then wait for all.
        thread = threading.Thread(
            target=runCompose, args=(app, "rm --force --stop"))
        thread.start()
        threads.append(thread)
    joinThreads(threads)
# Loads an app.yml and converts it to a docker-compose.yml
def getApp(appFile: str, appId: str):
    """Parse *appFile* and return its generated compose configuration."""
    with open(appFile, 'r') as f:
        app = yaml.safe_load(f)
    if not "metadata" in app:
        raise Exception("Error: Could not find metadata in " + appFile)
    app["metadata"]["id"] = appId
    # Version "1" manifests use the v1 generator; anything else falls back
    # to the v0 generator.
    if('version' in app and str(app['version']) == "1"):
        return createComposeConfigFromV1(app, nodeRoot)
    else:
        return createComposeConfigFromV0(app)
def compose(app, arguments):
    """Run `docker compose <arguments>` for *app* with its environment set.

    Exports APP_* environment variables (domain, hidden-service hostnames,
    derived seeds, data dirs) and then invokes docker compose with the
    common compose file plus the app's own docker-compose.yml.
    """
    # Runs a compose command in the app dir
    # Before that, check if a docker-compose.yml exists in the app dir
    composeFile = os.path.join(appsDir, app, "docker-compose.yml")
    commonComposeFile = os.path.join(appSystemDir, "docker-compose.common.yml")
    # NOTE(review): these shell=True invocations interpolate local paths and
    # the app name into shell strings — confirm app names are trusted input.
    os.environ["APP_DOMAIN"] = subprocess.check_output(
        "hostname -s 2>/dev/null || echo 'umbrel'", shell=True).decode("utf-8") + ".local"
    os.environ["APP_HIDDEN_SERVICE"] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
        os.path.join(nodeRoot, "tor", "data", "app-{}/hostname".format(app))), shell=True).decode("utf-8")
    os.environ["APP_SEED"] = deriveEntropy("app-{}-seed".format(app))
    # Allow more app seeds, with random numbers from 1-5 assigned in a loop
    for i in range(1, 6):
        os.environ["APP_SEED_{}".format(i)] = deriveEntropy("app-{}-seed{}".format(app, i))
    os.environ["APP_DATA_DIR"] = os.path.join(appDataDir, app)
    os.environ["BITCOIN_DATA_DIR"] = os.path.join(nodeRoot, "bitcoin")
    os.environ["LND_DATA_DIR"] = os.path.join(nodeRoot, "lnd")
    # List all hidden services for an app and put their hostname in the environment
    hiddenServices: List[str] = getAppHiddenServices(app)
    for service in hiddenServices:
        appHiddenServiceFile = os.path.join(
            nodeRoot, "tor", "data", "app-{}-{}/hostname".format(app, service))
        os.environ["APP_HIDDEN_SERVICE_{}".format(service.upper().replace("-", "_"))] = subprocess.check_output("cat {} 2>/dev/null || echo 'notyetset.onion'".format(
            appHiddenServiceFile), shell=True).decode("utf-8")
    if not os.path.isfile(composeFile):
        print("Error: Could not find docker-compose.yml in " + app)
        exit(1)
    os.system(
        "docker compose --env-file '{}' --project-name '{}' --file '{}' --file '{}' {}".format(
            os.path.join(nodeRoot, ".env"), app, commonComposeFile, composeFile, arguments))
def remove_readonly(func, path, _):
    """Error hook for shutil.rmtree: make *path* writable, then retry *func*."""
    # Some files carry a read-only bit that blocks removal; lift it first.
    os.chmod(path, stat.S_IWRITE)
    func(path)
def deleteData(app: str):
    """Remove *app*'s persistent data directory; a missing dir is ignored."""
    target = os.path.join(appDataDir, app)
    try:
        shutil.rmtree(target, onerror=remove_readonly)
    except FileNotFoundError:
        pass
def createDataDir(app: str):
    """(Re)create *app*'s data dir as a fresh copy of its app dir."""
    dataDir = os.path.join(appDataDir, app)
    appDir = os.path.join(appsDir, app)
    # Start from a clean slate if data for this app already exists.
    if os.path.isdir(dataDir):
        deleteData(app)
    # Recursively copy everything from appDir to dataDir while excluding .gitignore
    shutil.copytree(appDir, dataDir, symlinks=False,
                    ignore=shutil.ignore_patterns(".gitignore"))
    # Chown and chmod dataDir to have the same owner and permissions as appDir
    os.chown(dataDir, os.stat(appDir).st_uid, os.stat(appDir).st_gid)
    os.chmod(dataDir, os.stat(appDir).st_mode)
def setInstalled(app: str):
    """Record *app* as installed in user.json (deduplicated)."""
    userData = getUserData()
    if not "installedApps" in userData:
        userData["installedApps"] = []
    userData["installedApps"].append(app)
    # NOTE(review): list(set(...)) dedupes but loses installation order —
    # confirm no caller relies on ordering of installedApps.
    userData["installedApps"] = list(set(userData["installedApps"]))
    with open(userFile, "w") as f:
        json.dump(userData, f)
def setRemoved(app: str):
    """Remove *app* from the installed list in user.json."""
    userData = getUserData()
    if not "installedApps" in userData:
        return
    userData["installedApps"] = list(set(userData["installedApps"]))
    # NOTE(review): .remove() raises ValueError if *app* is not listed —
    # confirm callers only invoke this for apps known to be installed.
    userData["installedApps"].remove(app)
    with open(userFile, "w") as f:
        json.dump(userData, f)
def getAppHiddenServices(app: str):
    """List the tor hidden-service names configured for *app*.

    Scans nodeRoot/tor/data for subdirectories named app-<app>-<service>
    and returns the bare <service> suffixes.
    """
    torDir = os.path.join(nodeRoot, "tor", "data")
    prefix = "app-{}-".format(app)
    return [entry[len(prefix):]
            for entry in os.listdir(torDir)
            if entry.startswith(prefix)]
# Parse the sources.list repo file, which contains a list of sources in the format
# <git-url> <branch>
# For every line, clone the repo to a temporary dir and checkout the branch
# Then, check that repos apps in the temporary dir/apps and for every app,
# overwrite the current app dir with the contents of the temporary dir/apps/app
# Also, keep a list of apps from every repo, a repo later in the file may not overwrite an app from a repo earlier in the file
def updateRepos():
    """Sync the local apps dir from every repo listed in sources.list."""
    # Get the list of repos
    repos = []
    with open(sourcesList) as f:
        repos = f.readlines()
    # For each repo, clone the repo to a temporary dir, checkout the branch,
    # and overwrite the current app dir with the contents of the temporary dir/apps/app
    alreadyInstalled = []
    for repo in repos:
        repo = repo.strip()
        if repo == "":
            continue
        # Split the repo into the git url and the branch
        repo = repo.split(" ")
        if len(repo) != 2:
            print("Error: Invalid repo format in " + sourcesList)
            exit(1)
        gitUrl = repo[0]
        branch = repo[1]
        # Clone the repo to a temporary dir
        tempDir = tempfile.mkdtemp()
        print("Cloning the repository")
        # Git clone with a depth of 1 to avoid cloning the entire repo
        # Dont print anything to stdout, as we don't want to see the git clone output
        # NOTE(review): *branch* is parsed above but never passed to git, so
        # the default branch is always cloned — confirm intended.
        subprocess.run("git clone --depth 1 {} {}".format(gitUrl, tempDir), shell=True, stdout=subprocess.DEVNULL)
        # Overwrite the current app dir with the contents of the temporary dir/apps/app
        for app in os.listdir(os.path.join(tempDir, "apps")):
            # if the app is already installed, don't overwrite it
            if app in alreadyInstalled:
                continue
            if os.path.isdir(os.path.join(appsDir, app)):
                shutil.rmtree(os.path.join(appsDir, app), onerror=remove_readonly)
            if os.path.isdir(os.path.join(tempDir, "apps", app)):
                shutil.copytree(os.path.join(tempDir, "apps", app), os.path.join(appsDir, app),
                                symlinks=False, ignore=shutil.ignore_patterns(".gitignore"))
            alreadyInstalled.append(app)
        # Remove the temporary dir
        shutil.rmtree(tempDir)
| 37.941176
| 166
| 0.651652
| 1,601
| 12,255
| 4.963148
| 0.229232
| 0.031714
| 0.04153
| 0.025673
| 0.356154
| 0.304304
| 0.268185
| 0.217468
| 0.200856
| 0.181601
| 0
| 0.003904
| 0.226683
| 12,255
| 322
| 167
| 38.059006
| 0.834547
| 0.203101
| 0
| 0.27193
| 0
| 0
| 0.136658
| 0.00473
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0.004386
| 0.074561
| 0.004386
| 0.201754
| 0.048246
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e31dd1ab5827961bb3c5e7a54cd2196fee2f7f
| 2,814
|
py
|
Python
|
features/jit-features/query/query.py
|
YuanruiZJU/SZZ-TSE
|
093506f9019a0d8b412dad4672525f93150ca181
|
[
"MIT"
] | 13
|
2019-04-15T12:54:56.000Z
|
2022-03-09T02:30:14.000Z
|
features/jit-features/query/query.py
|
YanYoungZhao/SZZ-TSE
|
093506f9019a0d8b412dad4672525f93150ca181
|
[
"MIT"
] | 1
|
2022-01-27T02:33:09.000Z
|
2022-01-27T02:33:09.000Z
|
features/jit-features/query/query.py
|
YanYoungZhao/SZZ-TSE
|
093506f9019a0d8b412dad4672525f93150ca181
|
[
"MIT"
] | 6
|
2019-11-04T11:24:13.000Z
|
2021-12-16T07:53:18.000Z
|
from query.base import BaseQuery
class CommitMetaQuery(BaseQuery):
    """Query over the commit_meta table (one row per commit)."""
    table_name = 'commit_meta'


class DiffusionFeaturesQuery(BaseQuery):
    """Query over the diffusion_features table."""
    table_name = 'diffusion_features'


class SizeFeaturesQuery(BaseQuery):
    """Query over the size_features table."""
    table_name = 'size_features'


class PurposeFeaturesQuery(BaseQuery):
    """Query over the purpose_features table."""
    table_name = 'purpose_features'


class HistoryFeaturesQuery(BaseQuery):
    """Query over the history_features table."""
    table_name = 'history_features'


class ExperienceFeaturesQuery(BaseQuery):
    """Query over the experience_features table."""
    table_name = 'experience_features'
class ProjectQuery:
    """Aggregates the per-commit JIT feature queries for one project."""

    def __init__(self, project):
        # Run every feature query up front; results are lists of row objects.
        self.project = project
        self.cms = CommitMetaQuery(project).do_query()
        self.diffusion_features = DiffusionFeaturesQuery(project).do_query()
        self.size_features = SizeFeaturesQuery(project).do_query()
        self.purpose_features = PurposeFeaturesQuery(project).do_query()
        self.history_features = HistoryFeaturesQuery(project).do_query()
        self.exp_features = ExperienceFeaturesQuery(project).do_query()
        # Memoizes end_commit_id; None means "not computed yet".
        self.__cache_end_commit_id = None

    @property
    def end_commit_id(self):
        """Commit id of the LAST fix commit in purpose_features (cached)."""
        if self.__cache_end_commit_id is not None:
            return self.__cache_end_commit_id
        commit_id = None
        for pf in self.purpose_features:
            if pf.fix:
                commit_id = pf.commit_id
        self.__cache_end_commit_id = commit_id
        return self.__cache_end_commit_id

    def combine(self):
        """Merge all feature tables into per-commit feature dicts.

        Only commits with a complete set of 14 features are returned;
        iteration stops once end_commit_id has been emitted.
        """
        features_dict = dict()
        # Size features seed the per-commit dicts; the later loops index
        # into them directly (presumably every table covers the same
        # commits — a commit missing size features would raise KeyError).
        for sf in self.size_features:
            features_dict[sf.commit_id] = dict()
            features_dict[sf.commit_id]['la'] = sf.la
            features_dict[sf.commit_id]['ld'] = sf.ld
            features_dict[sf.commit_id]['lt'] = sf.lt
        for df in self.diffusion_features:
            features_dict[df.commit_id]['ns'] = df.ns
            features_dict[df.commit_id]['nd'] = df.nd
            features_dict[df.commit_id]['nf'] = df.nf
            features_dict[df.commit_id]['entropy'] = df.entropy
        for pf in self.purpose_features:
            features_dict[pf.commit_id]['fix'] = pf.fix
        for hf in self.history_features:
            features_dict[hf.commit_id]['ndev'] = hf.ndev
            features_dict[hf.commit_id]['age'] = hf.age
            features_dict[hf.commit_id]['nuc'] = hf.nuc
        for ef in self.exp_features:
            features_dict[ef.commit_id]['exp'] = ef.exp
            features_dict[ef.commit_id]['rexp'] = ef.rexp
            features_dict[ef.commit_id]['sexp'] = ef.sexp
        ret_list = list()
        for cm in self.cms:
            cm_dict = features_dict[cm.commit_id]
            # 3 size + 4 diffusion + 1 purpose + 3 history + 3 experience
            # = 14 features when every group contributed.
            if len(cm_dict) == 14:
                cm_dict['commit_id'] = cm.commit_id
                ret_list.append(cm_dict)
            if cm.commit_id == self.end_commit_id:
                break
        return ret_list
| 34.317073
| 76
| 0.643568
| 352
| 2,814
| 4.835227
| 0.193182
| 0.141011
| 0.045241
| 0.063455
| 0.286722
| 0.082256
| 0.032902
| 0
| 0
| 0
| 0
| 0.000956
| 0.256574
| 2,814
| 81
| 77
| 34.740741
| 0.81262
| 0
| 0
| 0.0625
| 0
| 0
| 0.051546
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046875
| false
| 0
| 0.015625
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e3234f6fa0a9c3711d4ac7b793885d955f7286
| 449
|
py
|
Python
|
example/mappers.py
|
mikeywaites/flask-arrested
|
6b97ce2ad2765f9acab10f4726e310258aa51de0
|
[
"MIT"
] | 46
|
2016-06-28T10:25:07.000Z
|
2019-12-10T20:53:47.000Z
|
example/mappers.py
|
mikeywaites/flask-arrested
|
6b97ce2ad2765f9acab10f4726e310258aa51de0
|
[
"MIT"
] | 4
|
2018-02-10T10:53:08.000Z
|
2018-11-07T08:11:06.000Z
|
example/mappers.py
|
mikeywaites/flask-arrested
|
6b97ce2ad2765f9acab10f4726e310258aa51de0
|
[
"MIT"
] | 9
|
2016-07-20T17:05:46.000Z
|
2022-02-15T18:40:17.000Z
|
from kim import Mapper, field
from example.models import Planet, Character
class PlanetMapper(Mapper):
    """kim Mapper (de)serialising Planet model instances."""
    __type__ = Planet
    # id and created_at are server-assigned, hence read-only.
    id = field.Integer(read_only=True)
    name = field.String()
    description = field.String()
    created_at = field.DateTime(read_only=True)
class CharacterMapper(Mapper):
    """kim Mapper (de)serialising Character model instances."""
    __type__ = Character
    # id and created_at are server-assigned, hence read-only.
    id = field.Integer(read_only=True)
    name = field.String()
    created_at = field.DateTime(read_only=True)
| 19.521739
| 47
| 0.712695
| 56
| 449
| 5.464286
| 0.428571
| 0.104575
| 0.156863
| 0.117647
| 0.526144
| 0.526144
| 0.526144
| 0.526144
| 0.526144
| 0
| 0
| 0
| 0.191537
| 449
| 22
| 48
| 20.409091
| 0.842975
| 0
| 0
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e3680aea79628533b40e4e3bc074491f7796fd
| 3,660
|
py
|
Python
|
collections/ansible_collections/community/general/plugins/connection/saltstack.py
|
escalate/ansible-gitops-example-repository
|
f7f7a9fcd09abd982f5fcd3bd196809a6c4c2f08
|
[
"MIT"
] | 1
|
2021-07-16T19:51:04.000Z
|
2021-07-16T19:51:04.000Z
|
collections/ansible_collections/community/general/plugins/connection/saltstack.py
|
escalate/ansible-gitops-example-repository
|
f7f7a9fcd09abd982f5fcd3bd196809a6c4c2f08
|
[
"MIT"
] | null | null | null |
collections/ansible_collections/community/general/plugins/connection/saltstack.py
|
escalate/ansible-gitops-example-repository
|
f7f7a9fcd09abd982f5fcd3bd196809a6c4c2f08
|
[
"MIT"
] | null | null | null |
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# Based on func.py
# (c) 2014, Michael Scherer <misc@zarb.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Michael Scherer (@mscherer) <misc@zarb.org>
    name: saltstack
    short_description: Allow ansible to piggyback on salt minions
    description:
        - This allows you to use existing Saltstack infrastructure to connect to targets.
'''

import os
import base64
from ansible import errors
from ansible.plugins.connection import ConnectionBase

# Probe salt availability at import time; _connect() raises a clear error
# later if the salt client library is not installed.
HAVE_SALTSTACK = False
try:
    import salt.client as sc
    HAVE_SALTSTACK = True
except ImportError:
    pass
class Connection(ConnectionBase):
    """Ansible connection plugin that executes tasks through salt minions."""

    has_pipelining = False
    # while the name of the product is salt, naming that module salt cause
    # trouble with module import
    transport = 'community.general.saltstack'

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
        # The salt minion id is the play's remote address.
        self.host = self._play_context.remote_addr

    def _connect(self):
        """Create the salt LocalClient; error out if salt is unavailable."""
        if not HAVE_SALTSTACK:
            raise errors.AnsibleError("saltstack is not installed")
        self.client = sc.LocalClient()
        self._connected = True
        return self

    def exec_command(self, cmd, sudoable=False, in_data=None):
        """Run *cmd* on the remote minion; return (rc, stdout, stderr)."""
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
        self._display.vvv("EXEC %s" % cmd, host=self.host)
        # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
        res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
        if self.host not in res:
            raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
        p = res[self.host]
        return p['retcode'], p['stdout'], p['stderr']

    @staticmethod
    def _normalize_path(path, prefix):
        # Anchor *path* at the filesystem root, normalize it, then re-root
        # the result under *prefix*.
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        """Transfer a local file to the remote minion via base64."""
        super(Connection, self).put_file(in_path, out_path)
        out_path = self._normalize_path(out_path, '/')
        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
        with open(in_path, 'rb') as in_fh:
            content = in_fh.read()
        # NOTE(review): base64.b64encode returns bytes on Python 3 — confirm
        # salt's hashutil.base64_decodefile accepts bytes here.
        self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])

    # TODO test it
    def fetch_file(self, in_path, out_path):
        """Fetch a file from the remote minion to the local machine."""
        super(Connection, self).fetch_file(in_path, out_path)
        in_path = self._normalize_path(in_path, '/')
        self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
        # NOTE(review): file is opened in binary mode but the salt call may
        # return str — confirm encoding handling on Python 3.
        open(out_path, 'wb').write(content)

    def close(self):
        """Terminate the connection; nothing to do here."""
        pass
| 36.237624
| 133
| 0.668033
| 507
| 3,660
| 4.668639
| 0.390533
| 0.037178
| 0.037178
| 0.032953
| 0.107731
| 0.066751
| 0.024504
| 0.024504
| 0.024504
| 0.024504
| 0
| 0.011867
| 0.217213
| 3,660
| 100
| 134
| 36.6
| 0.814311
| 0.185792
| 0
| 0.032258
| 0
| 0
| 0.197279
| 0.018027
| 0
| 0
| 0
| 0.01
| 0
| 1
| 0.112903
| false
| 0.032258
| 0.112903
| 0
| 0.322581
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e37ad4239180526865365831c9ddf7d0371aa5
| 5,074
|
py
|
Python
|
create/views.py
|
normaldotcom/webvirtmgr
|
8d822cb94105abf82eb0ff6651a36c43b0911d2a
|
[
"Apache-2.0"
] | 1
|
2019-07-16T20:32:44.000Z
|
2019-07-16T20:32:44.000Z
|
create/views.py
|
normaldotcom/webvirtmgr
|
8d822cb94105abf82eb0ff6651a36c43b0911d2a
|
[
"Apache-2.0"
] | null | null | null |
create/views.py
|
normaldotcom/webvirtmgr
|
8d822cb94105abf82eb0ff6651a36c43b0911d2a
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
from create.models import Flavor
from instance.models import Instance
from libvirt import libvirtError
from vrtManager.create import wvmCreate
from vrtManager import util
from create.forms import FlavorAddForm, NewVMForm
def create(request, host_id):
    """
    Create new instance.

    Django view: renders the VM creation page for one compute host and
    handles flavor add/delete plus instance creation POST actions.
    Python 2 era code (err.message, is_authenticated(), .keys()[0]).
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')
    errors = []
    compute = Compute.objects.get(id=host_id)
    flavors = Flavor.objects.filter().order_by('id')
    try:
        # Gather host capabilities needed to populate the form.
        conn = wvmCreate(compute.hostname,
                         compute.login,
                         compute.password,
                         compute.type)
        storages = sorted(conn.get_storages())
        networks = sorted(conn.get_networks())
        instances = conn.get_instances()
        get_images = sorted(conn.get_storages_images())
        mac_auto = util.randomMAC()
    except libvirtError as err:
        errors.append(err.message)
    # NOTE(review): if wvmCreate() raised above, `storages`/`networks` are
    # unbound here and this view raises NameError — confirm.
    if not storages:
        msg = _("You haven't defined have any storage pools")
        errors.append(msg)
    if not networks:
        msg = _("You haven't defined have any network pools")
        errors.append(msg)
    if request.method == 'POST':
        # Add a new flavor preset.
        if 'create_flavor' in request.POST:
            form = FlavorAddForm(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                create_flavor = Flavor(label=data['label'],
                                       vcpu=data['vcpu'],
                                       memory=data['memory'],
                                       disk=data['disk'])
                create_flavor.save()
                return HttpResponseRedirect(request.get_full_path())
        # Delete an existing flavor preset.
        if 'delete_flavor' in request.POST:
            flavor_id = request.POST.get('flavor', '')
            delete_flavor = Flavor.objects.get(id=flavor_id)
            delete_flavor.delete()
            return HttpResponseRedirect(request.get_full_path())
        # Create the virtual machine itself.
        if 'create' in request.POST:
            volumes = {}
            form = NewVMForm(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                if instances:
                    if data['name'] in instances:
                        msg = _("A virtual machine with this name already exists")
                        errors.append(msg)
                if not errors:
                    # Disk source: new volume, template clone, or existing images.
                    if data['hdd_size']:
                        if not data['mac']:
                            msg = _("No Virtual Machine MAC has been entered")
                            errors.append(msg)
                        else:
                            try:
                                path = conn.create_volume(data['storage'], data['name'], data['hdd_size'])
                                volumes[path] = conn.get_volume_type(path)
                            except libvirtError as msg_error:
                                errors.append(msg_error.message)
                    elif data['template']:
                        templ_path = conn.get_volume_path(data['template'])
                        clone_path = conn.clone_from_template(data['name'], templ_path)
                        volumes[clone_path] = conn.get_volume_type(clone_path)
                    else:
                        if not data['images']:
                            msg = _("First you need to create or select an image")
                            errors.append(msg)
                        else:
                            for vol in data['images'].split(','):
                                try:
                                    path = conn.get_volume_path(vol)
                                    volumes[path] = conn.get_volume_type(path)
                                except libvirtError as msg_error:
                                    errors.append(msg_error.message)
                    if not errors:
                        uuid = util.randomUUID()
                        try:
                            conn.create_instance(data['name'], data['memory'], data['vcpu'], data['host_model'],
                                                 uuid, volumes, data['networks'], data['virtio'], data['mac'])
                            create_instance = Instance(compute_id=host_id, name=data['name'], uuid=uuid)
                            create_instance.save()
                            return HttpResponseRedirect('/instance/%s/%s/' % (host_id, data['name']))
                        except libvirtError as msg_error:
                            # Roll back the freshly created volume on failure.
                            # NOTE(review): volumes.keys()[0] is Python 2 only.
                            if data['hdd_size']:
                                conn.delete_volume(volumes.keys()[0])
                            errors.append(msg_error.message)
    conn.close()
    # locals() hands every local (errors, flavors, storages, ...) to the template.
    return render_to_response('create.html', locals(), context_instance=RequestContext(request))
| 44.121739
| 112
| 0.513599
| 492
| 5,074
| 5.146341
| 0.268293
| 0.024882
| 0.047393
| 0.03357
| 0.232622
| 0.159558
| 0.159558
| 0.102686
| 0.102686
| 0.102686
| 0
| 0.000326
| 0.396334
| 5,074
| 114
| 113
| 44.508772
| 0.826314
| 0.003942
| 0
| 0.3
| 0
| 0
| 0.085947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01
| false
| 0.01
| 0.11
| 0
| 0.17
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e487af25b9797dd2a942cb5666ca85e89e2765
| 886
|
py
|
Python
|
utils/wassersteinGradientPenalty.py
|
andimarafioti/GACELA
|
34649fb01bdecbcb266db046a8b9c48c141f16e1
|
[
"MIT"
] | 15
|
2020-05-12T02:58:12.000Z
|
2022-03-14T12:10:56.000Z
|
utils/wassersteinGradientPenalty.py
|
tifgan/gacela
|
cd496cfce128ea7b6191a93639f8f4efac7e7142
|
[
"MIT"
] | 1
|
2021-05-22T14:02:06.000Z
|
2021-06-01T13:45:11.000Z
|
utils/wassersteinGradientPenalty.py
|
tifgan/gacela
|
cd496cfce128ea7b6191a93639f8f4efac7e7142
|
[
"MIT"
] | 5
|
2020-06-18T20:15:00.000Z
|
2021-11-05T15:45:35.000Z
|
import torch
__author__ = 'Andres'
def calc_gradient_penalty_bayes(discriminator, real_data, fake_data, gamma):
    """Compute a WGAN-style gradient penalty on random interpolates.

    Args:
        discriminator: callable mapping a batch tensor to critic scores.
        real_data: batch of real samples; dim 0 is the batch dimension.
            (Assumes a 4-D layout, since alpha is drawn as (B, 1, 1, 1) --
            TODO confirm against callers.)
        fake_data: generated samples, same shape as real_data.
        gamma: scalar weight applied to the penalty term.

    Returns:
        Scalar tensor: ((||grad||_2 - 1) ** 2) * gamma.

    NOTE(review): the norm is taken over the WHOLE gradient tensor (no dim=
    argument), not per-sample as in canonical WGAN-GP. Preserved as-is to
    keep behavior; confirm whether a per-sample norm was intended.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    batch_size = real_data.size()[0]
    # One interpolation coefficient per sample, broadcast over the
    # remaining (channel/spatial) dimensions.
    alpha = torch.rand(batch_size, 1, 1, 1)
    alpha = alpha.expand(real_data.size()).to(device)
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    # requires_grad_() replaces the deprecated torch.autograd.Variable wrapper.
    interpolates = interpolates.to(device).requires_grad_(True)
    disc_interpolates = discriminator(interpolates)
    gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                    grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                                    create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradient_penalty = ((gradients.norm(2) - 1) ** 2) * gamma
    return gradient_penalty
| 42.190476
| 91
| 0.688488
| 109
| 886
| 5.357798
| 0.431193
| 0.054795
| 0.041096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012587
| 0.193002
| 886
| 21
| 92
| 42.190476
| 0.804196
| 0
| 0
| 0
| 0
| 0
| 0.014656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e6a09b44f3ad67acebf3ea296df8c1d2d40eaf
| 4,075
|
py
|
Python
|
openke/data/UniverseTrainDataLoader.py
|
luofeisg/OpenKE-PuTransE
|
0bfefb3917e7479520917febd91a9f4d7353c7fc
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
openke/data/UniverseTrainDataLoader.py
|
luofeisg/OpenKE-PuTransE
|
0bfefb3917e7479520917febd91a9f4d7353c7fc
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
openke/data/UniverseTrainDataLoader.py
|
luofeisg/OpenKE-PuTransE
|
0bfefb3917e7479520917febd91a9f4d7353c7fc
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
'''
MIT License
Copyright (c) 2020 Rashid Lafraie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import ctypes
import numpy as np
from .TrainDataLoader import TrainDataLoader
class UniverseTrainDataLoader(TrainDataLoader):
    """TrainDataLoader variant that samples per-universe training subsets
    through the underlying C library (getParallelUniverse and friends)."""

    def __init__(self, in_path="./", batch_size=None, nbatches=None, threads=8, sampling_mode="normal", bern_flag=0,
                 filter_flag=1, neg_ent=1, neg_rel=0, initial_random_seed=2):
        super(UniverseTrainDataLoader, self).__init__(in_path=in_path, batch_size=batch_size, nbatches=nbatches,
                                                      threads=threads, sampling_mode=sampling_mode, bern_flag=bern_flag,
                                                      filter_flag=filter_flag, neg_ent=neg_ent, neg_rel=neg_rel,
                                                      initial_random_seed=initial_random_seed)
        # Totals of the currently compiled universe; populated by
        # compile_universe_dataset().
        self.entity_total_universe = 0
        self.relation_total_universe = 0
        self.train_total_universe = 0
        # C signatures: sampling takes four pointer arguments followed by
        # seven int64 arguments.
        self.lib.sampling.argtypes = [ctypes.c_void_p] * 4 + [ctypes.c_int64] * 7
        self.lib.getParallelUniverse.argtypes = [
            ctypes.c_int64,
            ctypes.c_float,
            ctypes.c_int64,
        ]
        self.lib.getEntityRemapping.argtypes = [ctypes.c_void_p]
        self.lib.getRelationRemapping.argtypes = [ctypes.c_void_p]
        for getter in (self.lib.getEntityTotalUniverse,
                       self.lib.getRelationTotalUniverse,
                       self.lib.getTrainTotalUniverse):
            getter.restype = ctypes.c_int64

    def swap_helpers(self):
        """Swap the library's internal helper structures."""
        self.lib.swapHelpers()

    def reset_universe(self):
        """Discard the current universe and restore global training state."""
        self.lib.resetUniverse()
        # NOTE(review): passes the function object itself (not getTrainTotal())
        # to set_nbatches -- preserved as-is; confirm upstream intent.
        self.set_nbatches(self.lib.getTrainTotal, self.nbatches)

    def get_universe_mappings(self):
        """Return (entity, relation) int64 arrays mapping universe ids to global ids."""
        entity_map = np.zeros(self.entity_total_universe, dtype=np.int64)
        relation_map = np.zeros(self.relation_total_universe, dtype=np.int64)
        # Hand the raw buffer addresses to the C library to fill in place.
        self.lib.getEntityRemapping(entity_map.__array_interface__["data"][0])
        self.lib.getRelationRemapping(relation_map.__array_interface__["data"][0])
        return entity_map, relation_map

    def compile_universe_dataset(self, triple_constraint, balance_param, relation_in_focus):
        """Ask the library to assemble a universe and cache its size totals."""
        self.lib.getParallelUniverse(triple_constraint, balance_param, relation_in_focus)
        self.entity_total_universe = self.lib.getEntityTotalUniverse()
        self.relation_total_universe = self.lib.getRelationTotalUniverse()
        self.train_total_universe = self.lib.getTrainTotalUniverse()
        self.set_nbatches(self.train_total_universe, self.nbatches)
| 40.75
| 120
| 0.694724
| 501
| 4,075
| 5.41517
| 0.345309
| 0.049023
| 0.053078
| 0.046443
| 0.200147
| 0.122374
| 0.103207
| 0.083303
| 0.048655
| 0.048655
| 0
| 0.013826
| 0.23681
| 4,075
| 99
| 121
| 41.161616
| 0.858521
| 0.262577
| 0
| 0.25
| 0
| 0
| 0.005364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.066667
| 0
| 0.183333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e92112b61dc64252a8bdb77bbf3e0e15b55abe
| 5,074
|
py
|
Python
|
test/jit/test_backend_nnapi.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
test/jit/test_backend_nnapi.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
test/jit/test_backend_nnapi.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
# Refuse direct execution; these tests are driven through test_jit.py.
if __name__ == "__main__":
    raise RuntimeError(
        "This test file is not meant to be run directly, use:\n\n"
        "\tpython test/test_jit.py TESTNAME\n\n"
        "instead."
    )
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
# Path to the NNAPI delegate shared library built alongside PyTorch.
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
                 "Skipping the test as libnnapi_backend.so was not found")
@unittest.skipIf(TEST_WITH_ASAN, "Unresolved bug with ASAN")
class TestNnapiBackend(TestNNAPI):
    """Tests for lowering modules to the NNAPI delegate backend.

    Inherits the model-level tests from TestNNAPI and adds checks for the
    to_backend compile-spec validation.
    """
    def setUp(self):
        """Force float32 default dtype and load the delegate library."""
        super().setUp()
        # Save default dtype
        module = torch.nn.PReLU()
        self.default_dtype = module.weight.dtype
        # Change dtype to float32 (since a different unit test changed dtype to float64,
        # which is not supported by the Android NNAPI delegate)
        # Float32 should typically be the default in other files.
        torch.set_default_dtype(torch.float32)
        # Load nnapi delegate library
        torch.ops.load_library(str(lib_path))
    # Override
    def call_lowering_to_nnapi(self, traced_module, args):
        """Lower *traced_module* to the nnapi backend with *args* as inputs."""
        compile_spec = {"forward": {"inputs": args}}
        return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
    def test_tensor_input(self):
        """Lowering accepts both a bare Tensor and a Tensor in a list."""
        # Lower a simple module
        args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
        module = torch.nn.PReLU()
        traced = torch.jit.trace(module, args)
        # Argument input is a single Tensor
        self.call_lowering_to_nnapi(traced, args)
        # Argument input is a Tensor in a list
        self.call_lowering_to_nnapi(traced, [args])
    # Test exceptions for incorrect compile specs
    # NOTE(review): method name has a typo ("santiy" -> sanity); kept as-is
    # since renaming would change the discovered test name.
    def test_compile_spec_santiy(self):
        """Each malformed compile spec raises RuntimeError with a precise message."""
        args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
        module = torch.nn.PReLU()
        traced = torch.jit.trace(module, args)
        errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
        # No forward key
        compile_spec = {"backward": {"inputs": args}}
        with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
        # No dictionary under the forward key
        compile_spec = {"forward": 1}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain a dictionary with an \"inputs\" key, "
                                    "under it's \"forward\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
        # No inputs key (in the dictionary under the forward key)
        compile_spec = {"forward": {"not inputs": args}}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain a dictionary with an \"inputs\" key, "
                                    "under it's \"forward\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
        # No Tensor or TensorList under the inputs key
        compile_spec = {"forward": {"inputs": 1}}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
        compile_spec = {"forward": {"inputs": [1]}}
        with self.assertRaisesRegex(RuntimeError,
                                    "method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
                                    + errorMsgTail):
            torch._C._jit_to_backend("nnapi", traced, compile_spec)
    def tearDown(self):
        """Restore the default dtype saved in setUp."""
        # Change dtype back to default (Otherwise, other unit tests will complain)
        torch.set_default_dtype(self.default_dtype)
| 44.508772
| 132
| 0.643082
| 644
| 5,074
| 4.89441
| 0.285714
| 0.069797
| 0.037754
| 0.020939
| 0.409264
| 0.388959
| 0.388959
| 0.35882
| 0.329632
| 0.329632
| 0
| 0.011182
| 0.259756
| 5,074
| 113
| 133
| 44.902655
| 0.828009
| 0.162594
| 0
| 0.356164
| 0
| 0.027397
| 0.252449
| 0.005632
| 0
| 0
| 0
| 0
| 0.068493
| 1
| 0.068493
| false
| 0
| 0.109589
| 0
| 0.205479
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8e997acb5df08763f83e5ed402ea27c456b06ca
| 1,078
|
py
|
Python
|
main/configure.py
|
syxu828/Graph2Seq-0.1
|
36e38f755c0ee390735e49121259151da54bcc1c
|
[
"Apache-2.0"
] | 24
|
2018-11-04T17:16:52.000Z
|
2022-01-06T12:34:49.000Z
|
main/configure.py
|
syxu828/Graph2Seq-0.1
|
36e38f755c0ee390735e49121259151da54bcc1c
|
[
"Apache-2.0"
] | 3
|
2018-12-09T00:31:36.000Z
|
2020-07-29T06:21:51.000Z
|
main/configure.py
|
syxu828/Graph2Seq-0.1
|
36e38f755c0ee390735e49121259151da54bcc1c
|
[
"Apache-2.0"
] | 4
|
2019-01-09T06:44:41.000Z
|
2019-08-04T07:55:00.000Z
|
# Dataset locations (relative to the main/ working directory) and vocab index.
train_data_path = "../data/no_cycle/train.data"
dev_data_path = "../data/no_cycle/dev.data"
test_data_path = "../data/no_cycle/test.data"
word_idx_file_path = "../data/word.idx"
# Embedding / batching / optimization settings.
word_embedding_dim = 100
train_batch_size = 32
dev_batch_size = 500
test_batch_size = 500
l2_lambda = 0.000001
learning_rate = 0.001
epochs = 100
encoder_hidden_dim = 200
num_layers_decode = 1
word_size_max = 1
dropout = 0.0
path_embed_method = "lstm" # cnn or lstm or bi-lstm
# Special vocabulary tokens.
unknown_word = "<unk>"
PAD = "<PAD>"
GO = "<GO>"
EOS = "<EOS>"
deal_unknown_words = True
# Decoding settings.
seq_max_len = 11
decoder_type = "greedy" # greedy, beam
beam_width = 4
attention = True
num_layers = 1 # 1 or 2
# the following are for the graph encoding method
weight_decay = 0.0000
sample_size_per_layer = 4
sample_layer_size = 4
hidden_layer_dim = 100
feature_max_len = 1
feature_encode_type = "uni"
# graph_encode_method = "max-pooling" # "lstm" or "max-pooling"
graph_encode_direction = "bi" # "single" or "bi"
concat = True
encoder = "gated_gcn" # "gated_gcn" "gcn" "seq"
lstm_in_gcn = "none" # before, after, none
| 21.137255
| 63
| 0.727273
| 180
| 1,078
| 4.033333
| 0.466667
| 0.044077
| 0.049587
| 0.057851
| 0.078512
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054645
| 0.151206
| 1,078
| 50
| 64
| 21.56
| 0.738798
| 0.197588
| 0
| 0
| 0
| 0
| 0.164912
| 0.091228
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8ea7055295dd79ddcfe4843e79b06f95f13078d
| 7,506
|
py
|
Python
|
dataControlWidget.py
|
andreasbayer/AEGUIFit
|
6a1e31091b74d648d007c75c9fef6efae4086860
|
[
"BSD-3-Clause"
] | null | null | null |
dataControlWidget.py
|
andreasbayer/AEGUIFit
|
6a1e31091b74d648d007c75c9fef6efae4086860
|
[
"BSD-3-Clause"
] | null | null | null |
dataControlWidget.py
|
andreasbayer/AEGUIFit
|
6a1e31091b74d648d007c75c9fef6efae4086860
|
[
"BSD-3-Clause"
] | null | null | null |
from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox
from InftyDoubleSpinBox import InftyDoubleSpinBox
from PyQt5.QtCore import pyqtSignal, Qt
import helplib as hl
import numpy as np
class dataControlWidget(QGroupBox):
    """Group box with the data-related settings of a loaded measurement:
    energy shift, error-bar display, and ignore-first-point handling.

    Emits Qt signals when the user changes a setting or a file is loaded so
    the rest of the application can react.
    """
    # Signals fired on user interaction / file load.
    showErrorBars_changed = pyqtSignal(bool)
    ignoreFirstPoint_changed = pyqtSignal(bool)
    data_changed = pyqtSignal(bool, bool)
    data_shift = pyqtSignal(np.float64)
    load_fits = pyqtSignal(list)
    load_view = pyqtSignal(str)
    load_meta = pyqtSignal(str)
    fit_on_startup = pyqtSignal()
    # Checkbox captions; the second is shown while no std errors are loaded.
    SHOW_ERROR_BARS = "Show error bars"
    SHOW_ERROR_BARS_NOT_LOADED = "Show error bars (could not be calculated)"
    def __init__(self):
        """Build the widget tree, wire up signals, and reset to disabled state."""
        QWidget.__init__(self)
        self.setTitle('Data Settings')
        self.__lblEnergyShift = QLabel("Energy Shift:")
        self.__dsbEnergyShift = InftyDoubleSpinBox()
        self.__dsbEnergyShift.editingFinished.connect(self.__energyShiftChanged)
        self.__dsbEnergyShift.setSingleStep(0.01)
        self.__chkShowErrorBars = QCheckBox(self.SHOW_ERROR_BARS_NOT_LOADED)
        self.__chkShowErrorBars.stateChanged.connect(self.__chkShowErrorBars_changed)
        self.__chkIgnoreFirstPoint = QCheckBox('Ignore first data point.')
        self.__chkIgnoreFirstPoint.stateChanged.connect(self.__chkIgnoreFirstPoint_changed)
        self.__mainLayout = QGridLayout()
        self.setLayout(self.__mainLayout)
        self.__mainLayout.setAlignment(Qt.AlignTop)
        self.__mainLayout.addWidget(self.__lblEnergyShift, 0, 0)
        self.__mainLayout.addWidget(self.__dsbEnergyShift, 0, 1)
        self.__mainLayout.addWidget(self.__chkShowErrorBars, 1, 0, 1, 2)
        self.__mainLayout.addWidget(self.__chkIgnoreFirstPoint, 2, 0, 1, 2)
        self.__chkIgnoreFirstPoint.setVisible(False)
        self.reset(False)
    def reset(self, enable):
        """Clear all loaded data and return the controls to their defaults.

        enable: whether the widget should be enabled afterwards.
        """
        self.__data = None
        self.__all_data = None
        self.__stdErrors = None
        self.__chkShowErrorBars.setCheckable(True)
        self.__chkShowErrorBars.setChecked(False)
        self.__chkShowErrorBars.setEnabled(False)
        self.__chkIgnoreFirstPoint.setCheckable(True)
        self.__chkIgnoreFirstPoint.setChecked(False)
        self.__chkIgnoreFirstPoint.setEnabled(False)
        self.setEnergyShift(0.0)
        self.__prevShift = 0.0
        self.setEnabled(enable)
    def __chkShowErrorBars_changed(self, state):
        # Re-apply the state, then broadcast the new value.
        self.__chkShowErrorBars.setCheckState(state)
        self.showErrorBars_changed.emit(self.getShowErrorBars())
    def __chkIgnoreFirstPoint_changed(self, state):
        # Re-apply the state, then broadcast the new value.
        self.__chkIgnoreFirstPoint.setCheckState(state)
        self.ignoreFirstPoint_changed.emit(self.getIgnoreFirstPoint())
    def __energyShiftChanged(self):
        self.cause_shift()
    def cause_shift(self):
        """Emit the delta between the spin-box value and the previous shift."""
        energyShift = self.__dsbEnergyShift.value()
        increment = energyShift - self.__prevShift
        self.__prevShift = energyShift
        self.data_shift.emit(increment)
        self.data_changed.emit(self.getShowErrorBars(), self.getIgnoreFirstPoint())
    # def setData(self, data):
    # self.__data = data
    def getData(self):
        """Return the loaded data, skipping row 0 if ignore-first-point is set."""
        first_point = 0
        if self.getIgnoreFirstPoint():
            first_point = 1
        return self.__data[first_point:,]
    def getEnergyShift(self):
        """Return the current energy-shift spin-box value."""
        return (self.__dsbEnergyShift.value())
    def setEnergyShift(self, value):
        """Set the spin-box to *value* (does not emit data_shift itself)."""
        #increment = self.__dsbEnergyShift.value() - value
        increment = value - self.__dsbEnergyShift.value()
        self.__dsbEnergyShift.setValue(value)
        #self.__shiftData(increment)
        #self.data_shift.emit(increment)
    def __shiftData(self, increment):
        # Adds *increment* to the x-value (column 0) of every row.
        try:
            if self.__data is not None:
                for set in self.__data:
                    set[0] += increment
        except Exception as e:
            print(e)
    def getStdErrors(self):
        """Return std errors aligned with getData(), or None if not loaded."""
        if self.__stdErrors is not None:
            first_point = 0
            if self.getIgnoreFirstPoint():
                first_point = 1
            return self.__stdErrors[first_point:]
        else:
            return None
    def getMax_Energy(self):
        """Return the x-value of the last data row, or None without data."""
        if self.getData() is not None:
            return self.getData()[-1][0]
        else:
            return None
    def getMin_Energy(self):
        """Return the x-value of the first data row, or None without data."""
        if self.getData() is not None:
            return self.getData()[0][0]
        else:
            return None
    def getShowErrorBars(self):
        return self.__chkShowErrorBars.isChecked()
    def setShowErrorBars(self, value):
        self.__chkShowErrorBars.setChecked(value)
    def getIgnoreFirstPoint(self):
        return self.__chkIgnoreFirstPoint.isChecked()
    def setIgnoreFirstPoint(self, value):
        self.__chkIgnoreFirstPoint.setChecked(value)
    def hasStdErrors(self):
        return self.__stdErrors is not None
    def loadFile(self, fileName, id_string):
        """Load a data file, update the controls, and emit the load_* signals.

        Returns the id_found flag reported by the file reader.
        """
        self.__all_data, self.__stdErrors, (fit_strings, view_string, data_string, meta_string), id_found =\
            hl.readFileForFitsDataAndStdErrorAndMetaData(fileName, id_string)
        #we need a copy to not save any altered data!
        self.__data = (self.__all_data[:, 0:2]).copy()
        if len(self.__data) <= 1:
            raise Exception("Not enough data in file!")
        if self.hasStdErrors():
            self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS)
        else:
            self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS_NOT_LOADED)
        self.__chkShowErrorBars.setEnabled(self.hasStdErrors())
        self.__chkShowErrorBars.setChecked(self.hasStdErrors())
        self.__chkIgnoreFirstPoint.setEnabled(True)
        self.data_changed.emit(self.hasStdErrors(), self.getIgnoreFirstPoint())
        self.load_fits.emit(fit_strings)
        self.load_view.emit(view_string)
        self.load_meta.emit(meta_string)
        self.load_from_data_string(data_string)
        self.cause_shift()
        self.fit_on_startup.emit()
        return id_found
    def load_from_data_string(self, data_string):
        """Apply settings serialized by get_data_string ('\\v'-separated k=v pairs)."""
        if data_string is not None:
            split_string = data_string.split('\v')
            for i in range(0, len(split_string)):
                item = split_string[i].split('=')
                if len(item) == 2:
                    if (item[0] == 'egs'):
                        self.setEnergyShift(np.float64(item[1]))
                    elif item[0] == 'seb':
                        if item[1] == '1' or item[1] == 'True':
                            self.setShowErrorBars(True)
                        elif item[1] == '0' or item[1] == 'False':
                            self.setShowErrorBars(False)
                    elif item[0] == 'ifd':
                        if item[1] == '1' or item[1] == 'True':
                            self.setIgnoreFirstPoint(True)
                        elif item[1] == '0' or item[1] == 'False':
                            self.setIgnoreFirstPoint(False)
    def get_data_string(self):
        """Serialize the current settings for load_from_data_string."""
        return 'egs=' + str(self.getEnergyShift()) + '\vseb=' + str(self.getShowErrorBars()) +\
               '\vifd=' + str(self.getIgnoreFirstPoint())
    def saveFile(self, fileName, id_string, fit_strings, view_string, data_string, meta_string):
        """Persist the unmodified loaded data plus the given metadata strings."""
        hl.saveFilewithMetaData(id_string, fileName, self.__all_data, (fit_strings, view_string, data_string, meta_string))
| 34.589862
| 123
| 0.635092
| 769
| 7,506
| 5.890767
| 0.197659
| 0.06181
| 0.020088
| 0.023841
| 0.182561
| 0.14128
| 0.132671
| 0.116998
| 0.070199
| 0.060044
| 0
| 0.010033
| 0.269651
| 7,506
| 216
| 124
| 34.75
| 0.816308
| 0.026246
| 0
| 0.138158
| 0
| 0
| 0.024654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144737
| false
| 0
| 0.032895
| 0.032895
| 0.335526
| 0.006579
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8ea7298a7caca93599e616f2e4db31947e61892
| 6,425
|
py
|
Python
|
src/freemovr_engine/calib/acquire.py
|
strawlab/flyvr
|
335892cae740e53e82e07b526e1ba53fbd34b0ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3
|
2015-01-29T14:09:25.000Z
|
2016-04-24T04:25:49.000Z
|
src/freemovr_engine/calib/acquire.py
|
strawlab/flyvr
|
335892cae740e53e82e07b526e1ba53fbd34b0ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
src/freemovr_engine/calib/acquire.py
|
strawlab/flyvr
|
335892cae740e53e82e07b526e1ba53fbd34b0ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
import roslib
roslib.load_manifest('sensor_msgs')
roslib.load_manifest('dynamic_reconfigure')
import rospy
import sensor_msgs.msg
import dynamic_reconfigure.srv
import dynamic_reconfigure.encoding
import numpy as np
import time
import os.path
import queue
class CameraHandler(object):
    """Subscribes to one camera's image topic and forwards frames into a queue.

    Optionally wraps the camera's dynamic_reconfigure service so settings can
    be changed between acquisitions.
    """

    def __init__(self, topic_prefix='', debug=False, enable_dynamic_reconfigure=False):
        self.topic_prefix = topic_prefix
        self.debug = debug
        rospy.Subscriber('%s/image_raw' % self.topic_prefix,
                         sensor_msgs.msg.Image,
                         self.get_image_callback)
        self.pipeline_max_latency = 0.2
        self.last_image = None
        self.im_queue = None
        self.recon = None
        if enable_dynamic_reconfigure:
            self.recon = rospy.ServiceProxy('%s/set_parameters' % self.topic_prefix,
                                            dynamic_reconfigure.srv.Reconfigure)
            self.recon_cache = {}

    def reconfigure(self, **params):
        """Push parameters to the camera (no-op unless reconfigure is enabled)."""
        if self.recon is None:
            return
        # Only treat parameters as changed when they differ from the cache.
        changed = {name: value for name, value in params.items()
                   if name not in self.recon_cache or self.recon_cache[name] != value}
        if not changed:
            return
        request = dynamic_reconfigure.encoding.encode_config(params)
        self.recon_cache.update(changed)
        self.recon(request)
        if self.im_queue is not None:
            # Drop frames captured with the old settings.
            while True:
                try:
                    self.im_queue.get_nowait()
                except queue.Empty:
                    break

    def set_im_queue(self, q):
        """Attach the shared queue that frames are delivered into."""
        self.im_queue = q

    def get_image_callback(self, msg):
        """ROS subscriber callback: enqueue (topic_prefix, msg); drop on overflow."""
        if self.im_queue is None:
            return
        try:
            if self.debug:
                print("%s got image: %f" % (self.topic_prefix, msg.header.stamp.to_sec()))
            self.im_queue.put_nowait((self.topic_prefix, msg))
        except queue.Full:
            if self.debug:
                print(self.topic_prefix, "full")
class _Runner(object):
def __init__(self,cam_handlers,ros_latency=0.2,queue_depth=20):
self.cam_handlers = cam_handlers
self.im_queue = queue.Queue(len(cam_handlers)*queue_depth)
for ch in self.cam_handlers:
ch.set_im_queue(self.im_queue)
self.ros_latency = ros_latency
self.max_cam_latency = max( [ch.pipeline_max_latency for ch in self.cam_handlers ])
self._result = {}
@property
def result(self):
return self._result
@property
def result_as_nparray(self):
res = {}
for cam in self._result:
nimgs = len(self._result[cam])
tmpres = [0]*nimgs
for i in range(nimgs):
msg = self._result[cam][i]
shape = (msg.height, msg.width)
imarr = np.fromstring(msg.data,dtype=np.uint8)
imarr.shape = (msg.height, msg.width)
tmpres[i] = imarr
#sad to use dstack here, IMO res[cam][:,:,i] = imarr
#should have worked.
res[cam] = np.dstack(tmpres)
return res
def cycle_duration( self, dur ):
tstart = time.time()
while (time.time() - tstart) < dur:
time.sleep(0.05) # wait 50 msec
def clear_queue(self):
q = self.im_queue
while 1:
try:
q.get_nowait()
except queue.Empty:
break
def _is_done(self,rdict,n_per_camera,verbose=False):
done=True
for topic_prefix in list(rdict.keys()):
if verbose:
rospy.loginfo(' _is_done() has %d frames for %r'%(len(rdict[topic_prefix]), topic_prefix))
if len(rdict[topic_prefix]) < n_per_camera:
done=False
return done
class SimultaneousCameraRunner(_Runner):
    """Acquire n images from every camera over the same time window."""

    def __init__(self, cam_handlers, **kwargs):
        _Runner.__init__(self, cam_handlers, **kwargs)

    def get_images(self, n_per_camera, pre_func=None, pre_func_args=None, post_func=None, post_func_args=None, verbose=False):
        """Collect *n_per_camera* frames from each camera.

        pre_func/post_func are optional callables invoked with their arg lists
        just before acquisition starts and right after it finishes.

        The arg-list defaults were changed from the mutable ``[]`` (shared
        across every call -- the classic mutable-default bug) to ``None``;
        caller-visible behavior is unchanged.
        """
        if pre_func_args is None:
            pre_func_args = []
        if post_func_args is None:
            post_func_args = []
        self._result.clear()
        for ch in self.cam_handlers:
            self._result[ch.topic_prefix] = []
        # Clear the queue so only freshly captured frames are collected.
        self.clear_queue()
        if pre_func: pre_func(*pre_func_args)
        # Latest acceptable capture time for the whole batch.
        t_latest = time.time() + (self.ros_latency + self.max_cam_latency)*n_per_camera
        # Wait for the images to arrive.
        while not self._is_done(self._result, n_per_camera, verbose=verbose):
            try:
                topic_prefix, msg = self.im_queue.get(1, 10.0)  # block, 10 second timeout
            except queue.Empty:
                continue
            t_image = msg.header.stamp.to_sec()
            if t_image > t_latest:
                rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
            self._result[topic_prefix].append( msg )
        if post_func: post_func(*post_func_args)
class SequentialCameraRunner(_Runner):
    """Acquire images camera-by-camera with per-frame timing checks."""

    def __init__(self, cam_handlers, **kwargs):
        # BUGFIX: pop wait_duration BEFORE forwarding kwargs. The original
        # forwarded **kwargs (including wait_duration) to _Runner.__init__,
        # which does not accept that keyword, so passing wait_duration always
        # raised TypeError before the kwargs.get() line was reached.
        self.wait_duration = kwargs.pop("wait_duration", 0.1)
        _Runner.__init__(self, cam_handlers, **kwargs)
        self.check_earliest = False
        self.check_latest = False

    def get_images(self, n_per_camera, verbose=False):
        """Collect *n_per_camera* frames per camera; optionally reject frames
        captured before t_earliest or after t_latest."""
        self._result.clear()
        for ch in self.cam_handlers:
            self._result[ch.topic_prefix] = []
        t_earliest = time.time()
        self.clear_queue()
        t_latest = t_earliest + (self.ros_latency + self.max_cam_latency)
        while not self._is_done(self._result, n_per_camera, verbose=verbose):
            try:
                topic_prefix, msg = self.im_queue.get(1, 10.0)  # block, 10 second timeout
            except queue.Empty:
                continue
            t_image = msg.header.stamp.to_sec()
            if self.check_latest and t_image > t_latest:
                rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
            if self.check_earliest and t_image < t_earliest:
                rospy.logwarn("image from %s at t=%f was too early (by %f)" % (topic_prefix, t_image, t_earliest - t_image))
                continue
            self._result[topic_prefix].append( msg )
| 36.095506
| 124
| 0.588327
| 829
| 6,425
| 4.306393
| 0.207479
| 0.064706
| 0.033894
| 0.026611
| 0.380112
| 0.337815
| 0.285434
| 0.229132
| 0.220168
| 0.220168
| 0
| 0.006348
| 0.313463
| 6,425
| 177
| 125
| 36.299435
| 0.802993
| 0.036887
| 0
| 0.296552
| 0
| 0
| 0.040783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096552
| false
| 0
| 0.062069
| 0.006897
| 0.213793
| 0.013793
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8eb7ee679859acda30ad6ca74e666a2bc11c767
| 6,949
|
py
|
Python
|
examples/hfht/pointnet_classification.py
|
nixli/hfta
|
76274b5ee0e32732da20b153a3cc6550510d8a78
|
[
"MIT"
] | 24
|
2021-04-06T20:36:10.000Z
|
2022-02-26T17:03:33.000Z
|
examples/hfht/pointnet_classification.py
|
nixli/hfta
|
76274b5ee0e32732da20b153a3cc6550510d8a78
|
[
"MIT"
] | 20
|
2021-04-02T00:51:34.000Z
|
2022-03-29T15:00:08.000Z
|
examples/hfht/pointnet_classification.py
|
nixli/hfta
|
76274b5ee0e32732da20b153a3cc6550510d8a78
|
[
"MIT"
] | 5
|
2021-04-11T20:07:32.000Z
|
2021-06-14T06:41:05.000Z
|
import argparse
import logging
import numpy as np
import os
import pandas as pd
import random
import subprocess
from pathlib import Path
from hyperopt import hp
from hyperopt.pyll.stochastic import sample
from hfta.hfht import (tune_hyperparameters, attach_common_args,
rearrange_algorithm_kwargs, handle_integers,
generate_fusible_param_flags, generate_nonfusible_param)
from hfta.workflow import extract_logging_level
from hfta.hfht.utils import fuse_dicts
def main(args):
  """Tune pointnet-classification hyperparameters via hfht.

  Builds the fusible/non-fusible search space, defines the callbacks that
  launch train_classification.py as a subprocess, and hands everything to
  tune_hyperparameters().
  """
  # Seed every RNG involved so trials are reproducible.
  random.seed(args.seed)
  np.random.seed(args.seed)
  rng_state = np.random.RandomState(seed=args.seed)
  # Hyperparameters that HFTA can fuse across trials.
  fusibles = {
      'lr': hp.uniform('lr', 0.0001, 0.01),
      'beta1': hp.uniform('beta1', 0.001, 0.999),
      'beta2': hp.uniform('beta2', 0.001, 0.999),
      'weight_decay': hp.uniform('weight_decay', 0.0, 0.5),
      'gamma': hp.uniform('gamma', 0.1, 0.9),
      'step_size': hp.choice('step_size', (5, 10, 20, 40)),
  }
  # Hyperparameters that change model/batch structure and cannot be fused.
  nonfusibles = {
      'batch_size': hp.choice('batch_size', (8, 16, 32)),
      'feature_transform': hp.choice('feature_transform', (True, False)),
  }
  def _run(results_dir, epochs, iters_per_epoch, params, env_vars=None):
    """Launch one train_classification.py run; returns True on success."""
    # Build the cmd.
    cmd = [
        'python',
        'train_classification.py',
        '--epochs',
        str(epochs),
        '--iters-per-epoch',
        str(iters_per_epoch),
        '--dataset',
        args.dataset,
        '--dataset_type',
        args.dataset_type,
        '--num_points',
        str(args.num_points),
        '--device',
        args.device,
        '--eval',
        '--seed',
        str(args.seed),
        '--batch_size',
        str(generate_nonfusible_param(params, 'batch_size')),
    ]
    if results_dir is not None:
      cmd.extend(['--outf', results_dir])
    if generate_nonfusible_param(params, 'feature_transform'):
      cmd.append('--feature_transform')
    cmd.extend(
        generate_fusible_param_flags(
            params,
            ['lr', 'beta1', 'beta2', 'weight_decay', 'gamma', 'step_size'],
        ))
    if args.mode == 'hfta':
      cmd.append('--hfta')
    if args.amp:
      cmd.append('--amp')
    # Launch the training process.
    succeeded = True
    try:
      logging.info('--> Running cmd = {}'.format(cmd))
      subprocess.run(
          cmd,
          stdout=subprocess.DEVNULL if results_dir is None else open(
              os.path.join(results_dir, 'stdout.txt'),
              'w',
          ),
          stderr=subprocess.DEVNULL if results_dir is None else open(
              os.path.join(results_dir, 'stderr.txt'),
              'w',
          ),
          check=True,
          cwd=os.path.join(
              os.path.abspath(os.path.expanduser(os.path.dirname(__file__))),
              '../pointnet/'),
          env=env_vars,
      )
    except subprocess.CalledProcessError as e:
      logging.error(e)
      succeeded = False
    return succeeded
  def try_params(ids, epochs, params, env_vars=None):
    """ Running the training process for pointnet classification task.
    Args:
      ids: Either a single int ID (for serial), or a list of IDs (for HFTA).
      epochs: number of epochs to run.
      params: maps hyperparameter name to its value(s). For HFTA, the values are
        provided as a list.
      env_vars: optional, dict(str, str) that includes extra environment that
        needs to be forwarded to the subprocess call
    Returns:
      result(s): A single result dict for serial or a list of result dicts for
        HFTA in the same order as ids.
      early_stop(s): Whether the training process early stopped. A single bool
        for serial or a list of bools for HFTA in the same order as ids.
    """
    epochs = int(round(epochs))
    ids_str = (','.join([str(i) for i in ids]) if isinstance(
        ids,
        (list, tuple),
    ) else str(ids))
    # Allocate result dir.
    results_dir = os.path.join(args.outdir, ids_str)
    Path(results_dir).mkdir(parents=True, exist_ok=True)
    # Run training.
    succeeded = _run(
        results_dir,
        epochs,
        args.iters_per_epoch,
        params,
        env_vars=env_vars,
    )
    if not succeeded:
      raise RuntimeError('_run failed!')
    # Gather the results.
    results_frame = pd.read_csv(os.path.join(results_dir, 'eval.csv'))
    if isinstance(ids, (list, tuple)):
      results = [{'acc': acc} for acc in results_frame['acc'].tolist()]
      assert len(results) == len(ids)
      return results, [False] * len(ids)
    else:
      return {'acc': results_frame['acc'][0]}, False
  def dry_run(
      B=None,
      nonfusibles_kvs=None,
      epochs=None,
      iters_per_epoch=None,
      env_vars=None,
  ):
    """Sample random fusible params and run a throwaway (un-logged) trial.

    B is the fusion batch size; B == 0 means a single serial run.
    """
    params = [{
        **handle_integers(sample(fusibles, rng=rng_state)),
        **nonfusibles_kvs
    } for _ in range(max(B, 1))]
    if B > 0:
      params = fuse_dicts(params)
    else:
      params = params[0]
    return _run(None, epochs, iters_per_epoch, params, env_vars=env_vars)
  # Hand the space and callbacks to the hfht driver; 'acc' is maximized.
  tune_hyperparameters(
      space={
          **fusibles,
          **nonfusibles
      },
      try_params_callback=try_params,
      dry_run_callback=dry_run,
      mode=args.mode,
      algorithm=args.algorithm,
      nonfusibles=nonfusibles.keys(),
      dry_run_repeats=args.dry_run_repeats,
      dry_run_epochs=args.dry_run_epochs,
      dry_run_iters_per_epoch=args.dry_run_iters_per_epoch,
      metric='acc',
      goal='max',
      algorithm_configs={
          'hyperband': args.hyperband_kwargs,
          'random': args.random_kwargs,
      },
      seed=args.seed,
      outdir=args.outdir,
  )
def attach_args(parser=None):
    """Attach this training script's CLI arguments plus the common tuning args.

    Args:
        parser: optional argparse.ArgumentParser to extend; a fresh parser is
            created when omitted.

    Returns:
        The parser with all arguments attached (including whatever
        attach_common_args adds).
    """
    # Fix: the old signature used a mutable default `argparse.ArgumentParser()`
    # evaluated once at import time, so a second no-arg call would re-add the
    # same arguments to the shared parser and raise.
    if parser is None:
        parser = argparse.ArgumentParser()
    parser.add_argument(
        '--workers',
        type=int,
        help='number of data loading workers',
        default=4,
    )
    parser.add_argument(
        '--iters-per-epoch',
        type=int,
        default=int(1e9),
        # Fix: help text previously said 'number of epochs to train for',
        # a copy-paste error from an epochs flag.
        help='number of iterations per epoch',
    )
    parser.add_argument('--dataset', type=str, required=True, help="dataset path")
    parser.add_argument(
        '--dataset-type',
        type=str,
        default='shapenet',
        help="dataset type shapenet|modelnet40",
    )
    parser.add_argument(
        '--num-points',
        type=int,
        default=2500,
        help='num of points for dataset',
    )
    parser.add_argument(
        '--device',
        type=str,
        default='cuda',
        choices=['cpu', 'cuda', 'xla'],
        help="the device where this test is running",
    )
    parser.add_argument(
        '--amp',
        default=False,
        action='store_true',
        help='Enable AMP; only used when --device is cuda',
    )
    parser = attach_common_args(parser)
    return parser
if __name__ == '__main__':
    # Parse CLI args (script-specific + common tuning args).
    args = attach_args().parse_args()
    # Presumably regroups per-algorithm kwargs (hyperband/random) into dicts
    # consumed via args.hyperband_kwargs / args.random_kwargs above.
    rearrange_algorithm_kwargs(args)
    logging.basicConfig(level=extract_logging_level(args))
    # Normalize user-supplied paths (expand ~, make absolute).
    args.outdir = os.path.abspath(os.path.expanduser(args.outdir))
    args.dataset = os.path.abspath(os.path.expanduser(args.dataset))
    main(args)
| 29.570213
| 80
| 0.61534
| 880
| 6,949
| 4.693182
| 0.281818
| 0.017433
| 0.028329
| 0.013801
| 0.143584
| 0.104358
| 0.084262
| 0.05908
| 0.030508
| 0.030508
| 0
| 0.012228
| 0.258598
| 6,949
| 234
| 81
| 29.696581
| 0.789402
| 0.110088
| 0
| 0.115578
| 0
| 0
| 0.128431
| 0.003758
| 0
| 0
| 0
| 0
| 0.005025
| 1
| 0.025126
| false
| 0
| 0.065327
| 0
| 0.115578
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8ec1873a929e5565a9c1de6ad8321fa85a4a6d9
| 1,409
|
py
|
Python
|
tests/utils/dut.py
|
Ostrokrzew/standalone-linux-io-tracer
|
5fcbe7f0c7b027d9e5fdfb4c6e9d553c6fa617b6
|
[
"BSD-3-Clause-Clear"
] | 24
|
2019-05-09T08:36:46.000Z
|
2022-03-16T16:20:01.000Z
|
tests/utils/dut.py
|
Ostrokrzew/standalone-linux-io-tracer
|
5fcbe7f0c7b027d9e5fdfb4c6e9d553c6fa617b6
|
[
"BSD-3-Clause-Clear"
] | 122
|
2019-05-27T12:27:15.000Z
|
2020-07-31T06:45:08.000Z
|
tests/utils/dut.py
|
Ostrokrzew/standalone-linux-io-tracer
|
5fcbe7f0c7b027d9e5fdfb4c6e9d553c6fa617b6
|
[
"BSD-3-Clause-Clear"
] | 18
|
2019-05-27T09:31:56.000Z
|
2021-05-27T18:54:52.000Z
|
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run_utils import TestRun
from utils.installer import install_iotrace, check_if_installed
from utils.iotrace import IotracePlugin
from utils.misc import kill_all_io
from test_tools.fio.fio import Fio
def dut_prepare(reinstall: bool):
    """Prepare the device under test: ensure iotrace and fio are installed,
    clean leftovers from previous runs and stop any running IO."""
    needs_install = not check_if_installed() or reinstall
    if needs_install:
        TestRun.LOGGER.info("Installing iotrace:")
        install_iotrace()
    else:
        TestRun.LOGGER.info("iotrace is already installed by previous test")

    # Cleanup must happen after the install step: it needs iotrace present
    # to resolve valid paths.
    dut_cleanup()

    fio = Fio()
    if not fio.is_installed():
        TestRun.LOGGER.info("Installing fio")
        fio.install()

    TestRun.LOGGER.info("Killing all IO")
    kill_all_io()
def dut_cleanup():
    """Stop fuzzing and stray iotrace processes on the DUT, then wipe the
    kernel traces from the trace repository."""
    plugin: IotracePlugin = TestRun.plugins['iotrace']

    TestRun.LOGGER.info("Stopping fuzzing")
    fuzz_cmd = (f'{plugin.working_dir}/standalone-linux-io-tracer'
                f'/tests/security/fuzzy/fuzz.sh clean')
    TestRun.executor.run(fuzz_cmd)

    # Force-kill any iotrace processes that survived.
    pgrep_result = TestRun.executor.run('pgrep iotrace')
    if pgrep_result.stdout != "":
        TestRun.executor.run(f'kill -9 {pgrep_result.stdout}')

    TestRun.LOGGER.info("Removing existing traces")
    repo_path: str = plugin.get_trace_repository_path()
    TestRun.executor.run_expect_success(f'rm -rf {repo_path}/kernel')
| 30.630435
| 112
| 0.726757
| 189
| 1,409
| 5.275132
| 0.481481
| 0.078235
| 0.102307
| 0.054162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005119
| 0.168204
| 1,409
| 45
| 113
| 31.311111
| 0.845563
| 0.110007
| 0
| 0
| 0
| 0.035714
| 0.236568
| 0.085806
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8ecc8d3dac32d7fd54bf1a19d511383c8e5ce7f
| 355
|
py
|
Python
|
game_service.py
|
Drew8521/MusiQ
|
e52671c7dcc4f54f6cbb829486a733a9179575b1
|
[
"MIT"
] | null | null | null |
game_service.py
|
Drew8521/MusiQ
|
e52671c7dcc4f54f6cbb829486a733a9179575b1
|
[
"MIT"
] | 1
|
2019-08-09T21:36:33.000Z
|
2019-08-09T21:37:24.000Z
|
game_service.py
|
Drew8521/MusiQ
|
e52671c7dcc4f54f6cbb829486a733a9179575b1
|
[
"MIT"
] | null | null | null |
from models import Song
from random import choice
def random_song(genre):
    """Pick a random song of the given genre and return its display fields.

    Args:
        genre: genre string used to filter Song entities.

    Returns:
        dict with 'title', 'album', 'artist' (lower-cased) and 'genre'.

    Raises:
        IndexError: if no songs match the genre (random.choice of an empty
            fetch result).
    """
    results = Song.query().filter(Song.genre == genre).fetch()
    # Fix: removed the leftover debug print(results) and renamed the local,
    # which used to shadow this function's own name.
    song = choice(results)
    return {
        "title": song.song,
        "album": song.album,
        "artist": song.artist.lower(),
        "genre": genre,
    }
| 23.666667
| 60
| 0.622535
| 42
| 355
| 5.190476
| 0.452381
| 0.137615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.247887
| 355
| 14
| 61
| 25.357143
| 0.816479
| 0
| 0
| 0
| 0
| 0
| 0.059155
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.307692
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8ee134e47a471c9b912238f8dbcd8fb83c49b93
| 3,405
|
py
|
Python
|
libs/export_pbs/exportPb.py
|
linye931025/FPN_Tensorflow-master
|
e972496a798e9d77a74ddc6062d46b152d072ce7
|
[
"MIT"
] | null | null | null |
libs/export_pbs/exportPb.py
|
linye931025/FPN_Tensorflow-master
|
e972496a798e9d77a74ddc6062d46b152d072ce7
|
[
"MIT"
] | null | null | null |
libs/export_pbs/exportPb.py
|
linye931025/FPN_Tensorflow-master
|
e972496a798e9d77a74ddc6062d46b152d072ce7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os, sys
import tensorflow as tf
import tf_slim as slim
from tensorflow.python.tools import freeze_graph
sys.path.append('../../')
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.configs import cfgs
from libs.networks import build_whole_network
CKPT_PATH = '/home/yjr/PycharmProjects/Faster-RCNN_Tensorflow/output/trained_weights/FasterRCNN_20180517/voc_200000model.ckpt'
OUT_DIR = '../../output/Pbs'
PB_NAME = 'FasterRCNN_Res101_Pascal.pb'
def build_detection_graph():
    """Build the inference graph: uint8 image placeholder -> preprocessing ->
    detection network -> 'DetResults' tensor.

    Each output row is [category, score, xmin, ymin, xmax, ymax] with boxes
    rescaled back to the original (pre-resize) image coordinates.

    Returns:
        The DetResults tensor (shape [num_detections, 6]).
    """
    # 1. preprocess img
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3],
                              name='input_img')  # is RGB. not GBR
    raw_shape = tf.shape(img_plac)
    raw_h, raw_w = tf.to_float(raw_shape[0]), tf.to_float(raw_shape[1])

    img_batch = tf.cast(img_plac, tf.float32)
    # Resize so the short side matches cfgs.IMG_SHORT_SIDE_LEN, capped at
    # cfgs.IMG_MAX_LENGTH, then subtract the per-channel pixel mean.
    img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
                                                     target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
                                                     length_limitation=cfgs.IMG_MAX_LENGTH)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = tf.expand_dims(img_batch, axis=0)  # [1, None, None, 3]

    det_net = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
                                                   is_training=False)

    detected_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_batch=None)

    # Assumes detected_boxes columns are (xmin, ymin, xmax, ymax) -- the
    # rescaling below relies on that ordering. TODO confirm against det_net.
    xmin, ymin, xmax, ymax = detected_boxes[:, 0], detected_boxes[:, 1], \
                             detected_boxes[:, 2], detected_boxes[:, 3]

    resized_shape = tf.shape(img_batch)
    resized_h, resized_w = tf.to_float(resized_shape[1]), tf.to_float(resized_shape[2])

    # Map boxes from resized-image coordinates back to raw-image coordinates.
    xmin = xmin * raw_w / resized_w
    xmax = xmax * raw_w / resized_w
    ymin = ymin * raw_h / resized_h
    ymax = ymax * raw_h / resized_h

    boxes = tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
    # Final layout: [category | score | box]; the node name 'DetResults' is
    # what export_frozenPB freezes on.
    dets = tf.concat([tf.reshape(detection_category, [-1, 1]),
                      tf.reshape(detection_scores, [-1, 1]),
                      boxes], axis=1, name='DetResults')

    return dets
def export_frozenPB():
    """Restore CKPT_PATH into a fresh detection graph, write the GraphDef to
    OUT_DIR/PB_NAME, then freeze it (weights baked into constants) as
    OUT_DIR/<PB_NAME>_Frozen.pb keeping only the 'DetResults' output node."""
    tf.reset_default_graph()

    dets = build_detection_graph()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        print("we have restred the weights from =====>>\n", CKPT_PATH)
        saver.restore(sess, CKPT_PATH)

        # First dump the (unfrozen) graph definition, then freeze it against
        # the checkpoint.
        tf.train.write_graph(sess.graph_def, OUT_DIR, PB_NAME)
        freeze_graph.freeze_graph(input_graph=os.path.join(OUT_DIR, PB_NAME),
                                  input_saver='',
                                  input_binary=False,
                                  input_checkpoint=CKPT_PATH,
                                  output_node_names="DetResults",
                                  restore_op_name="save/restore_all",
                                  filename_tensor_name='save/Const:0',
                                  output_graph=os.path.join(OUT_DIR, PB_NAME.replace('.pb', '_Frozen.pb')),
                                  clear_devices=False,
                                  initializer_nodes='')
if __name__ == '__main__':
    # Hide all GPUs: graph construction and freezing run fine on CPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = ''
    export_frozenPB()
| 38.693182
| 126
| 0.612335
| 426
| 3,405
| 4.553991
| 0.368545
| 0.041237
| 0.018557
| 0.018557
| 0.098969
| 0.059794
| 0.027835
| 0.027835
| 0
| 0
| 0
| 0.016393
| 0.283407
| 3,405
| 87
| 127
| 39.137931
| 0.778689
| 0.021733
| 0
| 0
| 0
| 0
| 0.090499
| 0.041792
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.129032
| 0
| 0.177419
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8eef0cd263627a15c156d8fca2fb80f3faea6c2
| 983
|
py
|
Python
|
ngadnap/command_templates/adapter_removal.py
|
smilefreak/NaDNAP
|
18354778dd896bc0ab3456ca7dbb9d194c1ebf4d
|
[
"MIT"
] | null | null | null |
ngadnap/command_templates/adapter_removal.py
|
smilefreak/NaDNAP
|
18354778dd896bc0ab3456ca7dbb9d194c1ebf4d
|
[
"MIT"
] | null | null | null |
ngadnap/command_templates/adapter_removal.py
|
smilefreak/NaDNAP
|
18354778dd896bc0ab3456ca7dbb9d194c1ebf4d
|
[
"MIT"
] | null | null | null |
"""
Adapter Removal templates
"""
# AdapterRemoval
#
# {0}: executable
# {1}: fastq1 abs
# {2}: fastq2 abs
# {3}: fastq1
# {4}: fastq2
# {5}: minimum length
# {6}: mismatch_rate
# {7}: min base quality
# {8}: min merge_length
__ADAPTER_REMOVAL__="""
{0} --collapse --file1 {1} --file2 {2} --outputstats {3}.stats --trimns --outputcollapsed {3}.collapsed --minlength {5} --output1 {3}.p1 --output2 {4}.p2 --mm {6} --minquality {7} --minalignmentlength {8} --trimqualities
"""
import os
from ngadnap.dependency_graph.graph import CommandNode
def adapter_removal(config, args, fq1, fq2):
    """Build a CommandNode that runs AdapterRemoval on a fastq pair.

    fq1/fq2 are used both as absolute paths (inputs) and as-given (output
    prefixes); thresholds come from the parsed CLI args.
    """
    abs_fq1 = os.path.abspath(fq1)
    abs_fq2 = os.path.abspath(fq2)
    cmd = __ADAPTER_REMOVAL__.format(
        config['adapter_removal']['executable'],
        abs_fq1,
        abs_fq2,
        fq1,
        fq2,
        args.adapt_min_length,
        args.adapt_mismatch_rate,
        args.adapt_min_qual,
        args.adapt_alignment_length,
    )
    return CommandNode(cmd, fq1 + ".adapter_removal", None, args.temp_directory)
| 31.709677
| 225
| 0.678535
| 128
| 983
| 5
| 0.539063
| 0.13125
| 0.040625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049818
| 0.162767
| 983
| 30
| 226
| 32.766667
| 0.727825
| 0.201424
| 0
| 0
| 0
| 0.090909
| 0.351245
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8f15b0752a64958efc156868083500a63e94dc1
| 1,745
|
py
|
Python
|
undercloud_heat_plugins/immutable_resources.py
|
AllenJSebastian/tripleo-common
|
d510a30266e002e90c358e69cb720bfdfa736134
|
[
"Apache-2.0"
] | 52
|
2015-04-17T12:06:09.000Z
|
2021-11-23T09:46:30.000Z
|
undercloud_heat_plugins/immutable_resources.py
|
AllenJSebastian/tripleo-common
|
d510a30266e002e90c358e69cb720bfdfa736134
|
[
"Apache-2.0"
] | null | null | null |
undercloud_heat_plugins/immutable_resources.py
|
AllenJSebastian/tripleo-common
|
d510a30266e002e90c358e69cb720bfdfa736134
|
[
"Apache-2.0"
] | 47
|
2015-10-09T15:22:38.000Z
|
2021-04-22T04:35:57.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from heat.engine.resources.openstack.neutron import net
from heat.engine.resources.openstack.neutron import port
from heat.engine.resources.openstack.neutron import subnet
def _copy_schema_immutable(schema):
new_schema = copy.deepcopy(schema)
if not schema.update_allowed:
new_schema.immutable = True
return new_schema
class ImmutableNet(net.Net):
    '''Ensure an existing net doesn't change.'''

    # Same schema as net.Net, but every non-updatable property is marked
    # immutable so Heat rejects stack updates that try to change it.
    properties_schema = {
        k: _copy_schema_immutable(v)
        for k, v in net.Net.properties_schema.items()
    }
class ImmutablePort(port.Port):
    '''Ensure an existing port doesn't change.'''

    # Same schema as port.Port, but every non-updatable property is marked
    # immutable so Heat rejects stack updates that try to change it.
    properties_schema = {
        k: _copy_schema_immutable(v)
        for k, v in port.Port.properties_schema.items()
    }
class ImmutableSubnet(subnet.Subnet):
    '''Ensure an existing subnet doesn't change.'''

    # Same schema as subnet.Subnet, but every non-updatable property is marked
    # immutable so Heat rejects stack updates that try to change it.
    properties_schema = {
        k: _copy_schema_immutable(v)
        for k, v in subnet.Subnet.properties_schema.items()
    }
def resource_mapping():
    """Heat plugin hook: map Neutron resource type names to the immutable
    overrides defined in this module."""
    return {
        'OS::Neutron::Net': ImmutableNet,
        'OS::Neutron::Port': ImmutablePort,
        'OS::Neutron::Subnet': ImmutableSubnet,
    }
| 28.145161
| 78
| 0.696848
| 231
| 1,745
| 5.164502
| 0.411255
| 0.050293
| 0.063705
| 0.057837
| 0.253982
| 0.253982
| 0.253982
| 0.140821
| 0.140821
| 0.140821
| 0
| 0.002907
| 0.211461
| 1,745
| 61
| 79
| 28.606557
| 0.864099
| 0.384527
| 0
| 0.2
| 0
| 0
| 0.049666
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0.033333
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8f2378998282c62f5eff079407d0b48e7bea81d
| 2,154
|
py
|
Python
|
slybot/setup.py
|
DataKnower/dk-portia
|
24579c0160167af2442117975bf7d6a714b4d7d5
|
[
"BSD-3-Clause"
] | null | null | null |
slybot/setup.py
|
DataKnower/dk-portia
|
24579c0160167af2442117975bf7d6a714b4d7d5
|
[
"BSD-3-Clause"
] | null | null | null |
slybot/setup.py
|
DataKnower/dk-portia
|
24579c0160167af2442117975bf7d6a714b4d7d5
|
[
"BSD-3-Clause"
] | null | null | null |
from os.path import join, abspath, dirname, exists
from slybot import __version__
from setuptools import setup, find_packages
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist
def build_js():
    """Concatenate the splash helper scripts into
    slybot/splash-script-combined.js, wrapped in an IIFE."""
    here = abspath(dirname(__file__))

    # splash_utils lives either next to this package or under slyd/.
    src_dir = abspath(join(here, '..', 'splash_utils'))
    if not exists(src_dir):
        src_dir = abspath(join(here, '..', 'slyd', 'splash_utils'))

    contents = []
    for script in ('waitAsync.js', 'perform_actions.js'):
        with open(join(src_dir, script)) as fh:
            contents.append(fh.read())

    target = abspath(join(here, 'slybot', 'splash-script-combined.js'))
    with open(target, 'w') as out:
        out.write(';(function(){\n%s\n})();' % '\n'.join(contents))
class bdist_egg_command(bdist_egg):
    # bdist_egg that regenerates the combined splash JS bundle before packaging.
    def run(self):
        build_js()
        bdist_egg.run(self)
class sdist_command(sdist):
    # sdist that regenerates the combined splash JS bundle before packaging.
    def run(self):
        build_js()
        sdist.run(self)
# Runtime dependencies of the slybot package.
install_requires = ['Scrapy', 'scrapely', 'loginform', 'lxml', 'jsonschema',
                    'dateparser', 'scrapyjs', 'page_finder', 'six']
# Optional extras: pip install slybot[tests] / slybot[clustering].
extras = {
    'tests': ['nose', 'nose-timer'],
    'clustering': ['page_clustering']
}

setup(name='slybot',
      version=__version__,
      license='BSD',
      description='Slybot crawler',
      author='Scrapy project',
      author_email='info@scrapy.org',
      url='http://github.com/scrapinghub/portia',
      packages=find_packages(exclude=('tests', 'tests.*')),
      platforms=['Any'],
      scripts=['bin/slybot', 'bin/portiacrawl'],
      install_requires=install_requires,
      extras_require=extras,
      # Ship the JS bundle that build_js() generates.
      package_data={'': ['slybot/splash-script-combined.js']},
      include_package_data=True,
      # Custom commands regenerate the combined splash JS before packaging.
      cmdclass={
          'bdist_egg': bdist_egg_command,
          'sdist': sdist_command
      },
      classifiers=[
          'Development Status :: 4 - Beta',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7'
      ])
| 29.916667
| 76
| 0.615135
| 245
| 2,154
| 5.216327
| 0.461224
| 0.043818
| 0.035211
| 0.029734
| 0.106416
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002414
| 0.230734
| 2,154
| 71
| 77
| 30.338028
| 0.768859
| 0
| 0
| 0.068966
| 0
| 0
| 0.283658
| 0.037604
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0
| 0.086207
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8f25149f3eefd3629cc486cf987c4d8a9a5bbb9
| 3,846
|
py
|
Python
|
yolov3.py
|
huhuhang/yolov3
|
6c254b3f453c394046381e1c00cb0908b8f97b3a
|
[
"MIT"
] | 35
|
2018-10-12T06:33:09.000Z
|
2022-02-25T03:19:37.000Z
|
yolov3.py
|
huhuhang/yolov3
|
6c254b3f453c394046381e1c00cb0908b8f97b3a
|
[
"MIT"
] | 1
|
2019-08-31T16:05:12.000Z
|
2020-01-05T15:34:54.000Z
|
yolov3.py
|
huhuhang/yolov3
|
6c254b3f453c394046381e1c00cb0908b8f97b3a
|
[
"MIT"
] | 14
|
2018-12-10T22:48:51.000Z
|
2021-11-18T20:56:38.000Z
|
import torch
import torch.nn as nn
from .yolo_layer import *
from .yolov3_base import *
class Yolov3(Yolov3Base):
    """YOLOv3 detector: Darknet backbone plus three YOLO heads at strides
    32, 16 and 8 (coarse to fine), connected by upsample-and-concat."""

    def __init__(self, num_classes=80):
        super().__init__()
        self.backbone = Darknet([1,2,8,8,4])

        # Each head outputs anchors_per_region * (5 + num_classes) channels:
        # presumably 4 box coords + 1 objectness + class scores per anchor.
        anchors_per_region = 3
        self.yolo_0_pre = Yolov3UpsamplePrep([512, 1024], 1024, anchors_per_region*(5+num_classes))
        self.yolo_0 = YoloLayer(anchors=[(116., 90.), (156., 198.), (373., 326.)], stride=32, num_classes=num_classes)

        self.yolo_1_c = ConvBN(512, 256, 1)
        self.yolo_1_prep = Yolov3UpsamplePrep([256, 512], 512+256, anchors_per_region*(5+num_classes))
        self.yolo_1 = YoloLayer(anchors=[(30., 61.), (62., 45.), (59., 119.)], stride=16, num_classes=num_classes)

        self.yolo_2_c = ConvBN(256, 128, 1)
        self.yolo_2_prep = Yolov3UpsamplePrep([128, 256], 256+128, anchors_per_region*(5+num_classes))
        self.yolo_2 = YoloLayer(anchors=[(10., 13.), (16., 30.), (33., 23.)], stride=8, num_classes=num_classes)

    def get_loss_layers(self):
        """Return the three YoloLayer heads."""
        return [self.yolo_0, self.yolo_1, self.yolo_2]

    def forward_yolo(self, xb):
        """Run the three heads over the backbone feature pyramid.

        Args:
            xb: list of backbone feature maps, deepest last (assumes the
                output of Darknet.forward -- TODO confirm via Yolov3Base).

        Returns:
            [y0, y1, y2]: raw head outputs, coarsest first.
        """
        x, y0 = self.yolo_0_pre(xb[-1])

        x = self.yolo_1_c(x)
        x = nn.Upsample(scale_factor=2, mode='nearest')(x)
        # Fuse upsampled deep features with the next-shallower backbone map.
        x = torch.cat([x, xb[-2]], 1)
        x, y1 = self.yolo_1_prep(x)

        x = self.yolo_2_c(x)
        x = nn.Upsample(scale_factor=2, mode='nearest')(x)
        x = torch.cat([x, xb[-3]], 1)
        x, y2 = self.yolo_2_prep(x)

        return [y0, y1, y2]
###################################################################
## Backbone and helper modules
class DarknetBlock(nn.Module):
    """Residual block: 1x1 bottleneck to ch_in//2, 3x3 back to ch_in,
    plus an additive skip connection."""

    def __init__(self, ch_in):
        super().__init__()
        ch_hid = ch_in//2
        self.conv1 = ConvBN(ch_in, ch_hid, kernel_size=1, stride=1, padding=0)
        self.conv2 = ConvBN(ch_hid, ch_in, kernel_size=3, stride=1, padding=1)

    def forward(self, x): return self.conv2(self.conv1(x)) + x
class Darknet(nn.Module):
    """Darknet backbone: a stem ConvBN followed by one stage per entry of
    num_blocks. Each stage downsamples by 2 and doubles the channel count,
    then stacks residual DarknetBlocks. forward returns every stage output."""

    def __init__(self, num_blocks, start_nf=32):
        """
        Args:
            num_blocks: residual-block count per stage, e.g. [1,2,8,8,4].
            start_nf: stem channel count; doubled after each stage.
        """
        super().__init__()
        nf = start_nf
        self.base = ConvBN(3, nf, kernel_size=3, stride=1) #, padding=1)
        self.layers = []
        for i, nb in enumerate(num_blocks):
            # dn_layer = make_group_layer(nf, nb, stride=(1 if i==-1 else 2))
            dn_layer = self.make_group_layer(nf, nb, stride=2)
            # add_module registers the stage so its parameters are tracked.
            self.add_module(f"darknet_{i}", dn_layer)
            self.layers.append(dn_layer)
            nf *= 2

    def make_group_layer(self, ch_in, num_blocks, stride=2):
        # One stage: strided ConvBN doubling channels, then num_blocks
        # residual blocks at the new width.
        layers = [ConvBN(ch_in, ch_in*2, stride=stride)]
        for i in range(num_blocks): layers.append(DarknetBlock(ch_in*2))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Collect the stem output plus every stage output; the Yolov3 heads
        # index this list from the end (xb[-1], xb[-2], xb[-3]).
        y = [self.base(x)]
        for l in self.layers:
            y.append(l(y[-1]))
        return y
class Yolov3UpsamplePrep(nn.Module):
    """Head neck: five alternating 1x1/3x3 ConvBN layers (the 'branch'),
    then a 3x3 ConvBN + 1x1 conv producing the raw prediction map.

    forward returns (branch_out, prediction): branch_out feeds the upsample
    path toward the next, finer head (see Yolov3.forward_yolo); prediction
    has out_filters channels for the YoloLayer.
    """

    def __init__(self, filters_list, in_filters, out_filters):
        super().__init__()
        self.branch = nn.ModuleList([
            ConvBN(in_filters, filters_list[0], 1),
            ConvBN(filters_list[0], filters_list[1], kernel_size=3),
            ConvBN(filters_list[1], filters_list[0], kernel_size=1),
            ConvBN(filters_list[0], filters_list[1], kernel_size=3),
            ConvBN(filters_list[1], filters_list[0], kernel_size=1),])
        self.for_yolo = nn.ModuleList([
            ConvBN(filters_list[0], filters_list[1], kernel_size=3),
            # Plain conv (no BN) with bias for the final prediction channels.
            nn.Conv2d(filters_list[1], out_filters, kernel_size=1, stride=1,
                      padding=0, bias=True)])

    def forward(self, x):
        for m in self.branch: x = m(x)
        branch_out = x
        for m in self.for_yolo: x = m(x)
        return branch_out, x
| 38.079208
| 119
| 0.578783
| 556
| 3,846
| 3.748201
| 0.208633
| 0.06142
| 0.025912
| 0.043186
| 0.335893
| 0.296065
| 0.246161
| 0.196257
| 0.145873
| 0.145873
| 0
| 0.06742
| 0.263391
| 3,846
| 100
| 120
| 38.46
| 0.668196
| 0.026781
| 0
| 0.146667
| 0
| 0
| 0.006812
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.053333
| 0.026667
| 0.306667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8f43d95779ee26635e6e7c26bda70278bc13afd
| 3,915
|
py
|
Python
|
tests/queries/test_query.py
|
txf626/django
|
95bda03f2da15172cf342f13ba8a77c007b63fbb
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2
|
2019-02-28T12:38:32.000Z
|
2019-09-30T08:08:16.000Z
|
tests/queries/test_query.py
|
Scheldon/django
|
11a9017179812198a12a2fc19610262a549aa46e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 57
|
2018-10-08T12:37:30.000Z
|
2018-10-08T17:39:26.000Z
|
tests/queries/test_query.py
|
Scheldon/django
|
11a9017179812198a12a2fc19610262a549aa46e
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2021-06-21T07:51:09.000Z
|
2021-06-21T07:51:09.000Z
|
from datetime import datetime
from django.core.exceptions import FieldError
from django.db.models import CharField, F, Q
from django.db.models.expressions import SimpleCol
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.query import Query
from django.db.models.sql.where import OR
from django.test import TestCase
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
class TestQuery(TestCase):
    """Unit tests for Query.build_where: verifies the lookup classes, targets
    and connectors produced from Q objects, without touching the database."""

    def test_simple_query(self):
        query = Query(Author)
        where = query.build_where(Q(num__gt=2))
        lookup = where.children[0]
        self.assertIsInstance(lookup, GreaterThan)
        self.assertEqual(lookup.rhs, 2)
        self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))

    def test_complex_query(self):
        # OR of two conditions becomes a node with connector OR and one
        # lookup child per condition.
        query = Query(Author)
        where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
        self.assertEqual(where.connector, OR)

        lookup = where.children[0]
        self.assertIsInstance(lookup, GreaterThan)
        self.assertEqual(lookup.rhs, 2)
        self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))

        lookup = where.children[1]
        self.assertIsInstance(lookup, LessThan)
        self.assertEqual(lookup.rhs, 0)
        self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))

    def test_multiple_fields(self):
        # F() references resolve to SimpleCol on both sides of the lookup.
        query = Query(Item)
        where = query.build_where(Q(modified__gt=F('created')))
        lookup = where.children[0]
        self.assertIsInstance(lookup, GreaterThan)
        self.assertIsInstance(lookup.rhs, SimpleCol)
        self.assertIsInstance(lookup.lhs, SimpleCol)
        self.assertEqual(lookup.rhs.target, Item._meta.get_field('created'))
        self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))

    def test_transform(self):
        # A registered transform (Lower) wraps the column inside the lookup.
        query = Query(Author)
        with register_lookup(CharField, Lower):
            where = query.build_where(~Q(name__lower='foo'))
        lookup = where.children[0]
        self.assertIsInstance(lookup, Exact)
        self.assertIsInstance(lookup.lhs, Lower)
        self.assertIsInstance(lookup.lhs.lhs, SimpleCol)
        self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field('name'))

    def test_negated_nullable(self):
        # Negating a condition on a nullable field adds an IS NULL check so
        # NULL rows are matched by the negation.
        query = Query(Item)
        where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
        self.assertTrue(where.negated)
        lookup = where.children[0]
        self.assertIsInstance(lookup, LessThan)
        self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
        lookup = where.children[1]
        self.assertIsInstance(lookup, IsNull)
        self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))

    def test_foreign_key(self):
        # Traversing a relation requires a join, which build_where forbids.
        query = Query(Item)
        msg = 'Joined field references are not permitted in this query'
        with self.assertRaisesMessage(FieldError, msg):
            query.build_where(Q(creator__num__gt=2))

    def test_foreign_key_f(self):
        query = Query(Ranking)
        with self.assertRaises(FieldError):
            query.build_where(Q(rank__gt=F('author__num')))

    def test_foreign_key_exclusive(self):
        # FK IS NULL comparisons are allowed (no join needed).
        query = Query(ObjectC)
        where = query.build_where(Q(objecta=None) | Q(objectb=None))
        a_isnull = where.children[0]
        self.assertIsInstance(a_isnull, RelatedIsNull)
        self.assertIsInstance(a_isnull.lhs, SimpleCol)
        self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field('objecta'))
        b_isnull = where.children[1]
        self.assertIsInstance(b_isnull, RelatedIsNull)
        self.assertIsInstance(b_isnull.lhs, SimpleCol)
        self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field('objectb'))
| 41.648936
| 81
| 0.696552
| 489
| 3,915
| 5.421268
| 0.186094
| 0.113165
| 0.107884
| 0.048284
| 0.523576
| 0.408148
| 0.397963
| 0.288948
| 0.288948
| 0.265937
| 0
| 0.006969
| 0.193614
| 3,915
| 93
| 82
| 42.096774
| 0.832753
| 0
| 0
| 0.325
| 0
| 0
| 0.034227
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.2625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8f4457783432480005e18ff932b887d871f9663
| 16,356
|
py
|
Python
|
src/matrix_game/matrix_game.py
|
ewanlee/mackrl
|
6dd505aa09830f16c35a022f67e255db935c807e
|
[
"Apache-2.0"
] | 26
|
2019-10-28T09:01:45.000Z
|
2021-09-20T08:56:12.000Z
|
src/matrix_game/matrix_game.py
|
ewanlee/mackrl
|
6dd505aa09830f16c35a022f67e255db935c807e
|
[
"Apache-2.0"
] | 1
|
2020-07-25T06:50:05.000Z
|
2020-07-25T06:50:05.000Z
|
src/matrix_game/matrix_game.py
|
ewanlee/mackrl
|
6dd505aa09830f16c35a022f67e255db935c807e
|
[
"Apache-2.0"
] | 6
|
2019-12-18T12:02:57.000Z
|
2021-03-03T13:15:47.000Z
|
# This notebook implements a proof-of-principle for
# Multi-Agent Common Knowledge Reinforcement Learning (MACKRL)
# The entire notebook can be executed online, no need to download anything
# http://pytorch.org/
from itertools import chain
import torch
import torch.nn.functional as F
from torch.multiprocessing import Pool, set_start_method, freeze_support
try:
set_start_method('spawn')
except RuntimeError:
pass
from torch.nn import init
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plt
use_cuda = False

# The two 5x5 matrix games; entries scaled by 0.2 so the max payoff is 1.0.
payoff_values = []
payoff_values.append(torch.tensor([ # payoff values
    [5, 0, 0, 2, 0],
    [0, 1, 2, 4, 2],
    [0, 0, 0, 2, 0],
    [0, 0, 0, 1, 0],
    [0, 0, 0, 0, 0],
], dtype=torch.float32) * 0.2)
payoff_values.append(
    torch.tensor([ # payoff values
        [0, 0, 1, 0, 5],
        [0, 0, 2, 0, 0],
        [1, 2, 4, 2, 1],
        [0, 0, 2, 0, 0],
        [0, 0, 1, 0, 0],
    ], dtype=torch.float32) * 0.2)

n_agents = 2
n_actions = len(payoff_values[0])
# Decentralised observation states; joint (pair-controller) CK states.
n_states_dec = 5
n_states_joint = 3
n_mix_hidden = 3

# Probability each agent privately observes the matrix id (without CK).
p_observation = 0.5
# Common-knowledge noise levels to sweep.
p_ck_noise = [0.0]

# Number of gradient steps
t_max = 202

# We'll be using a high learning rate, since we have exact gradients
lr = 0.05 # DEBUG: 0.05 if exact gradients!
optim = 'adam'

# You can reduce this number if you are short on time. (Eg. n_trials = 20)
#n_trials = 100 # 30
n_trials = 20 #15 #100
std_val = 1.0

# These are the 3 settings we run: MACRKL, Joint-action-learner (always uses CK),
# Independent Actor-Critic (always uses decentralised actions selection)
labels = ["IAC", "JAL"]
# Sweep over the probability that the matrix id is common knowledge.
p_vec = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]

final_res = []

# # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for
# # joint action + 1 action for delegation to the independent agents.
# theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1)
def p_joint_all(pi_pc, pi_dec):
    """Marginalised joint policy: pi_pc[0] * (pi^a outer pi^b) + pi_pc[1:].

    The pair-controller policy has n_actions**2 + 1 entries: index 0 is the
    probability of delegating to the decentralised policies; the rest are the
    probabilities of each joint action.

    Args:
        pi_pc: 1-D tensor of length n_actions**2 + 1.
        pi_dec: sequence of two 1-D per-agent action distributions, each of
            length n_actions.

    Returns:
        (n_actions, n_actions) tensor of joint-action probabilities.
    """
    # Infer the action count from pi_dec instead of relying on the module
    # global n_actions; equivalent for all callers in this file (their
    # decentralised policies have n_actions entries) and lets the helper
    # work standalone.
    num_actions = pi_dec[0].shape[0]
    p_joint = pi_pc[1:].view(num_actions, num_actions).clone()
    pi_a_pi_b = torch.ger(pi_dec[0], pi_dec[1])
    p_joint = pi_pc[0] * pi_a_pi_b + p_joint
    return p_joint
def p_joint_all_noise_alt(pi_pcs, pi_dec, p_ck_noise, ck_state):
    """Marginalised joint policy when each agent's CK signal is independently
    noised with probability p_ck_noise.

    Args:
        pi_pcs: pair-controller policies, one per joint CK state (0: no CK;
            1/2: CK with matrix id 0/1). Each has n_actions**2 + 1 entries,
            entry 0 being the delegate action.
        pi_dec: the two decentralised per-agent policies.
        p_ck_noise: per-agent probability that the CK signal is corrupted.
        ck_state: the true CK state (0, 1 or 2).

    Returns:
        (n_actions, n_actions) joint-action distribution mixing the cases
        "neither / exactly one / both agents got a noised CK signal".
    """
    p_none = (1-p_ck_noise) ** 2 # both unnoised
    p_both = (p_ck_noise) ** 2 # both noised
    p_one = (1-p_ck_noise) * p_ck_noise # exactly one noised

    # Per-agent marginals of the joint policies for the two informative CK
    # states, used when the agents end up acting under different CK beliefs.
    p_marg_ag0_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=0)
    p_marg_ag0_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=0)
    p_marg_ag1_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=1)
    p_marg_ag1_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=1)

    # Joint-action parts and delegation probabilities per CK state.
    p_joint_ck0 = pi_pcs[0][1:].view(n_actions, n_actions).clone()
    p_joint_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone()
    p_joint_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone()
    p_d_ck0 = pi_pcs[0][0]
    p_d_ck1 = pi_pcs[1][0]
    p_d_ck2 = pi_pcs[2][0]

    def make_joint(p1, p2, mode="interval"):
        """
        1. Pick uniform random variable between [0,1]
        2. Do multinomial sampling through contiguous, ordered bucketing for both p1, p2
        """
        p1 = p1.clone().view(-1)
        p2 = p2.clone().view(-1)
        p_final = p1.clone().zero_()
        if mode == "interval":
            for i in range(p1.shape[0]):
                # calculate overlap between the probability distributions
                low1 = torch.sum(p1[:i])
                high1 = low1 + p1[i]
                low2 = torch.sum(p2[:i])
                high2 = low2 + p2[i]
                if low1 >= low2 and high2 > low1:
                    p_final[i] = torch.min(high1, high2) - low1
                    pass
                elif low2 >= low1 and high1 > low2:
                    p_final[i] = torch.min(high1, high2) - low2
                else:
                    p_final[i] = 0
        return p_final.clone().view(n_actions, n_actions)

    if ck_state == 0:
        # No common knowledge: noise is irrelevant; mix joint part with
        # delegated independent actions.
        p_joint = p_joint_ck0 + p_d_ck0 * torch.ger(pi_dec[0], pi_dec[1])
        return p_joint # always delegate
    elif ck_state == 1:
        # Weighted mixture over which agents saw the true CK state 1 vs the
        # (noised) state 2: joint/joint, joint/delegate and delegate/delegate
        # combinations, using marginals when exactly one agent delegates.
        p_joint = p_none * p_joint_ck1 + \
                  p_both * p_joint_ck2 + \
                  p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
                  p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
                  (p_one * p_d_ck1 * p_d_ck2
                   + p_one * p_d_ck2 * p_d_ck1
                   + p_both * p_d_ck2
                   + p_none * p_d_ck1) * torch.ger(pi_dec[0], pi_dec[1]) \
                  + p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
                  + p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1]) \
                  + p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
                  + p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1])
        return p_joint
    elif ck_state == 2:
        # Mirror image of the ck_state == 1 case with states 1 and 2 swapped.
        p_joint = p_none * p_joint_ck2 + \
                  p_both * p_joint_ck1 + \
                  p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
                  p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
                  (p_one * p_d_ck2 * p_d_ck1
                   + p_one * p_d_ck1 * p_d_ck2
                   + p_both * p_d_ck1
                   + p_none * p_d_ck2) * torch.ger(pi_dec[0], pi_dec[1]) \
                  + p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
                  + p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1]) \
                  + p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
                  + p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1])
        return p_joint
    pass
def get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise=0):
    """Compute pair-controller and decentralised policies from the logits.

    Args:
        common_knowledge: 1 if the matrix id is common knowledge, else 0.
        observations: per-agent observation indices (see get_state).
        run: setting label; 'JAL' masks the delegate action, 'IAC' masks all
            joint actions (so the pair controller always delegates).
        test: if True, a large inverse-temperature (beta=100) makes the
            softmax policies near-greedy for evaluation.
        thetas_dec: per-agent decentralised policy logits, indexed by
            observation state.
        theta_joint: pair-controller logits, one row per joint CK state.
        p_ck_noise: CK-noise probability; nonzero switches to the noise-aware
            return format.

    Returns:
        (pi_pc, pi_dec) when p_ck_noise == 0, otherwise
        (pi_pcs, pi_dec, ck_state) with one pair-controller policy per CK
        state plus the true CK state.
    """
    if test:
        beta = 100
    else:
        beta = 1
    actions = []
    pi_dec = []
    # common_knowledge decides whether ck_state is informative
    if common_knowledge == 0:
        ck_state = 0
    else:
        # Under CK, observations[0] is the matrix id, so ck_state is 1 or 2.
        ck_state = int(observations[0] + 1)

    if p_ck_noise == 0:
        pol_vals = theta_joint[ck_state, :].clone()
        # logits get masked out for independent learner and joint-action-learner
        # independent learner has a pair controller that always delegates
        if run == 'JAL':
            pol_vals[0] = -10 ** 10
        elif run == 'IAC':
            pol_vals[1:] = -10 ** 10

        # apply temperature to set testing
        pi_pc = F.softmax(pol_vals * beta, -1)

        # calculate decentralised policies
        for i in range(n_agents):
            dec_state = int(observations[i])
            pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
            pi_dec.append(pi)

        return pi_pc, pi_dec
    else:
        # Noisy-CK variant: compute the pair-controller policy for every CK
        # state, since either agent may be acting under a corrupted signal.
        pol_vals = theta_joint.clone()
        pi_pcs = []
        for i in range(n_states_joint):
            if run == 'JAL':
                pol_vals[i][0] = -10 ** 10
            elif run == 'IAC':
                pol_vals[i][1:] = -10 ** 10
            # apply temperature to set testing
            pi_pcs.append(F.softmax(pol_vals[i] * beta, -1))

        # calculate decentralised policies
        for i in range(n_agents):
            dec_state = int(observations[i])
            pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
            pi_dec.append(pi)

        return pi_pcs, pi_dec, ck_state
def get_state(common_knowledge, obs_0, obs_1, matrix_id):
    """Build the per-agent observations for one sampled world state.

    With common knowledge both agents directly observe matrix_id.
    Otherwise each agent starts from the uninformative value 2, bumped to
    matrix_id + 3 for an agent that privately receives the observation.
    """
    if common_knowledge == 1:
        observations = np.repeat(matrix_id, 2)
    else:
        private_obs = [obs_0, obs_1]
        observations = np.ones((n_agents)) * 2
        for ag in range(n_agents):
            if private_obs[ag]:
                observations[ag] += matrix_id + 1
    return common_knowledge, observations, matrix_id
# Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau)
def expected_return(p_common, p_observation, thetas, run, test, p_ck_noise=0):
    """Exactly enumerate the expected return of the current policies.

    Sums P(state) * E[reward | state, policies] over every combination of
    common-knowledge flag, payoff matrix, and the two per-agent observation
    bits.  p_ck_noise selects between the noiseless and noisy
    joint-action-probability models (p_joint_all / p_joint_all_noise_alt,
    defined elsewhere in this file).
    """
    thetas_dec = thetas["dec"]
    theta_joint = thetas["joint"]
    # Probability of CK
    p_common_val = [1 - p_common, p_common]
    # Probability of observation given no CK
    p_obs_val = [1 - p_observation, p_observation]
    # Matrices are chosen 50 / 50
    p_matrix = [0.5, 0.5]
    # p_matrix = [1.0, 0.0] # DEBUG!
    # Initialise expected return
    ret_val = 0
    # Enumerate every world state; each is weighted by its prior probability.
    for ck in [0, 1]:
        for matrix_id in [0, 1]:
            for obs_0 in [0, 1]:
                for obs_1 in [0, 1]:
                    p_state = p_common_val[ck] * p_obs_val[obs_0] * p_obs_val[obs_1] * p_matrix[matrix_id]
                    common_knowledge, observations, matrix_id = get_state(ck, obs_0, obs_1, matrix_id)
                    # Get final probabilities for joint actions
                    if p_ck_noise==0:
                        pi_pc, pi_dec = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint)
                        p_joint_val = p_joint_all(pi_pc, pi_dec)
                    else:
                        pol_vals, pi_dec, ck_state = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise)
                        p_joint_val = p_joint_all_noise_alt(pol_vals, pi_dec, p_ck_noise, ck_state)
                    # Expected return is just the elementwise product of rewards and action probabilities
                    expected_ret = (p_joint_val * payoff_values[matrix_id]).sum()
                    # Add return from given state
                    ret_val = ret_val + p_state * expected_ret
    return ret_val
def _proc(args):
    """Worker: train one (p_common, p_ck_noise) configuration for n_trials.

    args is (p_common, p_observation, run, p_ck_noise, t_max, n_trials).
    Each trial runs t_max gradient steps on freshly initialised parameter
    tables and records ~100 evenly spaced train/test return measurements.

    Returns a list of (results_log_test, results_log) array pairs, one
    per trial.

    Raises ValueError for an unknown run label (the original code hit an
    undefined `loss` NameError in that case).
    """
    p_common, p_observation, run, p_ck_noise, t_max, n_trials = args
    if run not in ('MACKRL', 'JAL', 'IAC'):
        # Fail fast instead of reaching loss.backward() with `loss` unbound.
        raise ValueError("unknown run label: {}".format(run))
    log_interval = t_max // 100  # record ~100 points over training
    results = []
    for nt in range(n_trials):
        print("Run: {} P_CK_NOISE: {} P_common: {} #Trial: {}".format(run, p_ck_noise, p_common, nt))
        results_log = np.zeros((t_max // log_interval,))
        results_log_test = np.zeros((t_max // log_interval,))
        # Decentralised tables (one per agent) plus the joint pair-controller
        # table: n_actions**2 joint actions + 1 "delegate" logit.
        thetas = {}
        thetas["dec"] = [init.normal_(torch.zeros(n_states_dec, n_actions, requires_grad=True), std=std_val)
                         for _ in range(n_agents)]
        thetas["joint"] = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True),
                                       std=std_val)
        params = list(chain(*[_v if isinstance(_v, (list, tuple)) else [_v] for _v in thetas.values()]))
        if use_cuda:
            for param in params:
                # Move the storage in place.  The original rebinding
                # (`param = param.to("cuda")`) only changed the loop variable
                # and left the optimised tensors on the CPU.
                param.data = param.data.to("cuda")
        if optim == 'sgd':
            optimizer = SGD(params, lr=lr)
        else:
            optimizer = Adam(params, lr=lr)
        for i in range(t_max):
            loss = - expected_return(p_common, p_observation, thetas, run, False, p_ck_noise)
            r_s = -loss.data.numpy()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % log_interval == 0:
                r_test = expected_return(p_common, p_observation, thetas, run, True, p_ck_noise)
                results_log_test[i // log_interval] = r_test
                results_log[i // log_interval] = r_s
        results.append((results_log_test, results_log))
    return results
def main():
    """Run every (run label, noise level, p_common) training sweep, pickle
    the accumulated results, and plot train/test curves.

    Results are appended to the module-level `final_res` list (defined
    elsewhere in this file).
    """
    use_mp = True
    if use_mp:
        pool = Pool(processes=2)
        # We'll be appending results to these lists
        run_results = []
        for run in labels:
            noise_results = []
            for pnoise in p_ck_noise:
                print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
                # One _proc worker invocation per p_common value.
                results = pool.map(_proc, [ (pc, p_observation, run, pnoise, t_max, n_trials) for pc in p_vec ])
                noise_results.append(results)
            run_results.append(noise_results)
        for p_common_id, p_common in enumerate(p_vec):
            all_res = []
            all_res_test = []
            for run_id, run in enumerate(labels):
                for pnoise_id, pnoise in enumerate(p_ck_noise):
                    try:
                        results = run_results[run_id][pnoise_id][p_common_id]
                    except Exception as e:
                        # NOTE(review): swallowing the lookup error silently
                        # reuses the previous iteration's `results` below —
                        # confirm this is intended.
                        pass
                    all_res_test.append(np.stack([r[0] for r in results], axis=1))
                    all_res.append(np.stack([r[1] for r in results], axis=1))
            final_res.append([all_res_test, all_res])
        pool.close()
        pool.join()
    else:
        # Sequential variant of the identical sweep above (no process pool).
        # We'll be appending results to these lists
        run_results = []
        for run in labels:
            noise_results = []
            for pnoise in p_ck_noise:
                print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
                results = [_proc((pc, p_observation, run, pnoise, t_max, n_trials)) for pc in p_vec ]
                noise_results.append(results)
            run_results.append(noise_results)
        for p_common_id, p_common in enumerate(p_vec):
            all_res = []
            all_res_test = []
            for run_id, run in enumerate(labels):
                for pnoise_id, pnoise in enumerate(p_ck_noise):
                    try:
                        results = run_results[run_id][pnoise_id][p_common_id]
                    except Exception as e:
                        # NOTE(review): same silent-reuse concern as above.
                        pass
                    all_res_test.append(np.stack([r[0] for r in results], axis=1))
                    all_res.append(np.stack([r[1] for r in results], axis=1))
            final_res.append([all_res_test, all_res])
    import pickle
    import uuid
    import os
    # Persist everything needed to re-plot later without re-training.
    res_dict = {}
    res_dict["final_res"] = final_res
    res_dict["labels"] = labels
    res_dict["p_ck_noise"] = p_ck_noise
    res_dict["p_vec"] = p_vec
    if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                       "pickles")):
        os.makedirs(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "pickles"))
    pickle.dump(res_dict, open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                            "pickles",
                                            "final_res_{}.p".format(uuid.uuid4().hex[:4])), "wb"))
    plt.figure(figsize=(5, 5))
    color = ['b', 'r','g', 'c', 'm', 'y', 'k','b', 'r','g', 'c', 'm', 'y', 'k']
    titles = ['Test', 'Train Performance']
    # pl == 0 plots test curves, pl == 1 plots train curves.
    for pl in [0,1]:
        ax = plt.subplot(1, 1, 1)
        for i in range(len(labels)):
            for pck, pcknoise in enumerate(p_ck_noise):
                mean_vals = []
                min_vals = []
                max_vals = []
                for j, p in enumerate( p_vec ):
                    vals = final_res[j][pl]
                    this_mean = np.mean( vals[i*len(p_ck_noise) + pck], 1)[-1]
                    # NOTE(review): the mean above indexes
                    # vals[i*len(p_ck_noise) + pck] but the std uses vals[i] —
                    # confirm which index is intended.
                    std = np.std(vals[i], 1)[-1]/0.5
                    low = this_mean-std / (n_trials)**0.5
                    high = this_mean + std / (n_trials)**0.5
                    mean_vals.append( this_mean )
                    min_vals.append( low )
                    max_vals.append( high )
                plt.plot(p_vec,
                         mean_vals,
                         color[(i*len(p_ck_noise) + pck) % len(color)],
                         label = "{} p_ck_noise: {}".format(labels[i], pcknoise))
                # NOTE(review): facecolor uses color[i] while the line colour
                # above uses color[(i*len(p_ck_noise) + pck) % len(color)] —
                # confirm the mismatch is intended.
                plt.fill_between(p_vec,
                                 min_vals,
                                 max_vals,
                                 facecolor=color[i],
                                 alpha=0.3)
        plt.xlabel('P(common knowledge)')
        plt.ylabel('Expected Return')
        plt.ylim([0.0, 1.01])
        plt.xlim([-0.01, 1.01])
        ax.set_facecolor((1.0, 1.0, 1.0))
        ax.grid(color='k', linestyle='-', linewidth=1)
        ax.set_title(titles[pl])
        plt.legend()
        plt.xticks([0, 0.5, 1])
        plt.yticks([0.5, 0.75, 1])
        plt.savefig("MACKRL {}.pdf".format(titles[pl]))
        plt.show(block=False)
if __name__ == "__main__":
    # freeze_support() is required for multiprocessing in frozen Windows
    # executables; it is a no-op otherwise.
    freeze_support()
    main()
| 38.037209
| 145
| 0.547811
| 2,375
| 16,356
| 3.501895
| 0.151579
| 0.023807
| 0.027895
| 0.014068
| 0.481664
| 0.446916
| 0.406036
| 0.358783
| 0.33173
| 0.314056
| 0
| 0.039799
| 0.331744
| 16,356
| 430
| 146
| 38.037209
| 0.721134
| 0.119589
| 0
| 0.263804
| 0
| 0
| 0.024855
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02454
| false
| 0.015337
| 0.033742
| 0
| 0.088957
| 0.009202
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8f623d0cb63c4b268f633b3bf392a5401ce666a
| 2,962
|
py
|
Python
|
pr_consistency/2.find_pr_branches.py
|
adrn/astropy-tools
|
c26a5e4cdf8735976375dd2b77de797a7723bcd9
|
[
"BSD-3-Clause"
] | 10
|
2018-02-24T15:06:39.000Z
|
2020-11-24T15:28:35.000Z
|
pr_consistency/2.find_pr_branches.py
|
adrn/astropy-tools
|
c26a5e4cdf8735976375dd2b77de797a7723bcd9
|
[
"BSD-3-Clause"
] | 63
|
2018-01-22T20:12:47.000Z
|
2021-07-10T15:42:58.000Z
|
pr_consistency/2.find_pr_branches.py
|
adrn/astropy-tools
|
c26a5e4cdf8735976375dd2b77de797a7723bcd9
|
[
"BSD-3-Clause"
] | 16
|
2018-02-25T16:32:51.000Z
|
2021-07-10T13:33:46.000Z
|
# The purpose of this script is to check all the maintenance branches of the
# given repository, and find which pull requests are included in which
# branches. The output is a JSON file that contains for each pull request the
# list of all branches in which it is included. We look specifically for the
# message "Merge pull request #xxxx " in commit messages, so this is not
# completely foolproof, but seems to work for now.
import os
import sys
import json
import re
import subprocess
import tempfile
from collections import defaultdict
from astropy.utils.console import color_print
from common import get_branches
# Determine which repository to scan: first CLI argument, or astropy/astropy.
if sys.argv[1:]:
    REPOSITORY_NAME = sys.argv[1]
else:
    REPOSITORY_NAME = 'astropy/astropy'
    print("The repository this script currently works with is '{}'.\n"
          .format(REPOSITORY_NAME))
REPOSITORY = f'git://github.com/{REPOSITORY_NAME}.git'
NAME = os.path.basename(REPOSITORY_NAME)
DIRTOCLONEIN = tempfile.mkdtemp()  # set this to a non-temp directory to retain the clone between runs
ORIGIN = 'origin'  # set this to None to not fetch anything but rather use the directory as-is.
STARTDIR = os.path.abspath('.')
# The branches we are interested in
BRANCHES = get_branches(REPOSITORY_NAME)
# Read in a list of all the PRs
# NOTE(review): merged_prs is loaded but not referenced in this portion of
# the script — confirm whether it is still needed.
with open(f'merged_pull_requests_{NAME}.json') as merged:
    merged_prs = json.load(merged)
# Set up a dictionary where each key will be a PR and each value will be a list
# of branches in which the PR is present
pr_branches = defaultdict(list)
try:
    # Set up repository
    color_print(f'Cloning {REPOSITORY}', 'green')
    os.chdir(DIRTOCLONEIN)
    if os.path.isdir(NAME):
        # already exists... assume its the right thing
        color_print('"{}" directory already exists - assuming it is an already '
                    'existing clone'.format(NAME), 'yellow')
        os.chdir(NAME)
        if ORIGIN:
            subprocess.call(f'git fetch {ORIGIN}', shell=True)
    else:
        subprocess.call(f'git clone {REPOSITORY}', shell=True)
        os.chdir(NAME)
    # Loop over branches and find all PRs in the branch
    for branch in BRANCHES:
        # Change branch
        color_print(f'Switching to branch {branch}', 'green')
        subprocess.call('git reset --hard', shell=True)
        subprocess.call('git clean -fxd', shell=True)
        subprocess.call(f'git checkout {branch}', shell=True)
        if ORIGIN:
            subprocess.call(f'git reset --hard {ORIGIN}/{branch}', shell=True)
        # Extract log:
        log = subprocess.check_output('git log', shell=True).decode('utf-8')
        # Check for the presence of the PR in the log: both direct merges and
        # backported PRs are recorded.
        for pr in (re.findall(r'Merge pull request #(\d+) ', log) +
                   re.findall(r'Backport PR #(\d+):', log)):
            pr_branches[pr].append(branch)
finally:
    # Always restore the original working directory, even on failure.
    os.chdir(STARTDIR)
with open(f'pull_requests_branches_{NAME}.json', 'w') as f:
    json.dump(pr_branches, f, sort_keys=True, indent=2)
| 33.659091
| 102
| 0.686698
| 437
| 2,962
| 4.599542
| 0.366133
| 0.031343
| 0.029851
| 0.035821
| 0.025871
| 0.025871
| 0
| 0
| 0
| 0
| 0
| 0.001719
| 0.214382
| 2,962
| 87
| 103
| 34.045977
| 0.862054
| 0.310263
| 0
| 0.117647
| 0
| 0
| 0.248641
| 0.051409
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0.098039
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8f63abc9f6d14490126b79f424fe99cf745e819
| 603
|
py
|
Python
|
agents/solo_q_agents/q_agent_test/aux.py
|
pedMatias/matias_hfo
|
6d88e1043a1455f5c1f6cc11b9380869772f4176
|
[
"MIT"
] | 1
|
2021-06-03T20:03:50.000Z
|
2021-06-03T20:03:50.000Z
|
agents/solo_q_agents/q_agent_test/aux.py
|
pedMatias/matias_hfo
|
6d88e1043a1455f5c1f6cc11b9380869772f4176
|
[
"MIT"
] | null | null | null |
agents/solo_q_agents/q_agent_test/aux.py
|
pedMatias/matias_hfo
|
6d88e1043a1455f5c1f6cc11b9380869772f4176
|
[
"MIT"
] | 1
|
2021-03-14T01:22:33.000Z
|
2021-03-14T01:22:33.000Z
|
from datetime import datetime as dt
import os
import numpy as np
import settings
def mkdir():
    """Create and return a fresh timestamped model directory.

    The directory is created under settings.MODELS_DIR and named
    q_agent_train_<timestamp> (minute precision).  If that name already
    exists, numeric suffixes (_2, _3, ...) are tried until creation
    succeeds — the original code only tried "_2" once and crashed on a
    third call within the same minute.
    """
    now = dt.now().replace(second=0, microsecond=0)
    base_name = "q_agent_train_" + now.strftime("%Y-%m-%d_%H:%M:%S")
    name_dir = base_name
    suffix = 1
    while True:
        path = os.path.join(settings.MODELS_DIR, name_dir)
        try:
            os.mkdir(path)
            return path
        except FileExistsError:
            suffix += 1
            name_dir = "{}_{}".format(base_name, suffix)
def save_model(q_table: np.ndarray, directory: str, file_name: str):
    """Serialise the Q-table to <directory>/<file_name> using np.save.

    np.save appends the ".npy" extension when the target has none.
    """
    # NOTE: the annotation originally read `q_table: str`, but the value is
    # handed to np.save, so an ndarray (or array-like) is what callers pass.
    file_path = os.path.join(directory, file_name)
    np.save(file_path, q_table)
| 24.12
| 67
| 0.665008
| 94
| 603
| 4.074468
| 0.43617
| 0.073107
| 0.078329
| 0.109661
| 0.198433
| 0.198433
| 0.198433
| 0.198433
| 0.198433
| 0
| 0
| 0.006289
| 0.208955
| 603
| 24
| 68
| 25.125
| 0.796646
| 0
| 0
| 0.222222
| 0
| 0
| 0.054726
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8f694a754b9e6ecfc7a48eb472c8ee96d237a42
| 278
|
py
|
Python
|
timeserio/utils/functools.py
|
ig248/timeserio
|
afc2a953a83e763418d417059493ef13a17d349c
|
[
"MIT"
] | 63
|
2019-07-12T17:16:27.000Z
|
2022-02-22T11:06:50.000Z
|
timeserio/utils/functools.py
|
ig248/timeserio
|
afc2a953a83e763418d417059493ef13a17d349c
|
[
"MIT"
] | 34
|
2019-07-30T11:52:09.000Z
|
2022-03-28T12:42:02.000Z
|
timeserio/utils/functools.py
|
ig248/timeserio
|
afc2a953a83e763418d417059493ef13a17d349c
|
[
"MIT"
] | 12
|
2019-08-14T05:51:22.000Z
|
2021-03-15T09:34:15.000Z
|
import inspect
def get_default_args(func):
    """Map each parameter of *func* that declares a default to that default.

    Parameters without defaults are omitted from the result.
    """
    parameters = inspect.signature(func).parameters
    defaults = {}
    for name, param in parameters.items():
        if param.default is not inspect.Parameter.empty:
            defaults[name] = param.default
    return defaults
| 21.384615
| 51
| 0.636691
| 36
| 278
| 4.861111
| 0.666667
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269784
| 278
| 12
| 52
| 23.166667
| 0.862069
| 0.129496
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8fa0708043799c2510940867111d04480ef484c
| 5,030
|
py
|
Python
|
explore/scripts/get_repos_creationhistory.py
|
john18/uccross.github.io
|
72cd88c7310ab1503467fba27add2338cf57d8f7
|
[
"MIT"
] | 12
|
2019-03-02T06:42:37.000Z
|
2022-03-01T03:59:08.000Z
|
explore/scripts/get_repos_creationhistory.py
|
john18/uccross.github.io
|
72cd88c7310ab1503467fba27add2338cf57d8f7
|
[
"MIT"
] | 6
|
2020-04-14T21:22:36.000Z
|
2022-01-19T23:41:35.000Z
|
explore/scripts/get_repos_creationhistory.py
|
john18/uccross.github.io
|
72cd88c7310ab1503467fba27add2338cf57d8f7
|
[
"MIT"
] | 29
|
2017-11-08T19:39:20.000Z
|
2022-03-17T18:05:29.000Z
|
import helpers
import json
import re
# Gather, for every lab repository, its GitHub creation date and the
# timestamp of its earliest commit (which may predate the GitHub repo),
# merging into any previously saved data file.
datfilepath = "../github-data/labRepos_CreationHistory.json"
allData = {}
# Check for and read existing data file
allData = helpers.read_existing(datfilepath)
# Read repo info data file (to use as repo list)
dataObj = helpers.read_json("../github-data/labReposInfo.json")
# Populate repo list
repolist = []
print("Getting internal repos ...")
repolist = sorted(dataObj["data"].keys())
print("Repo list complete. Found %d repos." % (len(repolist)))
# Read pretty GraphQL query
query_in = helpers.read_gql("../queries/repo-CreationDate.gql")
# Rest endpoint query
query_commits_in = "/repos/OWNNAME/REPONAME/commits?until=CREATETIME&per_page=100"
query_commits_in2 = "/repos/OWNNAME/REPONAME/commits?per_page=100"
# Retrieve authorization token
authhead = helpers.get_gitauth()
# Iterate through internal repos
print("Gathering data across multiple paginated queries...")
collective = {u'data': {}}
tab = " "
for repo in repolist:
    # History doesn't change, only update new repos or those that had no previous commits
    if "data" in allData.keys() and repo in allData["data"].keys():
        if allData["data"][repo]["firstCommitAt"]:
            print(tab + "Already recorded data for '%s'" % (repo))
            continue
    pageNum = 1
    print("\n'%s'" % (repo))
    print(tab + "page %d" % (pageNum))
    repoSplit = repo.split("/")
    # Query 1: GraphQL query for creation date and default branch.
    print(tab + "Get creation date and default branch")
    print(tab + "Modifying query...")
    newquery = re.sub('OWNNAME', repoSplit[0], query_in)
    newquery = re.sub('REPONAME', repoSplit[1], newquery)
    gitquery = json.dumps({'query': newquery})
    print(tab + "Query ready!")
    # Actual query exchange
    outObj = helpers.query_github(authhead, gitquery)
    if outObj["errors"]:
        print(tab + "Could not complete '%s'" % (repo))
        collective["data"].pop(repo, None)
        continue
    # Update collective data
    collective["data"][repo] = outObj["data"]["repository"]
    # Query 2: REST query for commits dated before the GitHub creation time.
    print(tab + "Get pre-GitHub commit timestamps")
    print(tab + "Modifying query...")
    gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in)
    gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
    gitquery = re.sub('CREATETIME', collective["data"][repo]["createdAt"], gitquery)
    print(tab + "Query ready!")
    # Actual query exchange
    outObj = helpers.query_githubrest(authhead, gitquery)
    if outObj["errors"]:
        # Best-effort: treat a failure here as "no pre-GitHub commits".
        print(tab + "Could not get pre-GitHub commits for '%s'" % (repo))
        outObj["data"] = []
    # Update collective data
    collective["data"][repo]["commitTimestamps"] = []
    for commit in outObj["data"]:
        collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
    # If no pre-GitHub commits, check the greater commit history
    if len(collective["data"][repo]["commitTimestamps"]) > 0 and collective["data"][repo]["commitTimestamps"][0]:
        collective["data"][repo]["initBeforeGitHubRepo"] = True
    else:
        print(tab + "No pre-GitHub commits found, getting full history")
        collective["data"][repo]["initBeforeGitHubRepo"] = False
        # Query 3: REST query over the full commit history, paginated.
        print(tab + "Modifying query...")
        gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in2)
        gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
        print(tab + "Query ready!")
        # Actual query exchange
        outObj = helpers.query_githubrest(authhead, gitquery)
        if outObj["errors"]:
            print(tab + "Could not complete '%s'" % (repo))
            collective["data"].pop(repo, None)
            continue
        # Update collective data
        for commit in outObj["data"]:
            collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
        # Paginate if needed
        hasNext = ("next" in outObj)
        while hasNext:
            pageNum += 1
            print(tab + "page %d" % (pageNum))
            print(tab + "Modifying query...")
            newquery = gitquery + "&page=" + str(pageNum)
            print(tab + "Query ready!")
            # Actual query exchange
            outObj = helpers.query_githubrest(authhead, newquery)
            if outObj["errors"]:
                print(tab + "Could not complete '%s'" % (repo))
                collective["data"].pop(repo, None)
                continue
            # Update collective data
            for commit in outObj["data"]:
                collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
            hasNext = ("next" in outObj)
    # Sort dates (ISO-8601 strings sort chronologically)
    collective["data"][repo]["commitTimestamps"].sort()
    # Save earliest commit date
    firstdate = None
    if len(collective["data"][repo]["commitTimestamps"]) > 0:
        firstdate = collective["data"][repo]["commitTimestamps"][0]
    collective["data"][repo]["firstCommitAt"] = firstdate
    del collective["data"][repo]["commitTimestamps"]
    print("'%s' Done!" % (repo))
print("\nCollective data gathering complete!")
# Combine new data with existing data
if "data" not in allData.keys():
    allData["data"] = {}
for repo in collective["data"].keys():
    allData["data"][repo] = collective["data"][repo]
allDataString = json.dumps(allData, indent=4, sort_keys=True)
# Write output file
print("\nWriting file '%s'" % (datfilepath))
with open(datfilepath, "w") as fileout:
    fileout.write(allDataString)
print("Wrote file!")
print("\nDone!\n")
| 31.4375
| 110
| 0.695626
| 633
| 5,030
| 5.492891
| 0.248025
| 0.096635
| 0.08283
| 0.097785
| 0.432557
| 0.39258
| 0.368709
| 0.325568
| 0.295082
| 0.295082
| 0
| 0.005535
| 0.137972
| 5,030
| 159
| 111
| 31.63522
| 0.796356
| 0.132406
| 0
| 0.356436
| 0
| 0
| 0.310281
| 0.049101
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029703
| 0
| 0.029703
| 0.267327
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8fb454d7a74c617f9f1467386eb93a2fe60e4db
| 341
|
py
|
Python
|
examples/test/runMe.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
examples/test/runMe.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
examples/test/runMe.py
|
tomaszjonak/PBL
|
738b95da52cd59dcacb0b9dc244ca1713b0264ac
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#! /usr/bin/env python2.7
from __future__ import print_function
import sys
sys.path.append("../../include")
import PyBool_public_interface as Bool
if __name__ == "__main__":
    # Parse the boolean expression in input.txt, simplify it, convert it with
    # Bool.nne (presumably negation normal form — confirm against PyBool docs),
    # and pretty-print the result.
    expr = Bool.parse_std("input.txt")
    expr = expr["main_expr"]
    expr = Bool.simplify(expr)
    expr = Bool.nne(expr)
    print(Bool.print_expr(expr))
| 16.238095
| 38
| 0.683284
| 48
| 341
| 4.479167
| 0.604167
| 0.148837
| 0.111628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007117
| 0.175953
| 341
| 20
| 39
| 17.05
| 0.758007
| 0.070381
| 0
| 0
| 0
| 0
| 0.123418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8fcc7a6b82e6b901e4e3c720b6e0e1f082a90c0
| 24,425
|
py
|
Python
|
calculator.py
|
rupen4678/botique_management_system
|
9b7807cc28bb15e024093d6161a8fef96ce7e291
|
[
"Apache-2.0"
] | null | null | null |
calculator.py
|
rupen4678/botique_management_system
|
9b7807cc28bb15e024093d6161a8fef96ce7e291
|
[
"Apache-2.0"
] | null | null | null |
calculator.py
|
rupen4678/botique_management_system
|
9b7807cc28bb15e024093d6161a8fef96ce7e291
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
import random
import time
from PIL import Image
from datetime import datetime
from tinydb import *
import os
import pickle
#from database1 import *
from random import randint
# --- Main window and layout frames -----------------------------------------
root = Tk()
root.geometry("1600x800+0+0")
root.title("Suman_dai_ko_DHOKAN")
root.configure(bg="goldenrod4")
# Display/accumulator state for the calculator pad.
text_Input = StringVar()
operator =""
yes =""
no=""
Tops = Frame(root, width=1600 ,height=50,bg="goldenrod4", relief=RIDGE)
Tops.pack(side=TOP)
f1 = Frame(root, width = 800 ,height=500,bg="goldenrod4",relief=SUNKEN)
f1.pack(side=LEFT)
f2 = Frame(root, width = 300,height = 700,bg="dark slate blue",relief=SUNKEN)
f2.pack(side=RIGHT)
#f3= Frame(root,width=1600,height=300,fg="blue", bg="powder blue", relief=SUNKEN).pack(side=Bottom)
#==========================================================Time=======================================
localtime=time.asctime(time.localtime(time.time()))
#datetime=Label(Tops,font("arial",20,"bold"),text=nowTime,bd=10 ,bg="black", #fg="white", anchor="w").pack()
#====================================debugged========================
# Tk integer variables read by _calculation() below.
shirt = IntVar()
pant = IntVar()
sale = IntVar()
buy = IntVar()
deposite = IntVar()
withdraw = IntVar()
coat = IntVar()
order = IntVar()
total = IntVar()
out = IntVar()
before = IntVar() #order before the 60
stock = IntVar()
delivery = IntVar()
#########################main_gate######################
def _calculation():
    """Read all Tk counter values, total them, and dump them to a text file."""
    shirt_mm = shirt.get()
    pant_mm = pant.get()
    sale_mm = sale.get()
    buy_mm = buy.get()
    deposite_mm = deposite.get()
    withdraw_mm = withdraw.get()
    coat_mm = coat.get()
    order_mm = order.get()
    total_mm = total.get()
    # NOTE(review): this local `time` shadows the imported `time` module
    # inside this function.
    time = datetime.now()
    day = time.day
    month = time.month
    hour = time.hour
    second = time.second
    year = time.year
    minute = time.minute
    #setting the filename using the loop
    #file = open("1{}".format())
    '''for i in range(5):
    if os.path.isfile(i):
    pass
    else:
    file = open("{}.txt".format(i+1), "w+")
    created with name {}".format(file))'''
    #creating the filenames with append =1 if the name already existed
    file_name = "r.txt"
    if os.path.isfile(file_name):
        expand = 1
        while True:
            expand += 1
            new_file_name = file_name.split(".txt")[0] + str(expand) + ".txt"
            if os.path.isfile(new_file_name): #if the newfilename exists
                print("using the file {}".format(new_file_name))
                #file = open("{}".format(new_file_name), "w+")
                continue
            else:
                file_name = open(new_file_name, "w+")
                print("creating the file {}".format(file_name))
                #file = open("{}".format(file_name), "w+")
                break
    # NOTE(review): the collision-avoiding name computed above is discarded —
    # file_name is unconditionally reset to "fil.txt" here. Confirm intent.
    file_name = "fil.txt"
    file = open("{}".format(file_name),"w+")
    totalx = shirt_mm+pant_mm+sale_mm+buy_mm+deposite_mm+withdraw_mm+coat_mm+order_mm
    file.write("Total:-{}".format(totalx))
    file.write("shirt:-{}".format(shirt_mm))
    file.write("pant_mm:-{}".format(pant_mm))
    file.write("sale_mm:-{}".format(sale_mm))
    file.write("buy_mm:-{}".format(buy_mm))
    file.write("deposite_mm:-{}".format(deposite_mm))
    file.write("withdraw_mm:-{}".format(withdraw_mm))
    file.write("coat:-{}".format(coat_mm))
    file.write("order:-{}".format(order_mm))
    # NOTE(review): readlines() right after writing returns [] unless the file
    # position is rewound first (file.seek(0)).
    reading = file.readlines()
    file.close()
    #after wards set the total from here total.set
#++++++++++++++++++++++++++++++Varibales_inset+++++++++++++++++++++++++++++++++
# Tk variables backing the (planned) bookkeeping widgets; created at module
# level so widget callbacks can read and write them.
order_bef = IntVar()
stock_full = IntVar()
shrting = IntVar()
pant = IntVar()
sari = IntVar()
order_info = IntVar()
delivery_report = IntVar()
daily_info = IntVar()
sales = IntVar()
buy = IntVar()
total_bank = IntVar()
bank_deposite = IntVar()
bank_withdraw = IntVar()
due_amount = IntVar()
order_info = IntVar()
daily_cash = IntVar()
cus_name = IntVar()
cus_no = IntVar()
employee = IntVar()
###############################class of algoriths#########################
class __main():
    """Order-timing bookkeeping (appears unused).

    NOTE(review): `__order_info` is broken as written — see the inline notes
    below — so this class cannot be used without fixes.
    """
    def __init__(self):
        # Bind the module-level Tk `order` variable onto the instance.
        self.order = order
    def __order_info(self):
        # NOTE(review): datetime() requires year/month/day arguments, so this
        # raises TypeError; datetime.now() was probably intended.
        self.now = datetime()
        # NOTE(review): `now` and `record_time` are read as bare names below,
        # but only `self.now` is ever assigned — these lines raise NameError.
        self.hour = now.hour
        self.minute = now.minute
        self.second = now.second
        self.year = now.year
        self.month = now.month
        self.day = now.day
        self.record_time = record_time
        if self.hour == self.record_timeD:
            print("the time for the product is actually %s left" %(self.hour-self.record_timeD))
#++++++++++++++++++++++++++++++++++++++++tinydb example++++++++++++++++++++++
#db = TinyDB("/databse/d4ta.json")
#db.insert({"cus_number":"98938232", "cus_name":"rupen"})
#def no_y():
# lis = db.all()
################Info===============
# Title banner and live-clock label at the top of the window.
lblInfo = Label(Tops, font=("arial",60, "italic bold"),text="Botique Management Systewm",fg="white", bg="dark slate blue", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
lblInfo = Label(Tops, font=("arial",30, "bold"),text=localtime,fg="white",bg="black", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
#===========================================================Calculator==================================
"""def current_dir():
import os
import sys
DIR = os.getcwd()
print(DIR)
lblInfo = Label(Tops, font=("arial",60, "italic"),text=current_dir,fg="black",bg="powder blue",bd=10, anchor="W")
lblInfo.pack()
#DIR = dir
#return dir
"""
#randomBtn=Button(f1,pady=16,padx=16,bd=8,bg="powder blue", text="C_dir", command=lambda: current_dir(dir)).pack(side=TOP)
def btnClick(numbers):
    """Append the pressed key to the pending expression and echo it."""
    global operator
    operator += str(numbers)
    text_Input.set(operator)
def btnClearDisplay():
    """Reset the pending expression and blank the display."""
    global operator
    operator=""
    text_Input.set("")
def btnEqualsInput():
    """Evaluate the accumulated expression and show the result.

    The multiply button inserts the display glyph "X" (see the Multiply
    Button wiring below), which eval() cannot parse; translate it to "*"
    before evaluating so multiplication works.
    """
    global operator
    expression = operator.replace("X", "*")
    # NOTE: eval() on arbitrary input is unsafe in general; tolerable here
    # only because the input comes from the calculator's own buttons.
    sumup=str(eval(expression))
    text_Input.set(sumup)
    operator=""
def bill_entry():
    """Initialise the module-level bill accumulators to empty strings.

    NOTE(review): these globals are not referenced elsewhere in the visible
    code — confirm this stub is still needed.
    """
    global bill_in
    global bill_out
    bill_out = ""
    bill_in = ""
def rupen():
    # NOTE(review): this rebinds the global name `rupen` (this function
    # itself) to itself — a no-op. It looks like leftover scratch code.
    global rupen
    rupen = rupen
# NOTE(review): `ronley` is never used in the visible code.
ronley = StringVar()
# Dead code kept as a string literal by the original author.
'''def malware_activate():
global cmd_active
if "rupen" in cmd_active:
if "rupen" in cmd_active[1]:
if "ronley" in cmd_active[2]:'''
#==============================another windows about me=====================
def ano_win1():
    """Open the "owner" top-level window with info frames and widgets."""
    win1 = Toplevel()
    #this is going to be the window in which there is nothing in the function
    #of the system on the support in teh main loop
    #there is no limit in the system of teh
    win1.title("this is the owner window:")
    win1.geometry("1600x800+0+0")
    #win1.configure(bg="silver")
    my_info = Frame(win1, width=600, height=700,bg="RoyalBlue4",relief=GROOVE)
    my_info.pack(side=LEFT)
    customer_info = Frame(win1, width=600, height=500,bg="RoyalBlue4", relief=GROOVE)
    customer_info.pack(side=RIGHT)
    others_info = Frame(win1, width=100, height=100,bg="RoyalBlue4",relief=GROOVE)
    others_info.pack(side=BOTTOM)
    all_info = Frame(win1, width=50, height=50,bg="RoyalBlue4",relief=RAISED)
    all_info.pack()
    # NOTE(review): .pack() returns None, so every name assigned from a
    # chained widget...pack() call below is None.
    lblname=Label(my_info,font=("arial",20,"italic"),text="Rupen Gurung",bg="powder blue", fg="green", bd=10, relief=SUNKEN).pack(side=TOP)
    lblpro=Label(my_info,font=("arial", 20,"bold"),text="Software Engineer",bg="powder blue", fg="green",bd=10, relief=RAISED).pack()
    ima = StringVar()
    imageloc=Entry(win1,font=("arial",16,"italic"),bg="black",fg="white",bd=5,insertwidth=1,relief=GROOVE,textvariable=ima).pack()
    # NOTE(review): the command references `_image`/`image`, which only exist
    # inside the dead string below — clicking this button raises NameError.
    imageButt=Button(win1,font=("arial",20, "bold"),bd=5,bg="white",fg="white",command= lambda: _image(image)).pack()
    '''def _image(image):
    image = image.set(imageloc)
    return image
    #image = Image.open("/root/Desktop/Desktop/anonymous/5.png")
    imae = Label(win1,font=("arial", 20,"italic"),width=300, height=168,bg="black",fg="white", text=image,relief=FLAT).pack()
    win1.mainloop()'''
#=============================getting all the infos ========================
def _price_inputs():
    """Open the top-level window that collects price inputs."""
    win2 = Toplevel()
    win2.title("This is going to the section for the price inputs")
    win2.geometry("1600x800")
    # NOTE(review): .pack() returns None, so these frame/label names are None.
    framex = Frame(win2,width=1600,bg="RoyalBlue4",height=100,relief=GROOVE).pack(side=TOP)
    frame1 = Frame(win2,width=775, height=750,bg="white", relief=SUNKEN).pack()
    frame2 = Frame(win2, width=775,height=750,bg="black", relief=FLAT).pack()
    #==++++===========================title=============================
    llb1 = Label(framex,font=("arial", 20,"italic"),bg="powder blue",fg="green",text="INPUT THE PRICES",relief=GROOVE).pack()
    win2.mainloop()
###########################sending emails############################
def __send_email():
    """Open a Tk dialog collecting email credentials.

    NOTE(review): the smtplib sending code is commented out (the string
    below), so this currently only builds the form and sends nothing.
    """
    '''import smtplib
    gmail = smtplib.SMTP("smtp.gmail.com", 587)
    gmail.starttls()
    _file = open("/root/Desktop/Desktop/python/")
    gmail.login("username", "password")
    msg = "YOUR MESSAGE"
    gmail.sendmail("your email adress", "the")
    gmail.quit()'''
    dialog = Tk()
    dialog.title("Send emails")
    dialog.geometry("800x800")
    # NOTE(review): .pack() returns None, so every widget name below is None.
    dframe = Frame(dialog,width=800,height=800,bg="white",relief=SUNKEN).pack()
    email = StringVar()
    password = StringVar()
    semail = StringVar()
    spassword = StringVar()
    label = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="your_email").pack(side=LEFT)
    entry1 = Entry(dframe, font=("arial",16,"bold"), fg="white",bg="black", textvariable=email,insertwidth=1,bd=5).pack(side=RIGHT)
    label1 = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="password", relief=SUNKEN).pack()
    entry2 = Entry(dframe,font=("arial", 16 ,"bold"),textvariable=password, insertwidth=1,bd=5).pack(side=RIGHT)
    Label2 =Label(dframe,font=("arial",16, "bold"),fg="white",bg="black", text="sender_email",relief=SUNKEN).pack(side=LEFT)
    entry2 = Entry(dframe,font=("arial",16, "bold"),bd=5,fg="white",bg="black",textvariable=semail,insertwidth=1).pack(side=LEFT)
    label3 = Label(dframe,font=("arial",16,"bold"),fg="white",bg="black",text="sender_password", relief=SUNKEN).pack(side=LEFT)
    entry3= Entry(dframe,font=("arial",16,"bold"),fg="white",textvariable=spassword,insertwidth=1,relief=SUNKEN).pack()
    dialog.mainloop()
#btnEmail = Button(root,font=("arial", 16, "bold"), bg="black",fg="white",text="email",command=lambda: __send_email(),relief=GROOVE).pack()
#================================next section===========================
# Navigation buttons; NOTE(review): .pack() returns None, so `fix`/`btnru`
# are None after these assignments.
fix = Button(root, bd=10,bg="black",fg="white",command=_price_inputs,relief=GROOVE).pack(side=BOTTOM)
btnru = Button(root, font=("arial 20 bold"),bd=20, bg="black",fg="white",text="click",command=ano_win1,relief=GROOVE).pack(side=BOTTOM)
# (next: the calculator keypad below)
def column(col):
    """Return the number of entries in *col*.

    The original body referenced an undefined name (`cal`) and returned
    from inside the loop, so it raised NameError on the first iteration
    (and returned None for empty input); counting the elements is the
    evident intent.
    """
    count = 0
    for _ in col:
        count += 1
    return count
#def yes_y():
# rupe = Toplevel(root)
# rupe.title("this is second window")
# return
#def no_y():
#nos = Toplevel(root)
#nos.title("this is nos window")
#return
a = Entry(f2,font=("arial", 20,"bold"), textvariable=text_Input, bd=30, insertwidth=4,
bg="dark slate blue",fg="white", justify="right").grid(columnspan=4)
btn7=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="7",bg="dim gray", command=lambda: btnClick(7)).grid(row=2,column=0)
btn8=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="8",bg="dim gray", command=lambda: btnClick(8)).grid(row=2,column=1)
btn9=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="9",bg="dim gray", command=lambda: btnClick(9)).grid(row=2,column=2)
#!!!!!!!!!!!!!!!!!!!!!!additions!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Addition=Button(f2,padx=16,pady=16,bd=8,text="+",fg="black",bg="dim gray", command=lambda: btnClick("+")).grid(row=2,column=3)
btn6=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="4", bg="dim gray", command=lambda: btnClick(4)).grid(row=3,column=0)
btn5=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="5", bg="dim gray", command=lambda: btnClick(5)).grid(row=3,column=1)
btn4=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="6",bg="dim gray", command=lambda: btnClick(6)).grid(row=3,column=2)
Subtract=Button(f2,padx=16,pady=16,bd=8,text="-", bg="dim gray", command=lambda: btnClick("-")).grid(row=3,column=3)
btn3=Button(f2,padx=16,pady=16,bd=8,text="3",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(3)).grid(row=4,column=0)
btn2=Button(f2,padx=16,pady=16,bd=8,text="2",font=("arial", 20, "bold"), bg="dim gray", command=lambda: btnClick(2)).grid(row=4,column=1)
btn1=Button(f2,padx=16,pady=16,bd=8,text="1",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(1)).grid(row=4,column=2)
Multiply=Button(f2,padx=16,pady=16,bd=8,text="*", bg="dim gray", command=lambda: btnClick("X")).grid(row=4,column=3)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
btn0=Button(f2,padx=16,pady=16,bd=8,bg="dim gray",text="0",fg="black",font=("arial", 20, "bold"), command=lambda: btnClick(0)).grid(row=5,column=0)
btnClear=Button(f2,pady=16,padx=16,bd=8, fg="black",font=("arial", 20, "bold"),text="C",bg="dim gray", command=btnClearDisplay).grid(row=5,column=1)
btnEquals=Button(f2,padx=16,pady=16,fg="black",bd=8,text="=",bg="dim gray", font=("arial", 20,"bold"), command=btnEqualsInput).grid(row=5,column=2)
#btn2=Button(f2,padx=16,pady=16,bd=8,fg="black",text="2",bg="dim gray", command=lambda: btnClick(2)).grid(row=5,column=3)
division=Button(f2,padx=16,pady=16,bd=8,fg="black", text="/", bg="dim gray", command=lambda: btnClick("/")).grid(row=5,column=3)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
rand = StringVar()
#lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="red",bg="red",anchor="w",relief=RIDGE).grid(row=0,column=0)
#txtReference=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,bg="red",fg="white", justify = "right").grid(row=0,column=1)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="white",bg="green",anchor="w", relief=RIDGE)
lblReference.grid(row=0,column=0)
b=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,fg="white",bg="black", justify = "left")
b.grid(row=0,column=1)
#img = "/root/Desktop/Desktop/python/projects/prj1_Botik/1.jpg"
#root.ima = Image.open(img)
#Label (root,bg="white",width=120,height=120, image=ima).pack()
bill_in = StringVar()
bill_out = StringVar()
shrting=Label(f1,font=("arial", 20, "bold"), text="Shirting:",bg="powder blue", fg="black",anchor="w",relief=GROOVE).grid(row=1,column=0)
shirts=Entry(f1,font=("arial", 16, "italic"), bd=10, textvariable=shirt, insertwidth=1,bg="black",fg="white", justify="left").grid(row=2,column=0)
owner=Button(root,padx=16,pady=16, font=("arial",12, "bold"),text="info", bd=8,bg="black",command=ano_win1,fg="white",relief=RAISED).pack(side=LEFT)
yes=Button(root,padx=16,pady=16,font=("arial",12, "bold"),text="Done",bd=8,bg="black", fg="white", command=_calculation(),relief=RAISED).pack(side=RIGHT)
panting=Label(f1,font=("arial",20, "bold"), text="pant_mm:", bg="powder blue",fg="black",anchor="w",relief=GROOVE).grid(row=1,column=1)
pantx=Entry(f1,font=("arial",16, "bold"), textvariable=pant, insertwidth=1, bd=10,bg="black",fg="white", justify="left").grid(row=2,column=1)
sales=Label(f1,font=("arial",16, "bold"), text="sales_total:",bg="powder blue",fg="black",anchor="w",bd=8,relief=GROOVE).grid(row=1,column=2)
salex=Entry(f1,font=("arial",16, "bold"),bg="black",fg="white",textvariable=sale,insertwidth=1,bd=10,justify="left").grid(row=2,column=2)
buying=Label(f1,font=("arial",16, "bold"), text="buying_something: ",bg="powder blue",fg="black", anchor="e", relief=GROOVE).grid(row=3,column=0)
buyx=Entry(f1,font=("arial", 16, "bold"), textvariable=buy, insertwidth=1, bd=10,bg="black", fg="white", justify="left").grid(row=4,column=0)
Bank_Total=Label(f1,font=("arial",16,"bold"),text="Bank_Deposite: ", bg="powder blue", fg="black", anchor="e",relief=GROOVE).grid(row=3, column=1)
depositex=Entry(f1,font=("arial",16,"bold"),bd=10, textvariable=deposite, bg="black", fg="white", justify="left").grid(row=4, column=1)
lblBankwith=Label(f1, font=("arial", 16, "bold"),fg="black",bg="powder blue",text="Bank_Withdraw", anchor="e",relief=GROOVE).grid(row=3,column=2)
withdrawx=Entry(f1,font=("arial",16, "bold"),bd=10, fg="white",bg="black", textvariable=withdraw, insertwidth=1).grid(row=4,column=2)
coating=Label(f1, font=("arial", 16, "bold"),text="coat_mm:", bg="powder blue",fg="black",anchor="e").grid(row=5,column=0)
coatx=Entry(f1, font=("arial", 16, "bold"), bg="black", fg="white",
textvariable=coat, insertwidth=1, justify="left",bd=10).grid(row=6,column=0)
lablsari=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="sari mm:", fg="black",anchor="e",relief=GROOVE).grid(row=5,column=1)
sarix=Entry(f1, font=("arial", 16, "bold"), bg="black",bd=10, fg="white",textvariable=sari, insertwidth=1).grid(row=6,column=1)
buying=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="buy_info:",fg="black",anchor="e",relief=GROOVE).grid(row=7,column=0)
buyx=Entry(f1,font=("arial",16, "bold"),bd=8, fg="white",bg="black",textvariable=buy,insertwidth=1).grid(row=8,column=0)
outgoing =Label(f1, font=("arial", 16, "bold"), bg="powder blue", text="outgoing:", fg="black",anchor="e",relief=GROOVE).grid(row=7,column=1)
outx=Entry(f1,font=("arial", 16, "bold"),textvariable=out, bd=8,fg="white",bg="black",insertwidth=1).grid(row=8,column=1)
ordering=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="order_info:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=0)
orderx=Entry(f1,font=("arial",16,"bold"),insertwidth=1, textvariable=order,bd=8,fg="white",bg="black").grid(row=10,column=0)
lblcustomer=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="cus_name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=1)
no=Entry(f1,font=("arial",16, "bold"),bd=8,bg="black",fg="white",insertwidth=1, textvariable=cus_name).grid(row=10,column=1)
lblmonthly=Label(f1, font=("arial",16,"bold"),bg="powder blue",text="monthly:",fg="black",anchor="e",relief=GROOVE).grid(row=5,column=2)
monthly=StringVar()
monthx=Entry(f1,font=("arial",16,"bold"),show="blank",bg="black",textvariable=monthly,insertwidth=1,fg="white",bd=10).grid(row=6,column=2)
lbltotal=Label(f1, font=("arial", 16, "bold"),bg="powder blue",text="Total:",fg="black").grid(row=7,column=2)
totalx=Entry(f1, font=("arial", 16, "bold"),bg="black",textvariable=total,fg="white",insertwidth=1,bd=10).grid(row=8,column=2)
lblemployee = Label(f1,font=("arial", 16, "bold"),bg="powder blue",text="employee name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=2)
employx= Entry(f1,font=("arial", 16,"bold"),textvariable=employee,insertwidth=1,bg="black",fg="white",bd=10).grid(row=10,column=2)
###############################database for the project######################
'''def __database():
db = TinyDB("/records.json")
#print(monthly)
#print(b)
#fuck = c.get()
a = order_bef.get()
b = stock_full.get()
c = shrting.get()
d = pant.get()
e = sari.get()
f = order_info.get()
g = delivery_report.get()
h = daily_info.get()
i = sales.get()
j = buy.get()
k = total_bank.get()
l = bank_deposite.get()
m = bank_withdraw.get()
n = due_amount.get()
o = order_info.get()
p = daily_cash.get()
q = cus_name.get()
r = cus_no.get()
s = employee.get()
files = {"a": "", "b": "", "c": "", "d": "", "e": "", "f": "", "g": "", "h": "", "i": "", "j": ""
, "k": "", "l": "", "m": "", "n": "", "o": "", "p": "", "q": "", "r": "", "s": ""}
db.insert({"total": a }),
db.insert({"regrds":"reference"}),
db.insert({"day_income":"billion"}),
db.insert({"day_outgoing":"billout"}),
db.insert({"bankdeposit":"bankdepo"}),
db.insert({"full_stock":"stock"}),
db.insert({"shirt_mm":"shirt"}),
db.insert({"bankwithdraw":"bankwith"}),
db.insert({"pantmm":"pant"}),
db.insert({"sarimm":"sari"}),
db.insert({"orderday":"orderinfo"}),
db.insert({"salling":"sales"}),
db.insert({"buying":"buy"}),
db.insert({"customern":"customer"}),
db.insert({"monthly_info":"monthly"}),
db.insert({"totaldy":"total"}),
db.insert({"employeid":"employee"})
for db in range(1):
print(db)
files = list(files)
file = open("/file.txt", "wb")
da = ""
for data in files:
if len(data) != 0:
print("this is are the files written in python\\n check the file.txt for debug ")
da += data
print(data)
da = int(da)
file.write(da)
try:
file = open("/records.txt", "r")
except:
print("creating the file from script {}".format(__file__))
file = open("/records.txt","w")
finally:
pass
check = os.path.isfile("/records.txt")
if check:
for item in db:
data = open("/records.txt","wb")
#with open("/records.txt","wb") as file:
#pickle.dump(item, data)
#file.close()
#file1 = pickle.load(file)
if len(item) == len(file1):
break
if item != file:
#item = str(item)
file.write("%s" %(item))
time.sleep(1)
print("done writing to the file")
#for item in db:
with open("/records.txt", "rb") as file:
reading = file1
if len(reading) != None:
print("its printed")
print(reading)
file.close()
#db.insert({"name":"Rupen Gurung"})
name = Query()
#db(name.type == "changed")
d = datetime.now()
month = str(d.month)
day = str(d.day)
year = str(d.year)
hour = str(d.hour)
minute = str(d.minute)
second = str(d.second)
between = str(":")'''
'''def __time(infos):
time = datetime.now()
day = str(time.day)
month = str(time.month)
hour = str(time.hour)
second = str(time.second)
year = str(time.year)
minute = str(time.minute)
#assuming the infos as the order taken that will be notified before the
#60 hours
#changing all the formats to the seconds that will be easy for the #calculation
#first calculating seconds in one day that will ease all the further operations
daysec = (24*60) * 60 * 60
###
##this is will be easy now
yearSec = daysec * 365
month = daysec * 30
daySec = daysec
hourSec = 60 * 60 * 60
minuteSec = 60 * 60
files = {"a":"", "b":"","c":"","d":"","e":"","f":"","g":"","h":"","i":"","j":""
,"k":"","l":"","m":"","n":"","o":"","p":"","q":"","r":"","s":""}'''
#files = list(files)
'''for data in files:
if len(data) != 0:
print(data)'''
#lenght = len(db)
##this will show the recorded bill numbers
def bill_in():
    # Record the bill-in/bill-out values to a TinyDB file and show them in a
    # small Toplevel window.
    # NOTE(review): this function shadows the module-level StringVar also named
    # `bill_in`; at call time `bill_in.get()` resolves to this function object
    # and raises AttributeError — confirm the intended variable name.
    ##assuming the variable as bill number .get var
    bill = bill_in.get()
    billo = bill_out.get()
    # NOTE(review): tinydb's class is spelled TinyDB, not TinyDb — this line
    # would raise AttributeError if it were ever reached.
    bills = tinydb.TinyDb("/bills.json")
    # NOTE(review): `bill or billo != None` parses as `bill or (billo != None)`;
    # the condition never changes inside the loop, so this inserts forever.
    while bill or billo != None:
        bills.insert({"billInput": bill, "billOutput": billo})
    win = Toplevel()
    win.title("bills")
    # NOTE(review): .pack() returns None, so winF is None, and `textvariable`
    # expects a Tk variable rather than a TinyDB handle — verify intent.
    winF = Frame(win, bg="black",relief=SUNKEN).pack()
    winE = Entry(winF, insertwidth=10,insertheight=10,fg="white",bg="black",textvariable=bills).pack()
    win.mainloop()
#l
# command=bill_in).pack(anchor=NE)
root.mainloop()
#__database()
#add1=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
#text="+",bg="powder blue", command=lambda: btnClick("+")).grid(row=3,column=6)
#btn10=Button(f2,padx=16,padx=16, fg="blue", font("arial",5,"bold"),
# text="rupen",bg="powder blue", command=rupen).grid(row=3,column=5)
#def function():
# pass():
# pass main():
# root.mainloop()
# TODO: wrap the Tkinter setup in a main() function and invoke it under a
# __main__ guard, as sketched in the commented-out lines above.
#main()
| 34.401408
| 155
| 0.600368
| 3,463
| 24,425
| 4.182501
| 0.148715
| 0.044118
| 0.031897
| 0.041425
| 0.384355
| 0.319387
| 0.25939
| 0.213339
| 0.184203
| 0.159279
| 0
| 0.036604
| 0.155537
| 24,425
| 709
| 156
| 34.449929
| 0.665616
| 0.154023
| 0
| 0.05303
| 0
| 0
| 0.148139
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049242
| false
| 0.022727
| 0.034091
| 0
| 0.090909
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8feca35fdbbdb7ba2119b9d7d1e1e21456081ac
| 18,656
|
py
|
Python
|
mmdet/models/anchor_heads/embedding_nnms_head_v2_limited.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/anchor_heads/embedding_nnms_head_v2_limited.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/anchor_heads/embedding_nnms_head_v2_limited.py
|
Lanselott/mmdetection
|
03ce0a87f4d52f4adf4f78fd39ad30b2da394376
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
from IPython import embed
INF = 1e8
@HEADS.register_module
class EmbeddingNNmsHeadV2limited(nn.Module):
    """FCOS-style anchor-free detection head with an extra embedding branch.

    Fully Convolutional One-Stage Object Detection head from [1]_.
    The FCOS head does not use anchor boxes. Instead bounding boxes are
    predicted at each pixel and a centerness measure is used to supress
    low-quality predictions.

    In addition to the FCOS classification and regression branches, this head
    predicts a 1-channel embedding per location and trains it with pull/push
    losses: embeddings of locations assigned to the same ground-truth box are
    pulled towards their mean, while the means of different objects are pushed
    at least ``delta`` apart.

    References:
        .. [1] https://arxiv.org/abs/1904.01355

    Example:
        >>> self = FCOSHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, centerness = self.forward(feats)
        >>> assert len(cls_score) == len(self.scales)
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 embedding_convs_num=2,
                 strides=(4, 8, 16, 32, 64),
                 delta=2.0,
                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
                                 (512, INF)),
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
        super(EmbeddingNNmsHeadV2limited, self).__init__()
        self.num_classes = num_classes
        # background is handled by sigmoid focal loss, hence num_classes - 1
        self.cls_out_channels = num_classes - 1
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        # NOTE(review): embedding_convs_num is stored but _init_layers builds
        # `stacked_convs` embedding convs — confirm which count is intended.
        self.embedding_convs_num = embedding_convs_num
        self.strides = strides
        # minimum margin enforced between embedding means of different objects
        self.delta = delta
        self.regress_ranges = regress_ranges
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False
        self._init_layers()

    def _init_layers(self):
        """Build the classification, regression and embedding conv towers."""
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        self.embedding_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
            self.embedding_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
        self.fcos_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        # single-channel embedding prediction per location
        self.embedding_cls = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
        # Pull loss: MSE between each embedding and its object's mean embedding
        self.pull_loss = nn.MSELoss()

    def init_weights(self):
        """Initialize all conv towers and prediction layers.

        Fix: the embedding tower was previously left at default init while the
        cls/reg towers were normal-initialized; it is now initialized the same
        way for consistency.
        """
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        for m in self.embedding_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
        normal_init(self.fcos_reg, std=0.01)
        normal_init(self.embedding_cls, std=0.01)

    def forward(self, feats):
        """Apply `forward_single` to each FPN level with its own scale."""
        return multi_apply(self.forward_single, feats, self.scales)

    def forward_single(self, x, scale):
        """Run one feature level through the three branches.

        Returns:
            tuple: (cls_score, bbox_pred, embedding_pred) for this level.
        """
        cls_feat = x
        reg_feat = x
        embedding_feat = x

        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)
        cls_score = self.fcos_cls(cls_feat)

        for embedding_layer in self.embedding_convs:
            embedding_feat = embedding_layer(embedding_feat)
        embedding_pred = self.embedding_cls(embedding_feat)

        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)
        # scale the bbox_pred of different level
        # float to avoid overflow when enabling FP16
        bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
        return cls_score, bbox_pred, embedding_pred

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             embedding_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        """Compute classification, bbox and pull/push embedding losses.

        Fix: removed a leftover IPython ``embed()`` debug breakpoint that
        halted training with an interactive shell, and dropped dead locals.

        Returns:
            dict: with keys ``loss_cls``, ``loss_bbox``, ``push_loss`` and
            ``pull_loss``.
        """
        assert len(cls_scores) == len(bbox_preds) == len(embedding_preds)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                           bbox_preds[0].device)
        labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
                                                gt_labels)
        num_imgs = cls_scores[0].size(0)
        # flatten per-level maps to (num_points_total, C)
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_embedding_preds = [
            embedding_feat.permute(0, 2, 3, 1).reshape(-1, 1)
            for embedding_feat in embedding_preds
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_embedding_preds = torch.cat(flatten_embedding_preds)
        flatten_labels = torch.cat(labels)
        flatten_bbox_targets = torch.cat(bbox_targets)
        # repeat points to align with bbox_preds
        flatten_points = torch.cat(
            [points.repeat(num_imgs, 1) for points in all_level_points])
        pos_inds = flatten_labels.nonzero().reshape(-1)
        num_pos = len(pos_inds)
        loss_cls = self.loss_cls(
            flatten_cls_scores, flatten_labels,
            avg_factor=num_pos + num_imgs)  # avoid num_pos is 0
        pos_bbox_preds = flatten_bbox_preds[pos_inds]

        if num_pos > 0:
            pos_bbox_targets = flatten_bbox_targets[pos_inds]
            pos_points = flatten_points[pos_inds]
            pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
            pos_decoded_target_preds = distance2bbox(pos_points,
                                                     pos_bbox_targets)
            pos_embedding_preds = flatten_embedding_preds[pos_inds]
            # Instance level op
            dist_conf_mask_list = []
            # generate instance levels index: positives sharing an identical
            # decoded target box belong to the same ground-truth object
            instance_counter = torch.zeros(num_pos, device=pos_points.device)
            remove = torch.zeros(num_pos, device=pos_points.device)
            obj_id = 0
            # NOTE: get mask for each obj
            for i in range(len(pos_decoded_target_preds)):
                if remove[i] == 0:
                    current_bbox = pos_decoded_target_preds[i]
                    mask = ((pos_decoded_target_preds == current_bbox).sum(1) == 4).nonzero()
                    instance_counter[mask] = obj_id
                    remove[mask] = 1
                    obj_id += 1

            instance_counter = instance_counter.int()
            obj_ids = torch.bincount(instance_counter).nonzero().int()
            for obj_id in obj_ids:
                dist_conf_mask_list.append((instance_counter == obj_id).float())

            # Opt for each obj: collect embeddings and their per-object means
            objs_embedding_list = []
            obj_embedding_means_list = []
            obj_embedding_means_expand_list = []
            for dist_conf_mask in dist_conf_mask_list:
                obj_mask_inds = dist_conf_mask.nonzero().reshape(-1)
                obj_embedding_preds = pos_embedding_preds[obj_mask_inds]
                objs_embedding_list.append(obj_embedding_preds)
                # mean value
                embedding_mean = obj_embedding_preds.sum() / obj_embedding_preds.shape[0]
                obj_embedding_means_list.append(embedding_mean)
                obj_embedding_means_expand_list.append(
                    torch.zeros_like(obj_embedding_preds).fill_(embedding_mean))

            # pull loss: draw each embedding towards its object's mean
            theta = 1
            embedding_expand_means = torch.cat(obj_embedding_means_expand_list)
            pull_embedding = torch.cat(objs_embedding_list)
            pull_loss = theta * self.pull_loss(pull_embedding, embedding_expand_means)
            # push loss: hinge penalty when two object means are closer than delta
            N_samples = len(dist_conf_mask_list)
            push_loss = 0
            for obj_j_embedding_mean in obj_embedding_means_list:
                for obj_k_embedding_mean in obj_embedding_means_list:
                    if torch.equal(obj_j_embedding_mean, obj_k_embedding_mean):
                        continue
                    else:
                        push_dist = self.delta - torch.abs(obj_k_embedding_mean - obj_j_embedding_mean)
                        push_loss += torch.max(push_dist, torch.zeros(1, device=push_dist.device))
            push_loss = push_loss / N_samples**2
            # iou loss
            loss_bbox = self.loss_bbox(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds)
        else:
            # no positives: zero-valued losses that keep the graph connected
            loss_bbox = pos_bbox_preds.sum()
            push_loss = pos_bbox_preds.sum()
            pull_loss = pos_bbox_preds.sum()

        return dict(
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            push_loss=push_loss,
            pull_loss=pull_loss)

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   img_metas,
                   cfg,
                   rescale=None):
        """Decode per-level predictions into detections for each image."""
        assert len(cls_scores) == len(bbox_preds)
        num_levels = len(cls_scores)

        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                      bbox_preds[0].device)
        result_list = []
        for img_id in range(len(img_metas)):
            cls_score_list = [
                cls_scores[i][img_id].detach() for i in range(num_levels)
            ]
            bbox_pred_list = [
                bbox_preds[i][img_id].detach() for i in range(num_levels)
            ]
            img_shape = img_metas[img_id]['img_shape']
            scale_factor = img_metas[img_id]['scale_factor']
            det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
                                                mlvl_points, img_shape,
                                                scale_factor, cfg, rescale)
            result_list.append(det_bboxes)
        return result_list

    def get_bboxes_single(self,
                          cls_scores,
                          bbox_preds,
                          mlvl_points,
                          img_shape,
                          scale_factor,
                          cfg,
                          rescale=False):
        """Decode one image: per-level top-k filtering, then multiclass NMS."""
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
        mlvl_bboxes = []
        mlvl_scores = []
        for cls_score, bbox_pred, points in zip(
                cls_scores, bbox_preds, mlvl_points):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            scores = cls_score.permute(1, 2, 0).reshape(
                -1, self.cls_out_channels).sigmoid()
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            nms_pre = cfg.get('nms_pre', -1)
            if nms_pre > 0 and scores.shape[0] > nms_pre:
                # keep only the nms_pre highest-scoring locations per level
                max_scores, _ = scores.max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                points = points[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
            bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        # prepend a background column expected by multiclass_nms
        padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
        mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
        det_bboxes, det_labels = multiclass_nms(
            mlvl_bboxes,
            mlvl_scores,
            cfg.score_thr,
            cfg.nms,
            cfg.max_per_img)
        return det_bboxes, det_labels

    def get_points(self, featmap_sizes, dtype, device):
        """Get points according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            dtype (torch.dtype): Type of points.
            device (torch.device): Device of points.

        Returns:
            tuple: points of each image.
        """
        mlvl_points = []
        for i in range(len(featmap_sizes)):
            mlvl_points.append(
                self.get_points_single(featmap_sizes[i], self.strides[i],
                                       dtype, device))
        return mlvl_points

    def get_points_single(self, featmap_size, stride, dtype, device):
        """Center coordinates of every cell of one level, in image space."""
        h, w = featmap_size
        x_range = torch.arange(
            0, w * stride, stride, dtype=dtype, device=device)
        y_range = torch.arange(
            0, h * stride, stride, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range)
        # offset by stride // 2 to land on cell centers
        points = torch.stack(
            (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
        return points

    def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
        """Compute per-level classification labels and bbox targets."""
        assert len(points) == len(self.regress_ranges)
        num_levels = len(points)
        # expand regress ranges to align with points
        expanded_regress_ranges = [
            points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
                points[i]) for i in range(num_levels)
        ]
        # concat all levels points and regress ranges
        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
        concat_points = torch.cat(points, dim=0)
        # get labels and bbox_targets of each image
        labels_list, bbox_targets_list = multi_apply(
            self.fcos_target_single,
            gt_bboxes_list,
            gt_labels_list,
            points=concat_points,
            regress_ranges=concat_regress_ranges)

        # split to per img, per level
        num_points = [center.size(0) for center in points]
        labels_list = [labels.split(num_points, 0) for labels in labels_list]
        bbox_targets_list = [
            bbox_targets.split(num_points, 0)
            for bbox_targets in bbox_targets_list
        ]

        # concat per level image
        concat_lvl_labels = []
        concat_lvl_bbox_targets = []
        for i in range(num_levels):
            concat_lvl_labels.append(
                torch.cat([labels[i] for labels in labels_list]))
            concat_lvl_bbox_targets.append(
                torch.cat(
                    [bbox_targets[i] for bbox_targets in bbox_targets_list]))
        return concat_lvl_labels, concat_lvl_bbox_targets

    def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
        """Assign each point of one image a label and (l, t, r, b) target."""
        num_points = points.size(0)
        num_gts = gt_labels.size(0)
        if num_gts == 0:
            # no ground truth: everything is background
            return gt_labels.new_zeros(num_points), \
                   gt_bboxes.new_zeros((num_points, 4))

        areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
            gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
        # TODO: figure out why these two are different
        # areas = areas[None].expand(num_points, num_gts)
        areas = areas[None].repeat(num_points, 1)
        regress_ranges = regress_ranges[:, None, :].expand(
            num_points, num_gts, 2)
        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
        xs, ys = points[:, 0], points[:, 1]
        xs = xs[:, None].expand(num_points, num_gts)
        ys = ys[:, None].expand(num_points, num_gts)

        left = xs - gt_bboxes[..., 0]
        right = gt_bboxes[..., 2] - xs
        top = ys - gt_bboxes[..., 1]
        bottom = gt_bboxes[..., 3] - ys
        bbox_targets = torch.stack((left, top, right, bottom), -1)

        # condition1: inside a gt bbox
        inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0

        # condition2: limit the regression range for each location
        max_regress_distance = bbox_targets.max(-1)[0]
        inside_regress_range = (
            max_regress_distance >= regress_ranges[..., 0]) & (
                max_regress_distance <= regress_ranges[..., 1])

        # if there are still more than one objects for a location,
        # we choose the one with minimal area
        areas[inside_gt_bbox_mask == 0] = INF
        areas[inside_regress_range == 0] = INF
        min_area, min_area_inds = areas.min(dim=1)

        labels = gt_labels[min_area_inds]
        labels[min_area == INF] = 0  # 0 means background
        bbox_targets = bbox_targets[range(num_points), min_area_inds]

        return labels, bbox_targets
| 40.468547
| 125
| 0.572684
| 2,290
| 18,656
| 4.352838
| 0.134498
| 0.026184
| 0.012841
| 0.007725
| 0.25632
| 0.191413
| 0.147873
| 0.10945
| 0.070526
| 0.063102
| 0
| 0.019415
| 0.340159
| 18,656
| 460
| 126
| 40.556522
| 0.790333
| 0.081904
| 0
| 0.132597
| 0
| 0
| 0.005063
| 0
| 0
| 0
| 0
| 0.002174
| 0.013812
| 1
| 0.033149
| false
| 0
| 0.022099
| 0.002762
| 0.085635
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8ffdfd391593d89205af0a89c79433669635ec2
| 471
|
py
|
Python
|
plotly_basic_plots/line_chart2.py
|
HarishOsthe/Plotly_Dash_Practice_Codes
|
ca709509d27803a4d727b3986d4473cdd71a41a6
|
[
"MIT"
] | null | null | null |
plotly_basic_plots/line_chart2.py
|
HarishOsthe/Plotly_Dash_Practice_Codes
|
ca709509d27803a4d727b3986d4473cdd71a41a6
|
[
"MIT"
] | null | null | null |
plotly_basic_plots/line_chart2.py
|
HarishOsthe/Plotly_Dash_Practice_Codes
|
ca709509d27803a4d727b3986d4473cdd71a41a6
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go

# Load the census state-population estimates dataset.
df = pd.read_csv("Data/nst-est2017-alldata.csv")

# Restrict to census DIVISION '1' (New England) and index rows by state name.
df2 = df[df["DIVISION"] == '1']
df2.set_index("NAME", inplace=True)

# Keep only the yearly population-estimate columns (names starting with POP).
list_of_pop_col = [col for col in df2.columns if col.startswith('POP')]
df2 = df2[list_of_pop_col]

# One line trace per state: x = estimate years, y = that state's populations.
data = []
for name in df2.index:
    data.append(go.Scatter(x=df2.columns,
                           y=df2.loc[name],
                           mode='lines',
                           name=name))

pyo.plot(data)
| 24.789474
| 69
| 0.66879
| 81
| 471
| 3.777778
| 0.530864
| 0.078431
| 0.058824
| 0.078431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034211
| 0.193206
| 471
| 19
| 70
| 24.789474
| 0.771053
| 0
| 0
| 0
| 0
| 0
| 0.103814
| 0.059322
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f8fff1c2d03cf1ef4aae436dd124c9505b06ab95
| 21,993
|
py
|
Python
|
tests/test_markup.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 4,973
|
2015-01-03T15:44:00.000Z
|
2022-03-31T03:11:51.000Z
|
tests/test_markup.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 7,850
|
2015-01-02T08:09:25.000Z
|
2022-03-31T18:57:40.000Z
|
tests/test_markup.py
|
samdoran/sphinx
|
4c91c038b220d07bbdfe0c1680af42fe897f342c
|
[
"BSD-2-Clause"
] | 2,179
|
2015-01-03T15:26:53.000Z
|
2022-03-31T12:22:44.000Z
|
"""
test_markup
~~~~~~~~~~~
Test various Sphinx-specific markup extensions.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
from docutils import frontend, nodes, utils
from docutils.parsers.rst import Parser as RstParser
from sphinx import addnodes
from sphinx.builders.html.transforms import KeyboardTransform
from sphinx.builders.latex import LaTeXBuilder
from sphinx.roles import XRefRole
from sphinx.testing.util import Struct, assert_node
from sphinx.transforms import SphinxSmartQuotes
from sphinx.util import docutils, texescape
from sphinx.util.docutils import sphinx_domains
from sphinx.writers.html import HTMLTranslator, HTMLWriter
from sphinx.writers.latex import LaTeXTranslator, LaTeXWriter
@pytest.fixture
def settings(app):
    """Docutils settings bound to the test app's build environment.

    Also enables Sphinx domain roles/directives for the duration of the test
    (via ``sphinx_domains``) and disables them again on teardown.
    """
    texescape.init()  # otherwise done by the latex builder
    optparser = frontend.OptionParser(
        components=(RstParser, HTMLWriter, LaTeXWriter))
    settings = optparser.get_default_values()
    settings.smart_quotes = True
    settings.env = app.builder.env
    settings.env.temp_data['docname'] = 'dummy'
    settings.contentsname = 'dummy'
    settings.rfc_base_url = 'http://tools.ietf.org/html/'
    domain_context = sphinx_domains(settings.env)
    domain_context.enable()  # make Sphinx roles/directives resolvable while parsing
    yield settings
    domain_context.disable()  # teardown: restore plain docutils behaviour
@pytest.fixture
def new_document(settings):
    """Factory fixture producing fresh docutils documents using *settings*."""
    def _make_document():
        doc = utils.new_document('test data', settings)
        doc['file'] = 'dummy'
        return doc

    return _make_document
@pytest.fixture
def inliner(new_document):
    """A minimal inliner stand-in exposing a document and its reporter."""
    doc = new_document()

    def _source_and_line(line=1):
        return ('dummy.rst', line)

    # give the reporter a deterministic source/line for role processing
    doc.reporter.get_source_and_line = _source_and_line
    return Struct(document=doc, reporter=doc.reporter)
@pytest.fixture
def parse(new_document):
    """Return a callable that parses reST source into a docutils document."""
    def parse_(rst):
        document = new_document()
        parser = RstParser()
        parser.parse(rst, document)
        # apply smart-quote substitution as a real Sphinx build would
        SphinxSmartQuotes(document, startnode=None).apply()
        for msg in document.traverse(nodes.system_message):
            if msg['level'] == 1:
                # drop INFO-level system messages so they don't pollute output
                msg.replace_self([])
        return document
    return parse_
# since we're not resolving the markup afterwards, these nodes may remain
class ForgivingTranslator:
    """Mixin that ignores unresolved cross-reference (``pending_xref``) nodes.

    The markup under test is not resolved before writing, so such nodes may
    survive into the writer; visiting them is deliberately a no-op.
    """
    def visit_pending_xref(self, node):
        pass

    def depart_pending_xref(self, node):
        pass
class ForgivingHTMLTranslator(HTMLTranslator, ForgivingTranslator):
    """HTML translator that tolerates unresolved ``pending_xref`` nodes."""
    pass
class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
    """LaTeX translator that tolerates unresolved ``pending_xref`` nodes."""
    pass
@pytest.fixture
def verify_re_html(app, parse):
    """Return a checker that renders *rst* to HTML and regex-matches it."""
    def verify(rst, html_expected):
        document = parse(rst)
        # normalize :kbd: markup the same way the HTML builder does
        KeyboardTransform(document).apply()
        html_translator = ForgivingHTMLTranslator(document, app.builder)
        document.walkabout(html_translator)
        html_translated = ''.join(html_translator.fragment).strip()
        assert re.match(html_expected, html_translated), 'from ' + rst
    return verify
@pytest.fixture
def verify_re_latex(app, parse):
    """Return a checker matching rendered LaTeX against a regex pattern."""
    def verify(rst, latex_expected):
        document = parse(rst)
        # the shared app fixture has no LaTeX builder; set one up here
        app.builder = LaTeXBuilder(app)
        app.builder.set_environment(app.env)
        app.builder.init()
        theme = app.builder.themes.get('manual')
        latex_translator = ForgivingLaTeXTranslator(document, app.builder, theme)
        latex_translator.first_document = -1  # don't write \begin{document}
        document.walkabout(latex_translator)
        latex_translated = ''.join(latex_translator.body).strip()
        assert re.match(latex_expected, latex_translated), 'from ' + repr(rst)
    return verify
@pytest.fixture
def verify_re(verify_re_html, verify_re_latex):
    """Check rst output against regex patterns; None skips that format."""
    def check(rst, html_expected, latex_expected):
        if html_expected:
            verify_re_html(rst, html_expected)
        if latex_expected:
            verify_re_latex(rst, latex_expected)
    return check
@pytest.fixture
def verify(verify_re_html, verify_re_latex):
    """Check rst output for exact matches; None skips that format."""
    def check(rst, html_expected, latex_expected):
        # anchor the escaped literal so the whole output must match
        if html_expected:
            verify_re_html(rst, re.escape(html_expected) + '$')
        if latex_expected:
            verify_re_latex(rst, re.escape(latex_expected) + '$')
    return check
@pytest.fixture
def get_verifier(verify, verify_re):
    """Map a verifier name ('verify' / 'verify_re') to its fixture."""
    verifiers = {'verify': verify, 'verify_re': verify_re}
    def get(name):
        return verifiers[name]
    return get
# Each case is (verifier name, rst snippet, expected HTML, expected LaTeX);
# a None expectation skips checking that output format.
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
    (
        # pep role
        'verify',
        ':pep:`8`',
        ('<p><span class="target" id="index-0"></span><a class="pep reference external" '
         'href="http://www.python.org/dev/peps/pep-0008"><strong>PEP 8</strong></a></p>'),
        ('\\sphinxAtStartPar\n'
         '\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
         '!PEP 8@\\spxentry{PEP 8}}\\sphinxhref{http://www.python.org/dev/peps/pep-0008}'
         '{\\sphinxstylestrong{PEP 8}}')
    ),
    (
        # pep role with anchor
        'verify',
        ':pep:`8#id1`',
        ('<p><span class="target" id="index-0"></span><a class="pep reference external" '
         'href="http://www.python.org/dev/peps/pep-0008#id1">'
         '<strong>PEP 8#id1</strong></a></p>'),
        ('\\sphinxAtStartPar\n'
         '\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
         '!PEP 8\\#id1@\\spxentry{PEP 8\\#id1}}\\sphinxhref'
         '{http://www.python.org/dev/peps/pep-0008\\#id1}'
         '{\\sphinxstylestrong{PEP 8\\#id1}}')
    ),
    (
        # rfc role
        'verify',
        ':rfc:`2324`',
        ('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
         'href="http://tools.ietf.org/html/rfc2324.html"><strong>RFC 2324</strong></a></p>'),
        ('\\sphinxAtStartPar\n'
         '\\index{RFC@\\spxentry{RFC}!RFC 2324@\\spxentry{RFC 2324}}'
         '\\sphinxhref{http://tools.ietf.org/html/rfc2324.html}'
         '{\\sphinxstylestrong{RFC 2324}}')
    ),
    (
        # rfc role with anchor
        'verify',
        ':rfc:`2324#id1`',
        ('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
         'href="http://tools.ietf.org/html/rfc2324.html#id1">'
         '<strong>RFC 2324#id1</strong></a></p>'),
        ('\\sphinxAtStartPar\n'
         '\\index{RFC@\\spxentry{RFC}!RFC 2324\\#id1@\\spxentry{RFC 2324\\#id1}}'
         '\\sphinxhref{http://tools.ietf.org/html/rfc2324.html\\#id1}'
         '{\\sphinxstylestrong{RFC 2324\\#id1}}')
    ),
    (
        # correct interpretation of code with whitespace
        'verify_re',
        '``code   sample``',
        ('<p><code class="(samp )?docutils literal notranslate"><span class="pre">'
         'code</span>   <span class="pre">sample</span></code></p>'),
        r'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{code   sample}}',
    ),
    (
        # interpolation of arrows in menuselection
        'verify',
        ':menuselection:`a --> b`',
        ('<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'),
        '\\sphinxAtStartPar\n\\sphinxmenuselection{a \\(\\rightarrow\\) b}',
    ),
    (
        # interpolation of ampersands in menuselection
        'verify',
        ':menuselection:`&Foo -&&- &Bar`',
        ('<p><span class="menuselection"><span class="accelerator">F</span>oo '
         '-&amp;- <span class="accelerator">B</span>ar</span></p>'),
        ('\\sphinxAtStartPar\n'
         r'\sphinxmenuselection{\sphinxaccelerator{F}oo \sphinxhyphen{}'
         r'\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
    ),
    (
        # interpolation of ampersands in guilabel
        'verify',
        ':guilabel:`&Foo -&&- &Bar`',
        ('<p><span class="guilabel"><span class="accelerator">F</span>oo '
         '-&amp;- <span class="accelerator">B</span>ar</span></p>'),
        ('\\sphinxAtStartPar\n'
         r'\sphinxguilabel{\sphinxaccelerator{F}oo \sphinxhyphen{}\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
    ),
    (
        # no ampersands in guilabel
        'verify',
        ':guilabel:`Foo`',
        '<p><span class="guilabel">Foo</span></p>',
        '\\sphinxAtStartPar\n\\sphinxguilabel{Foo}',
    ),
    (
        # kbd role
        'verify',
        ':kbd:`space`',
        '<p><kbd class="kbd docutils literal notranslate">space</kbd></p>',
        '\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{space}}',
    ),
    (
        # kbd role
        'verify',
        ':kbd:`Control+X`',
        ('<p><kbd class="kbd compound docutils literal notranslate">'
         '<kbd class="kbd docutils literal notranslate">Control</kbd>'
         '+'
         '<kbd class="kbd docutils literal notranslate">X</kbd>'
         '</kbd></p>'),
        '\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{Control+X}}',
    ),
    (
        # kbd role
        'verify',
        ':kbd:`Alt+^`',
        ('<p><kbd class="kbd compound docutils literal notranslate">'
         '<kbd class="kbd docutils literal notranslate">Alt</kbd>'
         '+'
         '<kbd class="kbd docutils literal notranslate">^</kbd>'
         '</kbd></p>'),
        ('\\sphinxAtStartPar\n'
         '\\sphinxkeyboard{\\sphinxupquote{Alt+\\textasciicircum{}}}'),
    ),
    (
        # kbd role
        'verify',
        ':kbd:`M-x  M-s`',
        ('<p><kbd class="kbd compound docutils literal notranslate">'
         '<kbd class="kbd docutils literal notranslate">M</kbd>'
         '-'
         '<kbd class="kbd docutils literal notranslate">x</kbd>'
         '  '
         '<kbd class="kbd docutils literal notranslate">M</kbd>'
         '-'
         '<kbd class="kbd docutils literal notranslate">s</kbd>'
         '</kbd></p>'),
        ('\\sphinxAtStartPar\n'
         '\\sphinxkeyboard{\\sphinxupquote{M\\sphinxhyphen{}x  M\\sphinxhyphen{}s}}'),
    ),
    (
        # kbd role
        'verify',
        ':kbd:`-`',
        '<p><kbd class="kbd docutils literal notranslate">-</kbd></p>',
        ('\\sphinxAtStartPar\n'
         '\\sphinxkeyboard{\\sphinxupquote{\\sphinxhyphen{}}}'),
    ),
    (
        # kbd role
        'verify',
        ':kbd:`Caps Lock`',
        '<p><kbd class="kbd docutils literal notranslate">Caps Lock</kbd></p>',
        ('\\sphinxAtStartPar\n'
         '\\sphinxkeyboard{\\sphinxupquote{Caps Lock}}'),
    ),
    (
        # non-interpolation of dashes in option role
        'verify_re',
        ':option:`--with-option`',
        ('<p><code( class="xref std std-option docutils literal notranslate")?>'
         '<span class="pre">--with-option</span></code></p>$'),
        (r'\\sphinxAtStartPar\n'
         r'\\sphinxcode{\\sphinxupquote{\\sphinxhyphen{}\\sphinxhyphen{}with\\sphinxhyphen{}option}}$'),
    ),
    (
        # verify smarty-pants quotes
        'verify',
        '"John"',
        '<p>“John”</p>',
        "\\sphinxAtStartPar\n“John”",
    ),
    (
        # ... but not in literal text
        'verify',
        '``"John"``',
        ('<p><code class="docutils literal notranslate"><span class="pre">'
         '&quot;John&quot;</span></code></p>'),
        '\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{"John"}}',
    ),
    (
        # verify classes for inline roles
        'verify',
        ':manpage:`mp(1)`',
        '<p><em class="manpage">mp(1)</em></p>',
        '\\sphinxAtStartPar\n\\sphinxstyleliteralemphasis{\\sphinxupquote{mp(1)}}',
    ),
    (
        # correct escaping in normal mode
        'verify',
        'Γ\\\\∞$',
        None,
        '\\sphinxAtStartPar\nΓ\\textbackslash{}\\(\\infty\\)\\$',
    ),
    (
        # in verbatim code fragments
        'verify',
        '::\n\n @Γ\\∞${}',
        None,
        ('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
         '@Γ\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
         '\\end{sphinxVerbatim}'),
    ),
    (
        # in URIs
        'verify_re',
        '`test <https://www.google.com/~me/>`_',
        None,
        r'\\sphinxAtStartPar\n\\sphinxhref{https://www.google.com/~me/}{test}.*',
    ),
    (
        # description list: simple
        'verify',
        'term\n description',
        '<dl class="docutils">\n<dt>term</dt><dd>description</dd>\n</dl>',
        None,
    ),
    (
        # description list: with classifiers
        'verify',
        'term : class1 : class2\n description',
        ('<dl class="docutils">\n<dt>term<span class="classifier">class1</span>'
         '<span class="classifier">class2</span></dt><dd>description</dd>\n</dl>'),
        None,
    ),
    (
        # glossary (description list): multiple terms
        'verify',
        '.. glossary::\n\n term1\n term2\n description',
        ('<dl class="glossary docutils">\n'
         '<dt id="term-term1">term1<a class="headerlink" href="#term-term1"'
         ' title="Permalink to this term">¶</a></dt>'
         '<dt id="term-term2">term2<a class="headerlink" href="#term-term2"'
         ' title="Permalink to this term">¶</a></dt>'
         '<dd>description</dd>\n</dl>'),
        None,
    ),
])
def test_inline(get_verifier, type, rst, html_expected, latex_expected):
    """Render *rst* with the named verifier and check both output formats."""
    verifier = get_verifier(type)
    verifier(rst, html_expected, latex_expected)
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
    (
        'verify',
        r'4 backslashes \\\\',
        r'<p>4 backslashes \\</p>',
        None,
    ),
])
@pytest.mark.skipif(docutils.__version_info__ < (0, 16),
                    reason='docutils-0.16 or above is required')
def test_inline_docutils16(get_verifier, type, rst, html_expected, latex_expected):
    """Backslash handling that changed in docutils-0.16."""
    verifier = get_verifier(type)
    verifier(rst, html_expected, latex_expected)
@pytest.mark.sphinx(confoverrides={'latex_engine': 'xelatex'})
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
    (
        # in verbatim code fragments: xelatex keeps unicode chars as-is
        'verify',
        '::\n\n @Γ\\∞${}',
        None,
        ('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
         '@Γ\\PYGZbs{}∞\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
         '\\end{sphinxVerbatim}'),
    ),
])
def test_inline_for_unicode_latex_engine(get_verifier, type, rst,
                                         html_expected, latex_expected):
    """Unicode-capable LaTeX engines need no escaping of non-ASCII chars."""
    verifier = get_verifier(type)
    verifier(rst, html_expected, latex_expected)
def test_samp_role(parse):
    """Brace handling in the :samp: role."""
    cases = [
        # braces mark an emphasised placeholder
        (':samp:`a{b}c`',
         [nodes.paragraph, nodes.literal, ("a",
                                           [nodes.emphasis, "b"],
                                           "c")]),
        # nested braces: only the innermost pair closes the placeholder
        (':samp:`a{{b}}c`',
         [nodes.paragraph, nodes.literal, ("a",
                                           [nodes.emphasis, "{b"],
                                           "}c")]),
        # half-opened brace is taken literally
        (':samp:`a{bc`',
         [nodes.paragraph, nodes.literal, "a{bc"]),
        # escaped braces yield literal braces, no emphasis
        (':samp:`a\\\\{b}c`',
         [nodes.paragraph, nodes.literal, "a{b}c"]),
        # no braces (whitespace is kept as-is)
        (':samp:`code   sample`',
         [nodes.paragraph, nodes.literal, "code   sample"]),
    ]
    for text, expected in cases:
        doctree = parse(text)
        assert_node(doctree[0], expected)
def test_download_role(parse):
    """Implicit and explicit titles in the :download: role."""
    cases = [
        # (rst text, rendered literal content, refexplicit flag)
        (':download:`sphinx.rst`', 'sphinx.rst', False),
        (':download:`reftitle <sphinx.rst>`', 'reftitle', True),
    ]
    for text, content, explicit in cases:
        doctree = parse(text)
        assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
                                 nodes.literal, content])
        assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
                    refexplicit=explicit, reftarget='sphinx.rst', refwarn=False)
        assert_node(doctree[0][0][0], classes=['xref', 'download'])
def test_XRefRole(inliner):
    """Exercise XRefRole target parsing and its constructor options."""
    role = XRefRole()
    # implicit: the text serves as both title and target
    doctrees, errors = role('ref', 'rawtext', 'text', 5, inliner, {}, [])
    assert len(doctrees) == 1
    assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
    assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
                refexplicit=False, refwarn=False)
    assert errors == []
    # explicit: "title <target>" splits into display title and target
    doctrees, errors = role('ref', 'rawtext', 'title <target>', 5, inliner, {}, [])
    assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title'])
    assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='target',
                refexplicit=True, refwarn=False)
    # bang: leading '!' suppresses the cross-reference entirely
    doctrees, errors = role('ref', 'rawtext', '!title <target>', 5, inliner, {}, [])
    assert_node(doctrees[0], [nodes.literal, 'title <target>'])
    # refdomain: "domain:type" role names carry the domain through
    doctrees, errors = role('test:doc', 'rawtext', 'text', 5, inliner, {}, [])
    assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
    assert_node(doctrees[0], refdoc='dummy', refdomain='test', reftype='doc', reftarget='text',
                refexplicit=False, refwarn=False)
    # fix_parens: trailing "()" is kept in the title but stripped from the target
    role = XRefRole(fix_parens=True)
    doctrees, errors = role('ref', 'rawtext', 'text()', 5, inliner, {}, [])
    assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()'])
    assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
                refexplicit=False, refwarn=False)
    # lowercase: the target is lowercased, the displayed title is not
    role = XRefRole(lowercase=True)
    doctrees, errors = role('ref', 'rawtext', 'TEXT', 5, inliner, {}, [])
    assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT'])
    assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
                refexplicit=False, refwarn=False)
@pytest.mark.sphinx('dummy', testroot='prolog')
def test_rst_prolog(app, status, warning):
    """rst_prolog/rst_epilog are injected into reST sources only."""
    app.builder.build_all()
    rst = app.env.get_doctree('restructuredtext')
    md = app.env.get_doctree('markdown')
    # rst_prolog: the prepended emphasis paragraph comes first
    assert_node(rst[0], nodes.paragraph)
    assert_node(rst[0][0], nodes.emphasis)
    assert_node(rst[0][0][0], nodes.Text)
    assert rst[0][0][0] == 'Hello world'
    # rst_epilog: the appended emphasis paragraph ends the last section
    assert_node(rst[-1], nodes.section)
    assert_node(rst[-1][-1], nodes.paragraph)
    assert_node(rst[-1][-1][0], nodes.emphasis)
    assert_node(rst[-1][-1][0][0], nodes.Text)
    assert rst[-1][-1][0][0] == 'Good-bye world'
    # rst_prolog & rst_epilog are not applied to non-reST (markdown) sources
    assert not md.rawsource.startswith('*Hello world*.')
    assert not md.rawsource.endswith('*Good-bye world*.\n')
@pytest.mark.sphinx('dummy', testroot='keep_warnings')
def test_keep_warnings_is_True(app, status, warning):
    """With keep_warnings enabled, system_message nodes stay in the doctree."""
    app.builder.build_all()
    section = app.env.get_doctree('index')[0]
    assert_node(section, nodes.section)
    # title plus the retained warning node
    assert len(section) == 2
    assert_node(section[1], nodes.system_message)
@pytest.mark.sphinx('dummy', testroot='keep_warnings',
                    confoverrides={'keep_warnings': False})
def test_keep_warnings_is_False(app, status, warning):
    """With keep_warnings disabled, system_message nodes are filtered out."""
    app.builder.build_all()
    section = app.env.get_doctree('index')[0]
    assert_node(section, nodes.section)
    # only the title remains; the warning node was removed
    assert len(section) == 1
@pytest.mark.sphinx('dummy', testroot='refonly_bullet_list')
def test_compact_refonly_bullet_list(app, status, warning):
    """Bullet lists containing only references are compacted."""
    app.builder.build_all()
    section = app.env.get_doctree('index')[0]
    assert_node(section, nodes.section)
    assert len(section) == 5
    # list A holds only references, so its items become compact paragraphs
    assert section[1].astext() == 'List A:'
    assert_node(section[2], nodes.bullet_list)
    assert_node(section[2][0][0], addnodes.compact_paragraph)
    assert section[2][0][0].astext() == 'genindex'
    # list B contains plain text, so regular paragraphs are kept
    assert section[3].astext() == 'List B:'
    assert_node(section[4], nodes.bullet_list)
    assert_node(section[4][0][0], nodes.paragraph)
    assert section[4][0][0].astext() == 'Hello'
@pytest.mark.sphinx('dummy', testroot='default_role')
def test_default_role1(app, status, warning):
    """The default-role directive only affects the document that uses it."""
    app.builder.build_all()
    # index declares `.. default-role:: pep`: bare `...` becomes a pep xref
    doctree = app.env.get_doctree('index')
    assert_node(doctree[0], nodes.section)
    paragraph = doctree[0][1]
    assert_node(paragraph, nodes.paragraph)
    assert_node(paragraph[0], addnodes.index)
    assert_node(paragraph[1], nodes.target)
    assert_node(paragraph[2], nodes.reference, classes=["pep"])
    # foo has no default-role: bare `...` stays a title_reference
    doctree = app.env.get_doctree('foo')
    assert_node(doctree[0], nodes.section)
    paragraph = doctree[0][1]
    assert_node(paragraph, nodes.paragraph)
    assert_node(paragraph[0], nodes.title_reference)
    assert_node(paragraph[1], nodes.Text)
@pytest.mark.sphinx('dummy', testroot='default_role',
                    confoverrides={'default_role': 'guilabel'})
def test_default_role2(app, status, warning):
    """The default-role directive takes precedence over configuration."""
    app.builder.build_all()
    # index declares `.. default-role:: pep`, overriding default_role
    doctree = app.env.get_doctree('index')
    assert_node(doctree[0], nodes.section)
    paragraph = doctree[0][1]
    assert_node(paragraph, nodes.paragraph)
    assert_node(paragraph[0], addnodes.index)
    assert_node(paragraph[1], nodes.target)
    assert_node(paragraph[2], nodes.reference, classes=["pep"])
    # foo has no directive, so the configured guilabel role applies
    doctree = app.env.get_doctree('foo')
    assert_node(doctree[0], nodes.section)
    paragraph = doctree[0][1]
    assert_node(paragraph, nodes.paragraph)
    assert_node(paragraph[0], nodes.inline, classes=["guilabel"])
    assert_node(paragraph[1], nodes.Text)
| 35.702922
| 111
| 0.60592
| 2,520
| 21,993
| 5.190476
| 0.152381
| 0.042813
| 0.048089
| 0.050917
| 0.544878
| 0.505657
| 0.457416
| 0.408333
| 0.376529
| 0.34159
| 0
| 0.016342
| 0.223708
| 21,993
| 615
| 112
| 35.760976
| 0.749429
| 0.060428
| 0
| 0.397119
| 0
| 0.030864
| 0.324364
| 0.12279
| 0
| 0
| 0
| 0
| 0.146091
| 1
| 0.061728
| false
| 0.00823
| 0.028807
| 0.002058
| 0.119342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|