hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fe22be8e9933d73e58f8f86a8508c7b07bfe49 | 7,375 | py | Python | glib/glib-2.46.2/glib/glib.py | imx6uldev/depedency_tools | 0748392a4e97ded2a770b6fbcab281dd3fda2db7 | [
"MIT"
] | null | null | null | glib/glib-2.46.2/glib/glib.py | imx6uldev/depedency_tools | 0748392a4e97ded2a770b6fbcab281dd3fda2db7 | [
"MIT"
] | 1 | 2020-10-13T07:38:31.000Z | 2020-10-13T07:38:31.000Z | migrate/glib/glib/glib.py | zhongliangkang/twemproxy41 | 8dc3664145b0fcdd32fa321720235a9db9b3cece | [
"Apache-2.0"
] | 1 | 2020-02-04T15:39:06.000Z | 2020-02-04T15:39:06.000Z | import gdb
import sys
if sys.version_info[0] >= 3:
long = int
# This is not quite right, as local vars may override symname
def read_global_var (symname):
return gdb.selected_frame().read_var(symname)
def g_quark_to_string (quark):
if quark == None:
return None
quark = long(quark)
if quark == 0:
return None
try:
val = read_global_var ("quarks")
max_q = long(read_global_var ("quark_seq_id"))
except:
try:
val = read_global_var ("g_quarks")
max_q = long(read_global_var ("g_quark_seq_id"))
except:
return None;
if quark < max_q:
return val[quark].string()
return None
# We override the node printers too, so that node->next is not expanded
class GListNodePrinter:
"Prints a GList node"
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x, prev=0x%x}" % (str(self.val["data"]), long(self.val["next"]), long(self.val["prev"]))
class GSListNodePrinter:
"Prints a GSList node"
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x}" % (str(self.val["data"]), long(self.val["next"]))
class GListPrinter:
"Prints a GList"
class _iterator:
def __init__(self, head, listtype):
self.link = head
self.listtype = listtype
self.count = 0
def __iter__(self):
return self
def next(self):
if self.link == 0:
raise StopIteration
data = self.link['data']
self.link = self.link['next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, data)
def __init__ (self, val, listtype):
self.val = val
self.listtype = listtype
def children(self):
return self._iterator(self.val, self.listtype)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "array"
class GHashPrinter:
"Prints a GHashTable"
class _iterator:
def __init__(self, ht, keys_are_strings):
self.ht = ht
if ht != 0:
self.keys = ht["keys"]
self.values = ht["values"]
self.hashes = ht["hashes"]
self.size = ht["size"]
self.pos = 0
self.keys_are_strings = keys_are_strings
self.value = None
def __iter__(self):
return self
def next(self):
if self.ht == 0:
raise StopIteration
if self.value != None:
v = self.value
self.value = None
return v
while long(self.pos) < long(self.size):
self.pos = self.pos + 1
if long (self.hashes[self.pos]) >= 2:
key = self.keys[self.pos]
val = self.values[self.pos]
if self.keys_are_strings:
key = key.cast (gdb.lookup_type("char").pointer())
# Queue value for next result
self.value = ('[%dv]'% (self.pos), val)
# Return key
return ('[%dk]'% (self.pos), key)
raise StopIteration
def __init__ (self, val):
self.val = val
self.keys_are_strings = False
try:
string_hash = read_global_var ("g_str_hash")
except:
string_hash = None
if self.val != 0 and string_hash != None and self.val["hash_func"] == string_hash:
self.keys_are_strings = True
def children(self):
return self._iterator(self.val, self.keys_are_strings)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "map"
def pretty_printer_lookup (val):
# None yet, want things like hash table and list
type = val.type.unqualified()
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
t = str(type)
if t == "GList":
return GListPrinter(val, "GList")
if t == "GSList":
return GListPrinter(val, "GSList")
if t == "GHashTable":
return GHashPrinter(val)
else:
t = str(type)
if t == "GList":
return GListNodePrinter(val)
if t == "GSList *":
return GListPrinter(val, "GSList")
return None
def register (obj):
if obj == None:
obj = gdb
obj.pretty_printers.append(pretty_printer_lookup)
class ForeachCommand (gdb.Command):
"""Foreach on list"""
def __init__ (self):
super (ForeachCommand, self).__init__ ("gforeach",
gdb.COMMAND_DATA,
gdb.COMPLETE_SYMBOL)
def valid_name (self, name):
if not name[0].isalpha():
return False
return True
def parse_args (self, arg):
i = arg.find(" ")
if i <= 0:
raise Exception ("No var specified")
var = arg[:i]
if not self.valid_name(var):
raise Exception ("Invalid variable name")
while i < len (arg) and arg[i].isspace():
i = i + 1
if arg[i:i+2] != "in":
raise Exception ("Invalid syntax, missing in")
i = i + 2
while i < len (arg) and arg[i].isspace():
i = i + 1
colon = arg.find (":", i)
if colon == -1:
raise Exception ("Invalid syntax, missing colon")
val = arg[i:colon]
colon = colon + 1
while colon < len (arg) and arg[colon].isspace():
colon = colon + 1
command = arg[colon:]
return (var, val, command)
def do_iter(self, arg, item, command):
item = item.cast (gdb.lookup_type("void").pointer())
item = long(item)
to_eval = "set $%s = (void *)0x%x\n"%(arg, item)
gdb.execute(to_eval)
gdb.execute(command)
def slist_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GSList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def list_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def pick_iterator (self, container):
t = container.type.unqualified()
if t.code == gdb.TYPE_CODE_PTR:
t = t.target().unqualified()
t = str(t)
if t == "GSList":
return self.slist_iterator
if t == "GList":
return self.list_iterator
raise Exception("Invalid container type %s"%(str(container.type)))
def invoke (self, arg, from_tty):
(var, container, command) = self.parse_args(arg)
container = gdb.parse_and_eval (container)
func = self.pick_iterator(container)
func(var, container, command)
ForeachCommand ()
| 28.585271 | 122 | 0.532068 | import gdb
import sys
if sys.version_info[0] >= 3:
long = int
def read_global_var (symname):
return gdb.selected_frame().read_var(symname)
def g_quark_to_string (quark):
if quark == None:
return None
quark = long(quark)
if quark == 0:
return None
try:
val = read_global_var ("quarks")
max_q = long(read_global_var ("quark_seq_id"))
except:
try:
val = read_global_var ("g_quarks")
max_q = long(read_global_var ("g_quark_seq_id"))
except:
return None;
if quark < max_q:
return val[quark].string()
return None
class GListNodePrinter:
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x, prev=0x%x}" % (str(self.val["data"]), long(self.val["next"]), long(self.val["prev"]))
class GSListNodePrinter:
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x}" % (str(self.val["data"]), long(self.val["next"]))
class GListPrinter:
class _iterator:
def __init__(self, head, listtype):
self.link = head
self.listtype = listtype
self.count = 0
def __iter__(self):
return self
def next(self):
if self.link == 0:
raise StopIteration
data = self.link['data']
self.link = self.link['next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, data)
def __init__ (self, val, listtype):
self.val = val
self.listtype = listtype
def children(self):
return self._iterator(self.val, self.listtype)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "array"
class GHashPrinter:
class _iterator:
def __init__(self, ht, keys_are_strings):
self.ht = ht
if ht != 0:
self.keys = ht["keys"]
self.values = ht["values"]
self.hashes = ht["hashes"]
self.size = ht["size"]
self.pos = 0
self.keys_are_strings = keys_are_strings
self.value = None
def __iter__(self):
return self
def next(self):
if self.ht == 0:
raise StopIteration
if self.value != None:
v = self.value
self.value = None
return v
while long(self.pos) < long(self.size):
self.pos = self.pos + 1
if long (self.hashes[self.pos]) >= 2:
key = self.keys[self.pos]
val = self.values[self.pos]
if self.keys_are_strings:
key = key.cast (gdb.lookup_type("char").pointer())
self.value = ('[%dv]'% (self.pos), val)
return ('[%dk]'% (self.pos), key)
raise StopIteration
def __init__ (self, val):
self.val = val
self.keys_are_strings = False
try:
string_hash = read_global_var ("g_str_hash")
except:
string_hash = None
if self.val != 0 and string_hash != None and self.val["hash_func"] == string_hash:
self.keys_are_strings = True
def children(self):
return self._iterator(self.val, self.keys_are_strings)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "map"
def pretty_printer_lookup (val):
type = val.type.unqualified()
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
t = str(type)
if t == "GList":
return GListPrinter(val, "GList")
if t == "GSList":
return GListPrinter(val, "GSList")
if t == "GHashTable":
return GHashPrinter(val)
else:
t = str(type)
if t == "GList":
return GListNodePrinter(val)
if t == "GSList *":
return GListPrinter(val, "GSList")
return None
def register (obj):
if obj == None:
obj = gdb
obj.pretty_printers.append(pretty_printer_lookup)
class ForeachCommand (gdb.Command):
def __init__ (self):
super (ForeachCommand, self).__init__ ("gforeach",
gdb.COMMAND_DATA,
gdb.COMPLETE_SYMBOL)
def valid_name (self, name):
if not name[0].isalpha():
return False
return True
def parse_args (self, arg):
i = arg.find(" ")
if i <= 0:
raise Exception ("No var specified")
var = arg[:i]
if not self.valid_name(var):
raise Exception ("Invalid variable name")
while i < len (arg) and arg[i].isspace():
i = i + 1
if arg[i:i+2] != "in":
raise Exception ("Invalid syntax, missing in")
i = i + 2
while i < len (arg) and arg[i].isspace():
i = i + 1
colon = arg.find (":", i)
if colon == -1:
raise Exception ("Invalid syntax, missing colon")
val = arg[i:colon]
colon = colon + 1
while colon < len (arg) and arg[colon].isspace():
colon = colon + 1
command = arg[colon:]
return (var, val, command)
def do_iter(self, arg, item, command):
item = item.cast (gdb.lookup_type("void").pointer())
item = long(item)
to_eval = "set $%s = (void *)0x%x\n"%(arg, item)
gdb.execute(to_eval)
gdb.execute(command)
def slist_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GSList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def list_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def pick_iterator (self, container):
t = container.type.unqualified()
if t.code == gdb.TYPE_CODE_PTR:
t = t.target().unqualified()
t = str(t)
if t == "GSList":
return self.slist_iterator
if t == "GList":
return self.list_iterator
raise Exception("Invalid container type %s"%(str(container.type)))
def invoke (self, arg, from_tty):
(var, container, command) = self.parse_args(arg)
container = gdb.parse_and_eval (container)
func = self.pick_iterator(container)
func(var, container, command)
ForeachCommand ()
| true | true |
f7fe22cb63026328e9f656be2ab16fc1b93bb460 | 188 | py | Python | openiPrototype/openiPrototype/APIS/Products_and_Services/Service/admin.py | OPENi-ict/ntua_demo | 104118fbe1f54db35386ca96286317ceb64cb658 | [
"Apache-2.0"
] | null | null | null | openiPrototype/openiPrototype/APIS/Products_and_Services/Service/admin.py | OPENi-ict/ntua_demo | 104118fbe1f54db35386ca96286317ceb64cb658 | [
"Apache-2.0"
] | null | null | null | openiPrototype/openiPrototype/APIS/Products_and_Services/Service/admin.py | OPENi-ict/ntua_demo | 104118fbe1f54db35386ca96286317ceb64cb658 | [
"Apache-2.0"
] | null | null | null | __author__ = 'mpetyx'
from django.contrib import admin
from .models import OpeniService
class ServiceAdmin(admin.ModelAdmin):
pass
admin.site.register(OpeniService, ServiceAdmin)
| 15.666667 | 47 | 0.792553 | __author__ = 'mpetyx'
from django.contrib import admin
from .models import OpeniService
class ServiceAdmin(admin.ModelAdmin):
pass
admin.site.register(OpeniService, ServiceAdmin)
| true | true |
f7fe22dedc1d45f27e90b38334c2e9aa6fce5232 | 192 | py | Python | packages/api-server/api_server/models/tortoise_models/building_map.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | [
"Apache-2.0"
] | 23 | 2021-04-13T23:01:12.000Z | 2022-03-21T02:15:24.000Z | packages/api-server/api_server/models/tortoise_models/building_map.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | [
"Apache-2.0"
] | 326 | 2021-03-10T17:32:17.000Z | 2022-03-30T04:42:14.000Z | packages/api-server/api_server/models/tortoise_models/building_map.py | Sald-for-Communication-and-IT/rmf-web | ec5996ab0b06440d7147170f3030b14c73d26116 | [
"Apache-2.0"
] | 13 | 2021-04-10T10:33:36.000Z | 2022-02-22T15:39:58.000Z | from tortoise.fields.data import CharField, JSONField
from tortoise.models import Model
class BuildingMap(Model):
id_ = CharField(255, pk=True, source_field="id")
data = JSONField()
| 24 | 53 | 0.75 | from tortoise.fields.data import CharField, JSONField
from tortoise.models import Model
class BuildingMap(Model):
id_ = CharField(255, pk=True, source_field="id")
data = JSONField()
| true | true |
f7fe242228bbaac9f15fc53c10452ad9b7032dc8 | 15,615 | py | Python | flask_admin/form/upload.py | gstf/flask-admin | 5f1b2d8813c7aa048862a820d17a6efbeba8c42f | [
"BSD-3-Clause"
] | null | null | null | flask_admin/form/upload.py | gstf/flask-admin | 5f1b2d8813c7aa048862a820d17a6efbeba8c42f | [
"BSD-3-Clause"
] | null | null | null | flask_admin/form/upload.py | gstf/flask-admin | 5f1b2d8813c7aa048862a820d17a6efbeba8c42f | [
"BSD-3-Clause"
] | null | null | null | import os
import os.path as op
from werkzeug import secure_filename
from werkzeug.datastructures import FileStorage
from wtforms import ValidationError, fields
from wtforms.widgets import HTMLString, html_params
try:
from wtforms.fields.core import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from flask_admin.babel import gettext
from flask_admin.helpers import get_url
from flask_admin._compat import string_types, urljoin
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
__all__ = ['FileUploadInput', 'FileUploadField',
'ImageUploadInput', 'ImageUploadField',
'namegen_filename', 'thumbgen_filename']
# Widgets
class FileUploadInput(object):
"""
Renders a file input chooser field.
You can customize `empty_template` and `data_template` members to customize
look and feel.
"""
empty_template = ('<input %(file)s>')
data_template = ('<div>'
' <input %(text)s>'
' <input type="checkbox" name="%(marker)s">Delete</input>'
'</div>'
'<input %(file)s>')
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
kwargs.setdefault('name', field.name)
template = self.data_template if field.data else self.empty_template
return HTMLString(template % {
'text': html_params(type='text',
readonly='readonly',
value=field.data),
'file': html_params(type='file',
**kwargs),
'marker': '_%s-delete' % field.name
})
class ImageUploadInput(object):
"""
Renders a image input chooser field.
You can customize `empty_template` and `data_template` members to customize
look and feel.
"""
empty_template = ('<input %(file)s>')
data_template = ('<div class="image-thumbnail">'
' <img %(image)s>'
' <input type="checkbox" name="%(marker)s">Delete</input>'
'</div>'
'<input %(file)s>')
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
kwargs.setdefault('name', field.name)
args = {
'file': html_params(type='file',
**kwargs),
'marker': '_%s-delete' % field.name
}
if field.data and isinstance(field.data, string_types):
url = self.get_url(field)
args['image'] = html_params(src=url)
template = self.data_template
else:
template = self.empty_template
return HTMLString(template % args)
def get_url(self, field):
if field.thumbnail_size:
filename = field.thumbnail_fn(field.data)
else:
filename = field.data
if field.url_relative_path:
filename = urljoin(field.url_relative_path, filename)
return get_url(field.endpoint, filename=filename)
# Fields
class FileUploadField(fields.StringField):
"""
Customizable file-upload field.
Saves file to configured path, handles updates and deletions. Inherits from `StringField`,
resulting filename will be stored as string.
"""
widget = FileUploadInput()
def __init__(self, label=None, validators=None,
base_path=None, relative_path=None,
namegen=None, allowed_extensions=None,
permission=0o666,
**kwargs):
"""
Constructor.
:param label:
Display label
:param validators:
Validators
:param base_path:
Absolute path to the directory which will store files
:param relative_path:
Relative path from the directory. Will be prepended to the file name for uploaded files.
Flask-Admin uses `urlparse.urljoin` to generate resulting filename, so make sure you have
trailing slash.
:param namegen:
Function that will generate filename from the model and uploaded file object.
Please note, that model is "dirty" model object, before it was committed to database.
For example::
import os.path as op
def prefix_name(obj, file_data):
parts = op.splitext(file_data.filename)
return secure_filename('file-%s%s' % parts)
class MyForm(BaseForm):
upload = FileUploadField('File', namegen=prefix_name)
:param allowed_extensions:
List of allowed extensions. If not provided, will allow any file.
"""
self.base_path = base_path
self.relative_path = relative_path
self.namegen = namegen or namegen_filename
self.allowed_extensions = allowed_extensions
self.permission = permission
self._should_delete = False
super(FileUploadField, self).__init__(label, validators, **kwargs)
def is_file_allowed(self, filename):
"""
Check if file extension is allowed.
:param filename:
File name to check
"""
if not self.allowed_extensions:
return True
return ('.' in filename and
filename.rsplit('.', 1)[1].lower() in
map(lambda x: x.lower(), self.allowed_extensions))
def _is_uploaded_file(self, data):
return (data
and isinstance(data, FileStorage)
and data.filename)
def pre_validate(self, form):
if self._is_uploaded_file(self.data) and not self.is_file_allowed(self.data.filename):
raise ValidationError(gettext('Invalid file extension'))
def process(self, formdata, data=unset_value):
if formdata:
marker = '_%s-delete' % self.name
if marker in formdata:
self._should_delete = True
return super(FileUploadField, self).process(formdata, data)
def process_formdata(self, valuelist):
if self._should_delete:
self.data = None
elif valuelist:
data = valuelist[0]
if self._is_uploaded_file(data):
self.data = data
def populate_obj(self, obj, name):
field = getattr(obj, name, None)
if field:
# If field should be deleted, clean it up
if self._should_delete:
self._delete_file(field)
setattr(obj, name, None)
return
if self._is_uploaded_file(self.data):
if field:
self._delete_file(field)
filename = self.generate_name(obj, self.data)
filename = self._save_file(self.data, filename)
# update filename of FileStorage to our validated name
self.data.filename = filename
setattr(obj, name, filename)
def generate_name(self, obj, file_data):
filename = self.namegen(obj, file_data)
if not self.relative_path:
return filename
return urljoin(self.relative_path, filename)
def _get_path(self, filename):
if not self.base_path:
raise ValueError('FileUploadField field requires base_path to be set.')
if callable(self.base_path):
return op.join(self.base_path(), filename)
return op.join(self.base_path, filename)
def _delete_file(self, filename):
path = self._get_path(filename)
if op.exists(path):
os.remove(path)
def _save_file(self, data, filename):
path = self._get_path(filename)
if not op.exists(op.dirname(path)):
os.makedirs(os.path.dirname(path), self.permission | 0o111)
data.save(path)
return filename
class ImageUploadField(FileUploadField):
"""
Image upload field.
Does image validation, thumbnail generation, updating and deleting images.
Requires PIL (or Pillow) to be installed.
"""
widget = ImageUploadInput()
keep_image_formats = ('PNG',)
"""
If field detects that uploaded image is not in this list, it will save image
as PNG.
"""
def __init__(self, label=None, validators=None,
base_path=None, relative_path=None,
namegen=None, allowed_extensions=None,
max_size=None,
thumbgen=None, thumbnail_size=None,
permission=0o666,
url_relative_path=None, endpoint='static',
**kwargs):
"""
Constructor.
:param label:
Display label
:param validators:
Validators
:param base_path:
Absolute path to the directory which will store files
:param relative_path:
Relative path from the directory. Will be prepended to the file name for uploaded files.
Flask-Admin uses `urlparse.urljoin` to generate resulting filename, so make sure you have
trailing slash.
:param namegen:
Function that will generate filename from the model and uploaded file object.
Please note, that model is "dirty" model object, before it was committed to database.
For example::
import os.path as op
def prefix_name(obj, file_data):
parts = op.splitext(file_data.filename)
return secure_filename('file-%s%s' % parts)
class MyForm(BaseForm):
upload = FileUploadField('File', namegen=prefix_name)
:param allowed_extensions:
List of allowed extensions. If not provided, then gif, jpg, jpeg, png and tiff will be allowed.
:param max_size:
Tuple of (width, height, force) or None. If provided, Flask-Admin will
resize image to the desired size.
:param thumbgen:
Thumbnail filename generation function. All thumbnails will be saved as JPEG files,
so there's no need to keep original file extension.
For example::
import os.path as op
def thumb_name(filename):
name, _ = op.splitext(filename)
return secure_filename('%s-thumb.jpg' % name)
class MyForm(BaseForm):
upload = ImageUploadField('File', thumbgen=prefix_name)
:param thumbnail_size:
Tuple or (width, height, force) values. If not provided, thumbnail won't be created.
Width and height is in pixels. If `force` is set to `True`, will try to fit image into dimensions and
keep aspect ratio, otherwise will just resize to target size.
:param url_relative_path:
Relative path from the root of the static directory URL. Only gets used when generating
preview image URLs.
For example, your model might store just file names (`relative_path` set to `None`), but
`base_path` is pointing to subdirectory.
:param endpoint:
Static endpoint for images. Used by widget to display previews. Defaults to 'static'.
"""
# Check if PIL is installed
if Image is None:
raise ImportError('PIL library was not found')
self.max_size = max_size
self.thumbnail_fn = thumbgen or thumbgen_filename
self.thumbnail_size = thumbnail_size
self.endpoint = endpoint
self.image = None
self.url_relative_path = url_relative_path
if not allowed_extensions:
allowed_extensions = ('gif', 'jpg', 'jpeg', 'png', 'tiff')
super(ImageUploadField, self).__init__(label, validators,
base_path=base_path,
relative_path=relative_path,
namegen=namegen,
allowed_extensions=allowed_extensions,
permission=permission,
**kwargs)
def pre_validate(self, form):
super(ImageUploadField, self).pre_validate(form)
if self._is_uploaded_file(self.data):
try:
self.image = Image.open(self.data)
except Exception as e:
raise ValidationError('Invalid image: %s' % e)
# Deletion
def _delete_file(self, filename):
super(ImageUploadField, self)._delete_file(filename)
self._delete_thumbnail(filename)
def _delete_thumbnail(self, filename):
path = self._get_path(self.thumbnail_fn(filename))
if op.exists(path):
os.remove(path)
# Saving
def _save_file(self, data, filename):
path = self._get_path(filename)
if not op.exists(op.dirname(path)):
os.makedirs(os.path.dirname(path), self.permission)
# Figure out format
filename, format = self._get_save_format(filename, self.image)
if self.image and (self.image.format != format or self.max_size):
if self.max_size:
image = self._resize(self.image, self.max_size)
else:
image = self.image
self._save_image(image, self._get_path(filename), format)
else:
data.seek(0)
data.save(self._get_path(filename))
self._save_thumbnail(data, filename, format)
return filename
def _save_thumbnail(self, data, filename, format):
if self.image and self.thumbnail_size:
path = self._get_path(self.thumbnail_fn(filename))
self._save_image(self._resize(self.image, self.thumbnail_size),
path,
format)
def _resize(self, image, size):
(width, height, force) = size
if image.size[0] > width or image.size[1] > height:
if force:
return ImageOps.fit(self.image, (width, height), Image.ANTIALIAS)
else:
thumb = self.image.copy()
thumb.thumbnail((width, height), Image.ANTIALIAS)
return thumb
return image
def _save_image(self, image, path, format='JPEG'):
if image.mode not in ('RGB', 'RGBA'):
image = image.convert('RGBA')
with open(path, 'wb') as fp:
image.save(fp, format)
def _get_save_format(self, filename, image):
if image.format not in self.keep_image_formats:
name, ext = op.splitext(filename)
filename = '%s.jpg' % name
return filename, 'JPEG'
return filename, image.format
# Helpers
def namegen_filename(obj, file_data):
"""
Generate secure filename for uploaded file.
"""
return secure_filename(file_data.filename)
def thumbgen_filename(filename):
"""
Generate thumbnail name from filename.
"""
name, ext = op.splitext(filename)
return '%s_thumb%s' % (name, ext)
| 33.580645 | 117 | 0.57464 | import os
import os.path as op
from werkzeug import secure_filename
from werkzeug.datastructures import FileStorage
from wtforms import ValidationError, fields
from wtforms.widgets import HTMLString, html_params
try:
from wtforms.fields.core import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from flask_admin.babel import gettext
from flask_admin.helpers import get_url
from flask_admin._compat import string_types, urljoin
try:
from PIL import Image, ImageOps
except ImportError:
Image = None
ImageOps = None
__all__ = ['FileUploadInput', 'FileUploadField',
'ImageUploadInput', 'ImageUploadField',
'namegen_filename', 'thumbgen_filename']
class FileUploadInput(object):
empty_template = ('<input %(file)s>')
data_template = ('<div>'
' <input %(text)s>'
' <input type="checkbox" name="%(marker)s">Delete</input>'
'</div>'
'<input %(file)s>')
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
kwargs.setdefault('name', field.name)
template = self.data_template if field.data else self.empty_template
return HTMLString(template % {
'text': html_params(type='text',
readonly='readonly',
value=field.data),
'file': html_params(type='file',
**kwargs),
'marker': '_%s-delete' % field.name
})
class ImageUploadInput(object):
empty_template = ('<input %(file)s>')
data_template = ('<div class="image-thumbnail">'
' <img %(image)s>'
' <input type="checkbox" name="%(marker)s">Delete</input>'
'</div>'
'<input %(file)s>')
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
kwargs.setdefault('name', field.name)
args = {
'file': html_params(type='file',
**kwargs),
'marker': '_%s-delete' % field.name
}
if field.data and isinstance(field.data, string_types):
url = self.get_url(field)
args['image'] = html_params(src=url)
template = self.data_template
else:
template = self.empty_template
return HTMLString(template % args)
def get_url(self, field):
if field.thumbnail_size:
filename = field.thumbnail_fn(field.data)
else:
filename = field.data
if field.url_relative_path:
filename = urljoin(field.url_relative_path, filename)
return get_url(field.endpoint, filename=filename)
class FileUploadField(fields.StringField):
widget = FileUploadInput()
def __init__(self, label=None, validators=None,
base_path=None, relative_path=None,
namegen=None, allowed_extensions=None,
permission=0o666,
**kwargs):
self.base_path = base_path
self.relative_path = relative_path
self.namegen = namegen or namegen_filename
self.allowed_extensions = allowed_extensions
self.permission = permission
self._should_delete = False
super(FileUploadField, self).__init__(label, validators, **kwargs)
def is_file_allowed(self, filename):
if not self.allowed_extensions:
return True
return ('.' in filename and
filename.rsplit('.', 1)[1].lower() in
map(lambda x: x.lower(), self.allowed_extensions))
def _is_uploaded_file(self, data):
return (data
and isinstance(data, FileStorage)
and data.filename)
def pre_validate(self, form):
if self._is_uploaded_file(self.data) and not self.is_file_allowed(self.data.filename):
raise ValidationError(gettext('Invalid file extension'))
def process(self, formdata, data=unset_value):
if formdata:
marker = '_%s-delete' % self.name
if marker in formdata:
self._should_delete = True
return super(FileUploadField, self).process(formdata, data)
def process_formdata(self, valuelist):
if self._should_delete:
self.data = None
elif valuelist:
data = valuelist[0]
if self._is_uploaded_file(data):
self.data = data
def populate_obj(self, obj, name):
field = getattr(obj, name, None)
if field:
if self._should_delete:
self._delete_file(field)
setattr(obj, name, None)
return
if self._is_uploaded_file(self.data):
if field:
self._delete_file(field)
filename = self.generate_name(obj, self.data)
filename = self._save_file(self.data, filename)
self.data.filename = filename
setattr(obj, name, filename)
def generate_name(self, obj, file_data):
filename = self.namegen(obj, file_data)
if not self.relative_path:
return filename
return urljoin(self.relative_path, filename)
def _get_path(self, filename):
if not self.base_path:
raise ValueError('FileUploadField field requires base_path to be set.')
if callable(self.base_path):
return op.join(self.base_path(), filename)
return op.join(self.base_path, filename)
def _delete_file(self, filename):
path = self._get_path(filename)
if op.exists(path):
os.remove(path)
def _save_file(self, data, filename):
path = self._get_path(filename)
if not op.exists(op.dirname(path)):
os.makedirs(os.path.dirname(path), self.permission | 0o111)
data.save(path)
return filename
class ImageUploadField(FileUploadField):
    """Image upload field.

    Extends :class:`FileUploadField` by validating that the upload is a
    readable image (via PIL/Pillow), optionally constraining its size, and
    optionally generating a thumbnail next to the stored image.
    """
    widget = ImageUploadInput()

    # Formats stored as-is; anything else is re-encoded as JPEG on save.
    keep_image_formats = ('PNG',)

    def __init__(self, label=None, validators=None,
                 base_path=None, relative_path=None,
                 namegen=None, allowed_extensions=None,
                 max_size=None,
                 thumbgen=None, thumbnail_size=None,
                 permission=0o666,
                 url_relative_path=None, endpoint='static',
                 **kwargs):
        """Constructor.

        :param max_size: optional ``(width, height, force)`` tuple; images
            larger than this are resized on save.
        :param thumbgen: thumbnail filename generator; defaults to
            :func:`thumbgen_filename`.
        :param thumbnail_size: optional ``(width, height, force)`` tuple;
            when set, a thumbnail is generated alongside the image.
        :param url_relative_path: relative path prepended when building the
            image URL for the widget.
        :param endpoint: Flask endpoint used to serve the stored files.

        Remaining parameters are forwarded to :class:`FileUploadField`.
        """
        if Image is None:
            raise ImportError('PIL library was not found')

        self.max_size = max_size
        self.thumbnail_fn = thumbgen or thumbgen_filename
        self.thumbnail_size = thumbnail_size
        self.endpoint = endpoint
        self.image = None
        self.url_relative_path = url_relative_path

        if not allowed_extensions:
            allowed_extensions = ('gif', 'jpg', 'jpeg', 'png', 'tiff')

        super(ImageUploadField, self).__init__(label, validators,
                                               base_path=base_path,
                                               relative_path=relative_path,
                                               namegen=namegen,
                                               allowed_extensions=allowed_extensions,
                                               permission=permission,
                                               **kwargs)

    def pre_validate(self, form):
        """Run base validation, then verify the upload parses as an image."""
        super(ImageUploadField, self).pre_validate(form)

        if self._is_uploaded_file(self.data):
            try:
                self.image = Image.open(self.data)
            except Exception as e:
                raise ValidationError('Invalid image: %s' % e)

    def _delete_file(self, filename):
        """Delete both the stored image and its thumbnail."""
        super(ImageUploadField, self)._delete_file(filename)
        self._delete_thumbnail(filename)

    def _delete_thumbnail(self, filename):
        """Delete the thumbnail for *filename*, if one exists."""
        path = self._get_path(self.thumbnail_fn(filename))

        if op.exists(path):
            os.remove(path)

    def _save_file(self, data, filename):
        """Save the image (re-encoding/resizing if needed) plus thumbnail."""
        path = self._get_path(filename)

        if not op.exists(op.dirname(path)):
            os.makedirs(os.path.dirname(path), self.permission)

        # Determine the target filename and encoder format.
        filename, format = self._get_save_format(filename, self.image)

        if self.image and (self.image.format != format or self.max_size):
            # Re-encode (and optionally resize) through PIL.
            if self.max_size:
                image = self._resize(self.image, self.max_size)
            else:
                image = self.image

            self._save_image(image, self._get_path(filename), format)
        else:
            # Format already acceptable and no resizing requested: stream
            # the raw upload to disk untouched.
            data.seek(0)
            data.save(self._get_path(filename))

        self._save_thumbnail(data, filename, format)

        return filename

    def _save_thumbnail(self, data, filename, format):
        """Generate and save the thumbnail when thumbnail_size is set."""
        if self.image and self.thumbnail_size:
            path = self._get_path(self.thumbnail_fn(filename))

            self._save_image(self._resize(self.image, self.thumbnail_size),
                             path,
                             format)

    def _resize(self, image, size):
        """Return *image* resized to fit within ``size = (w, h, force)``.

        BUG FIX: the original operated on ``self.image`` and ignored the
        *image* argument; now the passed image is used, so the method
        honors its signature (all existing call sites pass ``self.image``,
        so observable behavior is unchanged).
        """
        (width, height, force) = size

        if image.size[0] > width or image.size[1] > height:
            if force:
                # Crop-and-scale to fill the exact target box.
                return ImageOps.fit(image, (width, height), Image.ANTIALIAS)
            else:
                # Scale down preserving aspect ratio; work on a copy so the
                # caller's image is not mutated in place.
                thumb = image.copy()
                thumb.thumbnail((width, height), Image.ANTIALIAS)
                return thumb

        return image

    def _save_image(self, image, path, format='JPEG'):
        """Encode *image* to *path* with the given PIL *format*."""
        # NOTE(review): converting to RGBA and then saving as JPEG fails on
        # recent Pillow versions (JPEG has no alpha) — confirm intended
        # Pillow version before changing the conversion target.
        if image.mode not in ('RGB', 'RGBA'):
            image = image.convert('RGBA')

        with open(path, 'wb') as fp:
            image.save(fp, format)

    def _get_save_format(self, filename, image):
        """Pick the output filename/format; non-kept formats become JPEG."""
        if image.format not in self.keep_image_formats:
            name, ext = op.splitext(filename)
            filename = '%s.jpg' % name
            return filename, 'JPEG'

        return filename, image.format
def namegen_filename(obj, file_data):
    """Default filename generator: the sanitized original upload name.

    :param obj: model instance being populated (unused; part of the
        namegen contract).
    :param file_data: ``werkzeug`` ``FileStorage`` upload.
    """
    return secure_filename(file_data.filename)
def thumbgen_filename(filename):
    """Default thumbnail name generator: ``name.ext`` -> ``name_thumb.ext``."""
    base, extension = op.splitext(filename)
    return '{0}_thumb{1}'.format(base, extension)
| true | true |
f7fe243f1a8d1d2561bc5a138675a4aed8ce94e7 | 3,239 | py | Python | examples/stats.py | mojodojo101/TryHarder-InfoSecPrep | 3fd4f96590704ba086335ab847173751ad56f580 | [
"MIT"
] | 5 | 2020-10-28T04:05:10.000Z | 2021-11-30T09:42:16.000Z | examples/stats.py | mojodojo101/TryHarder-InfoSecPrep | 3fd4f96590704ba086335ab847173751ad56f580 | [
"MIT"
] | 1 | 2020-10-28T03:45:52.000Z | 2020-10-28T03:45:52.000Z | examples/stats.py | mojodojo101/TryHarder-InfoSecPrep | 3fd4f96590704ba086335ab847173751ad56f580 | [
"MIT"
] | 5 | 2020-04-22T08:02:39.000Z | 2021-06-30T06:30:31.000Z | import discord
from discord.ext import commands
from builtins import bot
@commands.command()
async def stats(ctx):
    """Post membership statistics for the server to #bot-commands.

    Shows the total member count plus, for each OffSec certification, the
    number of members holding the Student and Certified roles.  The command
    is ignored unless it was issued from the #bot-commands channel.
    """
    server = ctx.guild
    channel = discord.utils.get(server.channels, name="bot-commands")
    if ctx.channel != channel:
        return

    embed = discord.Embed(
        title="Student Statistics",
        description="Displays statistics about total members in the server, how many are in each student role, and how many are certified.",
        color=0x00ff00)
    embed.add_field(name="Total Members", value=str(server.member_count),
                    inline=False)

    # One row per certification: student count, certified count, and a
    # zero-width-space spacer field so each pair renders on its own line.
    # Replaces six copy-pasted blocks of identical field-building code.
    for cert in ("OSCP", "OSEP", "OSWP", "OSWE", "OSCE", "OSEE"):
        student = discord.utils.get(server.roles, name="%s Student" % cert)
        certified = discord.utils.get(server.roles, name="%s Certified" % cert)
        embed.add_field(name=student, value=len(student.members), inline=True)
        embed.add_field(name=certified, value=len(certified.members),
                        inline=True)
        embed.add_field(name="\u200b", value="\u200b", inline=True)

    # WAPT currently has no "Certified" role, so it is listed on its own.
    wapt_student = discord.utils.get(server.roles, name="WAPT Student")
    embed.add_field(name=wapt_student, value=len(wapt_student.members),
                    inline=True)

    await channel.send(embed=embed)
def setup(bot):
    # Extension entry point: register the command when this module loads.
    bot.add_command(stats)
def teardown(bot):
    # Extension exit point: unregister the command when this module unloads.
    bot.remove_command(stats)
from discord.ext import commands
from builtins import bot
@commands.command()
async def stats(ctx):
server = ctx.guild
channel = discord.utils.get(server.channels, name="bot-commands")
if ctx.channel == channel:
totalMembers = str(server.member_count)
oscp_S_Role = discord.utils.get(server.roles, name="OSCP Student")
osep_S_Role = discord.utils.get(server.roles, name="OSEP Student")
oswe_S_Role = discord.utils.get(server.roles, name="OSWE Student")
osce_S_Role = discord.utils.get(server.roles, name="OSCE Student")
oswp_S_Role = discord.utils.get(server.roles, name="OSWP Student")
osee_S_Role = discord.utils.get(server.roles, name="OSEE Student")
oscp_V_Role = discord.utils.get(server.roles, name="OSCP Certified")
osep_V_Role = discord.utils.get(server.roles, name="OSEP Certified")
oswe_V_Role = discord.utils.get(server.roles, name="OSWE Certified")
osce_V_Role = discord.utils.get(server.roles, name="OSCE Certified")
oswp_V_Role = discord.utils.get(server.roles, name="OSWP Certified")
osee_V_Role = discord.utils.get(server.roles, name="OSEE Certified")
wapt_S_Role = discord.utils.get(server.roles, name="WAPT Student")
embed=discord.Embed(title="Student Statistics", description="Displays statistics about total members in the server, how many are in each student role, and how many are certified.", color=0x00ff00)
embed.add_field(name="Total Members", value=totalMembers, inline=False)
embed.add_field(name=oscp_S_Role, value=len(oscp_S_Role.members), inline=True)
embed.add_field(name=oscp_V_Role, value=len(oscp_V_Role.members), inline=True)
embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(name=osep_S_Role, value=len(osep_S_Role.members), inline=True)
embed.add_field(name=osep_V_Role, value=len(osep_V_Role.members), inline=True)
embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(name=oswp_S_Role, value=len(oswp_S_Role.members), inline=True)
embed.add_field(name=oswp_V_Role, value=len(oswp_V_Role.members), inline=True)
embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(name=oswe_S_Role, value=len(oswe_S_Role.members), inline=True)
embed.add_field(name=oswe_V_Role, value=len(oswe_V_Role.members), inline=True)
embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(name=osce_S_Role, value=len(osce_S_Role.members), inline=True)
embed.add_field(name=osce_V_Role, value=len(osce_V_Role.members), inline=True)
embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(name=osee_S_Role, value=len(osee_S_Role.members), inline=True)
embed.add_field(name=osee_V_Role, value=len(osee_V_Role.members), inline=True)
embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(name=wapt_S_Role, value=len(wapt_S_Role.members), inline=True)
await channel.send(embed=embed)
def setup(bot):
bot.add_command(stats)
def teardown(bot):
bot.remove_command(stats) | true | true |
f7fe24b7a74fb7434d50614c7027afdf173ed32a | 11,318 | py | Python | python-micro-service-master/.venv/lib/site-packages/winpty/ptyprocess.py | hiepvo01/AnimeRecommendationSystem | 662531fc72134caedcd8e1dee7fefd3bdb0017a2 | [
"MIT"
] | 3 | 2020-07-09T18:52:40.000Z | 2020-07-13T08:46:44.000Z | python-micro-service-master/.venv/lib/site-packages/winpty/ptyprocess.py | hiepvo01/AnimeRecommendationSystem | 662531fc72134caedcd8e1dee7fefd3bdb0017a2 | [
"MIT"
] | 4 | 2021-03-11T05:09:40.000Z | 2021-09-02T15:56:43.000Z | python-micro-service-master/.venv/lib/site-packages/winpty/ptyprocess.py | hiepvo01/AnimeRecommendationSystem | 662531fc72134caedcd8e1dee7fefd3bdb0017a2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Standard library imports
import codecs
import os
import shlex
import signal
import socket
import subprocess
import threading
import time
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
# Local imports
from .winpty_wrapper import PTY, PY2
class PtyProcess(object):
    """This class represents a process running in a pseudoterminal.

    The main constructor is the :meth:`spawn` classmethod.
    """

    def __init__(self, pty):
        assert isinstance(pty, PTY)
        self.pty = pty
        self.pid = pty.pid
        self.read_blocking = bool(os.environ.get('PYWINPTY_BLOCK', 1))
        self.closed = False
        self.flag_eof = False
        self.decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')

        # Used by terminate() to give kernel time to update process status.
        # Time in seconds.
        self.delayafterterminate = 0.1

        # Used by close() to give kernel time to update process status.
        # Time in seconds.
        self.delayafterclose = 0.1

        # Set up our file reader sockets: the pty reader thread pipes its
        # output into a private TCP connection that this object reads from.
        # BUG FIX: bind to the loopback address, not "10.0.0.7".  Binding
        # to an address the host does not own raises OSError, and anything
        # other than loopback would expose the terminal stream to the
        # network.
        self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._server.bind(("127.0.0.1", 0))
        address = self._server.getsockname()
        self._server.listen(1)

        # Read from the pty in a thread.
        self._thread = threading.Thread(target=_read_in_thread,
                                        args=(address, self.pty,
                                              self.read_blocking))
        self._thread.setDaemon(True)
        self._thread.start()

        self.fileobj, _ = self._server.accept()
        self.fd = self.fileobj.fileno()

    @classmethod
    def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80)):
        """Start the given command in a child process in a pseudo terminal.

        This does all the setting up the pty, and returns an instance of
        PtyProcess.

        Dimensions of the psuedoterminal used for the subprocess can be
        specified as a tuple (rows, cols), or the default (24, 80) will be
        used.
        """
        if isinstance(argv, str):
            argv = shlex.split(argv, posix=False)

        if not isinstance(argv, (list, tuple)):
            raise TypeError("Expected a list or tuple for argv, got %r" % argv)

        # Shallow copy of argv so we can modify it
        argv = argv[:]
        command = argv[0]
        env = env or os.environ

        path = env.get('PATH', os.defpath)
        command_with_path = which(command, path=path)
        if command_with_path is None:
            raise FileNotFoundError(
                'The command was not found or was not ' +
                'executable: %s.' % command
            )
        command = command_with_path
        argv[0] = command

        cmdline = ' ' + subprocess.list2cmdline(argv[1:])
        cwd = cwd or os.getcwd()

        proc = PTY(dimensions[1], dimensions[0])

        # Create the environment string ("KEY=VALUE\0..." block).
        envStrs = []
        for (key, value) in env.items():
            envStrs.append('%s=%s' % (key, value))
        env = '\0'.join(envStrs) + '\0'

        if PY2:
            command = _unicode(command)
            cwd = _unicode(cwd)
            cmdline = _unicode(cmdline)
            env = _unicode(env)

        if len(argv) == 1:
            proc.spawn(command, cwd=cwd, env=env)
        else:
            proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)

        inst = cls(proc)
        inst._winsize = dimensions

        # Set some informational attributes
        inst.argv = argv
        if env is not None:
            inst.env = env
        if cwd is not None:
            inst.launch_dir = cwd

        return inst

    @property
    def exitstatus(self):
        """The exit status of the process.
        """
        return self.pty.exitstatus

    def fileno(self):
        """This returns the file descriptor of the pty for the child.
        """
        return self.fd

    def close(self, force=False):
        """This closes the connection with the child application. Note that
        calling close() more than once is valid. This emulates standard Python
        behavior with files. Set force to True if you want to make sure that
        the child is terminated (SIGKILL is sent if the child ignores
        SIGINT)."""
        if not self.closed:
            self.pty.close()
            self.fileobj.close()
            self._server.close()
            # Give kernel time to update process status.
            time.sleep(self.delayafterclose)
            if self.isalive():
                if not self.terminate(force):
                    raise IOError('Could not terminate the child.')
            self.fd = -1
            self.closed = True
            del self.pty
            self.pty = None

    def __del__(self):
        """This makes sure that no system resources are left open. Python only
        garbage collects Python objects. OS file descriptors are not Python
        objects, so they must be handled explicitly. If the child file
        descriptor was opened outside of this class (passed to the constructor)
        then this does not close it.
        """
        # It is possible for __del__ methods to execute during the
        # teardown of the Python VM itself. Thus self.close() may
        # trigger an exception because os.close may be None.
        try:
            self.close()
        except Exception:
            pass

    def flush(self):
        """This does nothing. It is here to support the interface for a
        File-like object. """
        pass

    def isatty(self):
        """This returns True if the file descriptor is open and connected to a
        tty(-like) device, else False."""
        return self.isalive()

    def read(self, size=1024):
        """Read and return at most ``size`` characters from the pty.

        Can block if there is nothing to read. Raises :exc:`EOFError` if the
        terminal was closed.
        """
        data = self.fileobj.recv(size)
        if not data:
            self.flag_eof = True
            raise EOFError('Pty is closed')

        return self.decoder.decode(data, final=False)

    def readline(self):
        """Read one line from the pseudoterminal as bytes.

        Can block if there is nothing to read. Raises :exc:`EOFError` if the
        terminal was closed.
        """
        buf = []
        while 1:
            try:
                ch = self.read(1)
            except EOFError:
                return ''.join(buf)
            buf.append(ch)
            if ch == '\n':
                return ''.join(buf)

    def write(self, s):
        """Write the string ``s`` to the pseudoterminal.

        Returns the number of bytes written.
        """
        if not self.isalive():
            raise EOFError('Pty is closed')

        if PY2:
            s = _unicode(s)

        success, nbytes = self.pty.write(s)
        if not success:
            raise IOError('Write failed')
        return nbytes

    def terminate(self, force=False):
        """This forces a child process to terminate."""
        if not self.isalive():
            return True

        self.kill(signal.SIGINT)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True

        if force:
            self.kill(signal.SIGKILL)
            time.sleep(self.delayafterterminate)
            if not self.isalive():
                return True
            else:
                return False

    def wait(self):
        """This waits until the child exits. This is a blocking call. This will
        not read any data from the child.
        """
        while self.isalive():
            time.sleep(0.1)

        return self.exitstatus

    def isalive(self):
        """This tests if the child process is running or not. This is
        non-blocking. If the child was terminated then this will read the
        exitstatus or signalstatus of the child. This returns True if the child
        process appears to be running or False if not.
        """
        return self.pty and self.pty.isalive()

    def kill(self, sig=None):
        """Kill the process with the given signal.
        """
        os.kill(self.pid, sig)

    def sendcontrol(self, char):
        '''Helper method that wraps send() with mnemonic access for sending control
        character to the child (such as Ctrl-C or Ctrl-D). For example, to send
        Ctrl-G (ASCII 7, bell, '\a')::

            child.sendcontrol('g')

        See also, sendintr() and sendeof().
        '''
        char = char.lower()
        a = ord(char)
        if 97 <= a <= 122:
            a = a - ord('a') + 1
            byte = bytes([a])
            return self.pty.write(byte.decode('utf-8')), byte
        d = {'@': 0, '`': 0,
             '[': 27, '{': 27,
             '\\': 28, '|': 28,
             ']': 29, '}': 29,
             '^': 30, '~': 30,
             '_': 31,
             '?': 127}
        if char not in d:
            return 0, b''

        byte = bytes([d[char]])
        return self.pty.write(byte.decode('utf-8')), byte

    def sendeof(self):
        """This sends an EOF to the child. This sends a character which causes
        the pending parent output buffer to be sent to the waiting child
        program without waiting for end-of-line. If it is the first character
        of the line, the read() in the user program returns 0, which signifies
        end-of-file. This means to work as expected a sendeof() has to be
        called at the beginning of a line. This method does not send a newline.
        It is the responsibility of the caller to ensure the eof is sent at the
        beginning of a line."""
        # Send control character 4 (Ctrl-D).  (The original built and
        # discarded a tuple here; the stray ", '\x04'" served no purpose.)
        self.pty.write('\x04')

    def sendintr(self):
        """This sends a SIGINT to the child. It does not require
        the SIGINT to be the first character on a line. """
        # Send control character 3 (Ctrl-C); stray tuple removed as above.
        self.pty.write('\x03')

    def eof(self):
        """This returns True if the EOF exception was ever raised.
        """
        return self.flag_eof

    def getwinsize(self):
        """Return the window size of the pseudoterminal as a tuple (rows, cols).
        """
        return self._winsize

    def setwinsize(self, rows, cols):
        """Set the terminal window size of the child tty.
        """
        self._winsize = (rows, cols)
        self.pty.set_size(cols, rows)
def _read_in_thread(address, pty, blocking):
"""Read data from the pty in a thread.
"""
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(address)
while 1:
data = pty.read(4096, blocking=blocking)
if not data and not pty.isalive():
while not data and not pty.iseof():
data += pty.read(4096, blocking=blocking)
if not data:
try:
client.send(b'')
except socket.error:
pass
break
try:
client.send(data)
except socket.error:
break
client.close()
def _unicode(s):
    """Ensure that a string is Unicode on Python 2.

    Only reachable on Python 2 (guarded by ``PY2`` at the call sites);
    ``unicode`` does not exist on Python 3.
    """
    return s if isinstance(s, unicode) else s.decode('utf-8')  # noqa E891
| 31.614525 | 83 | 0.570949 |
import codecs
import os
import shlex
import signal
import socket
import subprocess
import threading
import time
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
from .winpty_wrapper import PTY, PY2
class PtyProcess(object):
def __init__(self, pty):
assert isinstance(pty, PTY)
self.pty = pty
self.pid = pty.pid
self.read_blocking = bool(os.environ.get('PYWINPTY_BLOCK', 1))
self.closed = False
self.flag_eof = False
self.decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
self.delayafterterminate = 0.1
self.delayafterclose = 0.1
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.bind(("10.0.0.7", 0))
address = self._server.getsockname()
self._server.listen(1)
self._thread = threading.Thread(target=_read_in_thread,
args=(address, self.pty, self.read_blocking))
self._thread.setDaemon(True)
self._thread.start()
self.fileobj, _ = self._server.accept()
self.fd = self.fileobj.fileno()
@classmethod
def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80)):
if isinstance(argv, str):
argv = shlex.split(argv, posix=False)
if not isinstance(argv, (list, tuple)):
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
argv = argv[:]
command = argv[0]
env = env or os.environ
path = env.get('PATH', os.defpath)
command_with_path = which(command, path=path)
if command_with_path is None:
raise FileNotFoundError(
'The command was not found or was not ' +
'executable: %s.' % command
)
command = command_with_path
argv[0] = command
cmdline = ' ' + subprocess.list2cmdline(argv[1:])
cwd = cwd or os.getcwd()
proc = PTY(dimensions[1], dimensions[0])
envStrs = []
for (key, value) in env.items():
envStrs.append('%s=%s' % (key, value))
env = '\0'.join(envStrs) + '\0'
if PY2:
command = _unicode(command)
cwd = _unicode(cwd)
cmdline = _unicode(cmdline)
env = _unicode(env)
if len(argv) == 1:
proc.spawn(command, cwd=cwd, env=env)
else:
proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)
inst = cls(proc)
inst._winsize = dimensions
inst.argv = argv
if env is not None:
inst.env = env
if cwd is not None:
inst.launch_dir = cwd
return inst
@property
def exitstatus(self):
return self.pty.exitstatus
def fileno(self):
return self.fd
def close(self, force=False):
if not self.closed:
self.pty.close()
self.fileobj.close()
self._server.close()
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise IOError('Could not terminate the child.')
self.fd = -1
self.closed = True
del self.pty
self.pty = None
def __del__(self):
try:
self.close()
except Exception:
pass
def flush(self):
pass
def isatty(self):
return self.isalive()
def read(self, size=1024):
data = self.fileobj.recv(size)
if not data:
self.flag_eof = True
raise EOFError('Pty is closed')
return self.decoder.decode(data, final=False)
def readline(self):
buf = []
while 1:
try:
ch = self.read(1)
except EOFError:
return ''.join(buf)
buf.append(ch)
if ch == '\n':
return ''.join(buf)
def write(self, s):
if not self.isalive():
raise EOFError('Pty is closed')
if PY2:
s = _unicode(s)
success, nbytes = self.pty.write(s)
if not success:
raise IOError('Write failed')
return nbytes
def terminate(self, force=False):
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
while self.isalive():
time.sleep(0.1)
return self.exitstatus
def isalive(self):
return self.pty and self.pty.isalive()
def kill(self, sig=None):
os.kill(self.pid, sig)
def sendcontrol(self, char):
char = char.lower()
a = ord(char)
if 97 <= a <= 122:
a = a - ord('a') + 1
byte = bytes([a])
return self.pty.write(byte.decode('utf-8')), byte
d = {'@': 0, '`': 0,
'[': 27, '{': 27,
'\\': 28, '|': 28,
']': 29, '}': 29,
'^': 30, '~': 30,
'_': 31,
'?': 127}
if char not in d:
return 0, b''
byte = bytes([d[char]])
return self.pty.write(byte.decode('utf-8')), byte
def sendeof(self):
self.pty.write('\x04'), '\x04'
def sendintr(self):
self.pty.write('\x03'), '\x03'
def eof(self):
return self.flag_eof
def getwinsize(self):
return self._winsize
def setwinsize(self, rows, cols):
self._winsize = (rows, cols)
self.pty.set_size(cols, rows)
def _read_in_thread(address, pty, blocking):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(address)
while 1:
data = pty.read(4096, blocking=blocking)
if not data and not pty.isalive():
while not data and not pty.iseof():
data += pty.read(4096, blocking=blocking)
if not data:
try:
client.send(b'')
except socket.error:
pass
break
try:
client.send(data)
except socket.error:
break
client.close()
def _unicode(s):
if isinstance(s, unicode):
return s
return s.decode('utf-8')
| true | true |
f7fe24ef1d6121951c07ba0175fe1f3920d6cb56 | 27 | py | Python | funcutils/__init__.py | juanmacaaz/FuncTools | 2bddc358d88262ee8dbd3728342b7fe0ac713f3c | [
"Apache-2.0"
] | 1 | 2021-12-09T00:04:55.000Z | 2021-12-09T00:04:55.000Z | funcutils/__init__.py | juanmacaaz/FuncUtils | 2bddc358d88262ee8dbd3728342b7fe0ac713f3c | [
"Apache-2.0"
] | null | null | null | funcutils/__init__.py | juanmacaaz/FuncUtils | 2bddc358d88262ee8dbd3728342b7fe0ac713f3c | [
"Apache-2.0"
] | null | null | null | from .funcutils import cost | 27 | 27 | 0.851852 | from .funcutils import cost | true | true |
f7fe263c6c4ab951b8a670bef02692cf8c999564 | 14,357 | py | Python | dbt/adapters/impala/impl.py | cloudera/dbt-impala | e8cd3eea6f0cdff49da043f4c1b49f3d88edf7a2 | [
"Apache-2.0"
] | 6 | 2022-03-28T23:14:21.000Z | 2022-03-31T03:49:50.000Z | dbt/adapters/impala/impl.py | cloudera/dbt-impala | e8cd3eea6f0cdff49da043f4c1b49f3d88edf7a2 | [
"Apache-2.0"
] | 1 | 2022-03-25T20:15:48.000Z | 2022-03-25T20:15:48.000Z | dbt/adapters/impala/impl.py | cloudera/dbt-impala | e8cd3eea6f0cdff49da043f4c1b49f3d88edf7a2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dbt.adapters.sql import SQLAdapter
from dbt.adapters.impala import ImpalaConnectionManager
import re
from typing import List, Tuple, Dict, Iterable, Any
import agate
import dbt.exceptions
from dbt.exceptions import warn_or_error
from dbt.contracts.relation import RelationType
from dbt.adapters.impala.column import ImpalaColumn
from dbt.adapters.impala.relation import ImpalaRelation
from dbt.events import AdapterLogger
from dbt.clients.agate_helper import DEFAULT_TYPE_TESTER, ColumnTypeBuilder, NullableAgateType
from dbt.utils import executor
from concurrent.futures import as_completed, Future
logger = AdapterLogger("Impala")
LIST_SCHEMAS_MACRO_NAME = 'list_schemas'
LIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'
KEY_TABLE_OWNER = 'Owner'
KEY_TABLE_STATISTICS = 'Statistics'
class ImpalaAdapter(SQLAdapter):
Relation = ImpalaRelation
Column = ImpalaColumn
ConnectionManager = ImpalaConnectionManager
INFORMATION_COLUMNS_REGEX = re.compile(
r"^ \|-- (.*): (.*) \(nullable = (.*)\b", re.MULTILINE)
INFORMATION_OWNER_REGEX = re.compile(r"^Owner: (.*)$", re.MULTILINE)
INFORMATION_STATISTICS_REGEX = re.compile(
r"^Statistics: (.*)$", re.MULTILINE)
    @classmethod
    def date_function(cls):
        """Return the Impala SQL expression for the current timestamp."""
        return 'now()'
    @classmethod
    def convert_datetime_type(
            cls, agate_table: agate.Table, col_idx: int
    ) -> str:
        """Agate DateTime columns map to Impala ``timestamp``."""
        return "timestamp"
    @classmethod
    def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Agate Date columns map to Impala ``date``."""
        return "date"
    @classmethod
    def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        # NOTE(review): mirrors the dbt-spark implementation; confirm that
        # "time" is a valid/intended column type name for Impala DDL.
        return "time"
    @classmethod
    def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Agate Text columns map to Impala ``string``."""
        return "string"
    def quote(self, identifier):
        # Identifiers are deliberately returned unquoted for this adapter.
        return identifier  # no quote
    @classmethod
    def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Map an agate Number column to an Impala numeric type.

        Columns containing fractional values become "real"; purely
        integral columns become "integer".
        """
        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))  # type: ignore[attr-defined]
        return "real" if decimals else "integer"
def check_schema_exists(self, database, schema):
results = self.execute_macro(
LIST_SCHEMAS_MACRO_NAME,
kwargs={'database': database}
)
exists = True if schema in [row[0] for row in results] else False
return exists
def list_schemas(self, database: str) -> List[str]:
results = self.execute_macro(
LIST_SCHEMAS_MACRO_NAME,
kwargs={'database': database}
)
schemas = []
for row in results:
_schema = row[0]
schemas.append(_schema)
return schemas
def list_relations_without_caching(
self, schema_relation: ImpalaRelation
) -> List[ImpalaRelation]:
kwargs = {'schema_relation': schema_relation}
try:
results = self.execute_macro(
LIST_RELATIONS_MACRO_NAME,
kwargs=kwargs
)
except dbt.exceptions.RuntimeException as e:
errmsg = getattr(e, 'msg', '')
if f"Database '{schema_relation}' not found" in errmsg:
return []
else:
description = "Error while retrieving information about"
logger.debug(f"{description} {schema_relation}: {e.msg}")
return []
relations = []
for row in results:
if len(row) != 1:
raise dbt.exceptions.RuntimeException(
f'Invalid value from "show table extended ...", '
f'got {len(row)} values, expected 4'
)
_identifier = row[0]
# TODO: the following is taken from spark, needs to see what is there in impala
# TODO: this modification is not really right, need fix
rel_type = RelationType.View \
if 'view' in _identifier else RelationType.Table
relation = self.Relation.create(
database=schema_relation.database,
schema=schema_relation.schema,
identifier=_identifier,
type=rel_type,
information=_identifier,
)
relations.append(relation)
return relations
def get_columns_in_relation(self, relation: Relation) -> List[ImpalaColumn]:
cached_relations = self.cache.get_relations(
relation.database, relation.schema)
cached_relation = next((cached_relation
for cached_relation in cached_relations
if str(cached_relation) == str(relation)),
None)
columns = []
if cached_relation and cached_relation.information:
columns = self.parse_columns_from_information(cached_relation)
# execute the macro and parse the data
if not columns:
rows: List[agate.Row] = super().get_columns_in_relation(relation)
columns = self.parse_describe_extended(relation, rows)
return columns
    def parse_describe_extended(
            self,
            relation: Relation,
            raw_rows: List[agate.Row]
    ) -> List[ImpalaColumn]:
        """Build ImpalaColumn objects from DESCRIBE EXTENDED output rows.

        The output consists of column rows, a separator row, and then
        key/value metadata rows (owner, statistics, ...).
        """
        # TODO: this method is largely from dbt-spark, sample test with impala works (test_dbt_base: base)
        # need deeper testing
        # Convert each agate Row to a plain dict.
        dict_rows = [dict(zip(row._keys, row._values)) for row in raw_rows]
        # Find the separator between the rows and the metadata provided
        # by the DESCRIBE EXTENDED {{relation}} statement
        pos = self.find_table_information_separator(dict_rows)

        # Remove rows that start with a hash, they are comments
        rows = [
            row for row in raw_rows[0:pos]
            if not row['name'].startswith('#') and not row['name'] == ''
        ]
        # Everything after the separator is name/type metadata pairs.
        metadata = {
            col['name']: col['type'] for col in raw_rows[pos + 1:]
        }

        raw_table_stats = metadata.get(KEY_TABLE_STATISTICS)
        table_stats = ImpalaColumn.convert_table_stats(raw_table_stats)

        return [ImpalaColumn(
            table_database=None,
            table_schema=relation.schema,
            table_name=relation.name,
            table_type=relation.type,
            table_owner=str(metadata.get(KEY_TABLE_OWNER)),
            table_stats=table_stats,
            column=column['name'],
            column_index=idx,
            dtype=column['type'],
        ) for idx, column in enumerate(rows)]
@staticmethod
def find_table_information_separator(rows: List[dict]) -> int:
pos = 0
for row in rows:
if row['name'].startswith('# Detailed Table Information'):
break
pos += 1
return pos
    def parse_columns_from_information(
        self, relation: ImpalaRelation
    ) -> List[ImpalaColumn]:
        """Build ImpalaColumn objects by regex-scanning cached information.

        Extracts owner, per-column name/type/nullability, and table
        statistics from the relation's raw ``information`` string.
        """
        # TODO: this method is largely from dbt-spark, sample test with impala works (test_dbt_base: base)
        # need deeper testing
        owner_match = re.findall(
            self.INFORMATION_OWNER_REGEX, relation.information)
        owner = owner_match[0] if owner_match else None
        matches = re.finditer(
            self.INFORMATION_COLUMNS_REGEX, relation.information)
        columns = []
        stats_match = re.findall(
            self.INFORMATION_STATISTICS_REGEX, relation.information)
        raw_table_stats = stats_match[0] if stats_match else None
        table_stats = ImpalaColumn.convert_table_stats(raw_table_stats)
        # One match per column line: (name, type, nullable).
        for match_num, match in enumerate(matches):
            column_name, column_type, nullable = match.groups()
            column = ImpalaColumn(
                table_database=None,
                table_schema=relation.schema,
                table_name=relation.table,
                table_type=relation.type,
                column_index=match_num,
                table_owner=owner,
                column=column_name,
                dtype=column_type,
                table_stats=table_stats
            )
            columns.append(column)
        return columns
def _merged_column_types(
tables: List[agate.Table]
) -> Dict[str, agate.data_types.DataType]:
# this is a lot like agate.Table.merge, but with handling for all-null
# rows being "any type".
new_columns: ColumnTypeBuilder = ColumnTypeBuilder()
for table in tables:
for i in range(len(table.columns)):
column_name: str = table.column_names[i]
column_type: NullableAgateType = table.column_types[i]
# avoid over-sensitive type inference
new_columns[column_name] = column_type
return new_columns.finalize()
def _merge_tables(tables: List[agate.Table]) -> agate.Table:
new_columns = ImpalaAdapter._merged_column_types(tables)
column_names = tuple(new_columns.keys())
column_types = tuple(new_columns.values())
rows: List[agate.Row] = []
for table in tables:
if (
table.column_names == column_names and
table.column_types == column_types
):
rows.extend(table.rows)
else:
for row in table.rows:
data = [row.get(name, None) for name in column_names]
rows.append(agate.Row(data, column_names))
# _is_fork to tell agate that we already made things into `Row`s.
return agate.Table(rows, column_names, column_types, _is_fork=True)
def _catch_as_completed(
futures # typing: List[Future[agate.Table]]
) -> Tuple[agate.Table, List[Exception]]:
# catalogs: agate.Table = agate.Table(rows=[])
tables: List[agate.Table] = []
exceptions: List[Exception] = []
for future in as_completed(futures):
exc = future.exception()
# we want to re-raise on ctrl+c and BaseException
if exc is None:
catalog = future.result()
tables.append(catalog)
elif (
isinstance(exc, KeyboardInterrupt) or
not isinstance(exc, Exception)
):
raise exc
else:
warn_or_error(
f'Encountered an error while generating catalog: {str(exc)}'
)
# exc is not None, derives from Exception, and isn't ctrl+c
exceptions.append(exc)
return ImpalaAdapter._merge_tables(tables), exceptions
def get_catalog(self, manifest):
schema_map = self._get_catalog_schemas(manifest)
with executor(self.config) as tpe:
futures: List[Future[agate.Table]] = []
for info, schemas in schema_map.items():
for schema in schemas:
for relation in self.list_relations(info.database, schema):
name = '.'.join([str(relation.database), str(relation.schema), str(relation.name)])
futures.append(tpe.submit_connected(
self, name,
self._get_one_catalog, relation, '.'.join([str(relation.database), str(relation.schema)])
))
catalogs, exceptions = ImpalaAdapter._catch_as_completed(futures)
return catalogs, exceptions
def _get_datatype(self, col_type):
defaultType = agate.data_types.Text(null_values=('null', ''))
datatypeMap = {
'int': agate.data_types.Number(null_values=('null', '')),
'double': agate.data_types.Number(null_values=('null', '')),
'timestamp': agate.data_types.DateTime(null_values=('null', ''), datetime_format='%Y-%m-%d %H:%M:%S'),
'date': agate.data_types.Date(null_values=('null', ''), date_format='%Y-%m-%d'),
'boolean': agate.data_types.Boolean(true_values=('true',), false_values=('false',), null_values=('null', '')),
'text': defaultType,
'string': defaultType
}
try:
dt = datatypeMap[col_type]
if (dt == None):
return defaultType
else:
return dt
except:
return defaultType
def _get_one_catalog(
self, relation, unique_id
) -> agate.Table:
columns: List[Dict[str, Any]] = []
columns.extend(self._get_columns_for_catalog(relation, unique_id))
tableFromCols = agate.Table.from_object(
columns, column_types=DEFAULT_TYPE_TESTER
)
colNames = list(map(lambda x: x['column_name'], columns))
colTypes = list(map(lambda x: self._get_datatype(x['column_type']), columns))
tableFromCols = agate.Table([], column_names=colNames, column_types=colTypes)
return tableFromCols
def _get_columns_for_catalog(
self, relation: ImpalaRelation, unique_id
) -> Iterable[Dict[str, Any]]:
columns = self.get_columns_in_relation(relation)
for column in columns:
# convert ImpalaColumns into catalog dicts
as_dict = column.to_column_dict()
if (unique_id):
as_dict['column_name'] = unique_id + '.' + relation.table + '.' + as_dict.pop('column', None)
else:
as_dict['column_name'] = relation.database + '.' + relation.schema + '.' + relation.table + '.' + as_dict.pop('column', None)
as_dict['column_type'] = as_dict.pop('dtype')
as_dict['table_database'] = None
yield as_dict
| 36.531807 | 141 | 0.605767 |
from dbt.adapters.sql import SQLAdapter
from dbt.adapters.impala import ImpalaConnectionManager
import re
from typing import List, Tuple, Dict, Iterable, Any
import agate
import dbt.exceptions
from dbt.exceptions import warn_or_error
from dbt.contracts.relation import RelationType
from dbt.adapters.impala.column import ImpalaColumn
from dbt.adapters.impala.relation import ImpalaRelation
from dbt.events import AdapterLogger
from dbt.clients.agate_helper import DEFAULT_TYPE_TESTER, ColumnTypeBuilder, NullableAgateType
from dbt.utils import executor
from concurrent.futures import as_completed, Future
# Module-level logger shared by the adapter.
logger = AdapterLogger("Impala")
# Names of the dbt macros invoked through execute_macro().
LIST_SCHEMAS_MACRO_NAME = 'list_schemas'
LIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'
# Metadata keys read from DESCRIBE EXTENDED output.
KEY_TABLE_OWNER = 'Owner'
KEY_TABLE_STATISTICS = 'Statistics'
class ImpalaAdapter(SQLAdapter):
    """dbt adapter for Apache Impala, layered on the generic SQLAdapter."""
    Relation = ImpalaRelation
    Column = ImpalaColumn
    ConnectionManager = ImpalaConnectionManager
    # Regexes used to pull columns/owner/statistics out of a relation's
    # cached "information" text (see parse_columns_from_information).
    INFORMATION_COLUMNS_REGEX = re.compile(
        r"^ \|-- (.*): (.*) \(nullable = (.*)\b", re.MULTILINE)
    INFORMATION_OWNER_REGEX = re.compile(r"^Owner: (.*)$", re.MULTILINE)
    INFORMATION_STATISTICS_REGEX = re.compile(
        r"^Statistics: (.*)$", re.MULTILINE)
    @classmethod
    def date_function(cls):
        """SQL expression for the current timestamp."""
        return 'now()'
    @classmethod
    def convert_datetime_type(
        cls, agate_table: agate.Table, col_idx: int
    ) -> str:
        """Map an agate datetime column to the Impala 'timestamp' type."""
        return "timestamp"
    @classmethod
    def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Map an agate date column to the Impala 'date' type."""
        return "date"
    @classmethod
    def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Map an agate time column to the Impala 'time' type."""
        return "time"
    @classmethod
    def convert_text_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Map an agate text column to the Impala 'string' type."""
        return "string"
    def quote(self, identifier):
        """Return the identifier as-is (no quoting applied)."""
        return identifier
    @classmethod
    def convert_number_type(cls, agate_table: agate.Table, col_idx: int) -> str:
        """Map an agate number column to 'real' when it has any decimal
        precision, otherwise 'integer'."""
        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))
        return "real" if decimals else "integer"
    def check_schema_exists(self, database, schema):
        """Return True when *schema* is among the schemas of *database*."""
        results = self.execute_macro(
            LIST_SCHEMAS_MACRO_NAME,
            kwargs={'database': database}
        )
        exists = True if schema in [row[0] for row in results] else False
        return exists
    def list_schemas(self, database: str) -> List[str]:
        """Return the names of all schemas in *database*."""
        results = self.execute_macro(
            LIST_SCHEMAS_MACRO_NAME,
            kwargs={'database': database}
        )
        schemas = []
        for row in results:
            _schema = row[0]
            schemas.append(_schema)
        return schemas
    def list_relations_without_caching(
        self, schema_relation: ImpalaRelation
    ) -> List[ImpalaRelation]:
        """List relations in a schema directly from the warehouse.

        Returns [] when the database does not exist or the macro errors.
        Relations whose identifier contains 'view' are typed as views,
        everything else as tables.
        """
        kwargs = {'schema_relation': schema_relation}
        try:
            results = self.execute_macro(
                LIST_RELATIONS_MACRO_NAME,
                kwargs=kwargs
            )
        except dbt.exceptions.RuntimeException as e:
            errmsg = getattr(e, 'msg', '')
            if f"Database '{schema_relation}' not found" in errmsg:
                return []
            else:
                description = "Error while retrieving information about"
                logger.debug(f"{description} {schema_relation}: {e.msg}")
                return []
        relations = []
        for row in results:
            if len(row) != 1:
                # NOTE(review): the message says 'expected 4' but the
                # check enforces exactly 1 value -- confirm which is right.
                raise dbt.exceptions.RuntimeException(
                    f'Invalid value from "show table extended ...", '
                    f'got {len(row)} values, expected 4'
                )
            _identifier = row[0]
            rel_type = RelationType.View \
                if 'view' in _identifier else RelationType.Table
            relation = self.Relation.create(
                database=schema_relation.database,
                schema=schema_relation.schema,
                identifier=_identifier,
                type=rel_type,
                information=_identifier,
            )
            relations.append(relation)
        return relations
    def get_columns_in_relation(self, relation: Relation) -> List[ImpalaColumn]:
        """Return the columns of *relation*.

        Prefers parsing the cached relation's information text; falls
        back to DESCRIBE EXTENDED via the parent implementation.
        """
        cached_relations = self.cache.get_relations(
            relation.database, relation.schema)
        cached_relation = next((cached_relation
                                for cached_relation in cached_relations
                                if str(cached_relation) == str(relation)),
                               None)
        columns = []
        if cached_relation and cached_relation.information:
            columns = self.parse_columns_from_information(cached_relation)
        if not columns:
            rows: List[agate.Row] = super().get_columns_in_relation(relation)
            columns = self.parse_describe_extended(relation, rows)
        return columns
    def parse_describe_extended(
        self,
        relation: Relation,
        raw_rows: List[agate.Row]
    ) -> List[ImpalaColumn]:
        """Build ImpalaColumns from DESCRIBE EXTENDED output.

        Rows before the '# Detailed Table Information' separator are
        columns (comment rows starting with '#' and empty rows are
        dropped); rows after it are table metadata (owner, statistics).
        """
        dict_rows = [dict(zip(row._keys, row._values)) for row in raw_rows]
        pos = self.find_table_information_separator(dict_rows)
        rows = [
            row for row in raw_rows[0:pos]
            if not row['name'].startswith('#') and not row['name'] == ''
        ]
        metadata = {
            col['name']: col['type'] for col in raw_rows[pos + 1:]
        }
        raw_table_stats = metadata.get(KEY_TABLE_STATISTICS)
        table_stats = ImpalaColumn.convert_table_stats(raw_table_stats)
        return [ImpalaColumn(
            table_database=None,
            table_schema=relation.schema,
            table_name=relation.name,
            table_type=relation.type,
            table_owner=str(metadata.get(KEY_TABLE_OWNER)),
            table_stats=table_stats,
            column=column['name'],
            column_index=idx,
            dtype=column['type'],
        ) for idx, column in enumerate(rows)]
    @staticmethod
    def find_table_information_separator(rows: List[dict]) -> int:
        """Index of the row that opens the detailed-table-info section
        (len(rows) when absent)."""
        pos = 0
        for row in rows:
            if row['name'].startswith('# Detailed Table Information'):
                break
            pos += 1
        return pos
    def parse_columns_from_information(
        self, relation: ImpalaRelation
    ) -> List[ImpalaColumn]:
        """Build ImpalaColumns by regex-scanning the relation's cached
        ``information`` text (ported from dbt-spark)."""
        owner_match = re.findall(
            self.INFORMATION_OWNER_REGEX, relation.information)
        owner = owner_match[0] if owner_match else None
        matches = re.finditer(
            self.INFORMATION_COLUMNS_REGEX, relation.information)
        columns = []
        stats_match = re.findall(
            self.INFORMATION_STATISTICS_REGEX, relation.information)
        raw_table_stats = stats_match[0] if stats_match else None
        table_stats = ImpalaColumn.convert_table_stats(raw_table_stats)
        for match_num, match in enumerate(matches):
            column_name, column_type, nullable = match.groups()
            column = ImpalaColumn(
                table_database=None,
                table_schema=relation.schema,
                table_name=relation.table,
                table_type=relation.type,
                column_index=match_num,
                table_owner=owner,
                column=column_name,
                dtype=column_type,
                table_stats=table_stats
            )
            columns.append(column)
        return columns
    def _merged_column_types(
        tables: List[agate.Table]
    ) -> Dict[str, agate.data_types.DataType]:
        """Union column names/types across tables.

        Much like agate.Table.merge, but ColumnTypeBuilder treats
        all-null columns as "any type".  NOTE: takes no self/cls and is
        only invoked via the class (ImpalaAdapter._merged_column_types).
        """
        new_columns: ColumnTypeBuilder = ColumnTypeBuilder()
        for table in tables:
            for i in range(len(table.columns)):
                column_name: str = table.column_names[i]
                column_type: NullableAgateType = table.column_types[i]
                new_columns[column_name] = column_type
        return new_columns.finalize()
    def _merge_tables(tables: List[agate.Table]) -> agate.Table:
        """Concatenate tables onto the merged schema; rows of mismatched
        tables are re-projected (missing columns become None).  NOTE:
        only invoked via the class (ImpalaAdapter._merge_tables)."""
        new_columns = ImpalaAdapter._merged_column_types(tables)
        column_names = tuple(new_columns.keys())
        column_types = tuple(new_columns.values())
        rows: List[agate.Row] = []
        for table in tables:
            if (
                table.column_names == column_names and
                table.column_types == column_types
            ):
                rows.extend(table.rows)
            else:
                for row in table.rows:
                    data = [row.get(name, None) for name in column_names]
                    rows.append(agate.Row(data, column_names))
        # _is_fork tells agate the rows are already `Row` instances.
        return agate.Table(rows, column_names, column_types, _is_fork=True)
    def _catch_as_completed(
        futures
    ) -> Tuple[agate.Table, List[Exception]]:
        """Drain completed futures into one merged table plus exceptions.

        KeyboardInterrupt / non-Exception errors are re-raised; other
        exceptions are warned about and collected.  NOTE: only invoked
        via the class (ImpalaAdapter._catch_as_completed).
        """
        tables: List[agate.Table] = []
        exceptions: List[Exception] = []
        for future in as_completed(futures):
            exc = future.exception()
            if exc is None:
                catalog = future.result()
                tables.append(catalog)
            elif (
                isinstance(exc, KeyboardInterrupt) or
                not isinstance(exc, Exception)
            ):
                # Re-raise on ctrl+c and BaseException subclasses.
                raise exc
            else:
                warn_or_error(
                    f'Encountered an error while generating catalog: {str(exc)}'
                )
                exceptions.append(exc)
        return ImpalaAdapter._merge_tables(tables), exceptions
    def get_catalog(self, manifest):
        """Build the docs catalog for every relation in the manifest,
        one executor task per relation."""
        schema_map = self._get_catalog_schemas(manifest)
        with executor(self.config) as tpe:
            futures: List[Future[agate.Table]] = []
            for info, schemas in schema_map.items():
                for schema in schemas:
                    for relation in self.list_relations(info.database, schema):
                        name = '.'.join([str(relation.database), str(relation.schema), str(relation.name)])
                        futures.append(tpe.submit_connected(
                            self, name,
                            self._get_one_catalog, relation, '.'.join([str(relation.database), str(relation.schema)])
                        ))
            catalogs, exceptions = ImpalaAdapter._catch_as_completed(futures)
        return catalogs, exceptions
    def _get_datatype(self, col_type):
        """Map an Impala type name to an agate type; Text on unknown."""
        defaultType = agate.data_types.Text(null_values=('null', ''))
        datatypeMap = {
            'int': agate.data_types.Number(null_values=('null', '')),
            'double': agate.data_types.Number(null_values=('null', '')),
            'timestamp': agate.data_types.DateTime(null_values=('null', ''), datetime_format='%Y-%m-%d %H:%M:%S'),
            'date': agate.data_types.Date(null_values=('null', ''), date_format='%Y-%m-%d'),
            'boolean': agate.data_types.Boolean(true_values=('true',), false_values=('false',), null_values=('null', '')),
            'text': defaultType,
            'string': defaultType
        }
        try:
            dt = datatypeMap[col_type]
            # NOTE(review): the map never holds None, so this check looks
            # dead -- confirm before removing.
            if (dt == None):
                return defaultType
            else:
                return dt
        except:
            return defaultType
    def _get_one_catalog(
        self, relation, unique_id
    ) -> agate.Table:
        """Build a row-less agate table describing one relation's columns.

        NOTE(review): the first from_object table is overwritten below
        and appears to be dead work -- confirm before removing.
        """
        columns: List[Dict[str, Any]] = []
        columns.extend(self._get_columns_for_catalog(relation, unique_id))
        tableFromCols = agate.Table.from_object(
            columns, column_types=DEFAULT_TYPE_TESTER
        )
        colNames = list(map(lambda x: x['column_name'], columns))
        colTypes = list(map(lambda x: self._get_datatype(x['column_type']), columns))
        tableFromCols = agate.Table([], column_names=colNames, column_types=colTypes)
        return tableFromCols
    def _get_columns_for_catalog(
        self, relation: ImpalaRelation, unique_id
    ) -> Iterable[Dict[str, Any]]:
        """Yield one catalog dict per column, with a fully qualified
        'column_name' and its 'column_type'."""
        columns = self.get_columns_in_relation(relation)
        for column in columns:
            # convert ImpalaColumns into catalog dicts
            as_dict = column.to_column_dict()
            if (unique_id):
                as_dict['column_name'] = unique_id + '.' + relation.table + '.' + as_dict.pop('column', None)
            else:
                as_dict['column_name'] = relation.database + '.' + relation.schema + '.' + relation.table + '.' + as_dict.pop('column', None)
            as_dict['column_type'] = as_dict.pop('dtype')
            as_dict['table_database'] = None
            yield as_dict
| true | true |
f7fe26fc9905146c59da68e6f935fdc8d6271c10 | 13,934 | py | Python | src/semseg/data/potsdam.py | Suerte412/SemSeg | 46515f36291bb7e068ceb1b455fe1fc4a26842ef | [
"Apache-2.0"
] | 1 | 2020-11-04T02:52:11.000Z | 2020-11-04T02:52:11.000Z | src/semseg/data/potsdam.py | Suerte412/SemSeg | 46515f36291bb7e068ceb1b455fe1fc4a26842ef | [
"Apache-2.0"
] | null | null | null | src/semseg/data/potsdam.py | Suerte412/SemSeg | 46515f36291bb7e068ceb1b455fe1fc4a26842ef | [
"Apache-2.0"
] | null | null | null | from os.path import join
import numpy as np
import os
from .isprs import IsprsDataset
from .generators import FileGenerator, TRAIN, VALIDATION, TEST
from .utils import (
save_img, load_img, get_img_size, compute_ndvi, _makedirs,
save_numpy_array)
# Sub-directory names (under the datasets root) for the raw and the
# preprocessed (numpy) Potsdam data.
POTSDAM = 'potsdam'
PROCESSED_POTSDAM = 'processed_potsdam'
# dataset dependent parameters
class PotsdamDataset(IsprsDataset):
    """Channel layout and label conventions for the ISPRS Potsdam dataset."""
    # Train fraction corresponding to a 17/24 tile split.
    sharah_train_ratio = 17 / 24
    def __init__(self, include_depth=True, include_ndvi=True):
        """Configure which input channels are active.

        NOTE(review): the include_* flags are immediately forced to
        False below (RGB-only DEBUG override), so the constructor
        arguments currently have no effect -- confirm whether the
        override should stay.
        """
        self.include_ir = True
        self.include_depth = include_depth
        self.include_ndvi = include_ndvi
        # DEBUG
        # For 3 active channels
        self.include_ir = False
        self.include_depth = False
        self.include_ndvi = False
        # Fixed channel positions within the stacked input array.
        self.red_ind = 0
        self.green_ind = 1
        self.blue_ind = 2
        self.rgb_inds = [self.red_ind, self.green_ind, self.blue_ind]
        self.ir_ind = 3
        self.depth_ind = 4
        self.ndvi_ind = 5
        self.active_input_inds = list(self.rgb_inds)
        # Add extra channels to batch_x in addition to rgb
        if self.include_ir:
            self.active_input_inds.append(self.ir_ind)
        if self.include_depth:
            self.active_input_inds.append(self.depth_ind)
        if self.include_ndvi:
            self.active_input_inds.append(self.ndvi_ind)
        super().__init__()
    def get_output_file_name(self, file_ind):
        """Label file name for a (row, col) tile index."""
        return 'top_potsdam_{}_{}_label.tif'.format(file_ind[0], file_ind[1])
    def augment_channels(self, batch_x):
        """Append an NDVI channel computed from the red and IR channels."""
        red = batch_x[:, :, :, [self.red_ind]]
        ir = batch_x[:, :, :, [self.ir_ind]]
        ndvi = compute_ndvi(red, ir)
        return np.concatenate([batch_x, ndvi], axis=3)
class PotsdamFileGenerator(FileGenerator):
    """Base batch generator for the Potsdam dataset.

    Holds the fixed train/validation and test tile indices and wires the
    dataset's active channel indices into the parent FileGenerator.
    """
    def __init__(self, train_ratio):
        self.dataset = PotsdamDataset()
        # (row, col) tile ids.  The first 24 correspond to the training
        # set and the remainder to the validation set used in
        # https://arxiv.org/abs/1606.02585
        self.file_inds = [
            (2, 10), (3, 10), (3, 11), (3, 12), (4, 11), (4, 12),
            (5, 10), (5, 12), (6, 10), (6, 11), (6, 12), (6, 8),
            (6, 9), (7, 11), (7, 12), (7, 7), (7, 9), (2, 11),
            (2, 12), (4, 10), (5, 11), (6, 7), (7, 10), (7, 8)
        ]
        self.test_file_inds = [
            (2, 13), (2, 14), (3, 13), (3, 14), (4, 13), (4, 14),
            (4, 15), (5, 13), (5, 14), (5, 15), (6, 13), (6, 14),
            (6, 15), (7, 13)
        ]
        super().__init__(
            self.dataset.active_input_inds, train_ratio,
            cross_validation=None)
class PotsdamImageFileGenerator(PotsdamFileGenerator):
    """
    A data generator for the Potsdam dataset that creates batches from
    the original TIFF and JPG files.
    """
    def __init__(self, datasets_path,
                 train_ratio=0.8):
        # datasets_path: root directory containing the raw 'potsdam' data.
        self.dataset_path = join(datasets_path, POTSDAM)
        super().__init__(train_ratio)
    @staticmethod
    def preprocess(datasets_path):
        # Fix the depth image that is missing a column if it hasn't been
        # fixed already.
        data_path = join(datasets_path, POTSDAM)
        file_path = join(
            data_path,
            '1_DSM_normalisation/dsm_potsdam_03_13_normalized_lastools.jpg')
        im = load_img(file_path)
        if im.shape[1] == 5999:
            im_fix = np.zeros((6000, 6000), dtype=np.uint8)
            im_fix[:, 0:-1] = im[:, :, 0]
            save_img(im_fix, file_path)
    def get_file_size(self, file_ind):
        """Return (nb_rows, nb_cols) of the RGBIR tile for *file_ind*."""
        ind0, ind1 = file_ind
        rgbir_file_path = join(
            self.dataset_path,
            '4_Ortho_RGBIR' + os.sep + 'top_potsdam_{}_{}_RGBIR.tif'.format(ind0, ind1))
        nb_rows, nb_cols = get_img_size(rgbir_file_path)
        return nb_rows, nb_cols
    def get_img(self, file_ind, window, has_y=True):
        """Load and stack the raw channels for one tile window.

        Concatenates RGBIR + depth (and, when has_y, the label image and
        its no-boundary variant) along the channel axis.
        """
        ind0, ind1 = file_ind
        rgbir_file_path = join(
            self.dataset_path,
            '4_Ortho_RGBIR' + os.sep + 'top_potsdam_{}_{}_RGBIR.tif'.format(ind0, ind1))
        depth_file_path = join(
            self.dataset_path,
            '1_DSM_normalisation' + os.sep + 'dsm_potsdam_{:0>2}_{:0>2}_normalized_lastools.jpg'.format(ind0, ind1))  # noqa
        batch_y_file_path = join(
            self.dataset_path,
            '5_Labels_for_participants' + os.sep + 'top_potsdam_{}_{}_label.tif'.format(ind0, ind1))  # noqa
        batch_y_no_boundary_file_path = join(
            self.dataset_path,
            '5_Labels_for_participants_no_Boundary' + os.sep + 'top_potsdam_{}_{}_label_noBoundary.tif'.format(ind0, ind1))  # noqa
        rgbir = load_img(rgbir_file_path, window)
        depth = load_img(depth_file_path, window)
        channels = [rgbir, depth]
        if has_y:
            batch_y = load_img(batch_y_file_path, window)
            batch_y_no_boundary = load_img(
                batch_y_no_boundary_file_path, window)
            channels.extend([batch_y, batch_y_no_boundary])
        img = np.concatenate(channels, axis=2)
        return img
    def parse_batch(self, batch, has_y=True):
        """Split a stacked raw batch into (batch_x, batch_y, batch_y_mask).

        Leading channels are the active inputs; when has_y, they are
        followed by 3 RGB label channels and then the boundary-mask
        channels.
        """
        # Number of channels extracted from images (ndvi is computed later,
        # so the trailing active index is excluded).
        # NOTE(review): with NDVI disabled, len(active_input_inds)-1 slices
        # one channel short of the RGB inputs -- confirm intended.
        nb_channels = len(self.dataset.active_input_inds) - 1
        batch_x = batch[:, :, :, self.dataset.active_input_inds[0]:nb_channels]
        batch_y = None
        batch_y_mask = None
        if has_y:
            batch_y = self.dataset.rgb_to_one_hot_batch(batch[:, :, :, nb_channels:nb_channels+3])
            batch_y_mask = self.dataset.rgb_to_mask_batch(batch[:, :, :, nb_channels+3:])
        return batch_x, batch_y, batch_y_mask
class PotsdamNumpyFileGenerator(PotsdamFileGenerator):
    """
    A data generator for the Potsdam dataset that creates batches from
    numpy array files. This is about 20x faster than reading the raw files.
    """
    def __init__(self, datasets_path,
                 train_ratio=0.8):
        """Create a generator reading preprocessed .npy tiles.

        datasets_path: root directory containing both the raw and the
            processed dataset sub-directories.
        train_ratio: fraction of tiles used for training; an explicit
            None falls back to the 0.8 default.
        """
        self.raw_dataset_path = join(datasets_path, POTSDAM)
        self.dataset_path = join(datasets_path, PROCESSED_POTSDAM)
        # Callers may pass train_ratio=None explicitly; treat it like the
        # default instead of propagating None to the parent class.
        # (Fixed: compare with `is None`, not `== None`.)
        if train_ratio is None:
            train_ratio = 0.8
        print("Train_ratio is: {0}".format(train_ratio))
        super().__init__(train_ratio)
    @staticmethod
    def preprocess(datasets_path):
        """Convert each raw Potsdam tile into a single .npy file.

        For every split (train/validation/test) the image generator is
        run without shuffling/augmentation/normalization; the input
        channels, label image and label mask are concatenated depth-wise
        and saved as '<ind0>_<ind1>.npy' under the processed directory.
        """
        proc_data_path = join(datasets_path, PROCESSED_POTSDAM)
        _makedirs(proc_data_path)
        generator = PotsdamImageFileGenerator(
            datasets_path
        )
        dataset = generator.dataset
        def _preprocess(split):
            # One tile per batch so each saved file maps to one tile.
            gen = generator.make_split_generator(
                split, batch_size=1, shuffle=False, augment=False,
                normalize=False, eval_mode=True)
            for batch_tuple in gen:
                # Tuple layout appears to be (x, y, ?, y_mask, file_inds);
                # index 2 is unused here.  TODO(review): confirm against
                # make_split_generator.
                batch_x = batch_tuple[0]
                batch_y = batch_tuple[1]
                batch_y_mask = batch_tuple[3]
                file_inds = batch_tuple[4]
                file_ind = file_inds[0]
                batch_x = np.squeeze(batch_x, axis=0)
                channels = [batch_x]
                if batch_y is not None:
                    batch_y = np.squeeze(batch_y, axis=0)
                    batch_y = dataset.one_hot_to_label_batch(batch_y)
                    batch_y_mask = np.squeeze(batch_y_mask, axis=0)
                    channels.extend([batch_y, batch_y_mask])
                channels = np.concatenate(channels, axis=2)
                # Tile indices become the .npy file name.
                ind0, ind1 = file_ind
                file_name = '{}_{}'.format(ind0, ind1)
                print("We are ready to save {0} to .npy and the batch shape is: {1}".format(file_name, channels.shape))
                save_numpy_array(
                    join(proc_data_path, file_name), channels)
                # Drop references so peak memory stays low across tiles.
                channels = None
                batch_x = None
                batch_y = None
                batch_y_mask = None
        _preprocess(TRAIN)
        _preprocess(VALIDATION)
        _preprocess(TEST)
        print("Files have been preprocessed.")
    def get_file_path(self, file_ind):
        """Return the .npy path for a (row, col) tile index."""
        ind0, ind1 = file_ind
        return join(self.dataset_path, '{}_{}.npy'.format(ind0, ind1))
    def get_file_size(self, file_ind):
        """Return (nb_rows, nb_cols) of a tile via a memory-mapped load."""
        file_path = self.get_file_path(file_ind)
        im = np.load(file_path, mmap_mode='r')
        nb_rows, nb_cols = im.shape[0:2]
        return nb_rows, nb_cols
    def get_img(self, file_ind, window, has_y=True):
        """Memory-map a tile and return the requested window.

        window is ((row_begin, row_end), (col_begin, col_end)).
        """
        file_path = self.get_file_path(file_ind)
        im = np.load(file_path, mmap_mode='r')
        ((row_begin, row_end), (col_begin, col_end)) = window
        img = im[row_begin:row_end, col_begin:col_end, :]
        return img
    def parse_batch(self, batch, has_y=True):
        """Split a stacked .npy batch into (batch_x, batch_y, batch_y_mask).

        Saved arrays are laid out as [active input channels..., label,
        mask]; the label is one-hot encoded before returning.
        """
        nb_channels = self.dataset.active_input_inds[-1]
        batch_x = batch[:, :, :, self.dataset.active_input_inds[0]:nb_channels + 1]
        batch_y = None
        batch_y_mask = None
        if has_y:
            # Second-to-last channel is the label image, last is the mask.
            batch_y = self.dataset.label_to_one_hot_batch(batch[:, :, :, -2:-1])
            batch_y_mask = batch[:, :, :, -1:]
        return batch_x, batch_y, batch_y_mask
| 39.030812 | 132 | 0.599756 | from os.path import join
import numpy as np
import os
from .isprs import IsprsDataset
from .generators import FileGenerator, TRAIN, VALIDATION, TEST
from .utils import (
save_img, load_img, get_img_size, compute_ndvi, _makedirs,
save_numpy_array)
# Sub-directory names (under the datasets root) for the raw and the
# preprocessed (numpy) Potsdam data.
POTSDAM = 'potsdam'
PROCESSED_POTSDAM = 'processed_potsdam'
class PotsdamDataset(IsprsDataset):
    """Channel layout and label conventions for the ISPRS Potsdam dataset."""
    # Train fraction corresponding to a 17/24 tile split.
    sharah_train_ratio = 17 / 24
    def __init__(self, include_depth=True, include_ndvi=True):
        """Configure which input channels are active.

        NOTE(review): the include_* flags are immediately forced to
        False below (RGB-only), so the constructor arguments currently
        have no effect -- confirm whether the override should stay.
        """
        self.include_ir = True
        self.include_depth = include_depth
        self.include_ndvi = include_ndvi
        # Debug override: restrict the inputs to the 3 RGB channels.
        self.include_ir = False
        self.include_depth = False
        self.include_ndvi = False
        # Fixed channel positions within the stacked input array.
        self.red_ind = 0
        self.green_ind = 1
        self.blue_ind = 2
        self.rgb_inds = [self.red_ind, self.green_ind, self.blue_ind]
        self.ir_ind = 3
        self.depth_ind = 4
        self.ndvi_ind = 5
        self.active_input_inds = list(self.rgb_inds)
        # Append the optional channels after RGB when enabled.
        if self.include_ir:
            self.active_input_inds.append(self.ir_ind)
        if self.include_depth:
            self.active_input_inds.append(self.depth_ind)
        if self.include_ndvi:
            self.active_input_inds.append(self.ndvi_ind)
        super().__init__()
    def get_output_file_name(self, file_ind):
        """Label file name for a (row, col) tile index."""
        return 'top_potsdam_{}_{}_label.tif'.format(file_ind[0], file_ind[1])
    def augment_channels(self, batch_x):
        """Append an NDVI channel computed from the red and IR channels."""
        red = batch_x[:, :, :, [self.red_ind]]
        ir = batch_x[:, :, :, [self.ir_ind]]
        ndvi = compute_ndvi(red, ir)
        return np.concatenate([batch_x, ndvi], axis=3)
class PotsdamFileGenerator(FileGenerator):
    """Base batch generator for Potsdam; fixes the tile index splits and
    wires the dataset's active channel indices into FileGenerator."""
    def __init__(self, train_ratio):
        self.dataset = PotsdamDataset()
        # (row, col) tile ids; the first 24 are the train/validation set
        # split used in https://arxiv.org/abs/1606.02585
        self.file_inds = [
            (2, 10), (3, 10), (3, 11), (3, 12), (4, 11), (4, 12), (5, 10),
            (5, 12), (6, 10), (6, 11), (6, 12), (6, 8), (6, 9), (7, 11),
            (7, 12), (7, 7), (7, 9), (2, 11), (2, 12), (4, 10), (5, 11),
            (6, 7), (7, 10), (7, 8)
        ]
        # Held-out test tiles.
        self.test_file_inds = [
            (2, 13), (2, 14), (3, 13), (3, 14), (4, 13), (4, 14), (4, 15),
            (5, 13), (5, 14), (5, 15), (6, 13), (6, 14), (6, 15), (7, 13)
        ]
        super().__init__(self.dataset.active_input_inds, train_ratio, cross_validation=None)
class PotsdamImageFileGenerator(PotsdamFileGenerator):
    """Batch generator reading the original Potsdam TIFF/JPG files."""
    def __init__(self, datasets_path,
                 train_ratio=0.8):
        # datasets_path: root directory containing the raw 'potsdam' data.
        self.dataset_path = join(datasets_path, POTSDAM)
        super().__init__(train_ratio)
    @staticmethod
    def preprocess(datasets_path):
        # Fix the depth image that is missing a column, if it hasn't been
        # fixed already.
        data_path = join(datasets_path, POTSDAM)
        file_path = join(
            data_path,
            '1_DSM_normalisation/dsm_potsdam_03_13_normalized_lastools.jpg')
        im = load_img(file_path)
        if im.shape[1] == 5999:
            im_fix = np.zeros((6000, 6000), dtype=np.uint8)
            im_fix[:, 0:-1] = im[:, :, 0]
            save_img(im_fix, file_path)
    def get_file_size(self, file_ind):
        """Return (nb_rows, nb_cols) of the RGBIR tile for *file_ind*."""
        ind0, ind1 = file_ind
        rgbir_file_path = join(
            self.dataset_path,
            '4_Ortho_RGBIR' + os.sep + 'top_potsdam_{}_{}_RGBIR.tif'.format(ind0, ind1))
        nb_rows, nb_cols = get_img_size(rgbir_file_path)
        return nb_rows, nb_cols
    def get_img(self, file_ind, window, has_y=True):
        """Load and stack the raw channels for one tile window.

        Concatenates RGBIR + depth (and, when has_y, the label image and
        its no-boundary variant) along the channel axis.
        """
        ind0, ind1 = file_ind
        rgbir_file_path = join(
            self.dataset_path,
            '4_Ortho_RGBIR' + os.sep + 'top_potsdam_{}_{}_RGBIR.tif'.format(ind0, ind1))
        depth_file_path = join(
            self.dataset_path,
            '1_DSM_normalisation' + os.sep + 'dsm_potsdam_{:0>2}_{:0>2}_normalized_lastools.jpg'.format(ind0, ind1))  # noqa
        batch_y_file_path = join(
            self.dataset_path,
            '5_Labels_for_participants' + os.sep + 'top_potsdam_{}_{}_label.tif'.format(ind0, ind1))  # noqa
        batch_y_no_boundary_file_path = join(
            self.dataset_path,
            '5_Labels_for_participants_no_Boundary' + os.sep + 'top_potsdam_{}_{}_label_noBoundary.tif'.format(ind0, ind1))  # noqa
        rgbir = load_img(rgbir_file_path, window)
        depth = load_img(depth_file_path, window)
        channels = [rgbir, depth]
        if has_y:
            batch_y = load_img(batch_y_file_path, window)
            batch_y_no_boundary = load_img(
                batch_y_no_boundary_file_path, window)
            channels.extend([batch_y, batch_y_no_boundary])
        img = np.concatenate(channels, axis=2)
        return img
    def parse_batch(self, batch, has_y=True):
        """Split a stacked raw batch into (batch_x, batch_y, batch_y_mask).

        Leading channels are the active inputs; when has_y, they are
        followed by 3 RGB label channels and then the boundary-mask
        channels.
        """
        # Number of channels extracted from images (ndvi is computed later,
        # so the trailing active index is excluded).
        # NOTE(review): with NDVI disabled, len(active_input_inds)-1 slices
        # one channel short of the RGB inputs -- confirm intended.
        nb_channels = len(self.dataset.active_input_inds) - 1
        batch_x = batch[:, :, :, self.dataset.active_input_inds[0]:nb_channels]
        batch_y = None
        batch_y_mask = None
        if has_y:
            batch_y = self.dataset.rgb_to_one_hot_batch(batch[:, :, :, nb_channels:nb_channels+3])
            batch_y_mask = self.dataset.rgb_to_mask_batch(batch[:, :, :, nb_channels+3:])
        return batch_x, batch_y, batch_y_mask
class PotsdamNumpyFileGenerator(PotsdamFileGenerator):
    """Serves Potsdam tiles from preprocessed ``.npy`` files on disk.

    ``preprocess`` converts the raw image dataset (as served by
    ``PotsdamImageFileGenerator``) into one numpy array per tile, with the
    input channels, label plane and mask plane stacked along the channel
    axis.  The remaining methods read windows out of those stacked arrays.
    """

    def __init__(self, datasets_path, train_ratio=0.8):
        """
        :param datasets_path: Directory containing the raw and processed
            Potsdam datasets.
        :param train_ratio: Fraction of files used for training; ``None``
            falls back to the 0.8 default.
        """
        self.raw_dataset_path = join(datasets_path, POTSDAM)
        self.dataset_path = join(datasets_path, PROCESSED_POTSDAM)
        # Callers sometimes pass train_ratio=None explicitly; treat that the
        # same as omitting the argument.  (Was `== None`; identity comparison
        # is the correct idiom.)
        if train_ratio is None:
            train_ratio = 0.8
        print("Train_ratio is: {0}".format(train_ratio))
        super().__init__(train_ratio)

    @staticmethod
    def preprocess(datasets_path):
        """Convert every raw Potsdam tile into a single stacked ``.npy`` file.

        Each saved array is ``[input channels | label plane(s) | mask
        plane(s)]`` concatenated along the channel axis, written to the
        processed-dataset directory as ``<ind0>_<ind1>.npy``.
        """
        proc_data_path = join(datasets_path, PROCESSED_POTSDAM)
        _makedirs(proc_data_path)

        generator = PotsdamImageFileGenerator(datasets_path)
        dataset = generator.dataset

        def _preprocess(split):
            # eval_mode=True makes the generator also yield file indices so
            # each array can be saved under its tile coordinates.
            gen = generator.make_split_generator(
                split, batch_size=1, shuffle=False, augment=False,
                normalize=False, eval_mode=True)

            for batch_tuple in gen:
                batch_x = batch_tuple[0]
                batch_y = batch_tuple[1]
                # NOTE(review): element 2 of the tuple is intentionally
                # skipped here; presumably it is a display batch or all-ones
                # mask -- confirm against make_split_generator in eval mode.
                batch_y_mask = batch_tuple[3]
                file_inds = batch_tuple[4]
                file_ind = file_inds[0]

                # batch_size=1, so drop the leading batch axis.
                batch_x = np.squeeze(batch_x, axis=0)
                channels = [batch_x]

                if batch_y is not None:
                    batch_y = np.squeeze(batch_y, axis=0)
                    batch_y = dataset.one_hot_to_label_batch(batch_y)
                    batch_y_mask = np.squeeze(batch_y_mask, axis=0)
                    channels.extend([batch_y, batch_y_mask])
                channels = np.concatenate(channels, axis=2)

                # Tile coordinates become the file name, e.g. "2_10.npy".
                ind0, ind1 = file_ind
                file_name = '{}_{}'.format(ind0, ind1)
                print("We are ready to save {0} to .npy and the batch shape is: {1}".format(file_name, channels.shape))
                save_numpy_array(
                    join(proc_data_path, file_name), channels)

                # Drop references so the large arrays can be reclaimed before
                # the next iteration.
                channels = None
                batch_x = None
                batch_y = None
                batch_y_mask = None

        _preprocess(TRAIN)
        _preprocess(VALIDATION)
        _preprocess(TEST)
        print("Files have been preprocessed.")

    def get_file_path(self, file_ind):
        """Return the path of the ``.npy`` file for tile ``(ind0, ind1)``."""
        ind0, ind1 = file_ind
        return join(self.dataset_path, '{}_{}.npy'.format(ind0, ind1))

    def get_file_size(self, file_ind):
        """Return ``(nb_rows, nb_cols)`` of a tile without loading it fully."""
        file_path = self.get_file_path(file_ind)
        # mmap_mode avoids reading the whole array just to inspect its shape.
        im = np.load(file_path, mmap_mode='r')
        nb_rows, nb_cols = im.shape[0:2]
        return nb_rows, nb_cols

    def get_img(self, file_ind, window, has_y=True):
        """Read a ``((row_begin, row_end), (col_begin, col_end))`` window.

        ``has_y`` is unused: all channels stored in the file are returned.
        """
        file_path = self.get_file_path(file_ind)
        im = np.load(file_path, mmap_mode='r')
        ((row_begin, row_end), (col_begin, col_end)) = window
        img = im[row_begin:row_end, col_begin:col_end, :]
        return img

    def parse_batch(self, batch, has_y=True):
        """Split a stacked batch into ``(batch_x, batch_y, batch_y_mask)``.

        The input channels occupy the leading planes; the label plane is
        stored second-to-last and the mask plane last (see ``preprocess``).
        """
        nb_channels = self.dataset.active_input_inds[-1]
        batch_x = batch[:, :, :, self.dataset.active_input_inds[0]:nb_channels + 1]
        batch_y = None
        batch_y_mask = None
        if has_y:
            batch_y = self.dataset.label_to_one_hot_batch(batch[:, :, :, -2:-1])
            batch_y_mask = batch[:, :, :, -1:]
        return batch_x, batch_y, batch_y_mask
| true | true |
f7fe275300e90e66a07aab6311b94bd08cc6f698 | 11,713 | py | Python | samtranslator/intrinsics/resolver.py | eugeniosu/serverless-application-model | d93e15232a1921fa51667389d83aeabbf1ff72d3 | [
"Apache-2.0"
] | 2 | 2019-06-04T02:05:28.000Z | 2019-06-16T14:17:08.000Z | samtranslator/intrinsics/resolver.py | eugeniosu/serverless-application-model | d93e15232a1921fa51667389d83aeabbf1ff72d3 | [
"Apache-2.0"
] | 6 | 2020-09-07T16:12:04.000Z | 2022-03-12T00:04:12.000Z | samtranslator/intrinsics/resolver.py | eugeniosu/serverless-application-model | d93e15232a1921fa51667389d83aeabbf1ff72d3 | [
"Apache-2.0"
] | 15 | 2019-05-27T01:04:30.000Z | 2021-10-01T05:54:45.000Z | # Help resolve intrinsic functions
from samtranslator.intrinsics.actions import Action, SubAction, RefAction, GetAttAction
# Every intrinsic is enabled by default: map each intrinsic's name to a
# ready-to-use Action instance.
DEFAULT_SUPPORTED_INTRINSICS = {
    action_cls.intrinsic_name: action_cls()
    for action_cls in (RefAction, SubAction, GetAttAction)
}
class IntrinsicsResolver(object):
    """Walks template fragments and resolves supported intrinsic functions.

    The resolver is configured with a table mapping intrinsic names (e.g.
    "Ref", "Fn::Sub") to Action instances.  Each public ``resolve_*`` method
    performs a pre-order walk of the input, handing every single-key
    ``{"<intrinsic>": ...}`` dict it encounters to the matching Action.
    """

    def __init__(self, parameters, supported_intrinsics=DEFAULT_SUPPORTED_INTRINSICS):
        """Instantiate the resolver.

        :param dict parameters: Map of parameter names to their values
        :param dict supported_intrinsics: Intrinsic name -> Action instance table
        :raises TypeError: If either argument is malformed
        """
        # None fails the isinstance check as well, so one test covers both.
        if not isinstance(parameters, dict):
            raise TypeError("parameters must be a valid dictionary")

        table_ok = isinstance(supported_intrinsics, dict) and \
            all(isinstance(action, Action) for action in supported_intrinsics.values())
        if not table_ok:
            raise TypeError("supported_intrinsics argument must be intrinsic names to corresponding Action classes")

        self.supported_intrinsics = supported_intrinsics
        self.parameters = parameters

    def resolve_parameter_refs(self, input):
        """Recursively inline parameter values wherever they are referenced.

        Intrinsics that do not reference parameters (e.g. !GetAtt, or !Ref to
        a resource) are left untouched.  Because the result inlines parameter
        values, it should not be used directly as the transform's output.

        :param input: Any primitive type (dict, array, string etc) whose
            values might contain intrinsic functions
        :return: A copy of the input with parameter references replaced by
            their actual values.
        """
        return self._traverse(input, self.parameters, self._try_resolve_parameter_refs)

    def resolve_sam_resource_refs(self, input, supported_resource_refs):
        """Rewrite references to derived SAM resources to real resource names.

        Example: ``{"Ref": "MyFunction.Alias"}`` -> ``{"Ref": "MyFunctionAliasLive"}``

        Invalid or non-resolvable references are skipped, not validated; an
        external process should surface those.  For the first call, pass the
        entire CFN template so Mapping/Output sections are handled too.

        :param dict input: CFN template (or fragment, in recursive calls) to
            resolve; modified in place.
        :param SupportedResourceReferences supported_resource_refs: Known
            resource references and the values they resolve to.
        :return: The input with resolvable references rewritten.
        """
        return self._traverse(input, supported_resource_refs, self._try_resolve_sam_resource_refs)

    def resolve_sam_resource_id_refs(self, input, supported_resource_id_refs):
        """Rewrite references to mutated SAM logical ids to their new ids.

        Example: ``{"Ref": "MyLayer"}`` -> ``{"Ref": "MyLayerABC123"}``

        Invalid or non-resolvable references are skipped, not validated.  For
        the first call, pass the entire CFN template so Mapping/Output
        sections are handled too.

        :param dict input: CFN template (or fragment) to resolve; modified in place.
        :param dict supported_resource_id_refs: Map of old logical ids to new ones.
        :return: The input with resolvable id references rewritten.
        """
        return self._traverse(input, supported_resource_id_refs, self._try_resolve_sam_resource_id_refs)

    def _traverse(self, input, resolution_data, resolver_method):
        """Pre-order walk of *input*, resolving intrinsics via *resolver_method*.

        The current node is resolved before its children so that a node that
        IS an intrinsic (e.g. ``{"Ref": "foo"}``) is handled, and partially
        resolved results (e.g. Fn::Sub with nested intrinsics) are then
        traversed further.

        :param input: Any primitive type whose value might contain an intrinsic
        :param resolution_data: Data backing the resolution (e.g. parameter values)
        :param resolver_method: Callable ``(input, resolution_data) -> resolved``
        :return: Modified input with intrinsics resolved
        """
        # No resolution data means nothing could ever resolve; skip the walk.
        if len(resolution_data) == 0:
            return input

        # Resolve the current node first (pre-order).
        input = resolver_method(input, resolution_data)

        # Only dicts and lists are iterable nodes; everything else is a leaf.
        for container_type, walker in ((dict, self._traverse_dict),
                                       (list, self._traverse_list)):
            if isinstance(input, container_type):
                return walker(input, resolution_data, resolver_method)
        return input

    def _traverse_dict(self, input_dict, resolution_data, resolver_method):
        """Resolve intrinsics on every value of the dictionary, in place."""
        for name in input_dict:
            input_dict[name] = self._traverse(input_dict[name], resolution_data, resolver_method)
        return input_dict

    def _traverse_list(self, input_list, resolution_data, resolver_method):
        """Resolve intrinsics on every element of the list, in place."""
        for position, element in enumerate(input_list):
            input_list[position] = self._traverse(element, resolution_data, resolver_method)
        return input_list

    def _try_resolve_parameter_refs(self, input, parameters):
        """Resolve a parameter reference if *input* is a supported intrinsic.

        Anything that is not a single-key dict with a supported intrinsic
        name is returned unmodified.
        """
        if not self._is_intrinsic_dict(input):
            return input
        intrinsic_name = next(iter(input))
        return self.supported_intrinsics[intrinsic_name].resolve_parameter_refs(input, parameters)

    def _try_resolve_sam_resource_refs(self, input, supported_resource_refs):
        """Resolve a SAM resource reference if *input* is a supported intrinsic.

        Non-intrinsic inputs are returned unmodified.
        """
        if not self._is_intrinsic_dict(input):
            return input
        intrinsic_name = next(iter(input))
        return self.supported_intrinsics[intrinsic_name].resolve_resource_refs(input, supported_resource_refs)

    def _try_resolve_sam_resource_id_refs(self, input, supported_resource_id_refs):
        """Resolve a SAM logical-id reference if *input* is a supported intrinsic.

        Non-intrinsic inputs are returned unmodified.
        """
        if not self._is_intrinsic_dict(input):
            return input
        intrinsic_name = next(iter(input))
        return self.supported_intrinsics[intrinsic_name].resolve_resource_id_refs(input, supported_resource_id_refs)

    def _is_intrinsic_dict(self, input):
        """True iff *input* is a single-key dict keyed by a known intrinsic."""
        if not isinstance(input, dict) or len(input) != 1:
            return False
        return next(iter(input)) in self.supported_intrinsics
| 52.524664 | 120 | 0.708273 |
from samtranslator.intrinsics.actions import Action, SubAction, RefAction, GetAttAction
DEFAULT_SUPPORTED_INTRINSICS = {action.intrinsic_name: action() for action in [RefAction, SubAction, GetAttAction]}
class IntrinsicsResolver(object):
def __init__(self, parameters, supported_intrinsics=DEFAULT_SUPPORTED_INTRINSICS):
if parameters is None or not isinstance(parameters, dict):
raise TypeError("parameters must be a valid dictionary")
if not isinstance(supported_intrinsics, dict) \
or not all([isinstance(value, Action) for value in supported_intrinsics.values()]):
raise TypeError("supported_intrinsics argument must be intrinsic names to corresponding Action classes")
self.supported_intrinsics = supported_intrinsics
self.parameters = parameters
def resolve_parameter_refs(self, input):
return self._traverse(input, self.parameters, self._try_resolve_parameter_refs)
def resolve_sam_resource_refs(self, input, supported_resource_refs):
return self._traverse(input, supported_resource_refs, self._try_resolve_sam_resource_refs)
def resolve_sam_resource_id_refs(self, input, supported_resource_id_refs):
return self._traverse(input, supported_resource_id_refs, self._try_resolve_sam_resource_id_refs)
def _traverse(self, input, resolution_data, resolver_method):
if len(resolution_data) == 0:
return input
#
# We will try to resolve the intrinsics if we can, otherwise return the original input. In some cases, resolving
# an intrinsic will result in a terminal state ie. {"Ref": "foo"} could resolve to a string "bar". In other
# cases, resolving intrinsics is only partial and we might need to continue traversing the tree (ex: Fn::Sub)
# to handle nested intrinsics. All of these cases lend well towards a Pre-Order traversal where we try and
# process the intrinsic, which results in a modified sub-tree to traverse.
#
input = resolver_method(input, resolution_data)
if isinstance(input, dict):
return self._traverse_dict(input, resolution_data, resolver_method)
elif isinstance(input, list):
return self._traverse_list(input, resolution_data, resolver_method)
else:
# We can iterate only over dict or list types. Primitive types are terminals
return input
def _traverse_dict(self, input_dict, resolution_data, resolver_method):
for key, value in input_dict.items():
input_dict[key] = self._traverse(value, resolution_data, resolver_method)
return input_dict
def _traverse_list(self, input_list, resolution_data, resolver_method):
for index, value in enumerate(input_list):
input_list[index] = self._traverse(value, resolution_data, resolver_method)
return input_list
def _try_resolve_parameter_refs(self, input, parameters):
if not self._is_intrinsic_dict(input):
return input
function_type = list(input.keys())[0]
return self.supported_intrinsics[function_type].resolve_parameter_refs(input, parameters)
def _try_resolve_sam_resource_refs(self, input, supported_resource_refs):
if not self._is_intrinsic_dict(input):
return input
function_type = list(input.keys())[0]
return self.supported_intrinsics[function_type].resolve_resource_refs(input, supported_resource_refs)
def _try_resolve_sam_resource_id_refs(self, input, supported_resource_id_refs):
if not self._is_intrinsic_dict(input):
return input
function_type = list(input.keys())[0]
return self.supported_intrinsics[function_type].resolve_resource_id_refs(input, supported_resource_id_refs)
def _is_intrinsic_dict(self, input):
# All intrinsic functions are dictionaries with just one key
return isinstance(input, dict) \
and len(input) == 1 \
and list(input.keys())[0] in self.supported_intrinsics
| true | true |
f7fe28b0be00eae6a0bd98fff823e1a97f37ce7b | 973 | py | Python | script/feedback_js_sqli.py | bingpo/dedecmscan | 879470f51a22f1f544b469d80fd2a88d692f116d | [
"Apache-2.0"
] | 751 | 2019-08-16T06:44:25.000Z | 2022-03-30T01:57:53.000Z | script/feedback_js_sqli.py | W-Peter/dedecmscan | 42bc196f49e2930490b8f336025540a3d8329e7a | [
"Apache-2.0"
] | 2 | 2019-12-13T12:41:55.000Z | 2020-12-10T07:17:17.000Z | script/feedback_js_sqli.py | W-Peter/dedecmscan | 42bc196f49e2930490b8f336025540a3d8329e7a | [
"Apache-2.0"
] | 143 | 2019-08-29T11:26:42.000Z | 2022-03-30T06:54:47.000Z | import requests
from termcolor import cprint
class feedback_sqli:
    """Checks a DedeCMS target for the plus/feedback_js.php SQL injection."""

    def __init__(self, url):
        # Target base URL; a scheme and trailing slash are added lazily by
        # feedcheck() when missing.
        self.url = url

    def feedcheck(self):
        """Probe the target and print a red warning when it looks injectable.

        Returns False when the HTTP request fails; otherwise returns None
        (a positive finding is only reported via cprint).
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
        }
        # NOTE(review): the " sequences below look like HTML-escaping
        # artifacts picked up when this file was copied around; the working
        # exploit presumably uses literal double quotes -- confirm against
        # the original PoC before relying on this payload.
        payload = "plus/feedback_js.php?arcurl=' union select "' and 1=2 union select 1,1,1,userid,3,1,3,3,pwd,1,1,3,1,1,1,1,1 from dede_admin where 1=1 union select * from dede_feedback where 1=2 and ''='" from dede_admin where ''='"
        if '://' not in self.url:
            self.url = 'http://' + self.url + '/'
        vulnurl = self.url + payload
        try:
            r = requests.get(url=vulnurl, headers=headers)
            # The md5-looking token is leaked admin data; its presence in the
            # response body marks a successful injection.
            if r.status_code == 200 and "4beed3b9c4a886067de0e3a094246f78" in r.text:
                cprint("target may be feedback_js.php SqlInject", "red")
        except requests.RequestException:
            # Narrowed from a bare `except:` so programming errors surface;
            # network/HTTP failures still return False as before.
            return False
from termcolor import cprint
class feedback_sqli:
def __init__(self,url):
self.url = url
def feedcheck(self):
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "plus/feedback_js.php?arcurl=' union select "' and 1=2 union select 1,1,1,userid,3,1,3,3,pwd,1,1,3,1,1,1,1,1 from dede_admin where 1=1 union select * from dede_feedback where 1=2 and ''='" from dede_admin where ''='"
if '://' not in self.url:
self.url = 'http://' + self.url + '/'
url = self.url
vulnurl = url + payload
try:
r = requests.get(url=vulnurl,headers=headers)
if r.status_code == 200 and "4beed3b9c4a886067de0e3a094246f78" in r.text:
cprint("target may be feedback_js.php SqlInject", "red")
except:
return False | true | true |
f7fe292b5fc71d9d0955e11bbc9b24a3a19fad6e | 1,072 | py | Python | 2019/problem06.py | wandrewjam/advent-of-code | 3bad15e38e9b6b4983965a96a43c85250444e3d2 | [
"MIT"
] | null | null | null | 2019/problem06.py | wandrewjam/advent-of-code | 3bad15e38e9b6b4983965a96a43c85250444e3d2 | [
"MIT"
] | null | null | null | 2019/problem06.py | wandrewjam/advent-of-code | 3bad15e38e9b6b4983965a96a43c85250444e3d2 | [
"MIT"
] | null | null | null | # https://adventofcode.com/2019/day/6
def load_file(filename: str) -> dict:
    """Parse "CENTER)SATELLITE" lines into a {satellite: center} dict."""
    orbits = {}
    with open(filename) as source:
        for raw_line in source:
            # Strip the trailing newline, then split on the orbit separator.
            center, satellite = raw_line[:-1].split(')')
            orbits[satellite] = center
    return orbits
def get_lineage(parent: str, orbits: dict) -> list:
    """Return the orbit chain ending at *parent*, root first.

    Walks the {satellite: center} map until a body with no recorded center
    is reached; that root becomes the first element of the result.
    """
    chain = [parent]
    node = parent
    while node in orbits:
        node = orbits[node]
        chain.append(node)
    chain.reverse()
    return chain
def main(orbits: dict):
    """Print the total orbit count and the YOU->SAN transfer distance."""
    # Part 1: each body directly/indirectly orbits everything in its
    # center's lineage.
    total_orbits = sum(len(get_lineage(center, orbits)) for center in orbits.values())
    print(total_orbits)

    # Part 2: number of transfers between the bodies YOU and SAN orbit.
    you_chain = get_lineage('YOU', orbits)
    san_chain = get_lineage('SAN', orbits)
    common = 0
    while you_chain[common] == san_chain[common]:
        common += 1
    print(len(you_chain) + len(san_chain) - 2 * (common + 1))
if __name__ == '__main__':
    import sys

    # The orbit-map file may be supplied as the first CLI argument;
    # otherwise fall back to the default puzzle input.
    source = sys.argv[1] if len(sys.argv) > 1 else 'problem06.in'
    main(load_file(source))
| 22.333333 | 62 | 0.606343 |
def load_file(filename: str) -> dict:
with open(filename) as f:
raw_orbits = f.readlines()
orbits = dict()
for line in raw_orbits:
value, key = line[:-1].split(')')
orbits[key] = value
return orbits
def get_lineage(parent: str, orbits: dict) -> list:
try:
lineage = get_lineage(orbits[parent], orbits)
lineage.append(parent)
return lineage
except KeyError:
return [parent]
def main(orbits: dict):
orbit_count = 0
for child, parent in orbits.items():
orbit_count += len(get_lineage(parent, orbits))
print(orbit_count)
my_lineage = get_lineage('YOU', orbits)
santas_lineage = get_lineage('SAN', orbits)
i = 0
while my_lineage[i] == santas_lineage[i]:
i += 1
print(len(my_lineage) + len(santas_lineage) - 2 * (i + 1))
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
orbits = load_file(sys.argv[1])
else:
orbits = load_file('problem06.in')
main(orbits)
| true | true |
f7fe2b523104b7e75b08768d6ffbdbfbb2eb4186 | 6,546 | py | Python | lib/datasets/coco_train.py | wdd0225/RetinaNet-and-SSD-in-PyTorch-Detectron | 4140e197e78dfd59c8f09dcd33e97f6040a0f39e | [
"MIT"
] | 3 | 2019-01-26T08:59:30.000Z | 2021-03-03T21:45:41.000Z | lib/datasets/coco_train.py | daijucug/RetinaNet-and-SSD-in-PyTorch-Detectron | 4140e197e78dfd59c8f09dcd33e97f6040a0f39e | [
"MIT"
] | null | null | null | lib/datasets/coco_train.py | daijucug/RetinaNet-and-SSD-in-PyTorch-Detectron | 4140e197e78dfd59c8f09dcd33e97f6040a0f39e | [
"MIT"
] | 1 | 2019-03-26T12:51:37.000Z | 2019-03-26T12:51:37.000Z | from lib.core import ssd_config as cfg
import os
import os.path as osp
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
import numpy as np
# def get_label_map(label_file):
# label_map = {}
# labels = open(label_file, 'r')
# for line in labels:
# ids = line.split(',')
# label_map[int(ids[0])] = int(ids[1])
# return label_map
def get_label_map(labels):
    """Build a {raw_id: mapped_id} dict from (raw, mapped) label pairs."""
    return {int(pair[0]): int(pair[1]) for pair in labels}
class COCOAnnotationTransform(object):
    """Transforms a COCO annotation into a list of [bbox coords, class idx] rows.

    Initialized with a dictionary lookup that maps raw COCO category ids to
    contiguous class indices.
    """

    def __init__(self):
        # Maps sparse COCO category ids to contiguous ids (1-based).
        self.label_map = get_label_map(cfg.COCO_LABEL_MAP)

    def __call__(self, target, width, height):
        """
        Args:
            target (list of dict): COCO target json annotations
            width (int): image width
            height (int): image height
        Returns:
            a list of [xmin, ymin, xmax, ymax, label_idx] lists, with the
            coordinates normalised by the image size.
        """
        scale = np.array([width, height, width, height])
        res = []
        for obj in target:
            if 'bbox' in obj:
                # COCO boxes are [x, y, w, h]; convert to corner form on a
                # copy.  The previous code did `bbox[2] += bbox[0]` in place,
                # corrupting the cached annotation dicts (boxes grew every
                # time the dataset was iterated).
                x, y, w, h = obj['bbox']
                bbox = [x, y, x + w, y + h]
                label_idx = self.label_map[obj['category_id']] - 1
                final_box = list(np.array(bbox) / scale)
                final_box.append(label_idx)
                res += [final_box]  # [xmin, ymin, xmax, ymax, label_idx]
            else:
                print("no bbox problem!")

        return res  # [[xmin, ymin, xmax, ymax, label_idx], ... ]
class COCODetection(data.Dataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.

    Args:
        root (string): Directory used to locate the pycocotools API; image and
            annotation paths themselves come from the global config.
        image_set (string): Name of the specific set of COCO images.
        transform (callable, optional): A function/transform that augments the
            raw images.
        target_transform (callable, optional): A function/transform that takes
            in the target (bbox) and transforms it.
        dataset_name (string): Human-readable name used by __repr__.
    """

    def __init__(self, root, image_set='train2017', transform=None,
                 target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):
        sys.path.append(osp.join(root, cfg.COCO_API))
        from pycocotools.coco import COCO
        self.root = osp.join(cfg.COCO_ROOT, cfg.IMAGES, image_set)
        self.coco = COCO(osp.join(cfg.COCO_ROOT, cfg.ANNOTATIONS,
                                  cfg.INSTANCES_SET.format(image_set)))
        # Only keep images that actually have annotations.
        self.ids = list(self.coco.imgToAnns.keys())
        self.transform = transform
        self.target_transform = target_transform
        self.name = dataset_name

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target); height and width from pull_item
            are discarded.
        """
        im, gt, h, w = self.pull_item(index)
        return im, gt

    def __len__(self):
        return len(self.ids)

    def pull_item(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target, height, width).
            target is the (possibly transformed) annotation list/array.
        """
        img_id = self.ids[index]
        # (a dead `target = self.coco.imgToAnns[img_id]` that was immediately
        # overwritten by loadAnns has been removed)
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        target = self.coco.loadAnns(ann_ids)

        path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])
        assert osp.exists(path), 'Image path does not exist: {}'.format(path)
        # `path` is already rooted; the old `osp.join(self.root, path)`
        # double-joined it and only worked when self.root was absolute.
        img = cv2.imread(path)
        height, width, _ = img.shape
        if self.target_transform is not None:
            target = self.target_transform(target, width, height)
        if self.transform is not None:
            target = np.array(target)
            img, boxes, labels = self.transform(img, target[:, :4],
                                                target[:, 4])
            # BGR (OpenCV) -> RGB
            img = img[:, :, (2, 1, 0)]
            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return torch.from_numpy(img).permute(2, 0, 1), target, height, width

    def pull_image(self, index):
        '''Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            cv2 img
        '''
        img_id = self.ids[index]
        path = self.coco.loadImgs(img_id)[0]['file_name']
        return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)

    def pull_anno(self, index):
        '''Returns the original annotation of image at index

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to get annotation of
        Return:
            list: [img_id, [(label, bbox coords),...]]
                eg: ('001718', [('dog', (96, 13, 438, 332))])
        '''
        img_id = self.ids[index]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        return img_id, self.coco.loadAnns(ann_ids)

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
| 38.05814 | 110 | 0.572563 | from lib.core import ssd_config as cfg
import os
import os.path as osp
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
import numpy as np
def get_label_map(labels):
label_map = {}
for ids in labels:
label_map[int(ids[0])] = int(ids[1])
return label_map
class COCOAnnotationTransform(object):
def __init__(self):
self.label_map = get_label_map(cfg.COCO_LABEL_MAP)
def __call__(self, target, width, height):
scale = np.array([width, height, width, height])
res = []
for obj in target:
if 'bbox' in obj:
bbox = obj['bbox']
bbox[2] += bbox[0]
bbox[3] += bbox[1]
label_idx = self.label_map[obj['category_id']] - 1
final_box = list(np.array(bbox)/scale)
final_box.append(label_idx)
res += [final_box]
else:
print("no bbox problem!")
return res
class COCODetection(data.Dataset):
def __init__(self, root, image_set='train2017', transform=None,
target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):
sys.path.append(osp.join(root, cfg.COCO_API))
from pycocotools.coco import COCO
self.root = osp.join(cfg.COCO_ROOT, cfg.IMAGES, image_set)
self.coco = COCO(osp.join(cfg.COCO_ROOT, cfg.ANNOTATIONS,
cfg.INSTANCES_SET.format(image_set)))
self.ids = list(self.coco.imgToAnns.keys())
self.transform = transform
self.target_transform = target_transform
self.name = dataset_name
def __getitem__(self, index):
im, gt, h, w = self.pull_item(index)
return im, gt
def __len__(self):
return len(self.ids)
def pull_item(self, index):
img_id = self.ids[index]
target = self.coco.imgToAnns[img_id]
ann_ids = self.coco.getAnnIds(imgIds=img_id)
target = self.coco.loadAnns(ann_ids)
path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])
assert osp.exists(path), 'Image path does not exist: {}'.format(path)
img = cv2.imread(osp.join(self.root, path))
height, width, _ = img.shape
if self.target_transform is not None:
target = self.target_transform(target, width, height)
if self.transform is not None:
target = np.array(target)
img, boxes, labels = self.transform(img, target[:, :4],
target[:, 4])
img = img[:, :, (2, 1, 0)]
target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
return torch.from_numpy(img).permute(2, 0, 1), target, height, width
def pull_image(self, index):
img_id = self.ids[index]
path = self.coco.loadImgs(img_id)[0]['file_name']
return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)
def pull_anno(self, index):
img_id = self.ids[index]
ann_ids = self.coco.getAnnIds(imgIds=img_id)
return img_id, self.coco.loadAnns(ann_ids)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
| true | true |
f7fe2bcd3bdb9de5858cb8fa048e92dc5e5e6186 | 418 | py | Python | tests/test_status_codes.py | ambrozic/http3 | 5442006a41f94a3e41186910d7a6e8546adf0f89 | [
"BSD-3-Clause"
] | null | null | null | tests/test_status_codes.py | ambrozic/http3 | 5442006a41f94a3e41186910d7a6e8546adf0f89 | [
"BSD-3-Clause"
] | null | null | null | tests/test_status_codes.py | ambrozic/http3 | 5442006a41f94a3e41186910d7a6e8546adf0f89 | [
"BSD-3-Clause"
] | null | null | null | import http3
def test_status_code_as_int():
    """codes members compare equal to plain ints and stringify to the bare number."""
    status = http3.codes.NOT_FOUND
    assert status == 404
    assert str(status) == "404"
def test_lowercase_status_code():
    """Lowercase aliases resolve to the same integer value as the uppercase members."""
    status = http3.codes.not_found
    assert status == 404
def test_reason_phrase_for_status_code():
    """A known status code maps to its canonical reason phrase."""
    phrase = http3.codes.get_reason_phrase(404)
    assert phrase == "Not Found"
def test_reason_phrase_for_unknown_status_code():
    """An unrecognised status code falls back to an empty reason phrase."""
    phrase = http3.codes.get_reason_phrase(499)
    assert phrase == ""
| 22 | 60 | 0.753589 | import http3
def test_status_code_as_int():
assert http3.codes.NOT_FOUND == 404
assert str(http3.codes.NOT_FOUND) == "404"
def test_lowercase_status_code():
assert http3.codes.not_found == 404
def test_reason_phrase_for_status_code():
assert http3.codes.get_reason_phrase(404) == "Not Found"
def test_reason_phrase_for_unknown_status_code():
assert http3.codes.get_reason_phrase(499) == ""
| true | true |
f7fe2d8607bd560f589726b4e8444b0f89cc7a08 | 6,537 | py | Python | experiments/tsp/tsp.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | 2 | 2019-10-15T11:28:12.000Z | 2021-01-28T15:14:09.000Z | experiments/tsp/tsp.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | null | null | null | experiments/tsp/tsp.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | null | null | null | import tempfile
import numpy as np
from graphviz import Digraph
from equivalencecheckers.bruteforce import BFEquivalenceChecker
#from experiments.tsp.tsplearner import TSPLearner
from experiments.tsp.tsplearner import TSPLearner
from learners.mealylearner import MealyLearner
from suls.mealymachine import MealyState
from suls.sul import SUL
from teachers.teacher import Teacher
from itertools import permutations
import random
class TSPProblem:
    """A random Euclidean travelling-salesman instance on a width x height plane."""

    def __init__(self, width=100, height=100):
        self.cities = None      # (n, 2) array of city coordinates, set by make_random()
        self.distances = None   # (n, n) symmetric Euclidean distance matrix
        self.width = width
        self.height = height

    def make_random(self, n_cities):
        """Place *n_cities* uniformly at random and precompute all pairwise distances.

        Returns self so construction can be chained: TSPProblem().make_random(5).
        """
        self.cities = np.random.rand(n_cities, 2) * np.array([self.width, self.height])
        # Broadcast (n, 1, 2) against (1, n, 2) to get every pairwise coordinate
        # difference in one vectorised step, then reduce to Euclidean distances.
        diffs = self.cities.reshape(len(self.cities), -1, 2) - self.cities.reshape(-1, len(self.cities), 2)
        self.distances = np.sqrt(np.sum(np.square(diffs), axis=2))
        return self

    def get_dist(self, frm, to):
        """Euclidean distance between city indices *frm* and *to*."""
        return self.distances[frm, to]

    def get_path_dist(self, path):
        """Total length of *path*, a sequence of at least two city indices."""
        assert len(path) > 1, f"cannot get path lenght of paths with just one state: {path}"
        # Sum the distance of every consecutive pair along the path.
        return sum(self.get_dist(a, b) for a, b in zip(path, path[1:]))

    def bruteforce_shortestpath(self):
        """Exhaustively search every tour that starts and ends at city 0.

        Returns (shortest_length, shortest_path).  O(n!) in the number of
        cities, so only usable for small instances.
        """
        shortest_len = float('inf')  # proper sentinel instead of a magic number
        shortest_path = None
        actions = list(range(1, len(self.cities)))
        for p in permutations(actions):
            candidate = [0] + list(p) + [0]
            dist = self.get_path_dist(candidate)
            # (leftover debug print of every candidate distance removed)
            if dist < shortest_len:
                shortest_len = dist
                shortest_path = candidate
        return (shortest_len, shortest_path)
class TSPSul(SUL):
    """SUL (system under learning) wrapper exposing a TSP instance to the learner.

    Outputs are Monte-Carlo estimates of the expected remaining tour length
    after the given prefix of city visits.
    """

    def __init__(self, problem, initial_state):
        self.problem = problem
        self.initial_state = initial_state
        self.state = initial_state
        # Memoises estimates per input prefix so repeated membership queries
        # are cheap and deterministic after the first sample.
        self.mem = {}

    def calc_expected_future_len(self, inputs, n):
        """Estimate the expected length of completing the tour after *inputs*.

        Averages *n* uniformly random completions that visit every remaining
        city once and return to the initial city.  Results are cached per
        prefix, so randomness is only sampled on the first call.
        """
        key = tuple(inputs)
        if key in self.mem:
            return self.mem[key]
        # Cities not yet visited in this prefix.
        not_visited = list(set(self.get_alphabet()).difference(set(inputs)))
        # The random completion starts from the last visited city, or from the
        # initial city when the prefix is empty.
        start = int(self.initial_state) if len(inputs) < 1 else int(inputs[-1])
        acc_dist = 0
        for _ in range(n):
            random.shuffle(not_visited)
            remaining_path = [start] + [int(x) for x in not_visited] + [int(self.initial_state)]
            acc_dist += self.problem.get_path_dist(remaining_path)
        estimate = acc_dist / n
        self.mem[key] = estimate
        return estimate

    def process_input(self, inputs):
        """Return the expected-future-length estimate for *inputs* (None if empty).

        (Removed: an unused `output` accumulator and a large block of dead,
        commented-out code implementing a stricter no-revisit rule.)
        """
        if len(inputs) < 1:
            return None
        return self.calc_expected_future_len(inputs, 1000)

    def reset(self):
        """Return the SUL to its initial city."""
        self.state = self.initial_state

    def get_alphabet(self):
        """City indices as strings, one input symbol per city."""
        return [str(x) for x in range(len(self.problem.cities))]
def filter_errs(hyp):
    """Drop every transition whose output is 'invalid_input' from the hypothesis."""
    for state in hyp.get_states():
        doomed = [action for action, (_, output) in state.edges.items()
                  if output == 'invalid_input']
        for action in doomed:
            del state.edges[action]
def cleanup(hyp):
    """Round every transition output to a 2-decimal string for readable rendering."""
    for state in hyp.get_states():
        for action in list(state.edges):
            nextstate, output = state.edges[action]
            state.edges[action] = (nextstate, format(output, '.2f'))
def draw(hyp, filename):
    """Render the hypothesis automaton *hyp* to a Graphviz file and open a viewer.

    Transitions labelled '0' (returning to the start city) are collected
    separately; the node reached by the last such transition is relabelled
    with the smallest observed '0'-output.
    """
    g = Digraph('G', filename=filename)
    g.attr(rankdir='LR')
    # Collect nodes and edges via a stack-based walk from the initial state.
    to_visit = [hyp.initial_state]
    visited = []
    # Hacky way to draw start arrow pointing to first node: an invisible
    # zero-size node with a single edge into the initial state.
    g.attr('node', shape='none')
    g.node('startz', label='', _attributes={'height': '0', 'width': '0'})
    # Draw initial state
    g.attr('node', shape='circle')
    g.node(hyp.initial_state.name, label='0')
    g.edge('startz', hyp.initial_state.name)
    laststeps = []   # outputs of every '0'-labelled (tour-closing) transition
    lastname = None  # name of the most recent node reached via a '0' transition
    while len(to_visit) > 0:
        cur_state = to_visit.pop()
        visited.append(cur_state)
        g.attr('node', shape='circle')
        for action, (other_state, output) in cur_state.edges.items():
            # Draw other states, but only once
            if other_state not in visited and other_state not in to_visit:
                to_visit.append(other_state)
                if action == '0':
                    laststeps.append(float(output))
                    lastname = other_state.name
                else:
                    g.node(other_state.name, label=output)
            # Draw edges too
            if action == '0':
                g.edge(cur_state.name, other_state.name, label=f'{action}/{output}')
            else:
                g.edge(cur_state.name, other_state.name, label=f'{action}')
    # NOTE(review): if no reachable transition is labelled '0', lastname is
    # None and laststeps is empty, so min() raises -- presumably every learnt
    # hypothesis contains a closing transition; confirm with the learner.
    g.node(lastname, label=str(min(laststeps)))
    g.view()
if __name__ == "__main__":
    # Fix the RNG so the random TSP instance is reproducible across runs.
    np.random.seed(1337)
    tspprob = TSPProblem().make_random(4)
    tsp = TSPSul(tspprob, 0)
    # Warm the memoisation cache for the empty prefix before learning starts.
    tsp.calc_expected_future_len([], 1000)
    eqc = BFEquivalenceChecker(tsp, max_depth=6)
    teacher = Teacher(tsp, eqc)
    learner = TSPLearner(teacher, tsp=tsp)
    #learner = MealyLearner(teacher)
    hyp = learner.run(show_intermediate=True)
    #filter_errs(hyp)
    # Round the hypothesis outputs for readable rendering.
    cleanup(hyp)
    #raw(hyp, tempfile.mktemp('.gv'))
    hyp.render_graph(tempfile.mktemp('.gv'))
    # tspprob = TSPProblem().make_random(5)
    # tsp = TSPSul(tspprob, 0)
| 32.522388 | 154 | 0.598287 | import tempfile
import numpy as np
from graphviz import Digraph
from equivalencecheckers.bruteforce import BFEquivalenceChecker
from experiments.tsp.tsplearner import TSPLearner
from learners.mealylearner import MealyLearner
from suls.mealymachine import MealyState
from suls.sul import SUL
from teachers.teacher import Teacher
from itertools import permutations
import random
class TSPProblem:
def __init__(self, width=100, height=100):
self.cities = None
self.distances = None
self.width = width
self.height = height
def make_random(self, n_cities):
self.cities = np.random.rand(n_cities, 2) * np.array([self.width, self.height])
self.distances = np.sqrt(np.sum(np.square(self.cities.reshape(len(self.cities), -1, 2) - self.cities.reshape(-1, len(self.cities), 2)), axis=2))
return self
def get_dist(self, frm, to):
return self.distances[frm, to]
def get_path_dist(self, path):
assert len(path) > 1, f"cannot get path lenght of paths with just one state: {path}"
return sum([self.get_dist(a, b) for [a, b] in [path[x: x + 2] for x in range(len(path) - 1)]])
def bruteforce_shortestpath(self):
shortest_len = 999999999999
shortest_path = None
actions = list(range(1, len(self.cities)))
for p in permutations(actions):
dist = self.get_path_dist([0] + list(p) + [0])
print(dist)
if dist < shortest_len:
shortest_len = dist
shortest_path = [0] + list(p) + [0]
return (shortest_len, shortest_path)
class TSPSul(SUL):
def __init__(self, problem, initial_state):
self.problem = problem
self.initial_state = initial_state
self.state = initial_state
self.mem = {}
def calc_expected_future_len(self, inputs, n):
if tuple(inputs) in self.mem:
return self.mem[tuple(inputs)]
alphabet = set(self.get_alphabet())
not_visited = alphabet.difference(set(inputs))
not_visited = list(not_visited)
acc_dist = 0
for i in range(n):
random.shuffle(not_visited)
remaining_path = [int(self.initial_state) if len(inputs) < 1 else int(inputs[-1])] + [int(x) for x in not_visited] + [int(self.initial_state)]
acc_dist += self.problem.get_path_dist(remaining_path)
self.mem[tuple(inputs)] = acc_dist / n
return acc_dist / n
def process_input(self, inputs):
if len(inputs) < 1:
return None
output = 0
return self.calc_expected_future_len(inputs, 1000)
def reset(self):
self.state = self.initial_state
def get_alphabet(self):
return [str(x) for x in list(range(len(self.problem.cities)))]
def filter_errs(hyp):
for state in hyp.get_states():
todelete = []
for action, (nextstate, output) in state.edges.items():
if output == 'invalid_input':
todelete.append(action)
for action in todelete:
del state.edges[action]
def cleanup(hyp):
for state in hyp.get_states():
for action, (nextstate, output) in state.edges.items():
state.edges[action] = (nextstate, f'{output:.2f}')
def draw(hyp, filename):
g = Digraph('G', filename=filename)
g.attr(rankdir='LR')
to_visit = [hyp.initial_state]
visited = []
g.attr('node', shape='none')
g.node('startz', label='', _attributes={'height': '0', 'width': '0'})
g.attr('node', shape='circle')
g.node(hyp.initial_state.name, label='0')
g.edge('startz', hyp.initial_state.name)
laststeps = []
lastname = None
while len(to_visit) > 0:
cur_state = to_visit.pop()
visited.append(cur_state)
g.attr('node', shape='circle')
for action, (other_state, output) in cur_state.edges.items():
if other_state not in visited and other_state not in to_visit:
to_visit.append(other_state)
if action == '0':
laststeps.append(float(output))
lastname = other_state.name
else:
g.node(other_state.name, label=output)
if action == '0':
g.edge(cur_state.name, other_state.name, label=f'{action}/{output}')
else:
g.edge(cur_state.name, other_state.name, label=f'{action}')
g.node(lastname, label=str(min(laststeps)))
g.view()
if __name__ == "__main__":
np.random.seed(1337)
tspprob = TSPProblem().make_random(4)
tsp = TSPSul(tspprob, 0)
tsp.calc_expected_future_len([], 1000)
eqc = BFEquivalenceChecker(tsp, max_depth=6)
teacher = Teacher(tsp, eqc)
learner = TSPLearner(teacher, tsp=tsp)
hyp = learner.run(show_intermediate=True)
cleanup(hyp)
hyp.render_graph(tempfile.mktemp('.gv'))
| true | true |
f7fe2e12189f5c7bd5c301d8cd6a29b000ff6951 | 4,352 | py | Python | origin_check.py | mikispag/OriginCheck | b3bda26c382cdbfd78bddc11d99d6e8723255599 | [
"MIT"
] | 1 | 2020-08-19T06:53:24.000Z | 2020-08-19T06:53:24.000Z | origin_check.py | mikispag/OriginCheck | b3bda26c382cdbfd78bddc11d99d6e8723255599 | [
"MIT"
] | null | null | null | origin_check.py | mikispag/OriginCheck | b3bda26c382cdbfd78bddc11d99d6e8723255599 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import concurrent.futures
import logging
import requests
from sys import argv, exit
from urllib.parse import urlparse
# Verbose logging: every probe decision below is reported.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Impersonate a desktop Chrome so servers answer as they would for a browser.
HEADERS = {
    'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.30 Safari/537.36'
}
# Responses shorter than this are treated as empty/error pages and skipped.
MIN_RESPONSE_LENGTH = 100
NUM_WORKERS = 50
urls = []
if len(argv) < 2:
    exit("Please specify a URLs file.")
# Input file: one URL per line; trailing whitespace/newline stripped.
with open(argv[1]) as f:
    urls = [line.rstrip() for line in f]
def check(url):
    """Probe *url* for Origin-header sensitivity.

    Returns None when the URL is unusable or unstable, otherwise a dict
    recording whether a same-origin and a cross-origin Origin header change
    the response (status code or body size).
    """
    # First fetch: the URL must answer 200 with a non-trivial body.
    resp = requests.get(url, timeout=5, allow_redirects=False, headers=HEADERS)
    baseline_size = len(resp.text)
    if resp.status_code != 200 or baseline_size < MIN_RESPONSE_LENGTH:
        logging.debug("Ignoring %s: response %d, response size %d.",
                      url, resp.status_code, baseline_size)
        return None
    # Second fetch: require stability (200 again and identical body size).
    resp = requests.get(url, timeout=5, allow_redirects=False, headers=HEADERS)
    if resp.status_code != 200 or baseline_size != len(resp.text):
        logging.debug("URL %s is unstable.", url)
        return None
    logging.info("URL %s is stable.", url)
    # Stable URL: first try an Origin header matching the URL's own origin.
    parsed = urlparse(resp.url)
    origin = '{}://{}'.format(parsed.scheme, parsed.netloc)
    logging.debug('Sending same-origin Origin %s for %s...', origin, url)
    result = {'url': url}
    for flag in ('SAMEORIGIN_OK', 'CROSSORIGIN_OK',
                 'SAMEORIGIN_KO_STATUS', 'SAMEORIGIN_KO_RESPONSE',
                 'CROSSORIGIN_KO_STATUS', 'CROSSORIGIN_KO_RESPONSE'):
        result[flag] = False
    resp = requests.get(url, timeout=5, allow_redirects=False,
                        headers={**HEADERS, **{'Origin': origin}})
    if resp.status_code != 200:
        logging.info(
            "[SAME ORIGIN] URL %s changed status code to %d.", url, resp.status_code)
        result['SAMEORIGIN_KO_STATUS'] = resp.status_code
        return result
    if baseline_size != len(resp.text):
        logging.info(
            "[SAME ORIGIN] URL %s changed response size to %d.", url, len(resp.text))
        result['SAMEORIGIN_KO_RESPONSE'] = True
        return result
    result['SAMEORIGIN_OK'] = True
    # Same-origin passed -- now try a foreign Origin.
    logging.debug('Sending cross-origin Origin for URL %s.', url)
    resp = requests.get(url, timeout=5, allow_redirects=False, headers={
        **HEADERS, **{'Origin': 'https://example.org'}})
    if resp.status_code != 200:
        logging.info(
            "[CROSS ORIGIN] URL %s changed status code to %d.", url, resp.status_code)
        result['CROSSORIGIN_KO_STATUS'] = resp.status_code
        return result
    if baseline_size != len(resp.text):
        logging.info(
            "[CROSS ORIGIN] URL %s changed response size to %d.", url, len(resp.text))
        result['CROSSORIGIN_KO_RESPONSE'] = True
        return result
    result['CROSSORIGIN_OK'] = True
    return result
with open('results.csv', 'w') as w:
    # CSV header matches the keys produced by check().
    print('url,SAMEORIGIN_OK,CROSSORIGIN_OK,SAMEORIGIN_KO_STATUS,SAMEORIGIN_KO_RESPONSE,CROSSORIGIN_KO_STATUS,CROSSORIGIN_KO_RESPONSE', file=w)
    with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        future_to_result = {executor.submit(check, url): url for url in urls}
        for future in concurrent.futures.as_completed(future_to_result):
            try:
                result = future.result()
            except Exception:
                # Network errors and timeouts are expected for some URLs:
                # log and keep going.  (Was a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit and hid all errors.)
                logging.debug("Check failed for %s", future_to_result[future],
                              exc_info=True)
                continue
            else:
                if result:
                    print('{},{},{},{},{},{},{}'.format(result['url'],
                                                        int(result['SAMEORIGIN_OK']),
                                                        int(result['CROSSORIGIN_OK']),
                                                        int(result['SAMEORIGIN_KO_STATUS']),
                                                        int(result['SAMEORIGIN_KO_RESPONSE']),
                                                        int(result['CROSSORIGIN_KO_STATUS']),
                                                        int(result['CROSSORIGIN_KO_RESPONSE'])
                                                        ), file=w)
| 39.563636 | 143 | 0.584789 |
import concurrent.futures
import logging
import requests
from sys import argv, exit
from urllib.parse import urlparse
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
HEADERS = {
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.30 Safari/537.36'
}
MIN_RESPONSE_LENGTH = 100
NUM_WORKERS = 50
urls = []
if len(argv) < 2:
exit("Please specify a URLs file.")
with open(argv[1]) as f:
urls = [line.rstrip() for line in f]
def check(url):
r = requests.get(url, timeout=5, allow_redirects=False, headers=HEADERS)
response_size = len(r.text)
if r.status_code != 200 or response_size < MIN_RESPONSE_LENGTH:
logging.debug("Ignoring %s: response %d, response size %d.",
url, r.status_code, response_size)
return None
r = requests.get(url, timeout=5, allow_redirects=False, headers=HEADERS)
if r.status_code != 200 or response_size != len(r.text):
logging.debug("URL %s is unstable.", url)
return None
logging.info("URL %s is stable.", url)
parsed_url = urlparse(r.url)
origin = parsed_url.scheme + '://' + parsed_url.netloc
logging.debug('Sending same-origin Origin %s for %s...', origin, url)
result = {
'url': url,
'SAMEORIGIN_OK': False,
'CROSSORIGIN_OK': False,
'SAMEORIGIN_KO_STATUS': False,
'SAMEORIGIN_KO_RESPONSE': False,
'CROSSORIGIN_KO_STATUS': False,
'CROSSORIGIN_KO_RESPONSE': False
}
r = requests.get(url, timeout=5, allow_redirects=False,
headers={**HEADERS, **{'Origin': origin}})
if r.status_code != 200:
logging.info(
"[SAME ORIGIN] URL %s changed status code to %d.", url, r.status_code)
result['SAMEORIGIN_KO_STATUS'] = r.status_code
return result
if response_size != len(r.text):
logging.info(
"[SAME ORIGIN] URL %s changed response size to %d.", url, len(r.text))
result['SAMEORIGIN_KO_RESPONSE'] = True
return result
result['SAMEORIGIN_OK'] = True
logging.debug('Sending cross-origin Origin for URL %s.', url)
r = requests.get(url, timeout=5, allow_redirects=False, headers={
**HEADERS, **{'Origin': 'https://example.org'}})
if r.status_code != 200:
logging.info(
"[CROSS ORIGIN] URL %s changed status code to %d.", url, r.status_code)
result['CROSSORIGIN_KO_STATUS'] = r.status_code
return result
if response_size != len(r.text):
logging.info(
"[CROSS ORIGIN] URL %s changed response size to %d.", url, len(r.text))
result['CROSSORIGIN_KO_RESPONSE'] = True
return result
result['CROSSORIGIN_OK'] = True
return result
with open('results.csv', 'w') as w:
print('url,SAMEORIGIN_OK,CROSSORIGIN_OK,SAMEORIGIN_KO_STATUS,SAMEORIGIN_KO_RESPONSE,CROSSORIGIN_KO_STATUS,CROSSORIGIN_KO_RESPONSE', file=w)
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
future_to_result = {executor.submit(check, url): url for url in urls}
for future in concurrent.futures.as_completed(future_to_result):
try:
result = future.result()
except:
continue
else:
if result:
print('{},{},{},{},{},{},{}'.format(result['url'],
int(result['SAMEORIGIN_OK']),
int(result['CROSSORIGIN_OK']),
int(result['SAMEORIGIN_KO_STATUS']),
int(result['SAMEORIGIN_KO_RESPONSE']),
int(result['CROSSORIGIN_KO_STATUS']),
int(result['CROSSORIGIN_KO_RESPONSE'])
), file=w)
| true | true |
f7fe2e35e7fe155380db07b62cce34529780ec91 | 3,648 | py | Python | cher2.py | acse-hz6818/Armageddon | de62affe0b3f08cd74090d5d5e9e3c0905c9c8ed | [
"MIT"
] | null | null | null | cher2.py | acse-hz6818/Armageddon | de62affe0b3f08cd74090d5d5e9e3c0905c9c8ed | [
"MIT"
] | null | null | null | cher2.py | acse-hz6818/Armageddon | de62affe0b3f08cd74090d5d5e9e3c0905c9c8ed | [
"MIT"
] | null | null | null | # pylint: disable=invalid-name
"""
Extension 3:
Inversion to estimate the parameters of the Chelyabinsk airburst.
Try different values of the strength Y and radius r to find the combination
whose modelled energy-deposition curve best matches the observed data.
-------------------------
returns:
a graphical output
df with errors and the chosen parameters
"""
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import scipy.interpolate as si
import armageddon
# Initialise the planet model used by every simulation below.
earth = armageddon.Planet()
# Observed Chelyabinsk energy-deposition curve.
# NOTE(review): hard-coded absolute Windows path -- only resolvable on the
# original author's machine; consider a relative path.
protoCEAtable = pd.read_csv(r'C:\Users\gc2016\OneDrive - Imperial College London\ACSE\ACSE-4.2\acse-4-armageddon-hygiea\data\ChelyabinskEnergyAltitude.csv')
# Nominal Chelyabinsk entry parameters: candidate radii, entry speed,
# entry angle (degrees) and bulk density.
rc = range(10, 30, 2)
v0c = 19200
thetac = 18.3
rhoc = 3300
# Observed altitude (km) and energy per unit length (kt/km) as numpy arrays.
CEAheight = np.array(protoCEAtable['Height (km)'])
CEAE = np.array(protoCEAtable['Energy Per Unit Length (kt Km^-1)'])
# The fit error against these observations is computed in rY_finder below.
def rY_finder(r_min, r_max, Y_min, Y_max, nr, nY):
    """Grid-search radius and strength against the Chelyabinsk energy curve.

    Runs the numerical solver for every (r, Y) combination on an nr x nY
    grid and scores each run against the observed energy-deposition curve.

    Returns
    -------
    errordf : pd.DataFrame
        Columns 'Error', 'Radious', 'Strenght' (column names kept as-is for
        downstream compatibility).
    energies : list
        The dedz series of every run, in grid order.
    """
    # Interpolate the observed points into a continuous function so the
    # model can be evaluated at arbitrary altitudes.
    lp = si.interp1d(CEAheight, CEAE)
    # Candidate radii and strengths.
    rlist = np.linspace(r_min, r_max, nr)
    Ylist = np.linspace(Y_min, Y_max, nY)
    # Containers for results.
    maperror = []
    mapr = []
    mapY = []
    energies = []
    # Every (Y, r) combination must be simulated independently.
    for i in range(nY):
        for j in range(nr):
            mapr.append(rlist[j])
            mapY.append(Ylist[i])
            # Positional args: radius, velocity, density, strength, angle,
            # initial altitude (matches the __main__ keyword call below).
            df = earth.solve_atmospheric_entry(rlist[j], v0c, 3300, Ylist[i], 18.3,
                                               1e5, dt=0.02, radians=False)
            df2 = earth.calculate_energy(df)
            # Score only the 30-33 km band around the main energy peak.
            df_filtered = df2[(df2['altitude'] > 30) & (df2['altitude'] < 33)]
            energies.append(df2.dedz)
            # Root-sum-of-squares error.  The square must be applied to each
            # residual *before* summing; the previous sqrt(sum(diff)**2)
            # equalled |sum(diff)|, letting positive and negative deviations
            # cancel out.
            residuals = df_filtered.dedz - lp(df_filtered.altitude)
            maperror.append(np.sqrt(np.sum(residuals ** 2)))
    errordf = pd.DataFrame({'Error': maperror, 'Radious': mapr, 'Strenght': mapY})
    return errordf, energies
def plot_model(list_e):
    """Plot each modelled dedz series in *list_e* against the observed data.

    Each series is plotted against a uniform 100 -> 0 altitude axis with the
    same number of samples as the series itself.
    NOTE(review): the axis labels and title below ('r gridpoints',
    'Y gridpoints', 'Squared Errors') do not describe what is plotted
    (energy deposition vs altitude) -- confirm intent before reuse.
    """
    plt.figure(figsize=(10,6))
    for i in list_e:
        plt.plot(i, np.linspace(100, 0, len(i)))
    plt.plot(CEAE, CEAheight, 'k', label='raw data')
    plt.xlabel('r gridpoints')
    plt.ylabel('Y gridpoints')
    plt.title('Squared Errors')
    plt.show()
# Example grid search (slow -- every grid point runs the full solver):
# error, energies_list = (rY_finder(10, 12, 9e6, 1e7, 3, 3))
# print("error = ", error)
# plot_model(energies_list)
#print(CEAE)
# Single solver run with nominal initial conditions as a quick sanity check.
df = earth.solve_atmospheric_entry(radius=10, velocity=21000, density=3000, strength=1e5, angle=45,
                                   init_altitude=100e3, dt=0.01, radians=False)
df2 = earth.calculate_energy(df)
print(df2)
# Energy deposition per unit height vs altitude for the single run.
plt.plot(df2.dedz, df2.altitude)
plt.show()
# print ("max energy", df2.dedz.max())
# print ("min energy", df2.dedz.min())
##################### Plot the initial values ########################################
# fig = plt.figure(figsize=(12, 10))
# CEA = fig.add_subplot(111)
# CEA.margins(0.1)
# lp = si.interp1d(CEAheight, CEAE)
# CEA.plot(CEAE, CEAheight, 'k', label='raw data')
# CEA.plot(lp(CEAheight), CEAheight, 'b*', label='approximation')
# CEA.set_xlabel('$dedz, kT/km$', fontsize=16)
# CEA.set_ylabel('$Height, z/m$', fontsize=16)
# CEA.grid(True)
# CEA.set_title('dE/dz-z Graph for Chelyabinsk and the interpolation to continuous', fontsize=16)
# CEA.legend(loc='upper left', fontsize=18)
| 32.864865 | 157 | 0.623081 |
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import scipy.interpolate as si
import armageddon
earth = armageddon.Planet()
protoCEAtable = pd.read_csv(r'C:\Users\gc2016\OneDrive - Imperial College London\ACSE\ACSE-4.2\acse-4-armageddon-hygiea\data\ChelyabinskEnergyAltitude.csv')
rc = range(10, 30, 2)
v0c = 19200
thetac = 18.3
rhoc = 3300
CEAheight = np.array(protoCEAtable['Height (km)'])
CEAE = np.array(protoCEAtable['Energy Per Unit Length (kt Km^-1)'])
def rY_finder(r_min, r_max, Y_min, Y_max, nr, nY):
lp = si.interp1d(CEAheight, CEAE)
rlist = np.linspace(r_min, r_max, nr)
Ylist = np.linspace(Y_min, Y_max, nY)
maperror = []
mapr = []
mapY = []
energies = []
for i in range(nY):
for j in range(nr):
mapr.append(rlist[j])
mapY.append(Ylist[i])
df = earth.solve_atmospheric_entry(rlist[j], v0c, 3300, Ylist[i], 18.3,
1e5, dt=0.02, radians=False)
df2 = earth.calculate_energy(df)
df_filtered = df2[(df2['altitude'] > 30) & (df2['altitude'] < 33) ]
energies.append(df2.dedz)
maperror.append(np.sqrt(np.sum((df_filtered.dedz)-lp(df_filtered.altitude))**2))
errordf = pd.DataFrame({'Error': maperror, 'Radious': mapr, 'Strenght': mapY})
return errordf, energies
def plot_model(list_e):
plt.figure(figsize=(10,6))
for i in list_e:
plt.plot(i, np.linspace(100, 0, len(i)))
plt.plot(CEAE, CEAheight, 'k', label='raw data')
plt.xlabel('r gridpoints')
plt.ylabel('Y gridpoints')
plt.title('Squared Errors')
plt.show()
df = earth.solve_atmospheric_entry(radius=10, velocity=21000, density=3000, strength=1e5, angle=45,
init_altitude=100e3, dt=0.01, radians=False)
df2 = earth.calculate_energy(df)
print(df2)
plt.plot(df2.dedz, df2.altitude)
plt.show()
| true | true |
f7fe2f6fe8a9361a12505ab1363f6de686380d9d | 315 | py | Python | main.py | wasit7/lambda | cc68db2a1e692229ce98fab18e2fc170b69f9cbe | [
"MIT"
] | null | null | null | main.py | wasit7/lambda | cc68db2a1e692229ce98fab18e2fc170b69f9cbe | [
"MIT"
] | null | null | null | main.py | wasit7/lambda | cc68db2a1e692229ce98fab18e2fc170b69f9cbe | [
"MIT"
] | null | null | null | import numpy as np
def myfunction(event=None, context=None):
mylist=[
'My name is Wasit', 'Rule the world!!',
'get data, process and turn on the switch',
'get data and predict your future', 'keep coding',
]
return mylist[np.random.randint(0, len(mylist))]
if __name__ == "__main__":
print myfunction()
| 26.25 | 52 | 0.692063 | import numpy as np
def myfunction(event=None, context=None):
mylist=[
'My name is Wasit', 'Rule the world!!',
'get data, process and turn on the switch',
'get data and predict your future', 'keep coding',
]
return mylist[np.random.randint(0, len(mylist))]
if __name__ == "__main__":
print myfunction()
| false | true |
f7fe2ff7ed6dd9492b444201a89b884b369cfd4c | 6,635 | py | Python | stix_shifter_modules/datadog/test/stix_transmission/test_datadog.py | priti-patil/stix-shifter | 26954598fb79dde4506987388592ec391ff8a10b | [
"Apache-2.0"
] | 33 | 2018-05-25T17:07:28.000Z | 2019-09-30T10:08:53.000Z | stix_shifter_modules/datadog/test/stix_transmission/test_datadog.py | priti-patil/stix-shifter | 26954598fb79dde4506987388592ec391ff8a10b | [
"Apache-2.0"
] | 54 | 2018-06-01T18:17:24.000Z | 2019-09-30T18:36:15.000Z | stix_shifter_modules/datadog/test/stix_transmission/test_datadog.py | subbyte/stix-shifter | 36d71c172a5fc5b97d872e623753b0dd1bf4fe6c | [
"Apache-2.0"
] | 37 | 2018-07-24T13:29:46.000Z | 2019-09-29T19:06:27.000Z | import unittest
from unittest.mock import patch
from stix_shifter_modules.datadog.entry_point import EntryPoint
from stix_shifter_utils.utils.error_response import ErrorCode
class DatadogMockEvent():
    """Minimal stand-in for a Datadog event object returned by the API client."""
    def __init__(self, _data_store):
        # Raw event payload as supplied by the test.
        self._data_store = _data_store
class TestDatadogConnection(unittest.TestCase, object):
    """Transmission tests for the Datadog connector with the API client mocked."""
    def connection(self):
        """Minimal connection settings for building an EntryPoint."""
        return {
            "site_url": "https://app.datadoghq.eu",
            "selfSignedCert": False
        }
    def configuration(self):
        """Dummy credentials; never used over the wire because the client is patched."""
        return {
            "auth": {
                "api_key": "u",
                "application_key": "pqwer"
            }
        }
    def test_is_async(self):
        """The Datadog connector is synchronous."""
        entry_point = EntryPoint(self.connection(), self.configuration())
        check_async = entry_point.is_async()
        assert check_async is False
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
    def test_ping(self, mock_generate_token):
        """A 200 from the API client yields a successful ping."""
        mocked_return_value = {"code": 200}
        mock_generate_token.return_value = mocked_return_value
        entry_point = EntryPoint(self.connection(), self.configuration())
        ping_result = entry_point.ping_connection()
        assert ping_result["success"] is True
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
    def test_ping_endpoint_exception(self, mock_generate_token):
        """A 403 from the API client maps to a forbidden transmission error."""
        mocked_return_value = {"code": 403, "message": "forbidden"}
        mock_generate_token.return_value = mocked_return_value
        entry_point = EntryPoint(self.connection(), self.configuration())
        ping_response = entry_point.ping_connection()
        assert ping_response['success'] is False
        assert ping_response['error'] == "datadog connector error => forbidden"
        assert ping_response['code'] == ErrorCode.TRANSMISSION_FORBIDDEN.value
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.get_search_results',
           autospec=True)
    def test_results_all_response(self, mock_results_response, mock_generate_token):
        """An events query returns the mocked events in the result data."""
        mocked_return_value = {"code": 200}
        mock_generate_token.return_value = mocked_return_value
        # 1000 mocked events; the query below asks for up to 1002 results.
        mocked_return_value = {"code": 200, "data": {"events": [DatadogMockEvent(_data_store={"host":"192.168.122.83", "is_aggregate": False}) for x in range(1000)]}}
        mock_results_response.return_value = mocked_return_value
        query = '{"query": {"host": "192.168.122.83", "unaggregated": "false", "start": 9580878, "end": 12345678}, "source": "events"}'
        offset = 0
        length = 1002
        entry_point = EntryPoint(self.connection(), self.configuration())
        results_response = entry_point.create_results_connection(query, offset, length)
        assert results_response is not None
        assert results_response['success']
        assert 'data' in results_response
        assert results_response['data'] is not None
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.get_search_results',
           autospec=True)
    def test_results_response_exception(self, mock_results_response, mock_generate_token):
        """A 400 on an events query maps to an invalid-parameter error."""
        mocked_return_value = {"code": 200}
        mock_generate_token.return_value = mocked_return_value
        mocked_return_value = {
            "code": 400,
            "message": "Bad Request"
        }
        mock_results_response.return_value = mocked_return_value
        query = '{"query": {"host": "192.168.122.83", "start": 9580878, "end": 12345678}, "source": "events"}'
        offset = 0
        length = 1
        entry_point = EntryPoint(self.connection(), self.configuration())
        results_response = entry_point.create_results_connection(query, offset, length)
        assert results_response is not None
        assert results_response['success'] is False
        assert results_response['error'] == 'datadog connector error => Bad Request'
        assert results_response['code'] == ErrorCode.TRANSMISSION_INVALID_PARAMETER.value
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.get_processes_results',
           autospec=True)
    def test_results_processes_response(self, mock_results_response, mock_generate_token):
        """A processes query returns the mocked process records."""
        mocked_return_value = {"code": 200}
        mock_generate_token.return_value = mocked_return_value
        mocked_return_value = {"code": 200, "data": {"data": [{"attributes": DatadogMockEvent(_data_store={"host": "192.168.122.83", "is_aggregate": False})} for x in range(1000)]}}
        mock_results_response.return_value = mocked_return_value
        query = '{"query": {"host": "192.168.122.83", "unaggregated": "false", "start": 9580878, "end": 12345678}, "source": "processes"}'
        offset = 0
        length = 1002
        entry_point = EntryPoint(self.connection(), self.configuration())
        results_response = entry_point.create_results_connection(query, offset, length)
        assert results_response is not None
        assert results_response['success']
        assert 'data' in results_response
        assert results_response['data'] is not None
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
    @patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.get_processes_results',
           autospec=True)
    def test_results_processes_response_exception(self, mock_results_response, mock_generate_token):
        """A 400 on a processes query maps to an invalid-parameter error."""
        mocked_return_value = {"code": 200}
        mock_generate_token.return_value = mocked_return_value
        mocked_return_value = {
            "code": 400,
            "message": "Bad Request"
        }
        mock_results_response.return_value = mocked_return_value
        query = '{"query": {"host": "192.168.122.83", "start": 9580878, "end": 12345678}, "source": "processes"}'
        offset = 0
        length = 1
        entry_point = EntryPoint(self.connection(), self.configuration())
        results_response = entry_point.create_results_connection(query, offset, length)
        assert results_response is not None
        assert results_response['success'] is False
        assert results_response['error'] == 'datadog connector error => Bad Request'
        assert results_response['code'] == ErrorCode.TRANSMISSION_INVALID_PARAMETER.value
| 48.07971 | 181 | 0.700528 | import unittest
from unittest.mock import patch
from stix_shifter_modules.datadog.entry_point import EntryPoint
from stix_shifter_utils.utils.error_response import ErrorCode
class DatadogMockEvent():
def __init__(self, _data_store):
self._data_store = _data_store
class TestDatadogConnection(unittest.TestCase, object):
def connection(self):
return {
"site_url": "https://app.datadoghq.eu",
"selfSignedCert": False
}
def configuration(self):
return {
"auth": {
"api_key": "u",
"application_key": "pqwer"
}
}
def test_is_async(self):
entry_point = EntryPoint(self.connection(), self.configuration())
check_async = entry_point.is_async()
assert check_async is False
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
def test_ping(self, mock_generate_token):
mocked_return_value = {"code": 200}
mock_generate_token.return_value = mocked_return_value
entry_point = EntryPoint(self.connection(), self.configuration())
ping_result = entry_point.ping_connection()
assert ping_result["success"] is True
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
def test_ping_endpoint_exception(self, mock_generate_token):
mocked_return_value = {"code": 403, "message": "forbidden"}
mock_generate_token.return_value = mocked_return_value
entry_point = EntryPoint(self.connection(), self.configuration())
ping_response = entry_point.ping_connection()
assert ping_response['success'] is False
assert ping_response['error'] == "datadog connector error => forbidden"
assert ping_response['code'] == ErrorCode.TRANSMISSION_FORBIDDEN.value
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.get_search_results',
autospec=True)
def test_results_all_response(self, mock_results_response, mock_generate_token):
mocked_return_value = {"code": 200}
mock_generate_token.return_value = mocked_return_value
mocked_return_value = {"code": 200, "data": {"events": [DatadogMockEvent(_data_store={"host":"192.168.122.83", "is_aggregate": False}) for x in range(1000)]}}
mock_results_response.return_value = mocked_return_value
query = '{"query": {"host": "192.168.122.83", "unaggregated": "false", "start": 9580878, "end": 12345678}, "source": "events"}'
offset = 0
length = 1002
entry_point = EntryPoint(self.connection(), self.configuration())
results_response = entry_point.create_results_connection(query, offset, length)
assert results_response is not None
assert results_response['success']
assert 'data' in results_response
assert results_response['data'] is not None
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.get_search_results',
autospec=True)
def test_results_response_exception(self, mock_results_response, mock_generate_token):
mocked_return_value = {"code": 200}
mock_generate_token.return_value = mocked_return_value
mocked_return_value = {
"code": 400,
"message": "Bad Request"
}
mock_results_response.return_value = mocked_return_value
query = '{"query": {"host": "192.168.122.83", "start": 9580878, "end": 12345678}, "source": "events"}'
offset = 0
length = 1
entry_point = EntryPoint(self.connection(), self.configuration())
results_response = entry_point.create_results_connection(query, offset, length)
assert results_response is not None
assert results_response['success'] is False
assert results_response['error'] == 'datadog connector error => Bad Request'
assert results_response['code'] == ErrorCode.TRANSMISSION_INVALID_PARAMETER.value
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.get_processes_results',
autospec=True)
def test_results_processes_response(self, mock_results_response, mock_generate_token):
mocked_return_value = {"code": 200}
mock_generate_token.return_value = mocked_return_value
mocked_return_value = {"code": 200, "data": {"data": [{"attributes": DatadogMockEvent(_data_store={"host": "192.168.122.83", "is_aggregate": False})} for x in range(1000)]}}
mock_results_response.return_value = mocked_return_value
query = '{"query": {"host": "192.168.122.83", "unaggregated": "false", "start": 9580878, "end": 12345678}, "source": "processes"}'
offset = 0
length = 1002
entry_point = EntryPoint(self.connection(), self.configuration())
results_response = entry_point.create_results_connection(query, offset, length)
assert results_response is not None
assert results_response['success']
assert 'data' in results_response
assert results_response['data'] is not None
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.ping_data_source')
@patch('stix_shifter_modules.datadog.stix_transmission.api_client.APIClient.get_processes_results',
autospec=True)
def test_results_processes_response_exception(self, mock_results_response, mock_generate_token):
mocked_return_value = {"code": 200}
mock_generate_token.return_value = mocked_return_value
mocked_return_value = {
"code": 400,
"message": "Bad Request"
}
mock_results_response.return_value = mocked_return_value
query = '{"query": {"host": "192.168.122.83", "start": 9580878, "end": 12345678}, "source": "processes"}'
offset = 0
length = 1
entry_point = EntryPoint(self.connection(), self.configuration())
results_response = entry_point.create_results_connection(query, offset, length)
assert results_response is not None
assert results_response['success'] is False
assert results_response['error'] == 'datadog connector error => Bad Request'
assert results_response['code'] == ErrorCode.TRANSMISSION_INVALID_PARAMETER.value
| true | true |
f7fe30844529ac629a1ec24c31ef21c2ae2f0503 | 1,075 | py | Python | tests/test_main.py | developmentseed/pydantic-ssm-settings | f8f7c5784a97e93d54c7ab056d6c959892429727 | [
"MIT"
] | null | null | null | tests/test_main.py | developmentseed/pydantic-ssm-settings | f8f7c5784a97e93d54c7ab056d6c959892429727 | [
"MIT"
] | null | null | null | tests/test_main.py | developmentseed/pydantic-ssm-settings | f8f7c5784a97e93d54c7ab056d6c959892429727 | [
"MIT"
] | null | null | null | import logging
import pytest
from pydantic import BaseSettings
from pydantic_ssm_settings import AwsSsmSourceConfig
logger = logging.getLogger("pydantic_ssm_settings")
logger.setLevel(logging.DEBUG)
class SimpleSettings(BaseSettings):
foo: str
class Config(AwsSsmSourceConfig):
...
class IntSettings(BaseSettings):
foo: str
bar: int
class Config(AwsSsmSourceConfig):
...
def test_secrets_dir_must_be_absolute():
with pytest.raises(ValueError):
SimpleSettings(_secrets_dir="asdf")
def test_lookup_from_ssm(ssm):
ssm.put_parameter(Name="/asdf/foo", Value="xyz123")
settings = SimpleSettings(_secrets_dir="/asdf")
assert settings.foo == "xyz123"
def test_prefer_provided(ssm):
settings = SimpleSettings(_secrets_dir="/asdf", foo="manually set")
assert settings.foo == "manually set"
def test_casting(ssm):
ssm.put_parameter(Name="/asdf/foo", Value="xyz123")
ssm.put_parameter(Name="/asdf/bar", Value="99")
settings = IntSettings(_secrets_dir="/asdf")
assert settings.bar == 99
| 22.395833 | 71 | 0.72093 | import logging
import pytest
from pydantic import BaseSettings
from pydantic_ssm_settings import AwsSsmSourceConfig
logger = logging.getLogger("pydantic_ssm_settings")
logger.setLevel(logging.DEBUG)
class SimpleSettings(BaseSettings):
foo: str
class Config(AwsSsmSourceConfig):
...
class IntSettings(BaseSettings):
foo: str
bar: int
class Config(AwsSsmSourceConfig):
...
def test_secrets_dir_must_be_absolute():
with pytest.raises(ValueError):
SimpleSettings(_secrets_dir="asdf")
def test_lookup_from_ssm(ssm):
ssm.put_parameter(Name="/asdf/foo", Value="xyz123")
settings = SimpleSettings(_secrets_dir="/asdf")
assert settings.foo == "xyz123"
def test_prefer_provided(ssm):
settings = SimpleSettings(_secrets_dir="/asdf", foo="manually set")
assert settings.foo == "manually set"
def test_casting(ssm):
ssm.put_parameter(Name="/asdf/foo", Value="xyz123")
ssm.put_parameter(Name="/asdf/bar", Value="99")
settings = IntSettings(_secrets_dir="/asdf")
assert settings.bar == 99
| true | true |
f7fe3177c09f5518ffa0c762ee12c3cbcb419795 | 686 | py | Python | 3.Object.Oriented.Programming/13.defineDafultValues.py | bhattvishal/programming-learning-python | 78498bfbe7c1c7b1bda53756ca8552ab30fbf538 | [
"MIT"
] | 1 | 2020-11-26T11:06:56.000Z | 2020-11-26T11:06:56.000Z | 3.Object.Oriented.Programming/13.defineDafultValues.py | bhattvishal/programming-learning-python | 78498bfbe7c1c7b1bda53756ca8552ab30fbf538 | [
"MIT"
] | null | null | null | 3.Object.Oriented.Programming/13.defineDafultValues.py | bhattvishal/programming-learning-python | 78498bfbe7c1c7b1bda53756ca8552ab30fbf538 | [
"MIT"
] | null | null | null | # KEEP IN MIND: Attributes with no Default Values must come first
from dataclasses import dataclass, field
import random
def getDiscount():
return float(random.randrange(20, 40))
@dataclass
class Book:
pages: int
title: str = "No Title"
author: str = "No Author"
price: float = field(default=10.0) # We can also use field to define default value
discount: float = field(default_factory=getDiscount) # We can get the dafule from funcation
def __post_init__(self):
self.description = f"Book {self.title} is written {self.author} and costs {self.price}"
b1 = Book(235)
print(b1)
print(b1.description)
print(b1.discount) | 27.44 | 98 | 0.686589 |
from dataclasses import dataclass, field
import random
def getDiscount():
return float(random.randrange(20, 40))
@dataclass
class Book:
pages: int
title: str = "No Title"
author: str = "No Author"
price: float = field(default=10.0)
discount: float = field(default_factory=getDiscount)
def __post_init__(self):
self.description = f"Book {self.title} is written {self.author} and costs {self.price}"
b1 = Book(235)
print(b1)
print(b1.description)
print(b1.discount) | true | true |
f7fe324cb814a1ae68efe73a1fa569f7ef9aa215 | 262 | py | Python | 11-20/20_factorial_digit_sum.py | sebranly/project-euler | 9e944126d936db92962ab66e9968bab57aa1af88 | [
"MIT"
] | null | null | null | 11-20/20_factorial_digit_sum.py | sebranly/project-euler | 9e944126d936db92962ab66e9968bab57aa1af88 | [
"MIT"
] | null | null | null | 11-20/20_factorial_digit_sum.py | sebranly/project-euler | 9e944126d936db92962ab66e9968bab57aa1af88 | [
"MIT"
] | null | null | null | def factorial(number, accumulator = 1):
if (number == 0):
return accumulator
return factorial(number - 1, accumulator * number)
result = factorial(100)
sum_digits = 0
for digit in str(result):
sum_digits += int(digit)
print(sum_digits)
| 21.833333 | 54 | 0.671756 | def factorial(number, accumulator = 1):
if (number == 0):
return accumulator
return factorial(number - 1, accumulator * number)
result = factorial(100)
sum_digits = 0
for digit in str(result):
sum_digits += int(digit)
print(sum_digits)
| true | true |
f7fe33100ae3d7599678d5a66a71858f5a4fd2c8 | 5,622 | py | Python | tensor2tensor/utils/learning_rate.py | sivaramakrishna7/tensor2tensor | eb0118d3f459913133e3d68a96944480a928bff1 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/utils/learning_rate.py | sivaramakrishna7/tensor2tensor | eb0118d3f459913133e3d68a96944480a928bff1 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/utils/learning_rate.py | sivaramakrishna7/tensor2tensor | eb0118d3f459913133e3d68a96944480a928bff1 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
def learning_rate_factor(name, step_num, hparams):
if name == "constant":
return hparams.learning_rate_constant
elif name == "linear_warmup":
return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps)
elif name == "rsqrt_decay":
return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps))
elif name == "rsqrt_hidden_size":
return hparams.hidden_size ** -0.5
elif name == "legacy":
return legacy_learning_rate_schedule(hparams)
else:
raise ValueError("unknown learning rate factor %s" % name)
def learning_rate_schedule(hparams):
"""Learning rate schedule based on hparams."""
step_num = tf.to_float(tf.train.get_or_create_global_step())
schedule_string = hparams.learning_rate_schedule
names = schedule_string.split("*")
names = [name.strip() for name in names if name.strip()]
ret = 1.0
for name in names:
ret *= learning_rate_factor(name, step_num, hparams)
return ret
def legacy_learning_rate_schedule(hparams):
"""Backwards-compatible learning-rate schedule."""
step_num = tf.to_float(tf.train.get_or_create_global_step())
warmup_steps = tf.to_float(hparams.learning_rate_warmup_steps)
if hparams.learning_rate_decay_scheme == "noam":
ret = 5000.0 * hparams.hidden_size**-0.5 * tf.minimum(
(step_num + 1) * warmup_steps**-1.5, (step_num + 1)**-0.5)
else:
warmup_steps = hparams.learning_rate_warmup_steps
warmup = _learning_rate_warmup(warmup_steps)
decay = _learning_rate_decay(hparams, warmup_steps)
ret = tf.where(step_num < warmup_steps, warmup, decay)
optimizer_correction = 0.002 if "Adam" in hparams.optimizer else 1.0
return ret * optimizer_correction * hparams.learning_rate
def _legacy_sqrt_decay(step):
"""Decay like 1 / sqrt(step), multiplied by 500 to normalize."""
return 500.0 / tf.sqrt(tf.maximum(step, 1.0))
def _piecewise_learning_rate(step, boundaries, values):
"""Scale learning rate according to the given schedule.
Multipliers are not cumulative.
Args:
step: global step
boundaries: List of steps to transition on.
values: Multiplier to apply at each boundary transition.
Returns:
Scaled value for the learning rate.
"""
values = [1.0] + values
boundaries = [float(x) for x in boundaries]
return tf.train.piecewise_constant(
step, boundaries, values, name="piecewise_lr")
def _learning_rate_decay(hparams, warmup_steps=0):
"""Learning rate decay multiplier."""
scheme = hparams.learning_rate_decay_scheme
warmup_steps = tf.to_float(warmup_steps)
global_step = tf.to_float(tf.train.get_or_create_global_step())
if not scheme or scheme == "none":
return tf.constant(1.)
tf.logging.info("Applying learning rate decay: %s.", scheme)
if scheme == "exp":
decay_steps = hparams.learning_rate_decay_steps
p = (global_step - warmup_steps) / decay_steps
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
if scheme == "piecewise":
return _piecewise_learning_rate(global_step,
hparams.learning_rate_boundaries,
hparams.learning_rate_multiples)
if scheme == "cosine":
cycle_steps = hparams.learning_rate_cosine_cycle_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)
return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))
if scheme == "cyclelinear10x":
# Cycle the rate linearly by 10x every warmup_steps, up and down.
cycle_steps = warmup_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = tf.to_float( # Normalize to the interval [-1, 1].
cycle_position - cycle_steps) / float(cycle_steps)
cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0.
return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3).
if scheme == "sqrt":
return _legacy_sqrt_decay(global_step - warmup_steps)
raise ValueError("Unrecognized learning rate decay scheme: %s" %
hparams.learning_rate_decay_scheme)
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp"):
"""Learning rate warmup multiplier."""
if not warmup_steps:
return tf.constant(1.)
tf.logging.info("Applying %s learning rate warmup for %d steps",
warmup_schedule, warmup_steps)
warmup_steps = tf.to_float(warmup_steps)
global_step = tf.to_float(tf.train.get_or_create_global_step())
if warmup_schedule == "exp":
return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)
else:
assert warmup_schedule == "linear"
start = tf.constant(0.35)
return ((tf.constant(1.) - start) / warmup_steps) * global_step + start
| 35.808917 | 77 | 0.721096 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def learning_rate_factor(name, step_num, hparams):
if name == "constant":
return hparams.learning_rate_constant
elif name == "linear_warmup":
return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps)
elif name == "rsqrt_decay":
return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps))
elif name == "rsqrt_hidden_size":
return hparams.hidden_size ** -0.5
elif name == "legacy":
return legacy_learning_rate_schedule(hparams)
else:
raise ValueError("unknown learning rate factor %s" % name)
def learning_rate_schedule(hparams):
step_num = tf.to_float(tf.train.get_or_create_global_step())
schedule_string = hparams.learning_rate_schedule
names = schedule_string.split("*")
names = [name.strip() for name in names if name.strip()]
ret = 1.0
for name in names:
ret *= learning_rate_factor(name, step_num, hparams)
return ret
def legacy_learning_rate_schedule(hparams):
step_num = tf.to_float(tf.train.get_or_create_global_step())
warmup_steps = tf.to_float(hparams.learning_rate_warmup_steps)
if hparams.learning_rate_decay_scheme == "noam":
ret = 5000.0 * hparams.hidden_size**-0.5 * tf.minimum(
(step_num + 1) * warmup_steps**-1.5, (step_num + 1)**-0.5)
else:
warmup_steps = hparams.learning_rate_warmup_steps
warmup = _learning_rate_warmup(warmup_steps)
decay = _learning_rate_decay(hparams, warmup_steps)
ret = tf.where(step_num < warmup_steps, warmup, decay)
optimizer_correction = 0.002 if "Adam" in hparams.optimizer else 1.0
return ret * optimizer_correction * hparams.learning_rate
def _legacy_sqrt_decay(step):
return 500.0 / tf.sqrt(tf.maximum(step, 1.0))
def _piecewise_learning_rate(step, boundaries, values):
values = [1.0] + values
boundaries = [float(x) for x in boundaries]
return tf.train.piecewise_constant(
step, boundaries, values, name="piecewise_lr")
def _learning_rate_decay(hparams, warmup_steps=0):
scheme = hparams.learning_rate_decay_scheme
warmup_steps = tf.to_float(warmup_steps)
global_step = tf.to_float(tf.train.get_or_create_global_step())
if not scheme or scheme == "none":
return tf.constant(1.)
tf.logging.info("Applying learning rate decay: %s.", scheme)
if scheme == "exp":
decay_steps = hparams.learning_rate_decay_steps
p = (global_step - warmup_steps) / decay_steps
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
if scheme == "piecewise":
return _piecewise_learning_rate(global_step,
hparams.learning_rate_boundaries,
hparams.learning_rate_multiples)
if scheme == "cosine":
cycle_steps = hparams.learning_rate_cosine_cycle_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)
return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))
if scheme == "cyclelinear10x":
cycle_steps = warmup_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = tf.to_float(
cycle_position - cycle_steps) / float(cycle_steps)
cycle_position = 1.0 - tf.abs(cycle_position)
return (cycle_position + 0.1) * 3.0
if scheme == "sqrt":
return _legacy_sqrt_decay(global_step - warmup_steps)
raise ValueError("Unrecognized learning rate decay scheme: %s" %
hparams.learning_rate_decay_scheme)
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp"):
if not warmup_steps:
return tf.constant(1.)
tf.logging.info("Applying %s learning rate warmup for %d steps",
warmup_schedule, warmup_steps)
warmup_steps = tf.to_float(warmup_steps)
global_step = tf.to_float(tf.train.get_or_create_global_step())
if warmup_schedule == "exp":
return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)
else:
assert warmup_schedule == "linear"
start = tf.constant(0.35)
return ((tf.constant(1.) - start) / warmup_steps) * global_step + start
| true | true |
f7fe33ddbc3fa236767378e2122a73a94d0135f6 | 48,626 | py | Python | oneflow/python/eager/vm_util.py | zhouyuegit/oneflow | ddb6ad7fc43b867357394c3d3f0176f4e81cc8ef | [
"Apache-2.0"
] | null | null | null | oneflow/python/eager/vm_util.py | zhouyuegit/oneflow | ddb6ad7fc43b867357394c3d3f0176f4e81cc8ef | [
"Apache-2.0"
] | null | null | null | oneflow/python/eager/vm_util.py | zhouyuegit/oneflow | ddb6ad7fc43b867357394c3d3f0176f4e81cc8ef | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import re
from contextlib import contextmanager
import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_util
import oneflow.core.job.placement_pb2 as placement_pb_util
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb
import oneflow.core.vm.instruction_pb2 as instr_util
import oneflow.python.eager.blob_cache as blob_cache_util
import oneflow.python.eager.boxing_util as boxing_util
import oneflow.python.eager.object as object_util
import oneflow.python.eager.object_storage as object_storage
import oneflow.python.eager.symbol as symbol_util
import oneflow.python.eager.symbol_storage as symbol_storage
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.scope_symbol as scope_symbol
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.op_arg_util as op_arg_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.python_callback as python_callback
import oneflow.python.framework.session_context as session_ctx
from oneflow.python.eager.opkernel_object import OpKernelObject
import oneflow.python.vm.id_util as vm_id_util
import oneflow
oneflow_api = oneflow.oneflow_api
def PhysicalRun(build):
    """Run `build` to emit physical (per-device) VM instructions and execute them."""
    id_generator = vm_id_util.PhysicalIdGenerator()
    return _Run(
        build, id_generator, c_api_util.RunPhysicalInstruction, _ReleasePhysicalObject
    )
def LogicalRun(build):
return _Run(
build,
vm_id_util.LogicalIdGenerator(),
c_api_util.RunLogicalInstruction,
_ReleaseLogicalObject,
)
def _Run(build, id_generator, run_api, release_object):
instruction_list = session_ctx.GetDefaultSession().instruction_list
eager_symbol_list = session_ctx.GetDefaultSession().eager_symbol_list
build(
InstructionsBuilder(
id_generator, release_object, instruction_list, eager_symbol_list
)
)
run_api(instruction_list, eager_symbol_list)
instruction_list.ClearField("instruction")
eager_symbol_list.ClearField("eager_symbol")
def _DefaultBlobObject4Ibn(ibn):
raise NotImplementedError
class InstructionsBuilder(object):
def __init__(
self, id_generator, release_object, instruction_list, eager_symbol_list
):
self.id_generator_ = id_generator
self.release_object_ = release_object
assert isinstance(instruction_list, instr_util.InstructionListProto)
assert isinstance(eager_symbol_list, eager_symbol_util.EagerSymbolList)
self.instruction_list_ = instruction_list
self.eager_symbol_list_ = eager_symbol_list
def StatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object={}):
op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDelegateBlobObject(blob_object, op_arg_parallel_attr):
return _FindOrCreateDelegateBlobObject(
self, blob_object, op_arg_parallel_attr
)
self._StatelessCall(
"compute",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDelegateBlobObject,
)
def BoxingStatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object={}):
op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"compute",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def BoxingCudaD2HStatelessCall(
self, op_attribute, in_parallel_conf, bn_in_op2blob_object={}
):
op_parallel_desc_sym = self.GetParallelDescSymbol(in_parallel_conf)
blob_parallel_desc_sym = boxing_util.TryReplaceDeviceTag(
self, op_parallel_desc_sym, "cpu"
)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"copy_d2h",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def BoxingCudaH2DStatelessCall(
self, op_attribute, out_parallel_conf, bn_in_op2blob_object={}
):
op_parallel_desc_sym = self.GetParallelDescSymbol(out_parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"copy_h2d",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def StatefulCall(self, op_attribute, opkernel_object, bn_in_op2blob_object={}):
op_parallel_desc_sym = opkernel_object.parallel_desc_symbol
parallel_sig = op_attribute.parallel_signature
assert parallel_sig.HasField("op_parallel_desc_symbol_id")
assert op_parallel_desc_sym.symbol_id == parallel_sig.op_parallel_desc_symbol_id
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDelegateBlobObject(blob_object, op_arg_parallel_attr):
return _FindOrCreateDelegateBlobObject(
self, blob_object, op_arg_parallel_attr
)
self._StatefulCall(
op_attribute,
opkernel_object=opkernel_object,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDelegateBlobObject,
)
    def DeleteObject(self, obj):
        """Release `obj`: clear its payload first (if any), then emit its
        delete instruction.  The clear-before-delete order matters."""
        self._TryClearObject(obj)
        self._DeleteObject(obj)
def InsertRemoveForeignCallbackInstruction(self, object_id, callback):
unique_callback_id = python_callback.GetIdForRegisteredCallback(callback)
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "RemoveForeignCallback"
instruction.operand.append(_DelObjectOperand(object_id))
instruction.operand.append(_Int64Operand(unique_callback_id))
self.instruction_list_.instruction.append(instruction)
    def FetchBlobHeader(self, blob_object, callback):
        """Fetch the blob's header (metadata) and deliver it to `callback`."""
        return self._FetchBlob("FetchBlobHeader", blob_object, callback)
    def FetchBlobBody(self, blob_object, callback):
        """Fetch the blob's body (payload data) and deliver it to `callback`."""
        return self._FetchBlob("FetchBlobBody", blob_object, callback)
    def PackPhysicalBlobsToLogicalBlob(
        self, physical_blob_objects, op_arg_parallel_attr, op_arg_blob_attr
    ):
        """Merge per-device mirrored blobs into one logical blob.

        Verifies that each physical blob occupies exactly one device with the
        expected device tag, and that the target placement's every
        (machine, device) pair is covered by some physical blob, then emits a
        replace-mirrored instruction binding the new logical blob to the
        physical ones.  Returns the new logical BlobObject.
        """
        parallel_desc_symbol = op_arg_parallel_attr.parallel_desc_symbol
        machine_id2device_ids = parallel_desc_symbol.machine_id2device_id_list
        device_tag = parallel_desc_symbol.parallel_conf.device_tag
        # Collect the (machine_id, device_id) pair of every physical blob.
        machine_device_ids = set()
        for physical_blob_object in physical_blob_objects:
            phy_paralle_desc_sym = physical_blob_object.parallel_desc_symbol
            # Each physical blob must live on exactly one device.
            assert (
                phy_paralle_desc_sym.parallel_num == 1
            ), phy_paralle_desc_sym.parallel_num
            assert phy_paralle_desc_sym.device_tag == device_tag, "%s v.s. %s" % (
                phy_paralle_desc_sym.device_tag,
                device_tag,
            )
            phy_machine_id2device_ids = phy_paralle_desc_sym.machine_id2device_id_list
            machine_id = list(phy_machine_id2device_ids.keys())[0]
            pair = (machine_id, phy_machine_id2device_ids[machine_id][0])
            machine_device_ids.add(pair)
        # Every device of the target placement must be covered.
        for machine_id, device_ids in machine_id2device_ids.items():
            for device_id in device_ids:
                assert (machine_id, device_id) in machine_device_ids, "%s not in %s" % (
                    (machine_id, device_id),
                    machine_device_ids,
                )
        logical_blob_object = self._NewBlobObject(
            op_arg_parallel_attr, op_arg_blob_attr
        )
        self._ReplaceMirrored(
            op_arg_parallel_attr.parallel_desc_symbol,
            [logical_blob_object],
            physical_blob_objects,
        )
        return logical_blob_object
def GetPhysicalParallelDescSymbols(self, parallel_desc_symbol):
machine_id2device_ids = parallel_desc_symbol.machine_id2device_id_list
device_tag = parallel_desc_symbol.parallel_conf.device_tag
phy_parallel_desc_symbols = []
def AppendPhyParallelDescSymbol(machine_id, device_id):
parallel_conf = placement_pb_util.ParallelConf()
parallel_conf.device_tag = device_tag
parallel_conf.device_name.append("%d:%d" % (machine_id, device_id))
phy_parallel_desc_symbols.append(self.GetParallelDescSymbol(parallel_conf))
for machine_id, device_ids in machine_id2device_ids.items():
for device_id in device_ids:
AppendPhyParallelDescSymbol(machine_id, device_id)
return phy_parallel_desc_symbols
def UnpackLogicalBlobToPhysicalBlobs(self, blob_object):
phy_parallel_desc_symbols = self.GetPhysicalParallelDescSymbols(
blob_object.parallel_desc_symbol
)
def GetPhysicalBlob(parallel_desc_sym):
op_arg_parallel_attr = op_arg_util.MakeMirroredOpArgParallelAttribute(
parallel_desc_sym
)
pyhsical_blob_object = self._NewBlobObject(
op_arg_parallel_attr, blob_object.op_arg_blob_attr
)
return pyhsical_blob_object
physical_blob_objects = [
GetPhysicalBlob(symbol) for symbol in phy_parallel_desc_symbols
]
self._ReplaceMirrored(
blob_object.parallel_desc_symbol, physical_blob_objects, [blob_object]
)
return physical_blob_objects
def MakeReferenceBlobObject(self, blob_object, op_arg_parallel_attr):
parallel_desc_symbol = blob_object.parallel_desc_symbol
assert parallel_desc_symbol == op_arg_parallel_attr.parallel_desc_symbol
ref_blob_object = self._NewBlobObject(
op_arg_parallel_attr, blob_object.op_arg_blob_attr
)
self._ReplaceMirrored(parallel_desc_symbol, [ref_blob_object], [blob_object])
return ref_blob_object
    def MakeLazyRefBlobObject(self, interface_op_name):
        """Create a BlobObject referencing a lazy interface op's sole output.

        Looks up the op attribute for `interface_op_name` in the default
        session (the op must have exactly one output), derives the output's
        placement and blob attributes from the attribute proto, then binds
        the new blob object to the op via self._LazyReference.
        """
        sess = session_ctx.GetDefaultSession()
        op_attribute = sess.OpAttribute4InterfaceOpName(interface_op_name)
        assert len(op_attribute.output_bns) == 1
        obn = op_attribute.output_bns[0]
        # Placement of the output blob, per the op's parallel signature.
        blob_parallel_desc_sym_id = op_attribute.parallel_signature.bn_in_op2parallel_desc_symbol_id[
            obn
        ]
        blob_parallel_desc_sym = symbol_storage.GetSymbol4Id(blob_parallel_desc_sym_id)
        op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
            blob_parallel_desc_sym, op_attribute, obn
        )
        op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
        blob_object = self._NewBlobObject(op_arg_parallel_attr, op_arg_blob_attr)
        self._LazyReference(blob_object, interface_op_name)
        return blob_object
def GetSymbol4String(self, string):
if symbol_storage.HasSymbol4String(string):
return symbol_storage.GetSymbol4String(string)
symbol_id = self._NewSymbolId4String(string)
symbol = symbol_util.Symbol(symbol_id, string)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4String(string, symbol)
return symbol
def GetJobConfSymbol(self, job_conf):
if symbol_storage.HasSymbol4JobConf(job_conf):
return symbol_storage.GetSymbol4JobConf(job_conf)
symbol_id = self._NewSymbolId4JobConf(job_conf)
symbol = symbol_util.Symbol(symbol_id, job_conf)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4JobConf(job_conf, symbol)
return symbol
def GetParallelDescSymbol(self, parallel_conf):
device_tag = parallel_conf.device_tag
serialized_parallel_conf = parallel_conf.SerializeToString()
if symbol_storage.HasSymbol4SerializedParallelConf(serialized_parallel_conf):
return symbol_storage.GetSymbol4SerializedParallelConf(
serialized_parallel_conf
)
symbol_id = self._NewSymbolId4ParallelConf(parallel_conf)
symbol = symbol_util.ParallelDescSymbol(symbol_id, parallel_conf, device_tag)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedParallelConf(
serialized_parallel_conf, symbol
)
return symbol
def GetScopeSymbol(self, scope_proto, parent_scope_symbol=None):
symbol_id = self._NewSymbolId4Scope(scope_proto)
serialized_scope_proto = scope_proto.SerializeToString()
if symbol_storage.HasSymbol4SerializedScopeProto(serialized_scope_proto):
return symbol_storage.GetSymbol4SerializedScopeProto(serialized_scope_proto)
symbol = scope_symbol.ScopeSymbol(symbol_id, scope_proto, parent_scope_symbol)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedScopeProto(serialized_scope_proto, symbol)
return symbol
def GetSharedOpKernelObject4ParallelConfSymbol(self, parallel_desc_sym):
if object_storage.HasSharedOpKernelObject4ParallelConfSymbol(parallel_desc_sym):
return object_storage.GetSharedOpKernelObject4ParallelConfSymbol(
parallel_desc_sym
)
object_id = self._NewSharedOpKernelObjectId4ParallelConfSymbolId(
parallel_desc_sym
)
obj = object_util.Object(object_id, parallel_desc_sym)
object_storage.SetSharedOpKernelObject4ParallelConfSymbol(
parallel_desc_sym, obj
)
return obj
    @contextmanager
    def CudaHostPinBlob(self, blob_object):
        """Context manager: cuda-host-register ``blob_object`` on entry,
        unregister on exit (even if the body raises)."""
        self._CudaHostRegisterBlob(blob_object)
        try:
            yield
        finally:
            self._CudaHostUnregisterBlob(blob_object)
def BroadcastBlobReference(self, sole_mirrored_blob_object, parallel_desc_sym):
device_ids = (
sole_mirrored_blob_object.parallel_desc_symbol.machine_id2device_id_list
)
for _, dev_ids in device_ids.items():
assert len(dev_ids) == 1, "dev_ids: %s" % dev_ids
object_id = self._BroadcastObjectReference(
sole_mirrored_blob_object, parallel_desc_sym
)
op_arg_parallel_attr = op_arg_util.MakeBroadcastOpArgParallelAttribute(
parallel_desc_sym
)
return object_util.BlobObject(
object_id=object_id,
op_arg_parallel_attr=op_arg_parallel_attr,
op_arg_blob_attr=sole_mirrored_blob_object.op_arg_blob_attr,
release=self.release_object_,
)
def NewOpKernelObject(self, op_conf):
assert op_conf.HasField("scope_symbol_id")
scope_symbol = symbol_storage.GetSymbol4Id(op_conf.scope_symbol_id)
op_conf_sym = self._GetOpConfSymbol(op_conf)
parallel_desc_sym_id = c_api_util.GetOpParallelSymbolId(op_conf)
parallel_desc_symbol = symbol_storage.GetSymbol4Id(parallel_desc_sym_id)
object_id = self._NewOpKernelObject(
parallel_desc_symbol, scope_symbol.job_desc_symbol, op_conf_sym
)
return OpKernelObject(object_id, op_conf, self.release_object_)
def Build121AssignInstruction(self, ref_blob_object, value_blob_object):
parallel_num = ref_blob_object.parallel_desc_symbol.parallel_num
assert parallel_num == value_blob_object.parallel_desc_symbol.parallel_num
token_ids = (
[oneflow_api.NewTokenId() for _ in range(parallel_num)],
[oneflow_api.NewTokenId() for _ in range(parallel_num)],
)
self._BuildSendInstruction(
ref_blob_object.parallel_desc_symbol, value_blob_object, token_ids
)
self._BuildRecvInstruction(
value_blob_object.parallel_desc_symbol, ref_blob_object, token_ids
)
def _BuildSendInstruction(
self, dst_parallel_desc_symbol, src_blob_object, token_ids
):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "SendBlob"
instruction.parallel_desc_symbol_id = (
src_blob_object.parallel_desc_symbol.symbol_id
)
instruction.operand.append(_SymbolOperand(dst_parallel_desc_symbol.symbol_id))
instruction.operand.append(_ConstOperand(src_blob_object.object_id))
instruction.operand.append(_OperandSeparator())
for token_id in token_ids[0]:
instruction.operand.append(_Uint64Operand(token_id))
instruction.operand.append(_OperandSeparator())
for token_id in token_ids[1]:
instruction.operand.append(_Uint64Operand(token_id))
self.instruction_list_.instruction.append(instruction)
def _BuildRecvInstruction(
self, src_parallel_desc_symbol, dst_blob_object, token_ids
):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "ReceiveBlob"
instruction.parallel_desc_symbol_id = (
dst_blob_object.parallel_desc_symbol.symbol_id
)
instruction.operand.append(_SymbolOperand(src_parallel_desc_symbol.symbol_id))
instruction.operand.append(_Mut2Operand(dst_blob_object.object_id))
instruction.operand.append(_OperandSeparator())
for token_id in token_ids[0]:
instruction.operand.append(_Uint64Operand(token_id))
instruction.operand.append(_OperandSeparator())
for token_id in token_ids[1]:
instruction.operand.append(_Uint64Operand(token_id))
self.instruction_list_.instruction.append(instruction)
def _NewOpKernelObject(self, parallel_desc_symbol, job_desc_sym, op_conf_sym):
object_id = self._NewObjectId(parallel_desc_symbol)
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "InitOpKernelObject"
instruction.parallel_desc_symbol_id = parallel_desc_symbol.symbol_id
instruction.operand.append(_SymbolOperand(job_desc_sym.symbol_id))
instruction.operand.append(_SymbolOperand(op_conf_sym.symbol_id))
instruction.operand.append(_MutOperand(object_id))
self.instruction_list_.instruction.append(instruction)
return object_id
def _StatelessCall(
self,
stream_tag,
op_attribute,
op_parallel_desc_sym=None,
blob_parallel_desc_sym=None,
bn_in_op2blob_object={},
get_delegate_blob_object=None,
):
assert callable(get_delegate_blob_object)
if op_attribute.parallel_signature.HasField("op_parallel_desc_symbol_id"):
symbol_id = op_attribute.parallel_signature.op_parallel_desc_symbol_id
op_parallel_desc_sym = symbol_storage.GetSymbol4Id(symbol_id)
assert op_parallel_desc_sym is not None
def DelegateBlobObject4Ibn(ibn):
op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
op_parallel_desc_sym, op_attribute, ibn
)
return get_delegate_blob_object(
bn_in_op2blob_object[ibn], op_arg_parallel_attr
)
op_conf = op_attribute.op_conf
assert op_conf.HasField("scope_symbol_id"), op_conf
scope_symbol = symbol_storage.GetSymbol4Id(op_conf.scope_symbol_id)
job_desc_sym = scope_symbol.job_desc_symbol
op_conf_sym = self._GetOpConfSymbol(op_conf)
op_node_signature_sym = self._GetOpNodeSignatureSymbol(op_attribute)
opkernel_obj = self.GetSharedOpKernelObject4ParallelConfSymbol(
op_parallel_desc_sym
)
const_input_operand_blob_objects = self._GetConstInputOperandBlobObjects(
op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
)
mutable_input_operand_blob_objects = self._GetMutableInputOperandBlobObjects(
op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
)
mut1_operand_blob_objects = self._GetMut1OperandBlobObjects(
op_attribute,
blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
mut2_operand_blob_objects = self._GetMut2OperandBlobObjects(
op_attribute,
blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
is_user_op = op_attribute.op_conf.HasField("user_conf")
instruction_prefix = "User" if is_user_op else "System"
self._StatelessCallOpKernel(
"%s.%sStatelessCallOpKernel" % (stream_tag, instruction_prefix),
op_parallel_desc_sym,
job_desc_sym,
op_conf_sym,
op_node_signature_sym,
opkernel_obj,
const_input_operand_blob_objects,
mutable_input_operand_blob_objects,
mut1_operand_blob_objects,
mut2_operand_blob_objects,
)
    def _StatefulCall(
        self,
        op_attribute,
        opkernel_object,
        bn_in_op2blob_object,
        get_delegate_blob_object,
    ):
        """Emit a stateful CallOpKernel instruction using ``opkernel_object``'s state.

        Inputs are resolved through ``get_delegate_blob_object`` so they are
        boxed to the op's placement; output BlobObjects are recorded in
        ``bn_in_op2blob_object`` by the _GetMut*OperandBlobObjects helpers.
        """
        op_parallel_desc_sym = opkernel_object.parallel_desc_symbol
        def DelegateBlobObject4Ibn(ibn):
            # Box each input blob to the attribute required at this input slot.
            op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
                op_parallel_desc_sym, op_attribute, ibn
            )
            return get_delegate_blob_object(
                bn_in_op2blob_object[ibn], op_arg_parallel_attr
            )
        op_node_signature_sym = self._GetOpNodeSignatureSymbol(op_attribute)
        const_input_operand_blob_objects = self._GetConstInputOperandBlobObjects(
            op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
        )
        mutable_input_operand_blob_objects = self._GetMutableInputOperandBlobObjects(
            op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
        )
        mut1_operand_blob_objects = self._GetMut1OperandBlobObjects(
            op_attribute,
            op_parallel_desc_sym,
            bn_in_op2blob_object=bn_in_op2blob_object,
        )
        mut2_operand_blob_objects = self._GetMut2OperandBlobObjects(
            op_attribute,
            op_parallel_desc_sym,
            bn_in_op2blob_object=bn_in_op2blob_object,
        )
        is_user_op = op_attribute.op_conf.HasField("user_conf")
        assert is_user_op
        # NOTE(review): given the assert above, the "System" branch is reachable
        # only when asserts are disabled (python -O); kept as written.
        instruction_prefix = "" if is_user_op else "System"
        self._StatefulCallOpKernel(
            "%sCallOpKernel" % instruction_prefix,
            op_parallel_desc_sym,
            opkernel_object,
            op_node_signature_sym,
            const_input_operand_blob_objects,
            mutable_input_operand_blob_objects,
            mut1_operand_blob_objects,
            mut2_operand_blob_objects,
        )
def _CudaHostRegisterBlob(self, blob_object):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "CudaHostRegisterBlob"
instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
instruction.operand.append(_MutOperand(blob_object.object_id))
self.instruction_list_.instruction.append(instruction)
def _CudaHostUnregisterBlob(self, blob_object):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "CudaHostUnregisterBlob"
instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
instruction.operand.append(_MutOperand(blob_object.object_id))
self.instruction_list_.instruction.append(instruction)
def _GetOpConfSymbol(self, op_conf):
serialized_op_conf = op_conf.SerializeToString()
if symbol_storage.HasSymbol4SerializedOpConf(serialized_op_conf):
return symbol_storage.GetSymbol4SerializedOpConf(serialized_op_conf)
symbol_id = self._NewSymbolId4OpConf(op_conf)
symbol = symbol_util.Symbol(symbol_id, op_conf)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedOpConf(serialized_op_conf, symbol)
return symbol
def _GetOpNodeSignatureSymbol(self, op_attribute):
new_op_node_signature = op_attribute_pb.OpNodeSignature()
new_op_node_signature.sbp_signature.CopyFrom(op_attribute.sbp_signature)
new_op_node_signature.mirrored_signature.CopyFrom(
op_attribute.mirrored_signature
)
new_op_node_signature.logical_blob_desc_signature.CopyFrom(
op_attribute.logical_blob_desc_signature
)
new_op_node_signature.batch_axis_signature.CopyFrom(
op_attribute.batch_axis_signature
)
new_op_node_signature.parallel_signature.CopyFrom(
op_attribute.parallel_signature
)
serialized_op_node_signature = new_op_node_signature.SerializeToString()
if symbol_storage.HasSymbol4SerializedOpNodeSignature(
serialized_op_node_signature
):
return symbol_storage.GetSymbol4SerializedOpNodeSignature(
serialized_op_node_signature
)
symbol_id = self._NewSymbolId4OpNodeSignature(new_op_node_signature)
symbol = symbol_util.Symbol(symbol_id, new_op_node_signature)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedOpNodeSignature(
serialized_op_node_signature, symbol
)
return symbol
def _GetConstInputOperandBlobObjects(self, op_attribute, blob_object4ibn=None):
assert callable(blob_object4ibn)
const_input_operand_blob_objects = []
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if ibn2modifier[ibn].is_mutable:
continue
ibn_sym = self.GetSymbol4String(ibn)
in_object = blob_object4ibn(ibn)
const_input_operand_blob_objects.append((ibn_sym, in_object))
return const_input_operand_blob_objects
def _GetMutableInputOperandBlobObjects(self, op_attribute, blob_object4ibn=None):
mutable_input_operand_blob_objects = []
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if not ibn2modifier[ibn].is_mutable:
continue
ibn_sym = self.GetSymbol4String(ibn)
in_object = blob_object4ibn(ibn)
mutable_input_operand_blob_objects.append((ibn_sym, in_object))
return mutable_input_operand_blob_objects
def _GetMut1OperandBlobObjects(
self, op_attribute, parallel_desc_sym, bn_in_op2blob_object={}
):
mut1_operand_blob_objects = []
def GetOutBlobParallelDescSymbol(obn):
parallel_signature = op_attribute.parallel_signature
bn2symbol_id = parallel_signature.bn_in_op2parallel_desc_symbol_id
if obn in bn2symbol_id:
return symbol_storage.GetSymbol4Id(bn2symbol_id[obn])
else:
return parallel_desc_sym
def OutputBns():
obn2modifier = op_attribute.arg_modifier_signature.obn2output_blob_modifier
for obn in op_attribute.output_bns:
if obn2modifier[obn].header_infered_before_compute:
yield obn
for tmp_bn in op_attribute.tmp_bns:
yield tmp_bn
for obn in OutputBns():
obn_sym = self.GetSymbol4String(obn)
op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
GetOutBlobParallelDescSymbol(obn), op_attribute, obn
)
op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
out_blob_object = self._NewBlobObject(
op_arg_parallel_attr, op_arg_blob_attr
)
lbi = op_attribute.arg_signature.bn_in_op2lbi[obn]
bn_in_op2blob_object[obn] = out_blob_object
mut1_operand_blob_objects.append((obn_sym, out_blob_object))
return mut1_operand_blob_objects
def _CheckRefInBlobObjectParallelDesc(
self, op_attribute, op_parallel_desc_sym, bn_in_op2blob_object={}
):
op_conf = op_attribute.op_conf
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if not ibn2modifier[ibn].is_mutable:
continue
ref_blob_object = bn_in_op2blob_object[ibn]
assert op_parallel_desc_sym == ref_blob_object.parallel_desc_symbol, (
"op_conf: %s\n%s\nv.s.\n%s"
% (op_conf, op_parallel_desc_sym, ref_blob_object.parallel_desc_symbol)
)
def _GetMut2OperandBlobObjects(
self, op_attribute, parallel_desc_sym, bn_in_op2blob_object={}
):
mut2_operand_blob_objects = []
def GetOutBlobParallelDescSymbol(obn):
parallel_signature = op_attribute.parallel_signature
bn2symbol_id = parallel_signature.bn_in_op2parallel_desc_symbol_id
if obn in bn2symbol_id:
return symbol_storage.GetSymbol4Id(bn2symbol_id[obn])
else:
return parallel_desc_sym
for obn in op_attribute.output_bns:
obn2modifier = op_attribute.arg_modifier_signature.obn2output_blob_modifier
if obn2modifier[obn].header_infered_before_compute:
continue
obn_sym = self.GetSymbol4String(obn)
op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
GetOutBlobParallelDescSymbol(obn), op_attribute, obn
)
op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
out_blob_object = self._NewBlobObject(
op_arg_parallel_attr, op_arg_blob_attr
)
bn_in_op2blob_object[obn] = out_blob_object
mut2_operand_blob_objects.append((obn_sym, out_blob_object))
return mut2_operand_blob_objects
def _NewBlobObject(self, op_arg_parallel_attr, op_arg_blob_attr):
object_id = self._NewObjectId(op_arg_parallel_attr.parallel_desc_symbol)
return object_util.BlobObject(
object_id=object_id,
op_arg_parallel_attr=op_arg_parallel_attr,
op_arg_blob_attr=op_arg_blob_attr,
release=self.release_object_,
)
def _NewSymbolId4String(self, string):
symbol_id = self._NewSymbolId()
self._InitStringSymbol(symbol_id, string)
return symbol_id
def _NewSymbolId4ParallelConf(self, parallel_conf):
symbol_id = self.id_generator_.NewSymbolId()
self._NewParallelConfSymbol(symbol_id, parallel_conf)
return symbol_id
def _NewSymbolId4Scope(self, scope_proto):
symbol_id = self._NewSymbolId()
scope_proto.symbol_id = symbol_id
self._NewScopeSymbol(scope_proto)
return symbol_id
def _NewSymbolId4JobConf(self, job_conf):
symbol_id = self._NewSymbolId()
self._InitJobConfSymbol(symbol_id, job_conf)
return symbol_id
def _NewSymbolId4OpConf(self, op_conf):
symbol_id = self._NewSymbolId()
self._InitOpConfSymbol(symbol_id, op_conf)
return symbol_id
def _NewSymbolId4OpNodeSignature(self, op_node_signature):
symbol_id = self._NewSymbolId()
self._InitOpNodeSignatureDescSymbol(symbol_id, op_node_signature)
return symbol_id
    def _NewSharedOpKernelObjectId4ParallelConfSymbolId(self, parallel_desc_sym):
        """Allocate an object id on ``parallel_desc_sym`` for the shared op-kernel object."""
        return self._NewObjectId(parallel_desc_sym)
    def _StatelessCallOpKernel(
        self,
        instr_name,
        parallel_desc_sym,
        job_desc_sym,
        op_conf_sym,
        op_node_signature_sym,
        shared_opkernel_obj,
        const_input_operand_blob_objects,
        mutable_input_operand_blob_objects,
        mut1_operand_blob_objects,
        mut2_operand_blob_objects,
    ):
        """Append one device-tagged stateless call instruction.

        Operand order is significant: job/op-conf/signature symbols and the
        shared opkernel object, then four separator-delimited groups — const
        inputs, mutable inputs, mut1 outputs, mut2 outputs — each listing all
        bn symbols before all blob object ids.
        """
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "%s.%s" % (
            parallel_desc_sym.device_tag,
            instr_name,
        )
        instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
        instruction.operand.append(_SymbolOperand(job_desc_sym.symbol_id))
        instruction.operand.append(_SymbolOperand(op_conf_sym.symbol_id))
        instruction.operand.append(_SymbolOperand(op_node_signature_sym.symbol_id))
        instruction.operand.append(_MutOperand(shared_opkernel_obj.object_id))
        instruction.operand.append(_OperandSeparator())
        # const inputs: bn symbols, then read-only blob ids
        for ibn_sym, _ in const_input_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(ibn_sym.symbol_id))
        for _, blob_object in const_input_operand_blob_objects:
            instruction.operand.append(_ConstOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        # mutable ("ref") inputs
        for ibn_sym, _ in mutable_input_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(ibn_sym.symbol_id))
        for _, blob_object in mutable_input_operand_blob_objects:
            instruction.operand.append(_MutOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        # mut1 outputs (header inferred before compute, plus tmp blobs)
        for obn_sym, _ in mut1_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(obn_sym.symbol_id))
        for _, blob_object in mut1_operand_blob_objects:
            instruction.operand.append(_MutOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        # mut2 outputs (header only known at compute time)
        for obn_sym, _ in mut2_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(obn_sym.symbol_id))
        for _, blob_object in mut2_operand_blob_objects:
            instruction.operand.append(_Mut2Operand(blob_object.object_id))
        self.instruction_list_.instruction.append(instruction)
    def _StatefulCallOpKernel(
        self,
        instr_name,
        parallel_desc_sym,
        opkernel_object,
        op_node_signature_sym,
        const_input_operand_blob_objects,
        mutable_input_operand_blob_objects,
        mut1_operand_blob_objects,
        mut2_operand_blob_objects,
    ):
        """Append one device-tagged stateful call instruction.

        Same separator-delimited operand layout as _StatelessCallOpKernel,
        except the per-call opkernel object replaces the job/op-conf symbols
        and the shared opkernel object.
        """
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "%s.%s" % (
            parallel_desc_sym.device_tag,
            instr_name,
        )
        instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
        instruction.operand.append(_MutOperand(opkernel_object.object_id))
        instruction.operand.append(_SymbolOperand(op_node_signature_sym.symbol_id))
        instruction.operand.append(_OperandSeparator())
        # const inputs: bn symbols, then read-only blob ids
        for ibn_sym, _ in const_input_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(ibn_sym.symbol_id))
        for _, blob_object in const_input_operand_blob_objects:
            instruction.operand.append(_ConstOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        # mutable ("ref") inputs
        for ibn_sym, _ in mutable_input_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(ibn_sym.symbol_id))
        for _, blob_object in mutable_input_operand_blob_objects:
            instruction.operand.append(_MutOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        # mut1 outputs (header inferred before compute, plus tmp blobs)
        for obn_sym, _ in mut1_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(obn_sym.symbol_id))
        for _, blob_object in mut1_operand_blob_objects:
            instruction.operand.append(_MutOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        # mut2 outputs (header only known at compute time)
        for obn_sym, _ in mut2_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(obn_sym.symbol_id))
        for _, blob_object in mut2_operand_blob_objects:
            instruction.operand.append(_Mut2Operand(blob_object.object_id))
        self.instruction_list_.instruction.append(instruction)
def _NewSymbolId(self):
symbol_id = self.id_generator_.NewSymbolId()
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "NewSymbol"
instruction.operand.append(_Int64Operand(symbol_id))
self.instruction_list_.instruction.append(instruction)
return symbol_id
def _NewObjectId(self, parallel_desc_sym):
object_id = self.id_generator_.NewObjectId()
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "NewObject"
instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
instruction.operand.append(_Int64Operand(object_id))
self.instruction_list_.instruction.append(instruction)
return object_id
    def _LazyReference(self, blob_object, interface_op_name):
        """Emit a device-tagged LazyReference instruction for ``blob_object``.

        NOTE(review): the ``interface_op_name`` parameter is unused — the
        symbol operand is derived from
        ``blob_object.op_arg_blob_attr.logical_blob_name``. Confirm this is
        intentional before relying on the parameter.
        """
        instruction = instr_util.InstructionProto()
        device_tag = blob_object.parallel_desc_symbol.device_tag
        instruction.instr_type_name = "{}.LazyReference".format(device_tag)
        instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
        instruction.operand.append(_MutOperand(blob_object.object_id))
        # Symbol for the logical blob name, not the passed-in op name.
        interface_op_name_sym = self.GetSymbol4String(
            blob_object.op_arg_blob_attr.logical_blob_name
        )
        instruction.operand.append(_SymbolOperand(interface_op_name_sym.symbol_id))
        self.instruction_list_.instruction.append(instruction)
def _BroadcastObjectReference(self, sole_mirrored_object, parallel_desc_sym):
object_id = self.id_generator_.NewObjectId()
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "BroadcastObjectReference"
instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
instruction.operand.append(_Int64Operand(object_id))
instruction.operand.append(_Int64Operand(sole_mirrored_object.object_id))
self.instruction_list_.instruction.append(instruction)
return object_id
def _InitStringSymbol(self, symbol_id, string):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "InitStringSymbol"
instruction.operand.append(_InitSymbolOperand(symbol_id))
self.instruction_list_.instruction.append(instruction)
eager_symbol = eager_symbol_util.EagerSymbol()
eager_symbol.symbol_id = symbol_id
eager_symbol.string_symbol = string
self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _NewParallelConfSymbol(self, symbol_id, parallel_conf):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "NewParallelDescSymbol"
instruction.operand.append(_Int64Operand(symbol_id))
self.instruction_list_.instruction.append(instruction)
eager_symbol = eager_symbol_util.EagerSymbol()
eager_symbol.symbol_id = symbol_id
eager_symbol.parallel_conf_symbol.CopyFrom(parallel_conf)
self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _NewScopeSymbol(self, scope_proto):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "InitScopeSymbol"
instruction.operand.append(_InitSymbolOperand(scope_proto.symbol_id))
self.instruction_list_.instruction.append(instruction)
eager_symbol = eager_symbol_util.EagerSymbol()
eager_symbol.symbol_id = scope_proto.symbol_id
eager_symbol.scope_symbol.CopyFrom(scope_proto)
self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _InitJobConfSymbol(self, symbol_id, job_conf):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "InitJobDescSymbol"
instruction.operand.append(_InitSymbolOperand(symbol_id))
self.instruction_list_.instruction.append(instruction)
eager_symbol = eager_symbol_util.EagerSymbol()
eager_symbol.symbol_id = symbol_id
eager_symbol.job_conf_symbol.CopyFrom(job_conf)
self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _InitOpConfSymbol(self, symbol_id, op_conf):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "InitOperatorConfSymbol"
instruction.operand.append(_InitSymbolOperand(symbol_id))
self.instruction_list_.instruction.append(instruction)
eager_symbol = eager_symbol_util.EagerSymbol()
eager_symbol.symbol_id = symbol_id
eager_symbol.op_conf_symbol.CopyFrom(op_conf)
self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _InitOpNodeSignatureDescSymbol(self, symbol_id, op_node_signature):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "InitOpNodeSignatureDescSymbol"
instruction.operand.append(_InitSymbolOperand(symbol_id))
self.instruction_list_.instruction.append(instruction)
eager_symbol = eager_symbol_util.EagerSymbol()
eager_symbol.symbol_id = symbol_id
eager_symbol.op_node_signature_symbol.CopyFrom(op_node_signature)
self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _FetchBlob(self, instruction_name, blob_object, fetcher):
unique_callback_id = python_callback.GetIdForRegisteredCallback(fetcher)
instruction = instr_util.InstructionProto()
device_tag = blob_object.parallel_desc_symbol.device_tag
instruction.instr_type_name = "%s.%s" % (device_tag, instruction_name)
instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
instruction.operand.append(_ConstOperand(blob_object.object_id))
instruction.operand.append(_Int64Operand(unique_callback_id))
self.instruction_list_.instruction.append(instruction)
def FeedBlob(self, blob_object, feeder):
unique_callback_id = python_callback.GetIdForRegisteredCallback(feeder)
instruction = instr_util.InstructionProto()
device_tag = blob_object.parallel_desc_symbol.device_tag
instruction.instr_type_name = "%s.%s" % (device_tag, "FeedBlob")
instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
instruction.operand.append(_Mut2Operand(blob_object.object_id))
instruction.operand.append(_Int64Operand(unique_callback_id))
self.instruction_list_.instruction.append(instruction)
def _TryClearObject(self, obj):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "TryClearObject"
instruction.parallel_desc_symbol_id = obj.parallel_desc_symbol.symbol_id
instruction.operand.append(_MutOperand(obj.object_id))
self.instruction_list_.instruction.append(instruction)
def _DeleteObject(self, blob_object):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "DeleteObject"
instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
instruction.operand.append(_DelObjectOperand(blob_object.object_id))
self.instruction_list_.instruction.append(instruction)
def _ReplaceMirrored(self, parallel_desc_sym, lhs_objects, rhs_objects):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "ReplaceMirrored"
instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
for lhs_object in lhs_objects:
instruction.operand.append(_Int64Operand(lhs_object.object_id))
instruction.operand.append(_OperandSeparator())
for rhs_object in rhs_objects:
instruction.operand.append(_Int64Operand(rhs_object.object_id))
self.instruction_list_.instruction.append(instruction)
def _SymbolOperand(val):
    """Operand referencing symbol id ``val`` (sole mirrored)."""
    op = instr_util.InstructionOperandProto()
    _SetSoleMirroredOperand(op.symbol_operand, val)
    return op
def _InitSymbolOperand(val):
    """Operand initializing symbol id ``val`` (sole mirrored)."""
    op = instr_util.InstructionOperandProto()
    _SetSoleMirroredOperand(op.init_symbol_operand, val)
    return op
def _ConstOperand(val):
    """Read-only operand for object id ``val`` on the current device."""
    op = instr_util.InstructionOperandProto()
    _SetMirroredOperand(op.const_operand, val)
    return op
def _MutOperand(val):
    """Mutable operand for object id ``val`` on the current device."""
    op = instr_util.InstructionOperandProto()
    _SetMirroredOperand(op.mut_operand, val)
    return op
def _Mut2Operand(val):
    """Mut2 operand for object id ``val`` on the current device."""
    op = instr_util.InstructionOperandProto()
    _SetMirroredOperand(op.mut2_operand, val)
    return op
def _DelObjectOperand(val):
    """Mutable operand for object id ``val`` across all mirrored devices (deletion)."""
    op = instr_util.InstructionOperandProto()
    _SetAllMirroredOperand(op.mut_operand, val)
    return op
def _Int64Operand(val):
    """Plain signed-integer operand carrying ``val``."""
    op = instr_util.InstructionOperandProto()
    op.int64_operand = val
    return op
def _Uint64Operand(val):
    """Plain unsigned-integer operand carrying ``val``."""
    op = instr_util.InstructionOperandProto()
    op.uint64_operand = val
    return op
def _OperandSeparator():
    """Separator operand delimiting operand groups within one instruction."""
    op = instr_util.InstructionOperandProto()
    op.separator.SetInParent()
    return op
def _SetMirroredOperand(operand, val):
    """Point ``operand`` at logical object ``val``, scoped to the current global device id."""
    operand.logical_object_id = val
    operand.current_global_device_id.SetInParent()
def _SetSoleMirroredOperand(operand, val):
    """Point ``operand`` at logical object ``val``, scoped to the sole mirrored object."""
    operand.logical_object_id = val
    operand.sole_mirrored_object.SetInParent()
def _SetAllMirroredOperand(operand, val):
    """Point ``operand`` at logical object ``val``, scoped to all mirrored objects."""
    operand.logical_object_id = val
    operand.all_mirrored_object.SetInParent()
def _FindOrCreateDelegateBlobObject(builder, x_blob_object, op_arg_parallel_attr):
    """Return ``x_blob_object`` when its parallel attr already matches;
    otherwise return a boxed delegate from the per-blob cache."""
    if x_blob_object.op_arg_parallel_attr == op_arg_parallel_attr:
        return x_blob_object

    def _Box(blob_object, parallel_attr):
        return boxing_util.BoxingTo(builder, blob_object, parallel_attr)

    cache = blob_cache_util.FindOrCreateBlobCache(x_blob_object)
    return cache.GetCachedDelegateBlobObject(op_arg_parallel_attr, _Box)
def _GetOpConfBlobNameAttr(pb_message, field):
if hasattr(pb_message, field):
return getattr(pb_message, field)
m = re.search("_(\d+)$", field)
assert m is not None
blob_name = field[0 : -len(m.group(0))]
index = int(m.group(0)[1:])
assert hasattr(pb_message, blob_name), (pb_message, blob_name)
repeated_field = getattr(pb_message, blob_name)
assert index >= 0
assert index < len(repeated_field)
return repeated_field[index]
def _ReleaseLogicalObject(obj):
    """Schedule deletion of ``obj`` via a logical instruction run."""
    def BuildDelete(builder):
        builder.DeleteObject(obj)
    LogicalRun(BuildDelete)
def _ReleasePhysicalObject(obj):
    """Schedule deletion of ``obj`` via a physical instruction run."""
    def BuildDelete(builder):
        builder.DeleteObject(obj)
    PhysicalRun(BuildDelete)
| 43.338681 | 101 | 0.721342 | from __future__ import absolute_import
import re
from contextlib import contextmanager
import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_util
import oneflow.core.job.placement_pb2 as placement_pb_util
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb
import oneflow.core.vm.instruction_pb2 as instr_util
import oneflow.python.eager.blob_cache as blob_cache_util
import oneflow.python.eager.boxing_util as boxing_util
import oneflow.python.eager.object as object_util
import oneflow.python.eager.object_storage as object_storage
import oneflow.python.eager.symbol as symbol_util
import oneflow.python.eager.symbol_storage as symbol_storage
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.scope_symbol as scope_symbol
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.op_arg_util as op_arg_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.python_callback as python_callback
import oneflow.python.framework.session_context as session_ctx
from oneflow.python.eager.opkernel_object import OpKernelObject
import oneflow.python.vm.id_util as vm_id_util
import oneflow
oneflow_api = oneflow.oneflow_api
def PhysicalRun(build):
    """Run ``build(builder)`` with physical ids and flush via RunPhysicalInstruction."""
    return _Run(
        build,
        vm_id_util.PhysicalIdGenerator(),
        c_api_util.RunPhysicalInstruction,
        _ReleasePhysicalObject,
    )
def LogicalRun(build):
    """Run ``build(builder)`` with logical ids and flush via RunLogicalInstruction."""
    return _Run(
        build,
        vm_id_util.LogicalIdGenerator(),
        c_api_util.RunLogicalInstruction,
        _ReleaseLogicalObject,
    )
def _Run(build, id_generator, run_api, release_object):
    """Collect instructions/symbols from ``build``, execute them, then clear
    the session-level lists for the next run."""
    session = session_ctx.GetDefaultSession()
    instruction_list = session.instruction_list
    eager_symbol_list = session.eager_symbol_list
    builder = InstructionsBuilder(
        id_generator, release_object, instruction_list, eager_symbol_list
    )
    build(builder)
    run_api(instruction_list, eager_symbol_list)
    # Lists are reused across runs; drain them after execution.
    instruction_list.ClearField("instruction")
    eager_symbol_list.ClearField("eager_symbol")
def _DefaultBlobObject4Ibn(ibn):
    """Placeholder input-blob resolver; callers must supply a real one."""
    raise NotImplementedError
class InstructionsBuilder(object):
    def __init__(
        self, id_generator, release_object, instruction_list, eager_symbol_list
    ):
        """Accumulates vm instructions and eager symbols to be flushed by ``_Run``.

        id_generator: allocator for fresh symbol/object ids (logical or physical).
        release_object: callback wired into created objects for their release.
        instruction_list / eager_symbol_list: session-owned protobuf lists that
            this builder appends to.
        """
        self.id_generator_ = id_generator
        self.release_object_ = release_object
        assert isinstance(instruction_list, instr_util.InstructionListProto)
        assert isinstance(eager_symbol_list, eager_symbol_util.EagerSymbolList)
        self.instruction_list_ = instruction_list
        self.eager_symbol_list_ = eager_symbol_list
def StatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object={}):
op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDelegateBlobObject(blob_object, op_arg_parallel_attr):
return _FindOrCreateDelegateBlobObject(
self, blob_object, op_arg_parallel_attr
)
self._StatelessCall(
"compute",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDelegateBlobObject,
)
def BoxingStatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object={}):
op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"compute",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def BoxingCudaD2HStatelessCall(
self, op_attribute, in_parallel_conf, bn_in_op2blob_object={}
):
op_parallel_desc_sym = self.GetParallelDescSymbol(in_parallel_conf)
blob_parallel_desc_sym = boxing_util.TryReplaceDeviceTag(
self, op_parallel_desc_sym, "cpu"
)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"copy_d2h",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def BoxingCudaH2DStatelessCall(
self, op_attribute, out_parallel_conf, bn_in_op2blob_object={}
):
op_parallel_desc_sym = self.GetParallelDescSymbol(out_parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"copy_h2d",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def StatefulCall(self, op_attribute, opkernel_object, bn_in_op2blob_object={}):
op_parallel_desc_sym = opkernel_object.parallel_desc_symbol
parallel_sig = op_attribute.parallel_signature
assert parallel_sig.HasField("op_parallel_desc_symbol_id")
assert op_parallel_desc_sym.symbol_id == parallel_sig.op_parallel_desc_symbol_id
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDelegateBlobObject(blob_object, op_arg_parallel_attr):
return _FindOrCreateDelegateBlobObject(
self, blob_object, op_arg_parallel_attr
)
self._StatefulCall(
op_attribute,
opkernel_object=opkernel_object,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDelegateBlobObject,
)
    def DeleteObject(self, obj):
        """Emit instructions that clear *obj*'s payload (best effort) and then
        delete the object itself."""
        self._TryClearObject(obj)
        self._DeleteObject(obj)
    def InsertRemoveForeignCallbackInstruction(self, object_id, callback):
        """Emit a RemoveForeignCallback instruction for *object_id*.

        *callback* is registered with the python-callback registry and
        referenced by its unique id. Operand order (object id, then
        callback id) is part of the instruction's wire contract.
        """
        unique_callback_id = python_callback.GetIdForRegisteredCallback(callback)
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "RemoveForeignCallback"
        instruction.operand.append(_DelObjectOperand(object_id))
        instruction.operand.append(_Int64Operand(unique_callback_id))
        self.instruction_list_.instruction.append(instruction)
    def FetchBlobHeader(self, blob_object, callback):
        """Emit a FetchBlobHeader instruction; *callback* is invoked with the
        fetched header."""
        return self._FetchBlob("FetchBlobHeader", blob_object, callback)
    def FetchBlobBody(self, blob_object, callback):
        """Emit a FetchBlobBody instruction; *callback* is invoked with the
        fetched body."""
        return self._FetchBlob("FetchBlobBody", blob_object, callback)
    def PackPhysicalBlobsToLogicalBlob(
        self, physical_blob_objects, op_arg_parallel_attr, op_arg_blob_attr
    ):
        """Merge per-device physical blob objects into one logical blob object.

        Validates that every physical blob lives on a single device with the
        expected device tag, and that the target placement's devices are all
        covered by the physical blobs, then emits a ReplaceMirrored
        instruction mapping the new logical object to the physical ones.
        """
        parallel_desc_symbol = op_arg_parallel_attr.parallel_desc_symbol
        machine_id2device_ids = parallel_desc_symbol.machine_id2device_id_list
        device_tag = parallel_desc_symbol.parallel_conf.device_tag
        # Collect the (machine, device) pair of each sole-device physical blob.
        machine_device_ids = set()
        for physical_blob_object in physical_blob_objects:
            phy_paralle_desc_sym = physical_blob_object.parallel_desc_symbol
            assert (
                phy_paralle_desc_sym.parallel_num == 1
            ), phy_paralle_desc_sym.parallel_num
            assert phy_paralle_desc_sym.device_tag == device_tag, "%s v.s. %s" % (
                phy_paralle_desc_sym.device_tag,
                device_tag,
            )
            phy_machine_id2device_ids = phy_paralle_desc_sym.machine_id2device_id_list
            machine_id = list(phy_machine_id2device_ids.keys())[0]
            pair = (machine_id, phy_machine_id2device_ids[machine_id][0])
            machine_device_ids.add(pair)
        # The target placement must be fully covered by the physical blobs.
        for machine_id, device_ids in machine_id2device_ids.items():
            for device_id in device_ids:
                assert (machine_id, device_id) in machine_device_ids, "%s not in %s" % (
                    (machine_id, device_id),
                    machine_device_ids,
                )
        logical_blob_object = self._NewBlobObject(
            op_arg_parallel_attr, op_arg_blob_attr
        )
        self._ReplaceMirrored(
            op_arg_parallel_attr.parallel_desc_symbol,
            [logical_blob_object],
            physical_blob_objects,
        )
        return logical_blob_object
def GetPhysicalParallelDescSymbols(self, parallel_desc_symbol):
machine_id2device_ids = parallel_desc_symbol.machine_id2device_id_list
device_tag = parallel_desc_symbol.parallel_conf.device_tag
phy_parallel_desc_symbols = []
def AppendPhyParallelDescSymbol(machine_id, device_id):
parallel_conf = placement_pb_util.ParallelConf()
parallel_conf.device_tag = device_tag
parallel_conf.device_name.append("%d:%d" % (machine_id, device_id))
phy_parallel_desc_symbols.append(self.GetParallelDescSymbol(parallel_conf))
for machine_id, device_ids in machine_id2device_ids.items():
for device_id in device_ids:
AppendPhyParallelDescSymbol(machine_id, device_id)
return phy_parallel_desc_symbols
def UnpackLogicalBlobToPhysicalBlobs(self, blob_object):
phy_parallel_desc_symbols = self.GetPhysicalParallelDescSymbols(
blob_object.parallel_desc_symbol
)
def GetPhysicalBlob(parallel_desc_sym):
op_arg_parallel_attr = op_arg_util.MakeMirroredOpArgParallelAttribute(
parallel_desc_sym
)
pyhsical_blob_object = self._NewBlobObject(
op_arg_parallel_attr, blob_object.op_arg_blob_attr
)
return pyhsical_blob_object
physical_blob_objects = [
GetPhysicalBlob(symbol) for symbol in phy_parallel_desc_symbols
]
self._ReplaceMirrored(
blob_object.parallel_desc_symbol, physical_blob_objects, [blob_object]
)
return physical_blob_objects
def MakeReferenceBlobObject(self, blob_object, op_arg_parallel_attr):
parallel_desc_symbol = blob_object.parallel_desc_symbol
assert parallel_desc_symbol == op_arg_parallel_attr.parallel_desc_symbol
ref_blob_object = self._NewBlobObject(
op_arg_parallel_attr, blob_object.op_arg_blob_attr
)
self._ReplaceMirrored(parallel_desc_symbol, [ref_blob_object], [blob_object])
return ref_blob_object
    def MakeLazyRefBlobObject(self, interface_op_name):
        """Create a blob object that lazily references the (sole) output of
        the interface op named *interface_op_name* in the default session.

        Returns the new BlobObject after emitting a LazyReference
        instruction for it.
        """
        sess = session_ctx.GetDefaultSession()
        op_attribute = sess.OpAttribute4InterfaceOpName(interface_op_name)
        # Interface ops are expected to have exactly one output blob.
        assert len(op_attribute.output_bns) == 1
        obn = op_attribute.output_bns[0]
        blob_parallel_desc_sym_id = op_attribute.parallel_signature.bn_in_op2parallel_desc_symbol_id[
            obn
        ]
        blob_parallel_desc_sym = symbol_storage.GetSymbol4Id(blob_parallel_desc_sym_id)
        op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
            blob_parallel_desc_sym, op_attribute, obn
        )
        op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
        blob_object = self._NewBlobObject(op_arg_parallel_attr, op_arg_blob_attr)
        self._LazyReference(blob_object, interface_op_name)
        return blob_object
def GetSymbol4String(self, string):
if symbol_storage.HasSymbol4String(string):
return symbol_storage.GetSymbol4String(string)
symbol_id = self._NewSymbolId4String(string)
symbol = symbol_util.Symbol(symbol_id, string)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4String(string, symbol)
return symbol
def GetJobConfSymbol(self, job_conf):
if symbol_storage.HasSymbol4JobConf(job_conf):
return symbol_storage.GetSymbol4JobConf(job_conf)
symbol_id = self._NewSymbolId4JobConf(job_conf)
symbol = symbol_util.Symbol(symbol_id, job_conf)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4JobConf(job_conf, symbol)
return symbol
def GetParallelDescSymbol(self, parallel_conf):
device_tag = parallel_conf.device_tag
serialized_parallel_conf = parallel_conf.SerializeToString()
if symbol_storage.HasSymbol4SerializedParallelConf(serialized_parallel_conf):
return symbol_storage.GetSymbol4SerializedParallelConf(
serialized_parallel_conf
)
symbol_id = self._NewSymbolId4ParallelConf(parallel_conf)
symbol = symbol_util.ParallelDescSymbol(symbol_id, parallel_conf, device_tag)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedParallelConf(
serialized_parallel_conf, symbol
)
return symbol
    def GetScopeSymbol(self, scope_proto, parent_scope_symbol=None):
        """Create (or look up) a ScopeSymbol for *scope_proto*.

        NOTE(review): a fresh symbol id is allocated and written into
        scope_proto BEFORE the proto is serialized for the cache lookup, so
        the serialized key differs on every call and the cache branch looks
        unreachable — confirm whether this ordering is intentional.
        """
        symbol_id = self._NewSymbolId4Scope(scope_proto)
        serialized_scope_proto = scope_proto.SerializeToString()
        if symbol_storage.HasSymbol4SerializedScopeProto(serialized_scope_proto):
            return symbol_storage.GetSymbol4SerializedScopeProto(serialized_scope_proto)
        symbol = scope_symbol.ScopeSymbol(symbol_id, scope_proto, parent_scope_symbol)
        symbol_storage.SetSymbol4Id(symbol_id, symbol)
        symbol_storage.SetSymbol4SerializedScopeProto(serialized_scope_proto, symbol)
        return symbol
def GetSharedOpKernelObject4ParallelConfSymbol(self, parallel_desc_sym):
if object_storage.HasSharedOpKernelObject4ParallelConfSymbol(parallel_desc_sym):
return object_storage.GetSharedOpKernelObject4ParallelConfSymbol(
parallel_desc_sym
)
object_id = self._NewSharedOpKernelObjectId4ParallelConfSymbolId(
parallel_desc_sym
)
obj = object_util.Object(object_id, parallel_desc_sym)
object_storage.SetSharedOpKernelObject4ParallelConfSymbol(
parallel_desc_sym, obj
)
return obj
    @contextmanager
    def CudaHostPinBlob(self, blob_object):
        """Context manager that emits CudaHostRegisterBlob on entry and
        CudaHostUnregisterBlob on exit (even on error) for *blob_object*."""
        self._CudaHostRegisterBlob(blob_object)
        try:
            yield
        finally:
            self._CudaHostUnregisterBlob(blob_object)
    def BroadcastBlobReference(self, sole_mirrored_blob_object, parallel_desc_sym):
        """Broadcast a sole-device mirrored blob reference onto
        *parallel_desc_sym* and return the broadcast BlobObject.

        The source blob must occupy exactly one device per machine.
        """
        device_ids = (
            sole_mirrored_blob_object.parallel_desc_symbol.machine_id2device_id_list
        )
        for _, dev_ids in device_ids.items():
            assert len(dev_ids) == 1, "dev_ids: %s" % dev_ids
        object_id = self._BroadcastObjectReference(
            sole_mirrored_blob_object, parallel_desc_sym
        )
        op_arg_parallel_attr = op_arg_util.MakeBroadcastOpArgParallelAttribute(
            parallel_desc_sym
        )
        return object_util.BlobObject(
            object_id=object_id,
            op_arg_parallel_attr=op_arg_parallel_attr,
            op_arg_blob_attr=sole_mirrored_blob_object.op_arg_blob_attr,
            release=self.release_object_,
        )
def NewOpKernelObject(self, op_conf):
assert op_conf.HasField("scope_symbol_id")
scope_symbol = symbol_storage.GetSymbol4Id(op_conf.scope_symbol_id)
op_conf_sym = self._GetOpConfSymbol(op_conf)
parallel_desc_sym_id = c_api_util.GetOpParallelSymbolId(op_conf)
parallel_desc_symbol = symbol_storage.GetSymbol4Id(parallel_desc_sym_id)
object_id = self._NewOpKernelObject(
parallel_desc_symbol, scope_symbol.job_desc_symbol, op_conf_sym
)
return OpKernelObject(object_id, op_conf, self.release_object_)
    def Build121AssignInstruction(self, ref_blob_object, value_blob_object):
        """Assign *value_blob_object* to *ref_blob_object* one-to-one across
        ranks by emitting paired SendBlob/ReceiveBlob instructions.

        Two token-id lists (one per direction/phase) pair the send with the
        matching receive; both placements must have equal parallel_num.
        """
        parallel_num = ref_blob_object.parallel_desc_symbol.parallel_num
        assert parallel_num == value_blob_object.parallel_desc_symbol.parallel_num
        token_ids = (
            [oneflow_api.NewTokenId() for _ in range(parallel_num)],
            [oneflow_api.NewTokenId() for _ in range(parallel_num)],
        )
        self._BuildSendInstruction(
            ref_blob_object.parallel_desc_symbol, value_blob_object, token_ids
        )
        self._BuildRecvInstruction(
            value_blob_object.parallel_desc_symbol, ref_blob_object, token_ids
        )
    def _BuildSendInstruction(
        self, dst_parallel_desc_symbol, src_blob_object, token_ids
    ):
        """Emit a SendBlob instruction on the source blob's placement.

        Operand layout (order is part of the instruction contract):
        dst placement symbol, const src object, separator, first token-id
        list, separator, second token-id list.
        """
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "SendBlob"
        instruction.parallel_desc_symbol_id = (
            src_blob_object.parallel_desc_symbol.symbol_id
        )
        instruction.operand.append(_SymbolOperand(dst_parallel_desc_symbol.symbol_id))
        instruction.operand.append(_ConstOperand(src_blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        for token_id in token_ids[0]:
            instruction.operand.append(_Uint64Operand(token_id))
        instruction.operand.append(_OperandSeparator())
        for token_id in token_ids[1]:
            instruction.operand.append(_Uint64Operand(token_id))
        self.instruction_list_.instruction.append(instruction)
    def _BuildRecvInstruction(
        self, src_parallel_desc_symbol, dst_blob_object, token_ids
    ):
        """Emit a ReceiveBlob instruction on the destination blob's placement.

        Mirrors _BuildSendInstruction: src placement symbol, mut2 dst
        object, separator, first token-id list, separator, second list.
        """
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "ReceiveBlob"
        instruction.parallel_desc_symbol_id = (
            dst_blob_object.parallel_desc_symbol.symbol_id
        )
        instruction.operand.append(_SymbolOperand(src_parallel_desc_symbol.symbol_id))
        instruction.operand.append(_Mut2Operand(dst_blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        for token_id in token_ids[0]:
            instruction.operand.append(_Uint64Operand(token_id))
        instruction.operand.append(_OperandSeparator())
        for token_id in token_ids[1]:
            instruction.operand.append(_Uint64Operand(token_id))
        self.instruction_list_.instruction.append(instruction)
    def _NewOpKernelObject(self, parallel_desc_symbol, job_desc_sym, op_conf_sym):
        """Allocate an object id on *parallel_desc_symbol* and emit an
        InitOpKernelObject instruction for it; return the object id."""
        object_id = self._NewObjectId(parallel_desc_symbol)
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "InitOpKernelObject"
        instruction.parallel_desc_symbol_id = parallel_desc_symbol.symbol_id
        instruction.operand.append(_SymbolOperand(job_desc_sym.symbol_id))
        instruction.operand.append(_SymbolOperand(op_conf_sym.symbol_id))
        instruction.operand.append(_MutOperand(object_id))
        self.instruction_list_.instruction.append(instruction)
        return object_id
def _StatelessCall(
self,
stream_tag,
op_attribute,
op_parallel_desc_sym=None,
blob_parallel_desc_sym=None,
bn_in_op2blob_object={},
get_delegate_blob_object=None,
):
assert callable(get_delegate_blob_object)
if op_attribute.parallel_signature.HasField("op_parallel_desc_symbol_id"):
symbol_id = op_attribute.parallel_signature.op_parallel_desc_symbol_id
op_parallel_desc_sym = symbol_storage.GetSymbol4Id(symbol_id)
assert op_parallel_desc_sym is not None
def DelegateBlobObject4Ibn(ibn):
op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
op_parallel_desc_sym, op_attribute, ibn
)
return get_delegate_blob_object(
bn_in_op2blob_object[ibn], op_arg_parallel_attr
)
op_conf = op_attribute.op_conf
assert op_conf.HasField("scope_symbol_id"), op_conf
scope_symbol = symbol_storage.GetSymbol4Id(op_conf.scope_symbol_id)
job_desc_sym = scope_symbol.job_desc_symbol
op_conf_sym = self._GetOpConfSymbol(op_conf)
op_node_signature_sym = self._GetOpNodeSignatureSymbol(op_attribute)
opkernel_obj = self.GetSharedOpKernelObject4ParallelConfSymbol(
op_parallel_desc_sym
)
const_input_operand_blob_objects = self._GetConstInputOperandBlobObjects(
op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
)
mutable_input_operand_blob_objects = self._GetMutableInputOperandBlobObjects(
op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
)
mut1_operand_blob_objects = self._GetMut1OperandBlobObjects(
op_attribute,
blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
mut2_operand_blob_objects = self._GetMut2OperandBlobObjects(
op_attribute,
blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
is_user_op = op_attribute.op_conf.HasField("user_conf")
instruction_prefix = "User" if is_user_op else "System"
self._StatelessCallOpKernel(
"%s.%sStatelessCallOpKernel" % (stream_tag, instruction_prefix),
op_parallel_desc_sym,
job_desc_sym,
op_conf_sym,
op_node_signature_sym,
opkernel_obj,
const_input_operand_blob_objects,
mutable_input_operand_blob_objects,
mut1_operand_blob_objects,
mut2_operand_blob_objects,
)
    def _StatefulCall(
        self,
        op_attribute,
        opkernel_object,
        bn_in_op2blob_object,
        get_delegate_blob_object,
    ):
        """Build one stateful op-kernel call against *opkernel_object*.

        Collects input/output operand blob objects (outputs are stored into
        *bn_in_op2blob_object*) and appends a CallOpKernel instruction.
        """
        op_parallel_desc_sym = opkernel_object.parallel_desc_symbol

        def DelegateBlobObject4Ibn(ibn):
            # Map each input blob name to a (possibly delegated) blob object.
            op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
                op_parallel_desc_sym, op_attribute, ibn
            )
            return get_delegate_blob_object(
                bn_in_op2blob_object[ibn], op_arg_parallel_attr
            )

        op_node_signature_sym = self._GetOpNodeSignatureSymbol(op_attribute)
        const_input_operand_blob_objects = self._GetConstInputOperandBlobObjects(
            op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
        )
        mutable_input_operand_blob_objects = self._GetMutableInputOperandBlobObjects(
            op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
        )
        mut1_operand_blob_objects = self._GetMut1OperandBlobObjects(
            op_attribute,
            op_parallel_desc_sym,
            bn_in_op2blob_object=bn_in_op2blob_object,
        )
        mut2_operand_blob_objects = self._GetMut2OperandBlobObjects(
            op_attribute,
            op_parallel_desc_sym,
            bn_in_op2blob_object=bn_in_op2blob_object,
        )
        is_user_op = op_attribute.op_conf.HasField("user_conf")
        # Only user ops are supported here; the "System" branch below is
        # therefore unreachable and kept only for symmetry with
        # _StatelessCall.
        assert is_user_op
        instruction_prefix = "" if is_user_op else "System"
        self._StatefulCallOpKernel(
            "%sCallOpKernel" % instruction_prefix,
            op_parallel_desc_sym,
            opkernel_object,
            op_node_signature_sym,
            const_input_operand_blob_objects,
            mutable_input_operand_blob_objects,
            mut1_operand_blob_objects,
            mut2_operand_blob_objects,
        )
    def _CudaHostRegisterBlob(self, blob_object):
        """Emit a CudaHostRegisterBlob instruction pinning *blob_object*'s
        host memory."""
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "CudaHostRegisterBlob"
        instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
        instruction.operand.append(_MutOperand(blob_object.object_id))
        self.instruction_list_.instruction.append(instruction)
    def _CudaHostUnregisterBlob(self, blob_object):
        """Emit a CudaHostUnregisterBlob instruction undoing
        _CudaHostRegisterBlob for *blob_object*."""
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "CudaHostUnregisterBlob"
        instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
        instruction.operand.append(_MutOperand(blob_object.object_id))
        self.instruction_list_.instruction.append(instruction)
def _GetOpConfSymbol(self, op_conf):
serialized_op_conf = op_conf.SerializeToString()
if symbol_storage.HasSymbol4SerializedOpConf(serialized_op_conf):
return symbol_storage.GetSymbol4SerializedOpConf(serialized_op_conf)
symbol_id = self._NewSymbolId4OpConf(op_conf)
symbol = symbol_util.Symbol(symbol_id, op_conf)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedOpConf(serialized_op_conf, symbol)
return symbol
def _GetOpNodeSignatureSymbol(self, op_attribute):
new_op_node_signature = op_attribute_pb.OpNodeSignature()
new_op_node_signature.sbp_signature.CopyFrom(op_attribute.sbp_signature)
new_op_node_signature.mirrored_signature.CopyFrom(
op_attribute.mirrored_signature
)
new_op_node_signature.logical_blob_desc_signature.CopyFrom(
op_attribute.logical_blob_desc_signature
)
new_op_node_signature.batch_axis_signature.CopyFrom(
op_attribute.batch_axis_signature
)
new_op_node_signature.parallel_signature.CopyFrom(
op_attribute.parallel_signature
)
serialized_op_node_signature = new_op_node_signature.SerializeToString()
if symbol_storage.HasSymbol4SerializedOpNodeSignature(
serialized_op_node_signature
):
return symbol_storage.GetSymbol4SerializedOpNodeSignature(
serialized_op_node_signature
)
symbol_id = self._NewSymbolId4OpNodeSignature(new_op_node_signature)
symbol = symbol_util.Symbol(symbol_id, new_op_node_signature)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedOpNodeSignature(
serialized_op_node_signature, symbol
)
return symbol
def _GetConstInputOperandBlobObjects(self, op_attribute, blob_object4ibn=None):
assert callable(blob_object4ibn)
const_input_operand_blob_objects = []
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if ibn2modifier[ibn].is_mutable:
continue
ibn_sym = self.GetSymbol4String(ibn)
in_object = blob_object4ibn(ibn)
const_input_operand_blob_objects.append((ibn_sym, in_object))
return const_input_operand_blob_objects
def _GetMutableInputOperandBlobObjects(self, op_attribute, blob_object4ibn=None):
mutable_input_operand_blob_objects = []
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if not ibn2modifier[ibn].is_mutable:
continue
ibn_sym = self.GetSymbol4String(ibn)
in_object = blob_object4ibn(ibn)
mutable_input_operand_blob_objects.append((ibn_sym, in_object))
return mutable_input_operand_blob_objects
def _GetMut1OperandBlobObjects(
self, op_attribute, parallel_desc_sym, bn_in_op2blob_object={}
):
mut1_operand_blob_objects = []
def GetOutBlobParallelDescSymbol(obn):
parallel_signature = op_attribute.parallel_signature
bn2symbol_id = parallel_signature.bn_in_op2parallel_desc_symbol_id
if obn in bn2symbol_id:
return symbol_storage.GetSymbol4Id(bn2symbol_id[obn])
else:
return parallel_desc_sym
def OutputBns():
obn2modifier = op_attribute.arg_modifier_signature.obn2output_blob_modifier
for obn in op_attribute.output_bns:
if obn2modifier[obn].header_infered_before_compute:
yield obn
for tmp_bn in op_attribute.tmp_bns:
yield tmp_bn
for obn in OutputBns():
obn_sym = self.GetSymbol4String(obn)
op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
GetOutBlobParallelDescSymbol(obn), op_attribute, obn
)
op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
out_blob_object = self._NewBlobObject(
op_arg_parallel_attr, op_arg_blob_attr
)
lbi = op_attribute.arg_signature.bn_in_op2lbi[obn]
bn_in_op2blob_object[obn] = out_blob_object
mut1_operand_blob_objects.append((obn_sym, out_blob_object))
return mut1_operand_blob_objects
def _CheckRefInBlobObjectParallelDesc(
self, op_attribute, op_parallel_desc_sym, bn_in_op2blob_object={}
):
op_conf = op_attribute.op_conf
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if not ibn2modifier[ibn].is_mutable:
continue
ref_blob_object = bn_in_op2blob_object[ibn]
assert op_parallel_desc_sym == ref_blob_object.parallel_desc_symbol, (
"op_conf: %s\n%s\nv.s.\n%s"
% (op_conf, op_parallel_desc_sym, ref_blob_object.parallel_desc_symbol)
)
def _GetMut2OperandBlobObjects(
self, op_attribute, parallel_desc_sym, bn_in_op2blob_object={}
):
mut2_operand_blob_objects = []
def GetOutBlobParallelDescSymbol(obn):
parallel_signature = op_attribute.parallel_signature
bn2symbol_id = parallel_signature.bn_in_op2parallel_desc_symbol_id
if obn in bn2symbol_id:
return symbol_storage.GetSymbol4Id(bn2symbol_id[obn])
else:
return parallel_desc_sym
for obn in op_attribute.output_bns:
obn2modifier = op_attribute.arg_modifier_signature.obn2output_blob_modifier
if obn2modifier[obn].header_infered_before_compute:
continue
obn_sym = self.GetSymbol4String(obn)
op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
GetOutBlobParallelDescSymbol(obn), op_attribute, obn
)
op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
out_blob_object = self._NewBlobObject(
op_arg_parallel_attr, op_arg_blob_attr
)
bn_in_op2blob_object[obn] = out_blob_object
mut2_operand_blob_objects.append((obn_sym, out_blob_object))
return mut2_operand_blob_objects
def _NewBlobObject(self, op_arg_parallel_attr, op_arg_blob_attr):
object_id = self._NewObjectId(op_arg_parallel_attr.parallel_desc_symbol)
return object_util.BlobObject(
object_id=object_id,
op_arg_parallel_attr=op_arg_parallel_attr,
op_arg_blob_attr=op_arg_blob_attr,
release=self.release_object_,
)
    def _NewSymbolId4String(self, string):
        """Allocate a symbol id and emit its InitStringSymbol instruction."""
        symbol_id = self._NewSymbolId()
        self._InitStringSymbol(symbol_id, string)
        return symbol_id
    def _NewSymbolId4ParallelConf(self, parallel_conf):
        """Allocate a symbol id and emit its NewParallelDescSymbol instruction.

        NOTE(review): unlike the other _NewSymbolId4* helpers this takes the
        id from id_generator_ directly instead of going through
        _NewSymbolId() (so no NewSymbol instruction is emitted) — presumably
        because NewParallelDescSymbol creates the symbol itself; confirm.
        """
        symbol_id = self.id_generator_.NewSymbolId()
        self._NewParallelConfSymbol(symbol_id, parallel_conf)
        return symbol_id
    def _NewSymbolId4Scope(self, scope_proto):
        """Allocate a symbol id, write it into *scope_proto*, and emit the
        InitScopeSymbol instruction."""
        symbol_id = self._NewSymbolId()
        # The scope proto carries its own symbol id.
        scope_proto.symbol_id = symbol_id
        self._NewScopeSymbol(scope_proto)
        return symbol_id
    def _NewSymbolId4JobConf(self, job_conf):
        """Allocate a symbol id and emit its InitJobDescSymbol instruction."""
        symbol_id = self._NewSymbolId()
        self._InitJobConfSymbol(symbol_id, job_conf)
        return symbol_id
    def _NewSymbolId4OpConf(self, op_conf):
        """Allocate a symbol id and emit its InitOperatorConfSymbol instruction."""
        symbol_id = self._NewSymbolId()
        self._InitOpConfSymbol(symbol_id, op_conf)
        return symbol_id
    def _NewSymbolId4OpNodeSignature(self, op_node_signature):
        """Allocate a symbol id and emit its InitOpNodeSignatureDescSymbol
        instruction."""
        symbol_id = self._NewSymbolId()
        self._InitOpNodeSignatureDescSymbol(symbol_id, op_node_signature)
        return symbol_id
    def _NewSharedOpKernelObjectId4ParallelConfSymbolId(self, parallel_desc_sym):
        """Allocate the object id for a placement's shared op-kernel object."""
        return self._NewObjectId(parallel_desc_sym)
    def _StatelessCallOpKernel(
        self,
        instr_name,
        parallel_desc_sym,
        job_desc_sym,
        op_conf_sym,
        op_node_signature_sym,
        shared_opkernel_obj,
        const_input_operand_blob_objects,
        mutable_input_operand_blob_objects,
        mut1_operand_blob_objects,
        mut2_operand_blob_objects,
    ):
        """Append one "<device_tag>.<instr_name>" instruction.

        Operand layout (order is the instruction's wire contract): header
        symbols + mut opkernel object, then four separator-delimited
        sections — const inputs, mutable inputs, mut1 outputs, mut2 outputs
        — each section listing all name symbols before all object operands.
        """
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "%s.%s" % (
            parallel_desc_sym.device_tag,
            instr_name,
        )
        instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
        instruction.operand.append(_SymbolOperand(job_desc_sym.symbol_id))
        instruction.operand.append(_SymbolOperand(op_conf_sym.symbol_id))
        instruction.operand.append(_SymbolOperand(op_node_signature_sym.symbol_id))
        instruction.operand.append(_MutOperand(shared_opkernel_obj.object_id))
        instruction.operand.append(_OperandSeparator())
        for ibn_sym, _ in const_input_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(ibn_sym.symbol_id))
        for _, blob_object in const_input_operand_blob_objects:
            instruction.operand.append(_ConstOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        for ibn_sym, _ in mutable_input_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(ibn_sym.symbol_id))
        for _, blob_object in mutable_input_operand_blob_objects:
            instruction.operand.append(_MutOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        for obn_sym, _ in mut1_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(obn_sym.symbol_id))
        for _, blob_object in mut1_operand_blob_objects:
            instruction.operand.append(_MutOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        for obn_sym, _ in mut2_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(obn_sym.symbol_id))
        for _, blob_object in mut2_operand_blob_objects:
            instruction.operand.append(_Mut2Operand(blob_object.object_id))
        self.instruction_list_.instruction.append(instruction)
    def _StatefulCallOpKernel(
        self,
        instr_name,
        parallel_desc_sym,
        opkernel_object,
        op_node_signature_sym,
        const_input_operand_blob_objects,
        mutable_input_operand_blob_objects,
        mut1_operand_blob_objects,
        mut2_operand_blob_objects,
    ):
        """Append one "<device_tag>.<instr_name>" stateful-call instruction.

        Same separator-delimited operand layout as _StatelessCallOpKernel,
        but headed by the mut opkernel object + node-signature symbol
        instead of job/op-conf symbols.
        """
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "%s.%s" % (
            parallel_desc_sym.device_tag,
            instr_name,
        )
        instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
        instruction.operand.append(_MutOperand(opkernel_object.object_id))
        instruction.operand.append(_SymbolOperand(op_node_signature_sym.symbol_id))
        instruction.operand.append(_OperandSeparator())
        for ibn_sym, _ in const_input_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(ibn_sym.symbol_id))
        for _, blob_object in const_input_operand_blob_objects:
            instruction.operand.append(_ConstOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        for ibn_sym, _ in mutable_input_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(ibn_sym.symbol_id))
        for _, blob_object in mutable_input_operand_blob_objects:
            instruction.operand.append(_MutOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        for obn_sym, _ in mut1_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(obn_sym.symbol_id))
        for _, blob_object in mut1_operand_blob_objects:
            instruction.operand.append(_MutOperand(blob_object.object_id))
        instruction.operand.append(_OperandSeparator())
        for obn_sym, _ in mut2_operand_blob_objects:
            instruction.operand.append(_SymbolOperand(obn_sym.symbol_id))
        for _, blob_object in mut2_operand_blob_objects:
            instruction.operand.append(_Mut2Operand(blob_object.object_id))
        self.instruction_list_.instruction.append(instruction)
def _NewSymbolId(self):
symbol_id = self.id_generator_.NewSymbolId()
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "NewSymbol"
instruction.operand.append(_Int64Operand(symbol_id))
self.instruction_list_.instruction.append(instruction)
return symbol_id
    def _NewObjectId(self, parallel_desc_sym):
        """Reserve a fresh object id on *parallel_desc_sym*, emit the
        NewObject instruction for it, and return the id."""
        object_id = self.id_generator_.NewObjectId()
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "NewObject"
        instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
        instruction.operand.append(_Int64Operand(object_id))
        self.instruction_list_.instruction.append(instruction)
        return object_id
    def _LazyReference(self, blob_object, interface_op_name):
        """Emit a "<device_tag>.LazyReference" instruction binding
        *blob_object* to a lazily-evaluated interface blob.

        NOTE(review): the *interface_op_name* parameter is not used — the
        symbol operand is built from the blob attr's logical_blob_name
        instead; confirm they are always expected to coincide.
        """
        instruction = instr_util.InstructionProto()
        device_tag = blob_object.parallel_desc_symbol.device_tag
        instruction.instr_type_name = "{}.LazyReference".format(device_tag)
        instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
        instruction.operand.append(_MutOperand(blob_object.object_id))
        interface_op_name_sym = self.GetSymbol4String(
            blob_object.op_arg_blob_attr.logical_blob_name
        )
        instruction.operand.append(_SymbolOperand(interface_op_name_sym.symbol_id))
        self.instruction_list_.instruction.append(instruction)
    def _BroadcastObjectReference(self, sole_mirrored_object, parallel_desc_sym):
        """Reserve a new object id and emit a BroadcastObjectReference
        instruction copying *sole_mirrored_object*'s reference onto
        *parallel_desc_sym*; return the new object id."""
        object_id = self.id_generator_.NewObjectId()
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "BroadcastObjectReference"
        instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
        instruction.operand.append(_Int64Operand(object_id))
        instruction.operand.append(_Int64Operand(sole_mirrored_object.object_id))
        self.instruction_list_.instruction.append(instruction)
        return object_id
    def _InitStringSymbol(self, symbol_id, string):
        """Emit an InitStringSymbol instruction and the matching EagerSymbol
        carrying *string* for *symbol_id*."""
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "InitStringSymbol"
        instruction.operand.append(_InitSymbolOperand(symbol_id))
        self.instruction_list_.instruction.append(instruction)
        eager_symbol = eager_symbol_util.EagerSymbol()
        eager_symbol.symbol_id = symbol_id
        eager_symbol.string_symbol = string
        self.eager_symbol_list_.eager_symbol.append(eager_symbol)
    def _NewParallelConfSymbol(self, symbol_id, parallel_conf):
        """Emit a NewParallelDescSymbol instruction and the matching
        EagerSymbol carrying *parallel_conf* for *symbol_id*."""
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "NewParallelDescSymbol"
        instruction.operand.append(_Int64Operand(symbol_id))
        self.instruction_list_.instruction.append(instruction)
        eager_symbol = eager_symbol_util.EagerSymbol()
        eager_symbol.symbol_id = symbol_id
        eager_symbol.parallel_conf_symbol.CopyFrom(parallel_conf)
        self.eager_symbol_list_.eager_symbol.append(eager_symbol)
    def _NewScopeSymbol(self, scope_proto):
        """Emit an InitScopeSymbol instruction and the matching EagerSymbol
        carrying *scope_proto* (keyed by scope_proto.symbol_id)."""
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "InitScopeSymbol"
        instruction.operand.append(_InitSymbolOperand(scope_proto.symbol_id))
        self.instruction_list_.instruction.append(instruction)
        eager_symbol = eager_symbol_util.EagerSymbol()
        eager_symbol.symbol_id = scope_proto.symbol_id
        eager_symbol.scope_symbol.CopyFrom(scope_proto)
        self.eager_symbol_list_.eager_symbol.append(eager_symbol)
    def _InitJobConfSymbol(self, symbol_id, job_conf):
        """Emit an InitJobDescSymbol instruction and the matching EagerSymbol
        carrying *job_conf* for *symbol_id*."""
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "InitJobDescSymbol"
        instruction.operand.append(_InitSymbolOperand(symbol_id))
        self.instruction_list_.instruction.append(instruction)
        eager_symbol = eager_symbol_util.EagerSymbol()
        eager_symbol.symbol_id = symbol_id
        eager_symbol.job_conf_symbol.CopyFrom(job_conf)
        self.eager_symbol_list_.eager_symbol.append(eager_symbol)
    def _InitOpConfSymbol(self, symbol_id, op_conf):
        """Emit an InitOperatorConfSymbol instruction and the matching
        EagerSymbol carrying *op_conf* for *symbol_id*."""
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "InitOperatorConfSymbol"
        instruction.operand.append(_InitSymbolOperand(symbol_id))
        self.instruction_list_.instruction.append(instruction)
        eager_symbol = eager_symbol_util.EagerSymbol()
        eager_symbol.symbol_id = symbol_id
        eager_symbol.op_conf_symbol.CopyFrom(op_conf)
        self.eager_symbol_list_.eager_symbol.append(eager_symbol)
    def _InitOpNodeSignatureDescSymbol(self, symbol_id, op_node_signature):
        """Emit an InitOpNodeSignatureDescSymbol instruction and the matching
        EagerSymbol carrying *op_node_signature* for *symbol_id*."""
        instruction = instr_util.InstructionProto()
        instruction.instr_type_name = "InitOpNodeSignatureDescSymbol"
        instruction.operand.append(_InitSymbolOperand(symbol_id))
        self.instruction_list_.instruction.append(instruction)
        eager_symbol = eager_symbol_util.EagerSymbol()
        eager_symbol.symbol_id = symbol_id
        eager_symbol.op_node_signature_symbol.CopyFrom(op_node_signature)
        self.eager_symbol_list_.eager_symbol.append(eager_symbol)
    def _FetchBlob(self, instruction_name, blob_object, fetcher):
        """Emit a "<device_tag>.<instruction_name>" fetch instruction whose
        registered callback *fetcher* receives the fetched data."""
        unique_callback_id = python_callback.GetIdForRegisteredCallback(fetcher)
        instruction = instr_util.InstructionProto()
        device_tag = blob_object.parallel_desc_symbol.device_tag
        instruction.instr_type_name = "%s.%s" % (device_tag, instruction_name)
        instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
        instruction.operand.append(_ConstOperand(blob_object.object_id))
        instruction.operand.append(_Int64Operand(unique_callback_id))
        self.instruction_list_.instruction.append(instruction)
    def FeedBlob(self, blob_object, feeder):
        """Emit a "<device_tag>.FeedBlob" instruction whose registered
        callback *feeder* supplies the blob's data (blob is a mut2 operand)."""
        unique_callback_id = python_callback.GetIdForRegisteredCallback(feeder)
        instruction = instr_util.InstructionProto()
        device_tag = blob_object.parallel_desc_symbol.device_tag
        instruction.instr_type_name = "%s.%s" % (device_tag, "FeedBlob")
        instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
        instruction.operand.append(_Mut2Operand(blob_object.object_id))
        instruction.operand.append(_Int64Operand(unique_callback_id))
        self.instruction_list_.instruction.append(instruction)
def _TryClearObject(self, obj):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "TryClearObject"
instruction.parallel_desc_symbol_id = obj.parallel_desc_symbol.symbol_id
instruction.operand.append(_MutOperand(obj.object_id))
self.instruction_list_.instruction.append(instruction)
def _DeleteObject(self, blob_object):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "DeleteObject"
instruction.parallel_desc_symbol_id = blob_object.parallel_desc_symbol.symbol_id
instruction.operand.append(_DelObjectOperand(blob_object.object_id))
self.instruction_list_.instruction.append(instruction)
def _ReplaceMirrored(self, parallel_desc_sym, lhs_objects, rhs_objects):
instruction = instr_util.InstructionProto()
instruction.instr_type_name = "ReplaceMirrored"
instruction.parallel_desc_symbol_id = parallel_desc_sym.symbol_id
for lhs_object in lhs_objects:
instruction.operand.append(_Int64Operand(lhs_object.object_id))
instruction.operand.append(_OperandSeparator())
for rhs_object in rhs_objects:
instruction.operand.append(_Int64Operand(rhs_object.object_id))
self.instruction_list_.instruction.append(instruction)
def _SymbolOperand(val):
    """Build an operand referencing an existing (sole-mirrored) symbol."""
    op = instr_util.InstructionOperandProto()
    _SetSoleMirroredOperand(op.symbol_operand, val)
    return op
def _InitSymbolOperand(val):
    """Build an operand for initializing a new (sole-mirrored) symbol."""
    op = instr_util.InstructionOperandProto()
    _SetSoleMirroredOperand(op.init_symbol_operand, val)
    return op
def _ConstOperand(val):
    """Build a read-only (const) operand on the current global device."""
    op = instr_util.InstructionOperandProto()
    _SetMirroredOperand(op.const_operand, val)
    return op
def _MutOperand(val):
    """Build a mutable operand on the current global device."""
    op = instr_util.InstructionOperandProto()
    _SetMirroredOperand(op.mut_operand, val)
    return op
def _Mut2Operand(val):
    """Build a mut2-class mutable operand on the current global device."""
    op = instr_util.InstructionOperandProto()
    _SetMirroredOperand(op.mut2_operand, val)
    return op
def _DelObjectOperand(val):
    """Build a mutable operand addressing ALL mirrored copies of the object
    (used for deletion)."""
    op = instr_util.InstructionOperandProto()
    _SetAllMirroredOperand(op.mut_operand, val)
    return op
def _Int64Operand(val):
    """Build an operand carrying a plain int64 value."""
    op = instr_util.InstructionOperandProto()
    op.int64_operand = val
    return op
def _Uint64Operand(val):
    """Build an operand carrying a plain uint64 value."""
    op = instr_util.InstructionOperandProto()
    op.uint64_operand = val
    return op
def _OperandSeparator():
    """Build a separator operand (delimits operand groups, e.g. lhs | rhs)."""
    op = instr_util.InstructionOperandProto()
    op.separator.SetInParent()
    return op
def _SetMirroredOperand(operand, val):
    """Point `operand` at logical object `val`, scoped to the current
    global device id."""
    operand.logical_object_id = val
    operand.current_global_device_id.SetInParent()
def _SetSoleMirroredOperand(operand, val):
    """Point `operand` at logical object `val` as the sole mirrored object."""
    operand.logical_object_id = val
    operand.sole_mirrored_object.SetInParent()
def _SetAllMirroredOperand(operand, val):
    """Point `operand` at ALL mirrored copies of logical object `val`."""
    operand.logical_object_id = val
    operand.all_mirrored_object.SetInParent()
def _FindOrCreateDelegateBlobObject(builder, x_blob_object, op_arg_parallel_attr):
    """Return x_blob_object unchanged when its parallel attribute already
    matches; otherwise return a cached boxed delegate carrying the
    requested attribute (boxing is performed at most once per attribute)."""
    if x_blob_object.op_arg_parallel_attr == op_arg_parallel_attr:
        return x_blob_object

    def _Boxing(blob_object, parallel_attr):
        # Materializes the delegate on first use for this attribute.
        return boxing_util.BoxingTo(builder, blob_object, parallel_attr)

    cache = blob_cache_util.FindOrCreateBlobCache(x_blob_object)
    return cache.GetCachedDelegateBlobObject(op_arg_parallel_attr, _Boxing)
def _GetOpConfBlobNameAttr(pb_message, field):
    """Return the blob-name attribute `field` of a protobuf message.

    `field` is either a plain attribute name, or a repeated-field access
    spelled "<name>_<index>" (e.g. "out_2" -> pb_message.out[2]).

    :raises AssertionError: if `field` neither names an attribute nor
        parses as "<repeated_field>_<index>" with an in-range index.
    """
    if hasattr(pb_message, field):
        return getattr(pb_message, field)
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern CPython, an error in the future).
    m = re.search(r"_(\d+)$", field)
    assert m is not None
    blob_name = field[0 : -len(m.group(0))]
    index = int(m.group(1))
    assert hasattr(pb_message, blob_name), (pb_message, blob_name)
    repeated_field = getattr(pb_message, blob_name)
    assert 0 <= index < len(repeated_field)
    return repeated_field[index]
def _ReleaseLogicalObject(obj):
    """Delete `obj` through an instruction built under the logical runner."""
    def BuildDelete(builder):
        builder.DeleteObject(obj)
    LogicalRun(BuildDelete)
def _ReleasePhysicalObject(obj):
    """Delete `obj` through an instruction built under the physical runner."""
    def BuildDelete(builder):
        builder.DeleteObject(obj)
    PhysicalRun(BuildDelete)
| true | true |
f7fe33f7e1b808437310c413e4e46ab5f213e9bb | 7,039 | py | Python | examples/docs/simple_bot_mult_res.py | eirrgang/radical.pilot | ceccd1867dd172935d602ff4c33a5ed4467e0dc8 | [
"MIT"
] | 1 | 2021-11-07T04:51:30.000Z | 2021-11-07T04:51:30.000Z | examples/docs/simple_bot_mult_res.py | eirrgang/radical.pilot | ceccd1867dd172935d602ff4c33a5ed4467e0dc8 | [
"MIT"
] | null | null | null | examples/docs/simple_bot_mult_res.py | eirrgang/radical.pilot | ceccd1867dd172935d602ff4c33a5ed4467e0dc8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
__copyright__ = "Copyright 2014-2015, http://radical.rutgers.edu"
__license__ = "MIT"
import sys
import radical.pilot as rp
""" DESCRIPTION: Tutorial 1: A Simple Workload consisting of a Bag-of-Tasks
submitted to multiple machines
"""
# READ: The RADICAL-Pilot documentation:
# https://radicalpilot.readthedocs.io/en/stable/
#
# Try running this example with RADICAL_PILOT_VERBOSE=debug set if
# you want to see what happens behind the scenes!
# ------------------------------------------------------------------------------
#
def pilot_state_cb(pilot, state):
    """Pilot-state callback: log every state change and terminate the
    program (exit code 1) when the pilot has failed."""
    if pilot:
        print("[Callback]: Pilot '%s' state: %s." % (pilot.uid, state))
        if state == rp.FAILED:
            sys.exit(1)
# ------------------------------------------------------------------------------
#
def task_state_cb(task, state):
    """Task-state callback: log every state change; on a failed task,
    print its stderr and terminate the program (exit code 2).

    NOTE: the original declared `global CNT`, but CNT is never assigned
    or read anywhere, so the dead declaration has been removed.
    """
    if not task:
        return
    print("[Callback]: task %s on %s: %s." % (task.uid, task.pilot_id, state))
    if state == rp.FAILED:
        print("stderr: %s" % task.stderr)
        sys.exit(2)
# ------------------------------------------------------------------------------
#
if __name__ == "__main__":
    # An optional first command-line argument names the RP session.
    if len(sys.argv) > 1:
        session_name = sys.argv[1]
    else:
        session_name = None
    # Create a new session. No need to try/except this: if session creation
    # fails, there is not much we can do anyways...
    session = rp.Session(name=session_name)
    print("session id: %s" % session.uid)
    # all other pilot code is now tried/excepted. If an exception is caught, we
    # can rely on the session object to exist and be valid, and we can thus tear
    # the whole RP stack down via a 'session.close()' call in the 'finally'
    # clause...
    try:
        # ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------
        #
        # Change the user name below if you are using a remote resource
        # and your username on that resource is different from the username
        # on your local machine.
        #
        c = rp.Context('ssh')
        c.user_id = "username"
        # c.user_pass = "PutYourPasswordHere"
        session.add_context(c)
        # Add a Pilot Manager. Pilot managers manage one or more Pilots.
        print("Initializing Pilot Manager ...")
        pmgr = rp.PilotManager(session=session)
        # Register our callback with the PilotManager. This callback will get
        # called every time any of the pilots managed by the PilotManager
        # change their state.
        pmgr.register_callback(pilot_state_cb)
        # ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------
        #
        # If you want to run this example on XSEDE Gordon and Comet, you have
        # to add your allocation ID by setting the project attribute for each
        # pilot description to it.
        #
        # A list of preconfigured resources can be found at:
        # https://radicalpilot.readthedocs.io/en/stable/ \
        # machconf.html#preconfigured-resources
        #
        # ----- CHANGE THIS -- CHANGE THIS -- CHANGE THIS -- CHANGE THIS ------
        # The pilot_list will contain the description of the pilot that will be
        # submitted
        pilot_list = list()
        # Create the description of the first pilot and add it to the list
        pdesc = rp.PilotDescription ()
        pdesc.resource = "xsede.gordon"
        pdesc.runtime = 10
        pdesc.cores = 1
        pdesc.cleanup = True
        pdesc.project = ''
        pilot_list.append(pdesc)
        # Create the description of the second pilot and add it to the list
        pdesc2 = rp.PilotDescription ()
        pdesc2.resource = "xsede.comet"
        pdesc2.runtime = 10
        pdesc2.cores = 1
        pdesc2.cleanup = True
        pdesc2.project = ''
        pilot_list.append(pdesc2)
        # Continue adding pilots by creating a new description and appending it
        # to the list.
        # Submit the pilot list to the Pilot Manager. Actually all the pilots are
        # submitted to the Pilot Manager at once.
        print("Submitting Pilots to Pilot Manager ...")
        pilots = pmgr.submit_pilots(pilot_list)
        # Combine the Pilot, the Tasks and a scheduler via
        # a TaskManager object. The scheduler that supports multi-pilot sessions
        # is Round Robin. Direct Submission does not.
        print("Initializing Task Manager ...")
        tmgr = rp.TaskManager (session=session,
                               scheduler=rp.SCHEDULER_ROUND_ROBIN)
        # Register our callback with the TaskManager. This callback will get
        # called every time any of the tasks managed by the TaskManager
        # change their state.
        tmgr.register_callback(task_state_cb)
        # Add the created Pilot to the TaskManager.
        print("Registering Pilots with Task Manager ...")
        tmgr.add_pilots(pilots)
        NUMBER_JOBS = 64  # the total number of tasks to run
        # submit tasks to pilot job
        taskdesc_list = []
        for i in range(NUMBER_JOBS):
            # -------- BEGIN USER DEFINED Task DESCRIPTION --------- #
            taskdesc = rp.TaskDescription()
            taskdesc.environment = {'task_NO': i}
            taskdesc.executable = "/bin/echo"
            taskdesc.arguments = ['I am Task number $task_NO from $HOSTNAME']
            taskdesc.cores = 1
            # -------- END USER DEFINED Task DESCRIPTION --------- #
            taskdesc_list.append(taskdesc)
        # Submit the previously created Task descriptions to the
        # PilotManager. This will trigger the selected scheduler to start
        # assigning Tasks to the Pilots.
        print("Submit Tasks to Task Manager ...")
        task_set = tmgr.submit_tasks (taskdesc_list)
        print("Waiting for tasks to complete ...")
        tmgr.wait_tasks()
        print("All tasks completed successfully!")
    except Exception as e:
        # Something unexpected happened in the pilot code above
        print("caught Exception: %s" % e)
        raise
    except (KeyboardInterrupt, SystemExit) as e:
        # the callback called sys.exit(), and we can here catch the
        # corresponding KeyboardInterrupt exception for shutdown. We also catch
        # SystemExit (which gets raised if the main thread exits for some other
        # reason).
        print("need to exit now: %s" % e)
    finally:
        # always clean up the session, no matter if we caught an exception or
        # not.
        print("closing session")
        session.close ()
        # the above is equivalent to
        #
        # session.close (cleanup=True, terminate=True)
        #
        # it will thus both clean out the session's database record, and kill
        # all remaining pilots (none in our example).
| 34.504902 | 81 | 0.588862 |
__copyright__ = "Copyright 2014-2015, http://radical.rutgers.edu"
__license__ = "MIT"
import sys
import radical.pilot as rp
def pilot_state_cb (pilot, state):
if not pilot:
return
print("[Callback]: Pilot '%s' state: %s." % (pilot.uid, state))
if state == rp.FAILED:
sys.exit (1)
def task_state_cb (task, state):
if not task:
return
global CNT
print("[Callback]: task %s on %s: %s." % (task.uid, task.pilot_id, state))
if state == rp.FAILED:
print("stderr: %s" % task.stderr)
sys.exit(2)
if __name__ == "__main__":
if len(sys.argv) > 1:
session_name = sys.argv[1]
else:
session_name = None
session = rp.Session(name=session_name)
print("session id: %s" % session.uid)
try:
c = rp.Context('ssh')
c.user_id = "username"
session.add_context(c)
print("Initializing Pilot Manager ...")
pmgr = rp.PilotManager(session=session)
pmgr.register_callback(pilot_state_cb)
pilot_list = list()
pdesc = rp.PilotDescription ()
pdesc.resource = "xsede.gordon"
pdesc.runtime = 10
pdesc.cores = 1
pdesc.cleanup = True
pdesc.project = ''
pilot_list.append(pdesc)
pdesc2 = rp.PilotDescription ()
pdesc2.resource = "xsede.comet"
pdesc2.runtime = 10
pdesc2.cores = 1
pdesc2.cleanup = True
pdesc2.project = ''
pilot_list.append(pdesc2)
print("Submitting Pilots to Pilot Manager ...")
pilots = pmgr.submit_pilots(pilot_list)
print("Initializing Task Manager ...")
tmgr = rp.TaskManager (session=session,
scheduler=rp.SCHEDULER_ROUND_ROBIN)
tmgr.register_callback(task_state_cb)
print("Registering Pilots with Task Manager ...")
tmgr.add_pilots(pilots)
NUMBER_JOBS = 64
taskdesc_list = []
for i in range(NUMBER_JOBS):
taskdesc = rp.TaskDescription()
taskdesc.environment = {'task_NO': i}
taskdesc.executable = "/bin/echo"
taskdesc.arguments = ['I am Task number $task_NO from $HOSTNAME']
taskdesc.cores = 1
taskdesc_list.append(taskdesc)
print("Submit Tasks to Task Manager ...")
task_set = tmgr.submit_tasks (taskdesc_list)
print("Waiting for tasks to complete ...")
tmgr.wait_tasks()
print("All tasks completed successfully!")
except Exception as e:
print("caught Exception: %s" % e)
raise
except (KeyboardInterrupt, SystemExit) as e:
print("need to exit now: %s" % e)
finally:
print("closing session")
session.close ()
# all remaining pilots (none in our example).
# ------------------------------------------------------------------------------
| true | true |
f7fe349e37116aa645dec464fbddf1eedd53ec12 | 39,111 | py | Python | src/practice_problem1.py | fultoncn/14-Exam2Practice | 52206d105c7368a5ca548a3fe4afe896e2ac09d4 | [
"MIT"
] | null | null | null | src/practice_problem1.py | fultoncn/14-Exam2Practice | 52206d105c7368a5ca548a3fe4afe896e2ac09d4 | [
"MIT"
] | null | null | null | src/practice_problem1.py | fultoncn/14-Exam2Practice | 52206d105c7368a5ca548a3fe4afe896e2ac09d4 | [
"MIT"
] | null | null | null | """
PRACTICE Test 2, practice_problem 1.
This problem provides practice at:
*** IMPLEMENTING CLASSES. ***
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Colleen Fulton.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
########################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
########################################################################
import time
import sys
def main():
    """
    Calls the TEST functions in this module.

    Tests for not-yet-implemented Box methods are deliberately commented
    out; un-comment each call as you implement the corresponding method.
    """
    ####################################################################
    # UN-comment tests as you work the problems.
    ####################################################################
    run_test_init()
    run_test_append_string()
    # run_test_double()
    # run_test_shrink()
    # run_test_double_then_shrink()
    # run_test_reset()
    # run_test_steal()
    # run_test_get_history()
    # run_test_combined_box()
########################################################################
# The Box class (and its methods) begins here.
########################################################################
class Box(object):
    """
    A Box has:
      -- CONTENTS, which is a string, and
      -- VOLUME, which is a non-negative integer.
    The length of the Box's CONTENTS can never exceed the Box's VOLUME.

    (This version implements all the exercise's TODO methods and fixes
    the half-written append_string, which was a SyntaxError.)
    """

    def __init__(self, contents, volume):
        """
        Stores the given contents and volume in self.contents and
        self.volume, EXCEPT that contents longer than the volume are
        rejected: self.contents then becomes the empty string.
        Also remembers the as-constructed state (for reset) and starts
        an empty history list (for get_history).

        Type hints:
          :type contents: str
          :type volume: int
        """
        self.volume = volume
        if len(contents) > volume:
            contents = ''  # reject contents that do not fit
        self.contents = contents
        # As-constructed state, restored by reset().
        self._initial_contents = contents
        self._initial_volume = volume
        # Contents recorded just before each call to reset().
        self._history = []

    def append_string(self, additional_contents):
        """
        Appends as much of additional_contents as fits onto this Box's
        contents (limited by self.volume).  Returns the substring of
        additional_contents that did NOT fit ('' if it all fit).

        Type hints:
          :type additional_contents: str
        """
        space = self.volume - len(self.contents)  # always >= 0
        self.contents = self.contents + additional_contents[:space]
        return additional_contents[space:]

    def double(self):
        """
        Appends a copy of this Box's contents to itself, clipped to fit.
        Returns whatever part of the doubled contents did not fit.
        """
        return self.append_string(self.contents)

    def shrink(self, new_volume):
        """
        Sets this Box's volume to new_volume, clipping the contents if
        needed.  Returns the discarded portion of the contents ('' if
        nothing had to be discarded).

        Type hints:
          :type new_volume: int
        """
        discarded = self.contents[new_volume:]
        self.contents = self.contents[:new_volume]
        self.volume = new_volume
        return discarded

    def double_then_shrink(self, new_volume):
        """
        Doubles this Box (see double), then shrinks it to new_volume
        (see shrink).  Returns the TOTAL number of characters discarded
        by the two steps combined.

        Type hints:
          :type new_volume: int
        """
        return len(self.double()) + len(self.shrink(new_volume))

    def reset(self):
        """
        Records the current contents in this Box's history, then restores
        the contents and volume this Box had when it was constructed.
        """
        self._history.append(self.contents)
        self.contents = self._initial_contents
        self.volume = self._initial_volume

    def steal(self, other_box):
        """
        Moves as much of other_box's contents as fits onto the end of
        this Box's contents; other_box keeps only the part that did
        not fit.

        Type hints:
          :type other_box: Box
        """
        other_box.contents = self.append_string(other_box.contents)

    def get_history(self):
        """
        Returns the list of this Box's contents as recorded just before
        each call to reset (empty if reset was never called).
        """
        return self._history

    def combined_box(self, other_box):
        """
        Returns a NEW Box whose contents is this Box's contents followed
        by other_box's contents, and whose volume is the sum of the two
        volumes.  (The combined contents always fit, since each Box's
        contents fit within its own volume.)  Neither Box is modified.

        Type hints:
          :type other_box: Box
        """
        return Box(self.contents + other_box.contents,
                   self.volume + other_box.volume)
########################################################################
# The TEST functions for the Box class begin here.
########################################################################
def run_test_init():
    """ Tests the __init__ method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the __init__ method of the Box class.')
    print('-----------------------------------------------------------')
    # Each case: (contents given to the constructor, volume, contents
    # expected after construction).  The last case's contents are too
    # big for the volume, so they should be "rejected".
    cases = [('Good morning', 20, 'Good morning'),
             ('Good morning', 12, 'Good morning'),
             ('Good morning', 11, '')]
    for given_contents, volume, expected_contents in cases:
        box = Box(given_contents, volume)
        print("Expected:", expected_contents, volume)
        print("Actual: ", box.contents, box.volume)
        if (expected_contents == box.contents) and (volume == box.volume):
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()
def run_test_append_string():
    """ Tests the append_string method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the append_string method of the Box class.')
    print('-----------------------------------------------------------')
    # Both cases append 'Goodbye' to a Box('Hello', volume).  Each case:
    # (volume, expected contents afterwards, expected returned clipping).
    cases = [(20, 'HelloGoodbye', ''),
             (8, 'HelloGoo', 'dbye')]
    for volume, expected_contents, expected_clipped in cases:
        box = Box('Hello', volume)
        clipped = box.append_string('Goodbye')
        print("Expected:", expected_contents, volume, expected_clipped)
        print("Actual: ", box.contents, box.volume, clipped)
        if ((expected_contents == box.contents) and
                (volume == box.volume) and
                (expected_clipped == clipped)):
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()
def run_test_double():
    """ Tests the double method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the double method of the Box class.')
    print('-----------------------------------------------------------')
    # Tests 1-4 double a Box('Good morning', volume).  Each case:
    # (volume, expected contents afterwards, expected returned clipping).
    cases = [(30, 'Good morningGood morning', ''),
             (24, 'Good morningGood morning', ''),
             (23, 'Good morningGood mornin', 'g'),
             (20, 'Good morningGood mor', 'ning')]
    for volume, expected_contents, expected_clipped in cases:
        box = Box('Good morning', volume)
        clipped = box.double()
        print("Expected:", expected_contents, volume, expected_clipped)
        print("Actual: ", box.contents, box.volume, clipped)
        if ((expected_contents == box.contents) and
                (volume == box.volume) and
                (expected_clipped == clipped)):
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()
    # Test 5: two successive doubles, with plenty of room for both.
    expected_contents = 'Good morning' * 4
    expected_volume = 100
    box = Box('Good morning', expected_volume)
    box.double()
    box.double()
    print("Expected:", expected_contents, expected_volume)
    print("Actual: ", box.contents, box.volume)
    if (expected_contents == box.contents) and (expected_volume == box.volume):
        print("Test passed SUCCESSFULLY!")
    else:
        print_failure_message()
    print()
def run_test_shrink():
    """ Tests the shrink method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the shrink method of the Box class.')
    print('-----------------------------------------------------------')

    def verify(box, clipped, expected_contents, expected_volume,
               expected_clipped):
        # Report one test case: expected vs. actual, then a verdict.
        print("Expected:", expected_contents, expected_volume,
              expected_clipped)
        print("Actual:  ", box.contents, box.volume, clipped)
        passed = (box.contents == expected_contents
                  and box.volume == expected_volume
                  and clipped == expected_clipped)
        if passed:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: new volume is smaller than the contents, so shrinking clips.
    box = Box('Good morning', 20)
    verify(box, box.shrink(4), 'Good', 4, ' morning')

    # Test 2: new volume still holds the contents, nothing is clipped.
    box = Box('Good morning', 30)
    verify(box, box.shrink(15), 'Good morning', 15, '')
def run_test_double_then_shrink():
    """ Tests the double_then_shrink method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the double_then_shrink method of the Box class.')
    print('-----------------------------------------------------------')

    def verify(box, number_clipped, expected_contents, expected_volume,
               expected_clipped):
        # Report one test case: expected vs. actual, then a verdict.
        print("Expected:", expected_contents, expected_volume,
              expected_clipped)
        print("Actual:  ", box.contents, box.volume, number_clipped)
        passed = (box.contents == expected_contents
                  and box.volume == expected_volume
                  and number_clipped == expected_clipped)
        if passed:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: nothing is clipped by the doubling or by the shrinking.
    box = Box('Goodbye', 20)
    verify(box, box.double_then_shrink(17), 'GoodbyeGoodbye', 17, 0)

    # Test 2: 4 characters clipped by the doubling, none by the shrinking.
    box = Box('Goodbye', 10)
    verify(box, box.double_then_shrink(17), 'GoodbyeGoo', 17, 4)

    # Test 3: none clipped by the doubling, 1 clipped by the shrinking.
    box = Box('Goodbye', 20)
    verify(box, box.double_then_shrink(13), 'GoodbyeGoodby', 13, 1)

    # Test 4: 4 clipped by the doubling plus 7 more by the shrinking.
    box = Box('Goodbye', 10)
    verify(box, box.double_then_shrink(3), 'Goo', 3, 11)
def run_test_reset():
    """ Tests the reset method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the reset method of the Box class.')
    print('-----------------------------------------------------------')

    def verify(box, expected_contents, expected_volume):
        # Report one test case: expected vs. actual, then a verdict.
        print("Expected:", expected_contents, expected_volume)
        print("Actual:  ", box.contents, box.volume)
        passed = (box.contents == expected_contents
                  and box.volume == expected_volume)
        if passed:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: the original contents fit in the original volume,
    # so reset restores both.
    box = Box('Good morning', 100)
    box.double()
    box.double_then_shrink(2)
    box.reset()
    verify(box, 'Good morning', 100)

    # Test 2: the original contents never fit in the original volume,
    # so reset restores an empty Box of that volume.
    box = Box('Good morning', 5)
    box.double()
    box.double_then_shrink(2)
    box.reset()
    verify(box, '', 5)
def run_test_steal():
    """ Tests the steal method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the steal method of the Box class.')
    print('-----------------------------------------------------------')

    def verify(thief, victim, exp_thief_contents, exp_thief_volume,
               exp_victim_contents, exp_victim_volume):
        # Report both boxes (expected vs. actual), then a verdict.
        print("Expected 1:", exp_thief_contents, exp_thief_volume)
        print("Actual 1:", thief.contents, thief.volume)
        print("\nExpected 2:", exp_victim_contents, exp_victim_volume)
        print("Actual 2:", victim.contents, victim.volume)
        passed = (thief.contents == exp_thief_contents
                  and thief.volume == exp_thief_volume
                  and victim.contents == exp_victim_contents
                  and victim.volume == exp_victim_volume)
        if passed:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: Box 1 has room for everything that Box 2 holds,
    # so Box 2 ends up empty.
    box1 = Box('Good morning', 100)
    box2 = Box('Hello', 10)
    box1.steal(box2)
    verify(box1, box2, 'Good morningHello', 100, '', 10)

    # Test 2: Box 1 can fit only part of Box 2's contents,
    # so the remainder stays in Box 2.
    box1 = Box('Good morning', 15)
    box2 = Box('Hello', 10)
    box1.steal(box2)
    verify(box1, box2, 'Good morningHel', 15, 'lo', 10)
def run_test_get_history():
    """ Tests the get_history method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the get_history method of the Box class.')
    print('-----------------------------------------------------------')

    def verify(box, history, expected_contents, expected_volume,
               expected_history):
        # Report one step: expected vs. actual, then a verdict.
        print("Expected:", expected_contents, expected_volume,
              expected_history)
        print("Actual:  ", box.contents, box.volume, history)
        passed = (box.contents == expected_contents
                  and box.volume == expected_volume
                  and history == expected_history)
        if passed:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Step 1: a fresh Box has an empty history (no resets yet).
    box = Box('Good', 20)
    verify(box, box.get_history(), 'Good', 20, [])

    # Step 2: double and shrink change the Box but record nothing.
    box.double()    # contents: 'GoodGood'
    box.shrink(6)   # contents: 'GoodGo', volume: 6
    verify(box, box.get_history(), 'GoodGo', 6, [])

    # Step 3: reset restores the original state AND records the
    # contents that were discarded by the reset.
    box.reset()
    verify(box, box.get_history(), 'Good', 20, ['GoodGo'])

    # Step 4: append_string changes the Box but records nothing.
    box.append_string('Bye')    # contents: 'GoodBye'
    verify(box, box.get_history(), 'GoodBye', 20, ['GoodGo'])

    # Step 5: a second reset appends another entry to the history.
    box.reset()
    verify(box, box.get_history(), 'Good', 20, ['GoodGo', 'GoodBye'])
def run_test_combined_box():
    """ Tests the combined_box method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the combined_box method of the Box class.')
    print('-----------------------------------------------------------')
    b1 = Box('Roses', 8)
    b2 = Box('Violets', 20)
    b1.double()     # b1 is now 'RosesRos' with volume 8
    b2.shrink(5)    # b2 is now 'Viole' with volume 5
    new_box1 = b1.combined_box(b2)
    new_box2 = b2.combined_box(b1)

    def verify(combined, expected_contents, expected_volume):
        # Report one combined Box: expected vs. actual, then a verdict.
        print("Expected:", expected_contents, expected_volume)
        print("Actual:  ", combined.contents, combined.volume)
        passed = (combined.contents == expected_contents
                  and combined.volume == expected_volume)
        if passed:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # The receiver's contents come first; volumes add up (8 + 5 = 13).
    verify(new_box1, 'RosesRosViole', 13)
    # Combining in the other order puts b2's contents first.
    verify(new_box2, 'VioleRosesRos', 13)
def print_failure_message(message='  *** FAILED the above test. ***',
                          flush_time=1.0):
    """ Prints a message onto stderr, hence in RED. """
    # Sleep on both sides of the write so that buffered stdout output
    # gets flushed before (and after) the stderr message appears,
    # keeping the red text in the right place in the console.
    time.sleep(flush_time)
    print(message, file=sys.stderr, flush=True)
    time.sleep(flush_time)
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# (Runs at import time; main is defined earlier in this file.)
# ----------------------------------------------------------------------
main()
| 38.082765 | 83 | 0.535885 | """
PRACTICE Test 2, practice_problem 1.
This problem provides practice at:
*** IMPLEMENTING CLASSES. ***
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Colleen Fulton.
"""
Box
What goes out:
Returns a new Box whose:
-- Contents is the contents of this Box plus (i.e., followed by)
the contents of the given other_box
-- Volume is the sum of the volumes of this Box and the given other_box
Side effects: None.
Examples:
See the TEST cases for examples.
Type hints:
:type other_box: Box
"""
# --------------------------------------------------------------
# TODO: 10. Implement and test this function.
# The testing code is already written for you (above).
# --------------------------------------------------------------
# --------------------------------------------------------------
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 4
# TIME ESTIMATE: 5 minutes.
# --------------------------------------------------------------
########################################################################
# The TEST functions for the Box class begin here.
########################################################################
def run_test_init():
    """ Tests the __init__ method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the __init__ method of the Box class.')
    print('-----------------------------------------------------------')

    def show(box, expected_contents, expected_volume):
        # One case: print expected vs. actual, then the verdict.
        print("Expected:", expected_contents, expected_volume)
        print("Actual:  ", box.contents, box.volume)
        outcome = (box.contents == expected_contents
                   and box.volume == expected_volume)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: contents fit with room to spare.
    show(Box('Good morning', 20), 'Good morning', 20)

    # Test 2: contents exactly fill the volume.
    show(Box('Good morning', 12), 'Good morning', 12)

    # Test 3: contents are too big for the volume, so they are "rejected"
    # and the Box starts out empty.
    show(Box('Good morning', 11), '', 11)
def run_test_append_string():
    """ Tests the append_string method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the append_string method of the Box class.')
    print('-----------------------------------------------------------')

    def show(box, clipped, expected_contents, expected_volume,
             expected_clipped):
        # One case: print expected vs. actual, then the verdict.
        print("Expected:", expected_contents, expected_volume,
              expected_clipped)
        print("Actual:  ", box.contents, box.volume, clipped)
        outcome = (box.contents == expected_contents
                   and box.volume == expected_volume
                   and clipped == expected_clipped)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: everything fits, so an empty string comes back.
    box = Box('Hello', 20)
    show(box, box.append_string('Goodbye'), 'HelloGoodbye', 20, '')

    # Test 2: only part fits; the clipped overflow comes back.
    box = Box('Hello', 8)
    show(box, box.append_string('Goodbye'), 'HelloGoo', 8, 'dbye')
def run_test_double():
    """ Tests the double method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the double method of the Box class.')
    print('-----------------------------------------------------------')

    def show(box, clipped, expected_contents, expected_volume,
             expected_clipped):
        # One case: print expected vs. actual, then the verdict.
        print("Expected:", expected_contents, expected_volume,
              expected_clipped)
        print("Actual:  ", box.contents, box.volume, clipped)
        outcome = (box.contents == expected_contents
                   and box.volume == expected_volume
                   and clipped == expected_clipped)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    contents = 'Good morning'

    # Test 1: doubling fits easily; nothing is clipped.
    box = Box(contents, 30)
    show(box, box.double(), contents + contents, 30, '')

    # Test 2: doubling exactly fills the Box; nothing is clipped.
    box = Box(contents, 24)
    show(box, box.double(), contents + contents, 24, '')

    # Test 3: one character too many; a single character is clipped.
    box = Box(contents, 23)
    show(box, box.double(), contents + 'Good mornin', 23, 'g')

    # Test 4: several characters do not fit and are clipped.
    box = Box(contents, 20)
    show(box, box.double(), contents + 'Good mor', 20, 'ning')

    # Test 5: multiple doubles accumulate (quadruple the contents).
    box = Box(contents, 100)
    box.double()
    box.double()
    print("Expected:", contents * 4, 100)
    print("Actual:  ", box.contents, box.volume)
    if box.contents == contents * 4 and box.volume == 100:
        print("Test passed SUCCESSFULLY!")
    else:
        print_failure_message()
    print()
def run_test_shrink():
    """ Tests the shrink method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the shrink method of the Box class.')
    print('-----------------------------------------------------------')

    def show(box, clipped, expected_contents, expected_volume,
             expected_clipped):
        # One case: print expected vs. actual, then the verdict.
        print("Expected:", expected_contents, expected_volume,
              expected_clipped)
        print("Actual:  ", box.contents, box.volume, clipped)
        outcome = (box.contents == expected_contents
                   and box.volume == expected_volume
                   and clipped == expected_clipped)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: the new, smaller volume forces part of the contents out.
    box = Box('Good morning', 20)
    show(box, box.shrink(4), 'Good', 4, ' morning')

    # Test 2: the new volume still holds everything; nothing clipped.
    box = Box('Good morning', 30)
    show(box, box.shrink(15), 'Good morning', 15, '')
def run_test_double_then_shrink():
    """ Tests the double_then_shrink method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the double_then_shrink method of the Box class.')
    print('-----------------------------------------------------------')

    def show(box, number_clipped, expected_contents, expected_volume,
             expected_clipped):
        # One case: print expected vs. actual, then the verdict.
        print("Expected:", expected_contents, expected_volume,
              expected_clipped)
        print("Actual:  ", box.contents, box.volume, number_clipped)
        outcome = (box.contents == expected_contents
                   and box.volume == expected_volume
                   and number_clipped == expected_clipped)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: no characters discarded by either phase.
    box = Box('Goodbye', 20)
    show(box, box.double_then_shrink(17), 'GoodbyeGoodbye', 17, 0)

    # Test 2: 4 characters discarded by the doubling, 0 by the shrink.
    box = Box('Goodbye', 10)
    show(box, box.double_then_shrink(17), 'GoodbyeGoo', 17, 4)

    # Test 3: 0 discarded by the doubling, 1 by the shrink.
    box = Box('Goodbye', 20)
    show(box, box.double_then_shrink(13), 'GoodbyeGoodby', 13, 1)

    # Test 4: 4 discarded by the doubling plus 7 by the shrink = 11.
    box = Box('Goodbye', 10)
    show(box, box.double_then_shrink(3), 'Goo', 3, 11)
def run_test_reset():
    """ Tests the reset method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the reset method of the Box class.')
    print('-----------------------------------------------------------')

    def show(box, expected_contents, expected_volume):
        # One case: print expected vs. actual, then the verdict.
        print("Expected:", expected_contents, expected_volume)
        print("Actual:  ", box.contents, box.volume)
        outcome = (box.contents == expected_contents
                   and box.volume == expected_volume)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: the original contents fit, so reset restores them
    # along with the original volume.
    box = Box('Good morning', 100)
    box.double()
    box.double_then_shrink(2)
    box.reset()
    show(box, 'Good morning', 100)

    # Test 2: the original contents never fit in the original volume,
    # so reset yields an empty Box with that volume.
    box = Box('Good morning', 5)
    box.double()
    box.double_then_shrink(2)
    box.reset()
    show(box, '', 5)
def run_test_steal():
    """ Tests the steal method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the steal method of the Box class.')
    print('-----------------------------------------------------------')

    def show(stealer, victim, exp_contents_1, exp_volume_1,
             exp_contents_2, exp_volume_2):
        # One case: print both boxes (expected vs. actual) and a verdict.
        print("Expected 1:", exp_contents_1, exp_volume_1)
        print("Actual 1:", stealer.contents, stealer.volume)
        print("\nExpected 2:", exp_contents_2, exp_volume_2)
        print("Actual 2:", victim.contents, victim.volume)
        outcome = (stealer.contents == exp_contents_1
                   and stealer.volume == exp_volume_1
                   and victim.contents == exp_contents_2
                   and victim.volume == exp_volume_2)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Test 1: Box 1 has room for everything in Box 2;
    # Box 2 is emptied, volumes are unchanged.
    box1 = Box('Good morning', 100)
    box2 = Box('Hello', 10)
    box1.steal(box2)
    show(box1, box2, 'Good morningHello', 100, '', 10)

    # Test 2: Box 1 fits only part of Box 2's contents;
    # the leftover 'lo' stays in Box 2.
    box1 = Box('Good morning', 15)
    box2 = Box('Hello', 10)
    box1.steal(box2)
    show(box1, box2, 'Good morningHel', 15, 'lo', 10)
def run_test_get_history():
    """ Tests the get_history method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the get_history method of the Box class.')
    print('-----------------------------------------------------------')

    def show(box, history, expected_contents, expected_volume,
             expected_history):
        # One step: print expected vs. actual, then the verdict.
        print("Expected:", expected_contents, expected_volume,
              expected_history)
        print("Actual:  ", box.contents, box.volume, history)
        outcome = (box.contents == expected_contents
                   and box.volume == expected_volume
                   and history == expected_history)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Step 1: no resets yet, so the history is empty.
    box = Box('Good', 20)
    show(box, box.get_history(), 'Good', 20, [])

    # Step 2: double/shrink mutate the Box without recording history.
    box.double()    # contents becomes 'GoodGood'
    box.shrink(6)   # contents becomes 'GoodGo', volume becomes 6
    show(box, box.get_history(), 'GoodGo', 6, [])

    # Step 3: reset restores the original state and records what
    # was discarded ('GoodGo').
    box.reset()
    show(box, box.get_history(), 'Good', 20, ['GoodGo'])

    # Step 4: append_string mutates without recording history.
    box.append_string('Bye')    # contents becomes 'GoodBye'
    show(box, box.get_history(), 'GoodBye', 20, ['GoodGo'])

    # Step 5: a second reset appends 'GoodBye' to the history.
    box.reset()
    show(box, box.get_history(), 'Good', 20, ['GoodGo', 'GoodBye'])
def run_test_combined_box():
    """ Tests the combined_box method of the Box class. """
    print()
    print('-----------------------------------------------------------')
    print('Testing the combined_box method of the Box class.')
    print('-----------------------------------------------------------')
    b1 = Box('Roses', 8)
    b2 = Box('Violets', 20)
    b1.double()     # b1 becomes 'RosesRos' with volume 8
    b2.shrink(5)    # b2 becomes 'Viole' with volume 5
    new_box1 = b1.combined_box(b2)
    new_box2 = b2.combined_box(b1)

    def show(combined, expected_contents, expected_volume):
        # One combined Box: print expected vs. actual, then the verdict.
        print("Expected:", expected_contents, expected_volume)
        print("Actual:  ", combined.contents, combined.volume)
        outcome = (combined.contents == expected_contents
                   and combined.volume == expected_volume)
        if outcome:
            print("Test passed SUCCESSFULLY!")
        else:
            print_failure_message()
        print()

    # Receiver's contents first; volumes add up to 8 + 5 = 13.
    show(new_box1, 'RosesRosViole', 13)
    # Other order: b2's contents come first.
    show(new_box2, 'VioleRosesRos', 13)
def print_failure_message(message='  *** FAILED the above test. ***',
                          flush_time=1.0):
    """ Prints a message onto stderr, hence in RED. """
    # Pause before and after the write so buffered stdout output is
    # flushed and the red stderr text lands in the expected spot.
    time.sleep(flush_time)
    sys.stderr.write(str(message) + '\n')
    sys.stderr.flush()
    time.sleep(flush_time)
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# (Runs at import time; main is defined earlier in this file.)
# ----------------------------------------------------------------------
main()
| false | true |
f7fe359b6b90342b5dcba1521f6537096671ace7 | 5,136 | py | Python | applications/link/modules/api_methods.py | link-money-dev/link-api-web-service | 3da226c7115ee4267f8346620029b710b9987e74 | [
"BSD-3-Clause"
] | null | null | null | applications/link/modules/api_methods.py | link-money-dev/link-api-web-service | 3da226c7115ee4267f8346620029b710b9987e74 | [
"BSD-3-Clause"
] | 1 | 2021-06-01T22:32:25.000Z | 2021-06-01T22:32:25.000Z | applications/watcher/modules/api_methods.py | link-money-dev/link-api-web-service | 3da226c7115ee4267f8346620029b710b9987e74 | [
"BSD-3-Clause"
] | null | null | null | import check
import requests
import json
import error
from db import PGManager
def get_balance(LinkAddress, constant):
    """Return the LINK balance of ``LinkAddress`` as a float.

    Fetches the account record from the Horizon REST API and picks the
    balance entry whose asset code is ``LINK`` and whose issuer matches
    ``constant.ISSUER_ADDRESS``.

    Parameters
    ----------
    LinkAddress : str
        Account address to look up.
    constant : object
        Configuration object providing ``HORIZON_BASE_URL`` and
        ``ISSUER_ADDRESS``.

    Returns
    -------
    float
        The LINK balance, or 0 if the account holds no LINK.

    Raises
    ------
    error.APIError
        If the address fails validation or the account does not exist.
    """
    BASE_URL = constant.HORIZON_BASE_URL
    if not check.check_validity_of_address(LinkAddress):
        raise error.APIError('Account is invalid')
    # Query the Horizon API for the account record.
    _response = json.loads(
        requests.get(BASE_URL + '/accounts/' + LinkAddress).text)
    if _response.get('id', 'None') == 'None':
        raise error.APIError('Account does not exist')
    link_balance = 0
    for item in _response.get('balances', []):
        item = dict(item)
        if (item.get('asset_code', '') == 'LINK'
                and item.get('asset_issuer', '') == constant.ISSUER_ADDRESS):
            link_balance = float(item['balance'])
            break  # asset code + issuer uniquely identify the entry
    # BUG FIX: the original had a ``return`` inside the loop body, so it
    # returned after the *first* balance entry and reported 0 whenever the
    # LINK entry was not listed first. Return only after scanning them all.
    return link_balance
def get_transactions(LinkAddress, constant, limit=50, page=1, asset_code='LINK' ):
    """Build the LINK transaction history for ``LinkAddress``.

    Queries the Horizon SQL database (``constant.DB_HORIZON``) directly and
    assembles one record per payment operation, newest first, paginated by
    ``limit``/``page``.

    NOTE(review): this function references several names that are never
    defined in this module (``copy``, ``time``, ``t0``, ``response``) and
    it never returns a value -- it appears unfinished or truncated. Confirm
    against the upstream version before relying on it.
    """
    # constant = CONSTANT.Constant('test')
    BASE_URL = constant.BASE_URL
    asset_issuer = constant.ISSUER_ADDRESS
    # NOTE(review): ``x.__class__ == 'str'`` compares a *class object* to a
    # string literal, which is always False -- none of these three type
    # checks can ever fire. ``not isinstance(...)`` was probably intended.
    if LinkAddress.__class__=='str':
        raise error.APIError('LinkAddress must be String type')
    if limit.__class__=='int':
        raise error.APIError('limit must be Integer type')
    if page.__class__=='int':
        raise error.APIError('page must be Integer type')
    if limit > 100:
        raise error.APIError('limit should be less than 100')
    validity = check.check_validity_of_address(LinkAddress)
    if validity == False:
        raise error.APIError('Account is invalid')
    else:
        result = {
            'LinkAddress': LinkAddress,
            'transactions': []
        }
        # Query the Horizon database directly instead of the REST API.
        my_psycopg = PGManager(**constant.DB_HORIZON)
        # sql='select * from history_transactions inner join history_operations on \
        # history_transactions.id= history_operations.transaction_id where \
        # history_transactions.account=\'' + id +'\' and history_operations.details::text like \'%' + asset_code + '%\' \
        # and history_operations.details::text like \'%"asset_issuer": "' + asset_issuer + '"%\' \
        # order by history_transactions.created_at ASC limit ' + str(limit) + ' offset ' + str(limit*(page-1))
        # Resolve the account id, then its operation ids, then page through
        # the payment operations (their details JSON contains a "from" key).
        # NOTE(review): SQL is built by string interpolation from
        # ``LinkAddress`` -- verify check_validity_of_address() is strict
        # enough to rule out SQL injection, or use parameterized queries.
        sql0 = 'select id from history_accounts where address=\'' + LinkAddress + '\''
        sql1 = 'select history_operation_id from history_operation_participants where history_account_id=(%s)' % (sql0,)
        sql2 = 'select details::text,transaction_id from history_operations as BBB \
            where id in (%s) \
            and details::text like \'%%from%%\' \
            order by transaction_id DESC limit %d offset %d' % (sql1, limit, limit * (page - 1))
        result_of_details = my_psycopg.select(sql2)
        '''
        select id, created_at from history_transactions as AAA where id in
        (select transaction_id from history_operations as C where id in (select history_operation_id from history_operation_participants as A
        where history_account_id=231))
        and id in (select transaction_id from history_operations as BBB where id in (select history_operation_id from history_operation_participants where history_account_id=231)
        and details::text like '%from%')
        order by created_at DESC
        '''
        if len(result_of_details) == 0:
            raise error.APIError('Out of index error')
        else:
            transaction_ids = []
            for detail in result_of_details:
                transaction_ids.append(str(detail[1]))
            # Build the WHERE clause: a single id uses '=', several use 'IN'.
            if len(transaction_ids) == 1:
                tmp0 = ' id=' + transaction_ids[0]
            else:
                transaction_ids = ','.join(transaction_ids)
                tmp0 = ' id in (' + transaction_ids + ')'
            sql3 = 'select id, created_at from history_transactions where %s order by id' % (tmp0,)
            result_of_transactions = my_psycopg.select(sql3)
            transactions = []
            for i in range(len(result_of_details)):
                detail = json.loads(result_of_details[i][0])
                # NOTE(review): ``copy`` is never imported in this module.
                transaction = copy.deepcopy(detail)
                transaction['id'] = str(result_of_details[i][1])
                # Attach the creation timestamp from the matching transaction row.
                for tt in result_of_transactions:
                    if result_of_details[i][1] == tt[0]:
                        transaction['time'] = tt[1].strftime("%Y-%m-%d %H:%M:%S")
                transactions.append(transaction)
                # NOTE(review): ``id`` here is the *builtin* function, so
                # this comparison is always False and every amount gets a
                # '+' prefix; the account's own id was probably intended.
                if transaction['from'] == id:
                    transaction['amount'] = '-' + transaction['amount']
                else:
                    transaction['amount'] = '+' + transaction['amount']
            result['transactions'] = transactions
            # NOTE(review): ``response``, ``time`` and ``t0`` are undefined
            # at this point -- NameError at runtime if this path is reached.
            response['Result'] = result
            response['Code'] = 1
            response['Message'] = 'Successful'
            t = time.time() - t0
| 48 | 178 | 0.597936 | import check
import requests
import json
import error
from db import PGManager
def get_balance(LinkAddress, constant):
BASE_URL = constant.HORIZON_BASE_URL
validity = check.check_validity_of_address(LinkAddress)
if validity == False:
raise error.APIError('Account is invalid')
else:
_response=requests.get(BASE_URL + '/accounts/' + LinkAddress).text
_response = json.loads(_response)
id = _response.get('id', 'None')
if id == 'None':
raise error.APIError('Account does not exist')
else:
link_balance=0
balances = _response.get('balances', [])
for item in balances:
item = dict(item)
if item.get('asset_code', '') == 'LINK' and item.get('asset_issuer', '') == constant.ISSUER_ADDRESS:
link_balance=float(item['balance'])
return link_balance
return link_balance
def get_transactions(LinkAddress, constant, limit=50, page=1, asset_code='LINK' ):
BASE_URL = constant.BASE_URL
asset_issuer = constant.ISSUER_ADDRESS
if LinkAddress.__class__=='str':
raise error.APIError('LinkAddress must be String type')
if limit.__class__=='int':
raise error.APIError('limit must be Integer type')
if page.__class__=='int':
raise error.APIError('page must be Integer type')
if limit > 100:
raise error.APIError('limit should be less than 100')
validity = check.check_validity_of_address(LinkAddress)
if validity == False:
raise error.APIError('Account is invalid')
else:
result = {
'LinkAddress': LinkAddress,
'transactions': []
}
my_psycopg = PGManager(**constant.DB_HORIZON)
# history_transactions.id= history_operations.transaction_id where \
# history_transactions.account=\'' + id +'\' and history_operations.details::text like \'%' + asset_code + '%\' \
# and history_operations.details::text like \'%"asset_issuer": "' + asset_issuer + '"%\' \
# order by history_transactions.created_at ASC limit ' + str(limit) + ' offset ' + str(limit*(page-1))
sql0 = 'select id from history_accounts where address=\'' + LinkAddress + '\''
sql1 = 'select history_operation_id from history_operation_participants where history_account_id=(%s)' % (sql0,)
sql2 = 'select details::text,transaction_id from history_operations as BBB \
where id in (%s) \
and details::text like \'%%from%%\' \
order by transaction_id DESC limit %d offset %d' % (sql1, limit, limit * (page - 1))
result_of_details = my_psycopg.select(sql2)
'''
select id, created_at from history_transactions as AAA where id in
(select transaction_id from history_operations as C where id in (select history_operation_id from history_operation_participants as A
where history_account_id=231))
and id in (select transaction_id from history_operations as BBB where id in (select history_operation_id from history_operation_participants where history_account_id=231)
and details::text like '%from%')
order by created_at DESC
'''
if len(result_of_details) == 0:
raise error.APIError('Out of index error')
else:
transaction_ids = []
for detail in result_of_details:
transaction_ids.append(str(detail[1]))
if len(transaction_ids) == 1:
tmp0 = ' id=' + transaction_ids[0]
else:
transaction_ids = ','.join(transaction_ids)
tmp0 = ' id in (' + transaction_ids + ')'
sql3 = 'select id, created_at from history_transactions where %s order by id' % (tmp0,)
result_of_transactions = my_psycopg.select(sql3)
transactions = []
for i in range(len(result_of_details)):
detail = json.loads(result_of_details[i][0])
transaction = copy.deepcopy(detail)
transaction['id'] = str(result_of_details[i][1])
for tt in result_of_transactions:
if result_of_details[i][1] == tt[0]:
transaction['time'] = tt[1].strftime("%Y-%m-%d %H:%M:%S")
transactions.append(transaction)
if transaction['from'] == id:
transaction['amount'] = '-' + transaction['amount']
else:
transaction['amount'] = '+' + transaction['amount']
result['transactions'] = transactions
response['Result'] = result
response['Code'] = 1
response['Message'] = 'Successful'
t = time.time() - t0
| true | true |
f7fe35e175d4819b012844825fcdc82bf7ed6031 | 8,655 | py | Python | intersight/model/kvm_tunnel_list_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/kvm_tunnel_list_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/kvm_tunnel_list_all_of.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.kvm_tunnel import KvmTunnel
globals()['KvmTunnel'] = KvmTunnel
class KvmTunnelListAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'count': (int,), # noqa: E501
'results': ([KvmTunnel], none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'count': 'Count', # noqa: E501
'results': 'Results', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""KvmTunnelListAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
count (int): The total number of 'kvm.Tunnel' resources matching the request, accross all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter.. [optional] # noqa: E501
results ([KvmTunnel], none_type): The array of 'kvm.Tunnel' resources matching the request.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 49.176136 | 1,678 | 0.636511 |
import re
import sys
from intersight.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.kvm_tunnel import KvmTunnel
globals()['KvmTunnel'] = KvmTunnel
class KvmTunnelListAllOf(ModelNormal):
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'count': (int,),
'results': ([KvmTunnel], none_type,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'count': 'Count',
'results': 'Results',
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
| true | true |
f7fe35f91a032c4e5f134545990176ab89c7acf6 | 4,228 | py | Python | bokeh/_testing/plugins/selenium.py | kinghows/bokeh | aeb7abc1dbe2b67ce0f4422838a96fb8362c52c7 | [
"BSD-3-Clause"
] | 1 | 2018-11-14T19:08:18.000Z | 2018-11-14T19:08:18.000Z | bokeh/_testing/plugins/selenium.py | kinghows/bokeh | aeb7abc1dbe2b67ce0f4422838a96fb8362c52c7 | [
"BSD-3-Clause"
] | 1 | 2021-05-09T02:45:17.000Z | 2021-05-09T02:45:17.000Z | bokeh/_testing/plugins/selenium.py | kinghows/bokeh | aeb7abc1dbe2b67ce0f4422838a96fb8362c52c7 | [
"BSD-3-Clause"
] | 1 | 2020-06-17T05:47:16.000Z | 2020-06-17T05:47:16.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a Pytest plugin for a log file fixture
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from warnings import warn
# External imports
import pytest
from selenium import webdriver
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def pytest_report_collectionfinish(config, startdir, items):
    """Add a line to pytest's collection summary naming the selenium driver.

    Console-error assertions are only available with the Chrome webdriver,
    so the line also reports whether they are active.
    """
    selected = config.getoption('driver', 'chrome').lower()
    if selected == "chrome":
        assertion_state = "ON"
    else:
        assertion_state = "OFF"
    summary = "Bokeh selenium tests using %r driver (no-console-error assertions: %s)" % (selected, assertion_state)
    return ["", summary]
@pytest.yield_fixture(scope="session")
def driver(pytestconfig):
    ''' Select and configure a Selenium webdriver for integration tests.

    Honors the ``--driver`` pytest option (default "chrome"); Chrome and
    Firefox run headless at a fixed 1920x1080 window size. The driver is
    shared for the whole session and quit on teardown.
    '''
    driver_name = pytestconfig.getoption('driver', 'chrome').lower()
    if driver_name == "chrome":
        from selenium.webdriver.chrome.options import Options
        options = Options()
        options.add_argument("--headless")
        # --no-sandbox is commonly required to run headless Chrome as root
        # (e.g. inside CI containers).
        options.add_argument("--no-sandbox")
        options.add_argument("--window-size=1920x1080")
        driver = webdriver.Chrome(chrome_options=options)
    elif driver_name == "firefox":
        from selenium.webdriver.firefox.options import Options
        options = Options()
        options.add_argument("--headless")
        options.add_argument("--window-size=1920x1080")
        driver = webdriver.Firefox(firefox_options=options)
    elif driver_name == "safari":
        # Safari's webdriver has no headless/window-size options here.
        driver = webdriver.Safari()
    # NOTE(review): an unrecognized --driver value falls through all
    # branches and raises NameError on the next line -- consider an
    # explicit error message for unknown driver names.
    driver.implicitly_wait(10)
    yield driver
    # Teardown: runs once after the whole test session.
    driver.quit()
@pytest.fixture(scope="session")
def has_no_console_errors(pytestconfig):
    ''' Provide a function to assert no browser console errors are present.
    Unfortunately logs are only accessible with Chrome web driver, see e.g.
    https://github.com/mozilla/geckodriver/issues/284
    For non-Chrome webdrivers this check always returns True.
    '''
    driver_name = pytestconfig.getoption('driver').lower()
    if driver_name == "chrome":
        def func(driver):
            # Only 'SEVERE'-level entries count as errors; severe *network*
            # errors merely produce a warning, since they are often
            # environmental and may not affect the test outcome.
            logs = driver.get_log('browser')
            severe_errors = [x for x in logs if x.get('level') == 'SEVERE']
            non_network_errors = [l for l in severe_errors if l.get('type') != 'network']
            if len(non_network_errors) == 0:
                if len(severe_errors) != 0:
                    warn("There were severe network errors (this may or may not have affected your test): %s" % severe_errors)
                return True
            # Any non-network severe error fails the calling test outright.
            pytest.fail('Console errors: %s' % non_network_errors)
    else:
        # Console logs are unavailable on this driver: always pass.
        def func(driver):
            return True
    return func
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 33.824 | 126 | 0.469016 |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from warnings import warn
import pytest
from selenium import webdriver
def pytest_report_collectionfinish(config, startdir, items):
driver = config.getoption('driver', 'chrome').lower()
asserts = "ON" if driver == "chrome" else "OFF"
return ["", "Bokeh selenium tests using %r driver (no-console-error assertions: %s)" % (driver, asserts)]
@pytest.yield_fixture(scope="session")
def driver(pytestconfig):
driver_name = pytestconfig.getoption('driver', 'chrome').lower()
if driver_name == "chrome":
from selenium.webdriver.chrome.options import Options
options = Options()
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--window-size=1920x1080")
driver = webdriver.Chrome(chrome_options=options)
elif driver_name == "firefox":
from selenium.webdriver.firefox.options import Options
options = Options()
options.add_argument("--headless")
options.add_argument("--window-size=1920x1080")
driver = webdriver.Firefox(firefox_options=options)
elif driver_name == "safari":
driver = webdriver.Safari()
driver.implicitly_wait(10)
yield driver
driver.quit()
@pytest.fixture(scope="session")
def has_no_console_errors(pytestconfig):
driver_name = pytestconfig.getoption('driver').lower()
if driver_name == "chrome":
def func(driver):
logs = driver.get_log('browser')
severe_errors = [x for x in logs if x.get('level') == 'SEVERE']
non_network_errors = [l for l in severe_errors if l.get('type') != 'network']
if len(non_network_errors) == 0:
if len(severe_errors) != 0:
warn("There were severe network errors (this may or may not have affected your test): %s" % severe_errors)
return True
pytest.fail('Console errors: %s' % non_network_errors)
else:
def func(driver):
return True
return func
| true | true |
f7fe360ae492e2e48320e076e35a434343e19ad2 | 572 | py | Python | apps/base/apis/permissions.py | summerthe/summers_api | db345db7c0ed88c361ea944b96e26c7241ac6ce2 | [
"MIT"
] | null | null | null | apps/base/apis/permissions.py | summerthe/summers_api | db345db7c0ed88c361ea944b96e26c7241ac6ce2 | [
"MIT"
] | 4 | 2022-03-09T09:34:28.000Z | 2022-03-16T09:44:10.000Z | apps/base/apis/permissions.py | summerthe/summers_api | db345db7c0ed88c361ea944b96e26c7241ac6ce2 | [
"MIT"
] | null | null | null | from django.views import View
from rest_framework.permissions import BasePermission
from rest_framework.request import Request
class AppOwnPermission(BasePermission):
    """Grant access only to internal requests carrying the App-Own header."""

    def has_permission(self, request: Request, view: View) -> bool:
        """Return True when the ``App-Own`` request header is present.

        The header is attached by internal requests, so its presence alone
        marks the caller as trusted.

        Parameters
        ----------
        request : Request
        view : View

        Returns
        -------
        bool
        """
        return "App-Own" in request.headers
| 26 | 105 | 0.624126 | from django.views import View
from rest_framework.permissions import BasePermission
from rest_framework.request import Request
class AppOwnPermission(BasePermission):
def has_permission(self, request: Request, view: View) -> bool:
if "App-Own" in request.headers:
return True
return False
| true | true |
f7fe36bc6dd8ff2d7c073f7e78d53bb3aa775aea | 526 | py | Python | Uni_To_Zg/main.py | EiEiKyaw/Right | 710665ce0ee132f56249530cf1389b4b7fabb9be | [
"MIT"
] | null | null | null | Uni_To_Zg/main.py | EiEiKyaw/Right | 710665ce0ee132f56249530cf1389b4b7fabb9be | [
"MIT"
] | null | null | null | Uni_To_Zg/main.py | EiEiKyaw/Right | 710665ce0ee132f56249530cf1389b4b7fabb9be | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Usage: main.py inputfilename.ext outputfilename.ext
# Example: main.py zawgyi.txt unicode.txt
import codecs
import Uni_To_Zg
import sys
# Convert a Unicode text file to Zawgyi, line by line.
input_file_name = sys.argv[1]
output_file_name = sys.argv[2]
# Context managers guarantee both files are closed (and the output is
# flushed) even if the conversion raises partway through -- the original
# left both handles open on error.
with codecs.open(input_file_name, encoding='utf-8') as input_file, \
        codecs.open(output_file_name, encoding='utf-8', mode='w') as output_file:
    for input_line in input_file:
        output_file.write(Uni_To_Zg.convert(input_line))
| 23.909091 | 70 | 0.768061 |
import codecs
import Uni_To_Zg
import sys
input_file_name = sys.argv[1]
output_file_name = sys.argv[2]
input_file = codecs.open(input_file_name,encoding='utf-8')
output_file = codecs.open(output_file_name,encoding='utf-8', mode='w')
for input_line in input_file:
input_line = Uni_To_Zg.convert(input_line)
output_file.write(input_line)
output_file.flush()
input_file.close()
output_file.close()
| true | true |
f7fe36c9977438b141069682e219cfc0b80a53b9 | 20,454 | py | Python | .venv/lib/python3.7/site-packages/nbformat/sign.py | ITCRStevenLPZ/Proyecto2-Analisis-de-Algoritmos | 4acdbc423428fb2e0068720add69e7870c87929a | [
"Apache-2.0"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | .venv/lib/python3.7/site-packages/nbformat/sign.py | ITCRStevenLPZ/Proyecto2-Analisis-de-Algoritmos | 4acdbc423428fb2e0068720add69e7870c87929a | [
"Apache-2.0"
] | 24 | 2020-03-25T19:35:43.000Z | 2022-02-10T11:46:50.000Z | .venv/lib/python3.7/site-packages/nbformat/sign.py | ITCRStevenLPZ/Proyecto2-Analisis-de-Algoritmos | 4acdbc423428fb2e0068720add69e7870c87929a | [
"Apache-2.0"
] | 11 | 2019-01-21T17:51:48.000Z | 2021-08-10T07:04:33.000Z | """Utilities for signing notebooks"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime
import hashlib
from hmac import HMAC
import io
import os
import sys
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
from ipython_genutils.py3compat import cast_bytes, cast_unicode
from traitlets import (
Instance, Bytes, Enum, Any, Unicode, Bool, Integer, TraitType,
default, observe,
)
from traitlets.config import LoggingConfigurable, MultipleInstanceError
from jupyter_core.application import JupyterApp, base_flags
from . import read, reads, NO_CONVERT, __version__
from ._compat import encodebytes
try:
# Python 3
algorithms = hashlib.algorithms_guaranteed
# shake algorithms in py36 are not compatible with hmac
# due to required length argument in digests
algorithms = [ a for a in algorithms if not a.startswith('shake_') ]
except AttributeError:
algorithms = hashlib.algorithms
# This has been added to traitlets, but is not released as of traitlets 4.3.1,
# so a copy is included here for now.
class Callable(TraitType):
    """A trait whose value must be callable.

    Notes
    -----
    Classes qualify, as do instances defining a ``__call__`` method.
    """

    info_text = 'a callable'

    def validate(self, obj, value):
        # Accept anything the callable() builtin accepts; otherwise raise
        # the standard trait error (self.error always raises).
        if not callable(value):
            self.error(obj, value)
        return value
class SignatureStore(object):
    """Abstract interface for persisting notebook signatures.

    Subclasses must implement the three signature methods; ``close`` is
    optional and defaults to a no-op.
    """

    def store_signature(self, digest, algorithm):
        """Record a signature.

        Storing an already-known signature must not raise.
        """
        raise NotImplementedError

    def check_signature(self, digest, algorithm):
        """Return True if the signature is known, False otherwise."""
        raise NotImplementedError

    def remove_signature(self, digest, algorithm):
        """Forget a signature.

        Removing an unknown signature must not raise.
        """
        raise NotImplementedError

    def close(self):
        """Release any open resources (e.g. database connections)."""
        pass
class MemorySignatureStore(SignatureStore):
    """In-memory, non-persistent signature storage with LRU-style culling."""

    # Maximum number of signatures kept before the oldest are evicted.
    cache_size = 65535

    def __init__(self):
        # An OrderedDict with None values serves as an ordered set:
        # insertion order doubles as recency order.
        self.data = OrderedDict()

    def store_signature(self, digest, algorithm):
        entry = (digest, algorithm)
        # Drop any existing entry first so reinsertion places it at the
        # most-recently-used end.
        self.data.pop(entry, None)
        self.data[entry] = None
        self._maybe_cull()

    def _maybe_cull(self):
        """Evict the oldest 25% once ``cache_size`` entries accumulate."""
        if len(self.data) >= self.cache_size:
            for _ in range(len(self.data) // 4):
                self.data.popitem(last=False)

    def check_signature(self, digest, algorithm):
        entry = (digest, algorithm)
        if entry not in self.data:
            return False
        # Delete-and-reinsert marks the entry as recently used (this also
        # works on Python 2's OrderedDict, unlike move_to_end).
        del self.data[entry]
        self.data[entry] = None
        return True

    def remove_signature(self, digest, algorithm):
        self.data.pop((digest, algorithm), None)
class SQLiteSignatureStore(SignatureStore, LoggingConfigurable):
    """Store signatures in an SQLite database.
    """
    # 64k entries ~ 12MB
    cache_size = Integer(65535,
        help="""The number of notebook signatures to cache.
        When the number of signatures exceeds this value,
        the oldest 25% of signatures will be culled.
        """
    ).tag(config=True)

    def __init__(self, db_file, **kwargs):
        super(SQLiteSignatureStore, self).__init__(**kwargs)
        self.db_file = db_file
        # May fall back to an in-memory database; see _connect_db.
        self.db = self._connect_db(db_file)

    def close(self):
        if self.db is not None:
            self.db.close()

    def _connect_db(self, db_file):
        """Open (or create) the signatures database.

        If the on-disk database cannot be opened (corrupted/encrypted), it
        is renamed to ``<db_file>.bak`` and recreated; if even that fails,
        an in-memory database is used for the rest of the session.
        """
        kwargs = dict(
            detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        db = None
        try:
            db = sqlite3.connect(db_file, **kwargs)
            self.init_db(db)
        except (sqlite3.DatabaseError, sqlite3.OperationalError):
            if db_file != ':memory:':
                old_db_location = db_file + ".bak"
                if db is not None:
                    db.close()
                self.log.warning(
                    ("The signatures database cannot be opened; maybe it is corrupted or encrypted. "
                     "You may need to rerun your notebooks to ensure that they are trusted to run Javascript. "
                     "The old signatures database has been renamed to %s and a new one has been created."),
                    old_db_location)
                try:
                    os.rename(db_file, old_db_location)
                    db = sqlite3.connect(db_file, **kwargs)
                    self.init_db(db)
                except (sqlite3.DatabaseError, sqlite3.OperationalError, OSError):
                    if db is not None:
                        db.close()
                    self.log.warning(
                        ("Failed commiting signatures database to disk. "
                         "You may need to move the database file to a non-networked file system, "
                         "using config option `NotebookNotary.db_file`. "
                         "Using in-memory signatures database for the remainder of this session."))
                    self.db_file = ':memory:'
                    db = sqlite3.connect(':memory:', **kwargs)
                    self.init_db(db)
            else:
                # The in-memory database itself failed: nothing to recover.
                raise
        return db

    def init_db(self, db):
        """Create the signatures table and lookup index if missing."""
        db.execute("""
            CREATE TABLE IF NOT EXISTS nbsignatures
            (
                id integer PRIMARY KEY AUTOINCREMENT,
                algorithm text,
                signature text,
                path text,
                last_seen timestamp
            )""")
        # Index on (algorithm, signature): the lookup key used by
        # check_signature/store_signature/remove_signature.
        db.execute("""
            CREATE INDEX IF NOT EXISTS algosig ON nbsignatures(algorithm, signature)
            """)
        db.commit()

    def store_signature(self, digest, algorithm):
        """Insert the signature, or just refresh last_seen if it exists."""
        if self.db is None:
            return
        if not self.check_signature(digest, algorithm):
            self.db.execute("""
                INSERT INTO nbsignatures (algorithm, signature, last_seen)
                VALUES (?, ?, ?)
                """, (algorithm, digest, datetime.utcnow())
            )
        else:
            self.db.execute("""UPDATE nbsignatures SET last_seen = ? WHERE
                algorithm = ? AND
                signature = ?;
                """, (datetime.utcnow(), algorithm, digest)
            )
        self.db.commit()
        # Check size and cull old entries if necessary
        n, = self.db.execute("SELECT Count(*) FROM nbsignatures").fetchone()
        if n > self.cache_size:
            self.cull_db()

    def check_signature(self, digest, algorithm):
        """Return True if known; also refreshes the entry's last_seen."""
        if self.db is None:
            return False
        r = self.db.execute("""SELECT id FROM nbsignatures WHERE
            algorithm = ? AND
            signature = ?;
            """, (algorithm, digest)).fetchone()
        if r is None:
            return False
        # Touch last_seen so frequently-checked signatures survive culling.
        self.db.execute("""UPDATE nbsignatures SET last_seen = ? WHERE
            algorithm = ? AND
            signature = ?;
            """,
            (datetime.utcnow(), algorithm, digest),
        )
        self.db.commit()
        return True

    def remove_signature(self, digest, algorithm):
        # NOTE(review): unlike the other methods, this does not guard
        # against self.db being None -- confirm callers never hit that.
        self.db.execute("""DELETE FROM nbsignatures WHERE
            algorithm = ? AND
            signature = ?;
            """,
            (algorithm, digest)
        )
        self.db.commit()

    def cull_db(self):
        """Cull oldest 25% of the trusted signatures when the size limit is reached"""
        # Keep the 75% most recently seen; LIMIT -1 OFFSET n skips the
        # first n rows and deletes the rest.
        self.db.execute("""DELETE FROM nbsignatures WHERE id IN (
            SELECT id FROM nbsignatures ORDER BY last_seen DESC LIMIT -1 OFFSET ?
        );
        """, (max(int(0.75 * self.cache_size), 1),))
def yield_everything(obj):
    """Yield every item in a container as bytes.

    Lets any JSONable object be fed piecewise to an HMAC digester without
    serializing the whole structure first.
    """
    if isinstance(obj, dict):
        # Deterministic order: keys are visited sorted.
        for key in sorted(obj):
            yield cast_bytes(key)
            yield from yield_everything(obj[key])
    elif isinstance(obj, (list, tuple)):
        for item in obj:
            yield from yield_everything(item)
    elif isinstance(obj, str):
        yield obj.encode('utf8')
    else:
        yield str(obj).encode('utf8')
def yield_code_cells(nb):
    """Iterate over every code cell in a notebook, nbformat-version independent."""
    if nb.nbformat >= 4:
        cells = nb['cells']
    elif nb.nbformat == 3:
        # v3 nests cells one level deeper, inside worksheets.
        cells = (cell for ws in nb['worksheets'] for cell in ws['cells'])
    else:
        cells = ()
    for cell in cells:
        if cell['cell_type'] == 'code':
            yield cell
@contextmanager
def signature_removed(nb):
    """Temporarily strip the 'signature' entry from a notebook's metadata.

    Used to exclude the previous signature while computing a new one; the
    original value (if any) is restored on exit, even if the body raises.
    """
    stashed = nb['metadata'].pop('signature', None)
    try:
        yield
    finally:
        if stashed is not None:
            nb['metadata']['signature'] = stashed
class NotebookNotary(LoggingConfigurable):
    """A class for computing and verifying notebook signatures."""

    # Jupyter data directory; used to locate the signature DB and secret key.
    data_dir = Unicode()

    @default('data_dir')
    def _data_dir_default(self):
        # Reuse the running JupyterApp's data dir when one exists; otherwise
        # build a throwaway (non-global) app just to resolve the path.
        app = None
        try:
            if JupyterApp.initialized():
                app = JupyterApp.instance()
        except MultipleInstanceError:
            pass
        if app is None:
            # create an app, without the global instance
            app = JupyterApp()
            app.initialize(argv=[])
        return app.data_dir

    store_factory = Callable(
        help="""A callable returning the storage backend for notebook signatures.
        The default uses an SQLite database.""").tag(config=True)

    @default('store_factory')
    def _store_factory_default(self):
        def factory():
            # Fall back to the in-memory store when sqlite3 is unavailable.
            if sqlite3 is None:
                self.log.warning("Missing SQLite3, all notebooks will be untrusted!")
                return MemorySignatureStore()
            return SQLiteSignatureStore(self.db_file)
        return factory

    db_file = Unicode(
        help="""The sqlite file in which to store notebook signatures.
        By default, this will be in your Jupyter data directory.
        You can set it to ':memory:' to disable sqlite writing to the filesystem.
        """).tag(config=True)

    @default('db_file')
    def _db_file_default(self):
        if not self.data_dir:
            return ':memory:'
        return os.path.join(self.data_dir, u'nbsignatures.db')

    algorithm = Enum(algorithms, default_value='sha256',
        help="""The hashing algorithm used to sign notebooks."""
    ).tag(config=True)

    @observe('algorithm')
    def _algorithm_changed(self, change):
        # Keep digestmod in sync when the configured algorithm changes.
        self.digestmod = getattr(hashlib, change.new)

    # hashlib constructor matching `algorithm`; passed to HMAC.
    digestmod = Any()

    @default('digestmod')
    def _digestmod_default(self):
        return getattr(hashlib, self.algorithm)

    secret_file = Unicode(
        help="""The file where the secret key is stored."""
    ).tag(config=True)

    @default('secret_file')
    def _secret_file_default(self):
        if not self.data_dir:
            return ''
        return os.path.join(self.data_dir, 'notebook_secret')

    secret = Bytes(
        help="""The secret key with which notebooks are signed."""
    ).tag(config=True)

    @default('secret')
    def _secret_default(self):
        # note : this assumes an Application is running
        if os.path.exists(self.secret_file):
            with io.open(self.secret_file, 'rb') as f:
                return f.read()
        else:
            # First run: generate and persist a fresh random key.
            secret = encodebytes(os.urandom(1024))
            self._write_secret_file(secret)
            return secret

    def __init__(self, **kwargs):
        super(NotebookNotary, self).__init__(**kwargs)
        self.store = self.store_factory()

    def _write_secret_file(self, secret):
        """write my secret to my secret_file"""
        self.log.info("Writing notebook-signing key to %s", self.secret_file)
        with io.open(self.secret_file, 'wb') as f:
            f.write(secret)
        try:
            # Restrict the key file to the owner; best-effort on platforms
            # where chmod is unsupported.
            os.chmod(self.secret_file, 0o600)
        except OSError:
            self.log.warning(
                "Could not set permissions on %s",
                self.secret_file
            )
        return secret

    def compute_signature(self, nb):
        """Compute a notebook's signature

        by hashing the entire contents of the notebook via HMAC digest.
        """
        hmac = HMAC(self.secret, digestmod=self.digestmod)
        # don't include the previous hash in the content to hash
        with signature_removed(nb):
            # sign the whole thing
            for b in yield_everything(nb):
                hmac.update(b)
        return hmac.hexdigest()

    def check_signature(self, nb):
        """Check a notebook's stored signature

        If a signature is stored in the notebook's metadata,
        a new signature is computed and compared with the stored value.

        Returns True if the signature is found and matches, False otherwise.

        The following conditions must all be met for a notebook to be trusted:
        - a signature is stored in the form 'scheme:hexdigest'
        - the stored scheme matches the requested scheme
        - the requested scheme is available from hashlib
        - the computed hash from notebook_signature matches the stored hash
        """
        if nb.nbformat < 3:
            return False
        signature = self.compute_signature(nb)
        return self.store.check_signature(signature, self.algorithm)

    def sign(self, nb):
        """Sign a notebook, indicating that its output is trusted on this machine

        Stores hash algorithm and hmac digest in a local database of trusted notebooks.
        """
        if nb.nbformat < 3:
            return
        signature = self.compute_signature(nb)
        self.store.store_signature(signature, self.algorithm)

    def unsign(self, nb):
        """Ensure that a notebook is untrusted

        by removing its signature from the trusted database, if present.
        """
        signature = self.compute_signature(nb)
        self.store.remove_signature(signature, self.algorithm)

    def mark_cells(self, nb, trusted):
        """Mark cells as trusted if the notebook's signature can be verified

        Sets ``cell.metadata.trusted = True | False`` on all code cells,
        depending on the *trusted* parameter. This will typically be the return
        value from ``self.check_signature(nb)``.

        This function is the inverse of check_cells
        """
        if nb.nbformat < 3:
            return
        for cell in yield_code_cells(nb):
            cell['metadata']['trusted'] = trusted

    def _check_cell(self, cell, nbformat_version):
        """Do we trust an individual cell?

        Return True if:

        - cell is explicitly trusted
        - cell has no potentially unsafe rich output

        If a cell has no output, or only simple print statements,
        it will always be trusted.
        """
        # explicitly trusted
        if cell['metadata'].pop("trusted", False):
            return True

        # explicitly safe output
        if nbformat_version >= 4:
            unsafe_output_types = ['execute_result', 'display_data']
            safe_keys = {"output_type", "execution_count", "metadata"}
        else:  # v3
            unsafe_output_types = ['pyout', 'display_data']
            safe_keys = {"output_type", "prompt_number", "metadata"}

        for output in cell['outputs']:
            output_type = output['output_type']
            if output_type in unsafe_output_types:
                # if there are any data keys not in the safe whitelist
                output_keys = set(output)
                if output_keys.difference(safe_keys):
                    return False

        return True

    def check_cells(self, nb):
        """Return whether all code cells are trusted.

        A cell is trusted if the 'trusted' field in its metadata is truthy, or
        if it has no potentially unsafe outputs.

        If there are no code cells, return True.

        This function is the inverse of mark_cells.
        """
        if nb.nbformat < 3:
            return False
        trusted = True
        for cell in yield_code_cells(nb):
            # only distrust a cell if it actually has some output to distrust
            if not self._check_cell(cell, nb.nbformat):
                trusted = False
        return trusted
trust_flags = {
'reset' : (
{'TrustNotebookApp' : { 'reset' : True}},
"""Delete the trusted notebook cache.
All previously signed notebooks will become untrusted.
"""
),
}
trust_flags.update(base_flags)
class TrustNotebookApp(JupyterApp):
    """Command-line app behind ``jupyter trust``."""

    version = __version__
    description="""Sign one or more Jupyter notebooks with your key,
    to trust their dynamic (HTML, Javascript) output.

    Otherwise, you will have to re-execute the notebook to see output.
    """

    # This command line tool should use the same config file as the notebook
    @default('config_file_name')
    def _config_file_name_default(self):
        return 'jupyter_notebook_config'

    examples = """
    jupyter trust mynotebook.ipynb and_this_one.ipynb
    """

    flags = trust_flags

    reset = Bool(False,
        help="""If True, delete the trusted signature cache.
        After reset, all previously signed notebooks will become untrusted.
        """
    ).tag(config=True)

    # Lazily constructed notary sharing this app's data directory.
    notary = Instance(NotebookNotary)

    @default('notary')
    def _notary_default(self):
        return NotebookNotary(parent=self, data_dir=self.data_dir)

    def sign_notebook_file(self, notebook_path):
        """Sign a notebook from the filesystem"""
        if not os.path.exists(notebook_path):
            self.log.error("Notebook missing: %s" % notebook_path)
            self.exit(1)
        with io.open(notebook_path, encoding='utf8') as f:
            nb = read(f, NO_CONVERT)
        self.sign_notebook(nb, notebook_path)

    def sign_notebook(self, nb, notebook_path='<stdin>'):
        """Sign a notebook that's been loaded"""
        if self.notary.check_signature(nb):
            print("Notebook already signed: %s" % notebook_path)
        else:
            print("Signing notebook: %s" % notebook_path)
            self.notary.sign(nb)

    def generate_new_key(self):
        """Generate a new notebook signature key"""
        print("Generating new notebook key: %s" % self.notary.secret_file)
        self.notary._write_secret_file(os.urandom(1024))

    def start(self):
        """Entry point: either reset the trust DB or sign the given notebooks."""
        if self.reset:
            # --reset: wipe the signature cache and rotate the signing key.
            if os.path.exists(self.notary.db_file):
                print("Removing trusted signature cache: %s" % self.notary.db_file)
                os.remove(self.notary.db_file)
            self.generate_new_key()
            return
        if not self.extra_args:
            # No paths given: sign a single notebook read from stdin.
            self.log.debug("Reading notebook from stdin")
            nb_s = cast_unicode(sys.stdin.read())
            nb = reads(nb_s, NO_CONVERT)
            self.sign_notebook(nb, '<stdin>')
        else:
            for notebook_path in self.extra_args:
                self.sign_notebook_file(notebook_path)
# Console entry point used by the `jupyter trust` script.
main = TrustNotebookApp.launch_instance

if __name__ == '__main__':
    main()
| 33.531148 | 111 | 0.603598 |
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime
import hashlib
from hmac import HMAC
import io
import os
import sys
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
from ipython_genutils.py3compat import cast_bytes, cast_unicode
from traitlets import (
Instance, Bytes, Enum, Any, Unicode, Bool, Integer, TraitType,
default, observe,
)
from traitlets.config import LoggingConfigurable, MultipleInstanceError
from jupyter_core.application import JupyterApp, base_flags
from . import read, reads, NO_CONVERT, __version__
from ._compat import encodebytes
try:
algorithms = hashlib.algorithms_guaranteed
algorithms = [ a for a in algorithms if not a.startswith('shake_') ]
except AttributeError:
algorithms = hashlib.algorithms
class Callable(TraitType):
info_text = 'a callable'
def validate(self, obj, value):
if callable(value):
return value
else:
self.error(obj, value)
class SignatureStore(object):
def store_signature(self, digest, algorithm):
raise NotImplementedError
def check_signature(self, digest, algorithm):
raise NotImplementedError
def remove_signature(self, digest, algorithm):
raise NotImplementedError
def close(self):
pass
class MemorySignatureStore(SignatureStore):
cache_size = 65535
def __init__(self):
self.data = OrderedDict()
def store_signature(self, digest, algorithm):
key = (digest, algorithm)
# Pop it so it goes to the end when we reinsert it
self.data.pop(key, None)
self.data[key] = None
self._maybe_cull()
def _maybe_cull(self):
if len(self.data) < self.cache_size:
return
for _ in range(len(self.data) // 4):
self.data.popitem(last=False)
def check_signature(self, digest, algorithm):
key = (digest, algorithm)
if key in self.data:
# Move it to the end (.move_to_end() method is new in Py3)
del self.data[key]
self.data[key] = None
return True
return False
def remove_signature(self, digest, algorithm):
self.data.pop((digest, algorithm), None)
class SQLiteSignatureStore(SignatureStore, LoggingConfigurable):
# 64k entries ~ 12MB
cache_size = Integer(65535,
help="""The number of notebook signatures to cache.
When the number of signatures exceeds this value,
the oldest 25% of signatures will be culled.
"""
).tag(config=True)
def __init__(self, db_file, **kwargs):
super(SQLiteSignatureStore, self).__init__(**kwargs)
self.db_file = db_file
self.db = self._connect_db(db_file)
def close(self):
if self.db is not None:
self.db.close()
def _connect_db(self, db_file):
kwargs = dict(
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
db = None
try:
db = sqlite3.connect(db_file, **kwargs)
self.init_db(db)
except (sqlite3.DatabaseError, sqlite3.OperationalError):
if db_file != ':memory:':
old_db_location = db_file + ".bak"
if db is not None:
db.close()
self.log.warning(
("The signatures database cannot be opened; maybe it is corrupted or encrypted. "
"You may need to rerun your notebooks to ensure that they are trusted to run Javascript. "
"The old signatures database has been renamed to %s and a new one has been created."),
old_db_location)
try:
os.rename(db_file, old_db_location)
db = sqlite3.connect(db_file, **kwargs)
self.init_db(db)
except (sqlite3.DatabaseError, sqlite3.OperationalError, OSError):
if db is not None:
db.close()
self.log.warning(
("Failed commiting signatures database to disk. "
"You may need to move the database file to a non-networked file system, "
"using config option `NotebookNotary.db_file`. "
"Using in-memory signatures database for the remainder of this session."))
self.db_file = ':memory:'
db = sqlite3.connect(':memory:', **kwargs)
self.init_db(db)
else:
raise
return db
def init_db(self, db):
db.execute("""
CREATE TABLE IF NOT EXISTS nbsignatures
(
id integer PRIMARY KEY AUTOINCREMENT,
algorithm text,
signature text,
path text,
last_seen timestamp
)""")
db.execute("""
CREATE INDEX IF NOT EXISTS algosig ON nbsignatures(algorithm, signature)
""")
db.commit()
def store_signature(self, digest, algorithm):
if self.db is None:
return
if not self.check_signature(digest, algorithm):
self.db.execute("""
INSERT INTO nbsignatures (algorithm, signature, last_seen)
VALUES (?, ?, ?)
""", (algorithm, digest, datetime.utcnow())
)
else:
self.db.execute("""UPDATE nbsignatures SET last_seen = ? WHERE
algorithm = ? AND
signature = ?;
""", (datetime.utcnow(), algorithm, digest)
)
self.db.commit()
# Check size and cull old entries if necessary
n, = self.db.execute("SELECT Count(*) FROM nbsignatures").fetchone()
if n > self.cache_size:
self.cull_db()
def check_signature(self, digest, algorithm):
if self.db is None:
return False
r = self.db.execute("""SELECT id FROM nbsignatures WHERE
algorithm = ? AND
signature = ?;
""", (algorithm, digest)).fetchone()
if r is None:
return False
self.db.execute("""UPDATE nbsignatures SET last_seen = ? WHERE
algorithm = ? AND
signature = ?;
""",
(datetime.utcnow(), algorithm, digest),
)
self.db.commit()
return True
def remove_signature(self, digest, algorithm):
self.db.execute("""DELETE FROM nbsignatures WHERE
algorithm = ? AND
signature = ?;
""",
(algorithm, digest)
)
self.db.commit()
def cull_db(self):
self.db.execute("""DELETE FROM nbsignatures WHERE id IN (
SELECT id FROM nbsignatures ORDER BY last_seen DESC LIMIT -1 OFFSET ?
);
""", (max(int(0.75 * self.cache_size), 1),))
def yield_everything(obj):
if isinstance(obj, dict):
for key in sorted(obj):
value = obj[key]
yield cast_bytes(key)
for b in yield_everything(value):
yield b
elif isinstance(obj, (list, tuple)):
for element in obj:
for b in yield_everything(element):
yield b
elif isinstance(obj, str):
yield obj.encode('utf8')
else:
yield str(obj).encode('utf8')
def yield_code_cells(nb):
if nb.nbformat >= 4:
for cell in nb['cells']:
if cell['cell_type'] == 'code':
yield cell
elif nb.nbformat == 3:
for ws in nb['worksheets']:
for cell in ws['cells']:
if cell['cell_type'] == 'code':
yield cell
@contextmanager
def signature_removed(nb):
save_signature = nb['metadata'].pop('signature', None)
try:
yield
finally:
if save_signature is not None:
nb['metadata']['signature'] = save_signature
class NotebookNotary(LoggingConfigurable):
data_dir = Unicode()
@default('data_dir')
def _data_dir_default(self):
app = None
try:
if JupyterApp.initialized():
app = JupyterApp.instance()
except MultipleInstanceError:
pass
if app is None:
# create an app, without the global instance
app = JupyterApp()
app.initialize(argv=[])
return app.data_dir
store_factory = Callable(
help="""A callable returning the storage backend for notebook signatures.
The default uses an SQLite database.""").tag(config=True)
@default('store_factory')
def _store_factory_default(self):
def factory():
if sqlite3 is None:
self.log.warning("Missing SQLite3, all notebooks will be untrusted!")
return MemorySignatureStore()
return SQLiteSignatureStore(self.db_file)
return factory
db_file = Unicode(
help="""The sqlite file in which to store notebook signatures.
By default, this will be in your Jupyter data directory.
You can set it to ':memory:' to disable sqlite writing to the filesystem.
""").tag(config=True)
@default('db_file')
def _db_file_default(self):
if not self.data_dir:
return ':memory:'
return os.path.join(self.data_dir, u'nbsignatures.db')
algorithm = Enum(algorithms, default_value='sha256',
help="""The hashing algorithm used to sign notebooks."""
).tag(config=True)
@observe('algorithm')
def _algorithm_changed(self, change):
self.digestmod = getattr(hashlib, change.new)
digestmod = Any()
@default('digestmod')
def _digestmod_default(self):
return getattr(hashlib, self.algorithm)
secret_file = Unicode(
help="""The file where the secret key is stored."""
).tag(config=True)
@default('secret_file')
def _secret_file_default(self):
if not self.data_dir:
return ''
return os.path.join(self.data_dir, 'notebook_secret')
secret = Bytes(
help="""The secret key with which notebooks are signed."""
).tag(config=True)
@default('secret')
def _secret_default(self):
# note : this assumes an Application is running
if os.path.exists(self.secret_file):
with io.open(self.secret_file, 'rb') as f:
return f.read()
else:
secret = encodebytes(os.urandom(1024))
self._write_secret_file(secret)
return secret
def __init__(self, **kwargs):
super(NotebookNotary, self).__init__(**kwargs)
self.store = self.store_factory()
def _write_secret_file(self, secret):
self.log.info("Writing notebook-signing key to %s", self.secret_file)
with io.open(self.secret_file, 'wb') as f:
f.write(secret)
try:
os.chmod(self.secret_file, 0o600)
except OSError:
self.log.warning(
"Could not set permissions on %s",
self.secret_file
)
return secret
def compute_signature(self, nb):
hmac = HMAC(self.secret, digestmod=self.digestmod)
# don't include the previous hash in the content to hash
with signature_removed(nb):
for b in yield_everything(nb):
hmac.update(b)
return hmac.hexdigest()
def check_signature(self, nb):
if nb.nbformat < 3:
return False
signature = self.compute_signature(nb)
return self.store.check_signature(signature, self.algorithm)
def sign(self, nb):
if nb.nbformat < 3:
return
signature = self.compute_signature(nb)
self.store.store_signature(signature, self.algorithm)
def unsign(self, nb):
signature = self.compute_signature(nb)
self.store.remove_signature(signature, self.algorithm)
def mark_cells(self, nb, trusted):
if nb.nbformat < 3:
return
for cell in yield_code_cells(nb):
cell['metadata']['trusted'] = trusted
def _check_cell(self, cell, nbformat_version):
if cell['metadata'].pop("trusted", False):
return True
if nbformat_version >= 4:
unsafe_output_types = ['execute_result', 'display_data']
safe_keys = {"output_type", "execution_count", "metadata"}
else:
unsafe_output_types = ['pyout', 'display_data']
safe_keys = {"output_type", "prompt_number", "metadata"}
for output in cell['outputs']:
output_type = output['output_type']
if output_type in unsafe_output_types:
output_keys = set(output)
if output_keys.difference(safe_keys):
return False
return True
def check_cells(self, nb):
if nb.nbformat < 3:
return False
trusted = True
for cell in yield_code_cells(nb):
if not self._check_cell(cell, nb.nbformat):
trusted = False
return trusted
trust_flags = {
'reset' : (
{'TrustNotebookApp' : { 'reset' : True}},
"""Delete the trusted notebook cache.
All previously signed notebooks will become untrusted.
"""
),
}
trust_flags.update(base_flags)
class TrustNotebookApp(JupyterApp):
version = __version__
description="""Sign one or more Jupyter notebooks with your key,
to trust their dynamic (HTML, Javascript) output.
Otherwise, you will have to re-execute the notebook to see output.
"""
@default('config_file_name')
def _config_file_name_default(self):
return 'jupyter_notebook_config'
examples = """
jupyter trust mynotebook.ipynb and_this_one.ipynb
"""
flags = trust_flags
reset = Bool(False,
help="""If True, delete the trusted signature cache.
After reset, all previously signed notebooks will become untrusted.
"""
).tag(config=True)
notary = Instance(NotebookNotary)
@default('notary')
def _notary_default(self):
return NotebookNotary(parent=self, data_dir=self.data_dir)
def sign_notebook_file(self, notebook_path):
if not os.path.exists(notebook_path):
self.log.error("Notebook missing: %s" % notebook_path)
self.exit(1)
with io.open(notebook_path, encoding='utf8') as f:
nb = read(f, NO_CONVERT)
self.sign_notebook(nb, notebook_path)
def sign_notebook(self, nb, notebook_path='<stdin>'):
if self.notary.check_signature(nb):
print("Notebook already signed: %s" % notebook_path)
else:
print("Signing notebook: %s" % notebook_path)
self.notary.sign(nb)
def generate_new_key(self):
print("Generating new notebook key: %s" % self.notary.secret_file)
self.notary._write_secret_file(os.urandom(1024))
def start(self):
if self.reset:
if os.path.exists(self.notary.db_file):
print("Removing trusted signature cache: %s" % self.notary.db_file)
os.remove(self.notary.db_file)
self.generate_new_key()
return
if not self.extra_args:
self.log.debug("Reading notebook from stdin")
nb_s = cast_unicode(sys.stdin.read())
nb = reads(nb_s, NO_CONVERT)
self.sign_notebook(nb, '<stdin>')
else:
for notebook_path in self.extra_args:
self.sign_notebook_file(notebook_path)
main = TrustNotebookApp.launch_instance
if __name__ == '__main__':
main()
| true | true |
f7fe3746fa2700f7f701579a9782ee6e927ce0d1 | 743 | py | Python | consol_real_ensemble.py | SudeepDasari/video_prediction-1 | ef0953b514aa1b7a1f5e96fd30aebef01334fb2d | [
"MIT"
] | null | null | null | consol_real_ensemble.py | SudeepDasari/video_prediction-1 | ef0953b514aa1b7a1f5e96fd30aebef01334fb2d | [
"MIT"
] | null | null | null | consol_real_ensemble.py | SudeepDasari/video_prediction-1 | ef0953b514aa1b7a1f5e96fd30aebef01334fb2d | [
"MIT"
] | null | null | null | import numpy as np
import argparse
import imageio

if __name__ == '__main__':
    # Consolidate predicted-video arrays from several log directories into
    # one GIF per clip, tiling the runs' outputs together.
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_dirs', nargs='+')            # one log dir per model run
    parser.add_argument('--max_num', type=int, default=32)  # max clips to export
    args = parser.parse_args()

    dirs = args.log_dirs
    big_numpy = None
    for area in dirs:
        # Each run stores its generated frames at this fixed relative path.
        file_name = '{}/prediction_eval_psnr_max/outputs/gen_image.npy'.format(area)
        loaded = np.load(file_name)[:args.max_num]
        print(loaded.shape)
        if big_numpy is None:
            big_numpy = loaded
        else:
            # Tile runs along axis 2 — presumably a spatial axis of
            # (clip, time, H, W, C) arrays; TODO confirm the layout.
            big_numpy = np.concatenate([big_numpy, loaded], axis=2)
    for i in range(args.max_num):
        # NOTE(review): assumes 'consolidated/' exists and that at least
        # max_num clips were loaded.
        imageio.mimsave('consolidated/{}.gif'.format(i), big_numpy[i])
| 29.72 | 84 | 0.643338 | import numpy as np
import argparse
import imageio
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--log_dirs', nargs='+')
parser.add_argument('--max_num', type=int, default=32)
args = parser.parse_args()
dirs = args.log_dirs
big_numpy = None
for area in dirs:
file_name = '{}/prediction_eval_psnr_max/outputs/gen_image.npy'.format(area)
loaded = np.load(file_name)[:args.max_num]
print(loaded.shape)
if big_numpy is None:
big_numpy = loaded
else:
big_numpy = np.concatenate([big_numpy, loaded], axis=2)
for i in range(args.max_num):
imageio.mimsave('consolidated/{}.gif'.format(i), big_numpy[i])
| true | true |
f7fe37b154d92154ecaf98c31c103bfee545ca07 | 443 | py | Python | waters/migrations/0003_product_product_image.py | chymdyugah/water | fa5da223e57100e6d2853c763afe5e89187e249d | [
"MIT"
] | null | null | null | waters/migrations/0003_product_product_image.py | chymdyugah/water | fa5da223e57100e6d2853c763afe5e89187e249d | [
"MIT"
] | null | null | null | waters/migrations/0003_product_product_image.py | chymdyugah/water | fa5da223e57100e6d2853c763afe5e89187e249d | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2020-05-14 20:46
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the ``product_image`` file field to the ``product`` model."""

    dependencies = [
        ('waters', '0002_auto_20200514_2140'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='product_image',
            # Existing rows are backfilled with '1.jpg'; preserve_default=False
            # then drops that one-off default from the model state.
            field=models.FileField(default='1.jpg', upload_to=''),
            preserve_default=False,
        ),
    ]
| 22.15 | 66 | 0.598194 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waters', '0002_auto_20200514_2140'),
]
operations = [
migrations.AddField(
model_name='product',
name='product_image',
field=models.FileField(default='1.jpg', upload_to=''),
preserve_default=False,
),
]
| true | true |
f7fe38a3c4f946cbf408f5bd3abd0d3e5a12fdbd | 1,902 | py | Python | Visualize-Data-using-Matplotlib/code.py | nagnath001/ga-learner-dsmp-repo | 7035fd2ebe967182a44011dde59cb8ae411badb6 | [
"MIT"
] | null | null | null | Visualize-Data-using-Matplotlib/code.py | nagnath001/ga-learner-dsmp-repo | 7035fd2ebe967182a44011dde59cb8ae411badb6 | [
"MIT"
] | null | null | null | Visualize-Data-using-Matplotlib/code.py | nagnath001/ga-learner-dsmp-repo | 7035fd2ebe967182a44011dde59cb8ae411badb6 | [
"MIT"
] | null | null | null | # --------------
# Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the loan dataset.
# NOTE(review): `path` is expected to be predefined by the surrounding
# environment (e.g. the grading harness) — confirm before running standalone.
data = pd.read_csv(path)

# Bar chart of loan-approval outcome counts.
loan_status=data.Loan_Status.value_counts()
plt.bar(loan_status.index,loan_status)
plt.show()

# --------------
# Unstacked bar plot: loan status broken down by property area.
property_and_loan=data.groupby(['Property_Area', 'Loan_Status'])
property_and_loan=property_and_loan.size().unstack()
property_and_loan.plot(kind='bar', stacked=False, figsize=(15,10))
# Axis labels and readable tick rotation.
plt.xlabel('Property_Area')
plt.ylabel('Loan_Status')
plt.xticks(rotation=45)

# --------------
# Unstacked bar plot: loan status broken down by education level.
education_and_loan=data.groupby(['Education','Loan_Status'])
education_and_loan=education_and_loan.size().unstack()
print(education_and_loan)
education_and_loan.plot(kind='bar', stacked=False, figsize=(15,10))
plt.xlabel('Education Status')
plt.ylabel('Loan_Status')
plt.xticks(rotation=45)

# --------------
# Density of loan amounts for graduates vs. non-graduates.
graduate=data[data['Education'] == 'Graduate']
not_graduate=data[data['Education'] == 'Not Graduate']
graduate['LoanAmount'].plot(kind='density', label='Graduate')
not_graduate['LoanAmount'].plot(kind='density', label='Not Graduate')
# For automatic legend display
plt.legend()

# --------------
# Scatter plots of loan amount against each income measure.
fig, (ax_1, ax_2,ax_3) = plt.subplots(3,1, figsize=(20,10))
ax_1.scatter(data['ApplicantIncome'],data["LoanAmount"])
ax_2.scatter(data['CoapplicantIncome'],data["LoanAmount"])
data['TotalIncome']=data['ApplicantIncome'] + data['CoapplicantIncome']
ax_3.scatter(data['TotalIncome'],data["LoanAmount"])
| 22.642857 | 72 | 0.704522 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv(path)
loan_status=data.Loan_Status.value_counts()
plt.bar(loan_status.index,loan_status)
plt.show()
property_and_loan=data.groupby(['Property_Area', 'Loan_Status'])
property_and_loan=property_and_loan.size().unstack()
property_and_loan.plot(kind='bar', stacked=False, figsize=(15,10))
plt.xlabel('Property_Area')
plt.ylabel('Loan_Status')
plt.xticks(rotation=45)
education_and_loan=data.groupby(['Education','Loan_Status'])
education_and_loan=education_and_loan.size().unstack()
print(education_and_loan)
education_and_loan.plot(kind='bar', stacked=False, figsize=(15,10))
plt.xlabel('Education Status')
plt.ylabel('Loan_Status')
plt.xticks(rotation=45)
graduate=data[data['Education'] == 'Graduate']
not_graduate=data[data['Education'] == 'Not Graduate']
graduate['LoanAmount'].plot(kind='density', label='Graduate')
not_graduate['LoanAmount'].plot(kind='density', label='Not Graduate')
plt.legend()
fig, (ax_1, ax_2,ax_3) = plt.subplots(3,1, figsize=(20,10))
ax_1.scatter(data['ApplicantIncome'],data["LoanAmount"])
ax_2.scatter(data['CoapplicantIncome'],data["LoanAmount"])
data['TotalIncome']=data['ApplicantIncome'] + data['CoapplicantIncome']
ax_3.scatter(data['TotalIncome'],data["LoanAmount"])
| true | true |
f7fe38c9a4b8c5796670a8aa33b5cb1b8bbd7c39 | 5,246 | py | Python | src/jetson/Sensors/sensors_simple.py | ichalkiad/VW_challenge | 333222010ecf3d1ca4a0e181239f761c975453e9 | [
"Apache-2.0"
] | 1 | 2017-08-16T08:42:49.000Z | 2017-08-16T08:42:49.000Z | src/jetson/Sensors/sensors_simple.py | ichalkiad/VW_challenge | 333222010ecf3d1ca4a0e181239f761c975453e9 | [
"Apache-2.0"
] | 4 | 2017-08-09T23:01:30.000Z | 2017-08-24T16:44:13.000Z | src/jetson/Sensors/sensors_simple.py | yhalk/vw_challenge_ECR | c1ff50070d0f7367ccfbf473c69e90fd2be5e85e | [
"Apache-2.0"
] | null | null | null | import paho.mqtt.client as mqtt
import ev3dev.ev3 as ev3
import ctypes
import numpy as np
import sys
import cv2
from Sensors.mpu6050.mpu6050 import MPU6050
import smbus
from Sensors.odometry import Odometry
import sys, serial
from serial.tools import list_ports
class Sensor(object):
    """Abstract base class for all robot sensors.

    Subclasses must override ``read`` to return their current measurement.
    """

    def __init__(self, *args, **kwargs):
        # The base class keeps no state; subclasses do their own setup.
        pass

    def read(self):
        """Return the sensor's current measurement; must be overridden."""
        # Fix: the original message was cut off ("...implemented by ").
        # ValueError is kept (rather than NotImplementedError) so any existing
        # callers' exception handling keeps working.
        raise ValueError('This function must be implemented by a subclass')
class IR_teensy(Sensor):
    """IR distance sensor read from a Teensy board over a serial port."""

    def __init__(self):
        self.ports = list(list_ports.comports())  # get all the connected serial devices
        # Connect to the first detected device; use debug() if this is wrong.
        self.serial_port = serial.Serial('/dev/' + self.ports[0].name)

    def debug(self):
        '''
        Use if cannot connect to the port.

        Prints the index and name of every detected serial device so the
        correct one can be identified.
        '''
        for i, item in enumerate(self.ports):
            # Fix: the original `print(i + ' : ' + item.name)` concatenated an
            # int with a str and raised TypeError on the first iteration.
            print('{} : {}'.format(i, item.name))

    def read(self):
        '''
        Reads the current value from the teensy.

        Returns:
            Distance in cm
        '''
        measurement = self.serial_port.readline()  # read the raw line
        # Decode to text and split off the trailing carriage return.
        measurement = measurement.decode('utf-8').split('\r')
        return measurement[0]  # only return the actual measurement
class IMU2(Sensor):
    """MPU6050 IMU accessed through the smbus/MPU6050 driver library."""

    def __init__(self, bus='/dev/i2c-1', address=0x68):
        # NOTE(review): the `bus` parameter is ignored — SMBus(1) hard-codes
        # I2C bus 1. Confirm whether `bus` should actually be used here.
        self.bus = smbus.SMBus(1)
        self.address = address
        self.mpu = MPU6050(self.bus,self.address, 'IMU')

    def read(self):
        '''
        Reads the current values from the IMU using the mpu library

        Returns:
            tuple containing: pitch, roll, gyro x,y,z, accel x,y,z these values are scaled and NOT raw
        '''
        return self.mpu.read_all()
class IMU(Sensor):
    """Reads temperature, acceleration and gyro data from one or two MPUs
    through a small shared C library loaded with ctypes."""

    def __init__(self, path_to_shared_lib_mpu='/home/nvidia/jetson-robot/IOInterface/jetson/Sensors/mpu/libmpu.so', bus_filename='/dev/i2c-1', bus_adresses=[0x68, 0x69]):
        # NOTE(review): the mutable default for bus_adresses is never mutated
        # here, so it is safe; kept as-is to preserve the public signature.
        bus_filename = bus_filename.encode('ascii')
        self.libmpu = ctypes.cdll.LoadLibrary(path_to_shared_lib_mpu)
        self.file_descriptors = [self.libmpu.initIMU(bus_filename, adr) for adr in bus_adresses]
        # one 7-slot int16 buffer per attached sensor
        self.data_c_arrays = [(ctypes.c_int16 * 7)() for _ in bus_adresses]
        self.name = 'imu'
        self.data_sources = ["temperature", "acceleration", "gyro"]

    def read(self):
        """Read every attached IMU; dict keys are suffixed with the sensor index."""
        data_dict = {}
        sensors = zip(self.file_descriptors, self.data_c_arrays)
        for idx, (descriptor, c_array) in enumerate(sensors):
            self.libmpu.readIMU(descriptor, c_array)
            raw = np.array(c_array)
            # raw[0] is the on-chip thermometer; the conversion constants are
            # carried over unchanged -- TODO confirm against the MPU datasheet.
            data_dict['temperature_{}'.format(idx)] = raw[0] / 340.0 + 36.53
            data_dict['acceleration_{}'.format(idx)] = np.array(
                [int(raw[1]), int(raw[2]), int(raw[3])])
            data_dict['gyro_{}'.format(idx)] = np.array(
                [int(raw[4]), int(raw[5]), int(raw[6])])
        return data_dict

    def read_sensor_nr(self, sensor_nr):
        """Read a single IMU (selected by index) and return its readings."""
        # TODO: Ask Max, if the magic values for temperature conversion are correct.
        self.libmpu.readIMU(self.file_descriptors[sensor_nr], self.data_c_arrays[sensor_nr])
        raw = np.array(self.data_c_arrays[sensor_nr])
        return {
            'temperature': raw[0] / 340.0 + 36.53,
            'acceleration': np.array([int(raw[1]), int(raw[2]), int(raw[3])]),
            'gyro': np.array([int(raw[4]), int(raw[5]), int(raw[6])]),
        }

    def get_data_sources(self):
        """Names of the data families this sensor reports."""
        return self.data_sources
class OnBoardCamera(Sensor):
    """Jetson on-board CSI camera accessed through a GStreamer pipeline."""

    def __init__(self):
        self.name = 'onBoardCamera'
        self.cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)160, height=(int)120, format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)I420 ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")

    def read(self):
        """Grab one frame, flipped both vertically and horizontally."""
        if not self.cap.isOpened():
            raise ValueError('Camera not opened. Sorry this message is not really helpful, blame openCV :-) ')
        ret_val, frame = self.cap.read()
        frame = cv2.flip(frame, 0)
        frame = cv2.flip(frame, 1)
        return {'onBoardCamera': frame}

    def clean_buf(self):
        # Drop a few buffered frames so the next read() returns a fresh one.
        for _ in range(5):
            self.cap.grab()
#Create camera sensor object
camera = OnBoardCamera()
| 43 | 280 | 0.609989 | import paho.mqtt.client as mqtt
import ev3dev.ev3 as ev3
import ctypes
import numpy as np
import sys
import cv2
from Sensors.mpu6050.mpu6050 import MPU6050
import smbus
from Sensors.odometry import Odometry
import sys, serial
from serial.tools import list_ports
class Sensor(object):
    """Abstract base class for all sensors.

    Subclasses must override read() and return their current measurement.
    """

    def __init__(self, *args, **kwargs):
        # Accept and ignore any arguments so subclasses are free to define
        # their own constructor signatures.
        pass

    def read(self):
        # NotImplementedError (not ValueError) is the conventional signal for
        # an abstract method; the original message was also truncated.
        raise NotImplementedError('read() must be implemented by the subclass')
class IR_teensy(Sensor):
    """IR distance sensor attached through a Teensy board on a serial port."""

    def __init__(self):
        self.ports = list(list_ports.comports())  # all connected serial devices
        # connect to the first detected device
        self.serial_port = serial.Serial('/dev/' + self.ports[0].name)

    def debug(self):
        """Print the index and name of every serial device that was found."""
        for i, item in enumerate(self.ports):
            # 'i' is an int, so it must be formatted before joining with the
            # name; the original "i + ' : '" raised TypeError.
            print('%d : %s' % (i, item.name))

    def read(self):
        """Read the current value from the teensy; returns distance in cm."""
        measurement = self.serial_port.readline()  # raw measurement line
        # decode to text and strip the trailing carriage return
        measurement = measurement.decode('utf-8').split('\r')
        return measurement[0]  # only return the actual measurement
class IMU2(Sensor):
def __init__(self, bus='/dev/i2c-1', address=0x68):
self.bus = smbus.SMBus(1)
self.address = address
self.mpu = MPU6050(self.bus,self.address, 'IMU')
def read(self):
return self.mpu.read_all()
class IMU(Sensor):
def __init__(self, path_to_shared_lib_mpu='/home/nvidia/jetson-robot/IOInterface/jetson/Sensors/mpu/libmpu.so', bus_filename='/dev/i2c-1', bus_adresses=[0x68, 0x69]):
bus_filename = bus_filename.encode('ascii')
self.libmpu = ctypes.cdll.LoadLibrary(path_to_shared_lib_mpu)
self.file_descriptors = [self.libmpu.initIMU(bus_filename, bus_adress) for bus_adress in bus_adresses]
self.data_c_arrays = [(ctypes.c_int16*7)() for _ in range(len(bus_adresses))]
self.name = 'imu'
self.data_sources = ["temperature", "acceleration", "gyro"]
def read(self):
data_dict = {}
for idx, (file_descriptor, data_c_array) in enumerate(zip(self.file_descriptors, self.data_c_arrays)):
self.libmpu.readIMU(file_descriptor, data_c_array)
data_np_array = np.array(data_c_array)
data_dict['temperature_{}'.format(idx)] = data_np_array[0] / 340.0 + 36.53
data_dict['acceleration_{}'.format(idx)] = np.array([int(data_np_array[1]),
int(data_np_array[2]),
int(data_np_array[3]),
])
data_dict['gyro_{}'.format(idx)] = np.array([int(data_np_array[4]),
int(data_np_array[5]),
int(data_np_array[6]),
])
return data_dict
def read_sensor_nr(self, sensor_nr):
data_dict = {}
self.libmpu.readIMU(self.file_descriptors[sensor_nr], self.data_c_arrays[sensor_nr])
data_np_array = np.array(self.data_c_arrays[sensor_nr])
data_dict['temperature'] = data_np_array[0] / 340.0 + 36.53
data_dict['acceleration'] = np.array([int(data_np_array[1]), int(data_np_array[2]), int(data_np_array[3])])
data_dict['gyro'] = np.array([int(data_np_array[4]), int(data_np_array[5]), int(data_np_array[6])])
return data_dict
def get_data_sources(self):
return self.data_sources
class OnBoardCamera(Sensor):
def __init__(self):
self.name = 'onBoardCamera'
self.cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)160, height=(int)120, format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)I420 ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")
def read(self):
if self.cap.isOpened():
ret_val, frame = self.cap.read();
frame = cv2.flip(frame,0)
frame = cv2.flip(frame,1)
else:
raise ValueError('Camera not opened. Sorry this message is not really helpful, blame openCV :-) ')
return {'onBoardCamera':frame}
def clean_buf(self):
for i in range(5):
self.cap.grab()
camera = OnBoardCamera()
| true | true |
f7fe397f1d17d0aeaa6c3cb6c30e4e1a3fe8ff22 | 4,365 | py | Python | AXF/settings.example.py | luomantic/AiXianFeng | 52723fb764889f583c010f9889313b6f50cbc3bd | [
"MIT"
] | null | null | null | AXF/settings.example.py | luomantic/AiXianFeng | 52723fb764889f583c010f9889313b6f50cbc3bd | [
"MIT"
] | 1 | 2019-03-01T12:00:36.000Z | 2019-03-01T12:01:12.000Z | AXF/settings.example.py | luomantic/AiXianFeng | 52723fb764889f583c010f9889313b6f50cbc3bd | [
"MIT"
] | null | null | null | """
Django settings for AXF project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%k3*q-^xjfzb!o&qc8n4f5h9is)%=k9v+h_(-(*j_af$$ca7lq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'App',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AXF.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AXF.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    # Previous sqlite3 configuration, kept for reference:
    # 'default': {
    #     'ENGINE': 'django.db.backends.sqlite3',
    #     'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    # }
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        # NOTE: placeholder values for this example settings file -- fill in
        # a real database name/host/password locally; never commit secrets.
        'NAME': '',
        'HOST': 'x.x.x.x',
        'PORT': '3306',
        'USER': 'root',
        'PASSWORD': 'x',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/upload/icon')
# Session settings
SESSION_ENGINE = 'django.contrib.sessions.backends.db'  # storage engine (default)
SESSION_COOKIE_AGE = 60 * 30  # 30 minutes
SESSION_SAVE_EVERY_REQUEST = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True  # expire the cookie when the browser closes
# Reading the Django source ("django.contrib.sessions.middleware.SessionMiddleware")
# shows that SESSION_COOKIE_AGE and SESSION_EXPIRE_AT_BROWSER_CLOSE
# only take effect while SESSION_SAVE_EVERY_REQUEST is True.
# The commented-out values below are the Django defaults:
# SESSION_COOKIE_NAME = "sessionid"  # cookie key the session id is stored under, i.e. sessionid=<random string> (default)
# SESSION_COOKIE_PATH = "/"  # path the session cookie is valid for (default)
# SESSION_COOKIE_DOMAIN = None  # domain the session cookie is valid for (default)
# SESSION_COOKIE_SECURE = False  # whether the cookie is sent over HTTPS only (default)
# SESSION_COOKIE_HTTPONLY = True  # whether the session cookie is HTTP-only, i.e. hidden from JS (default)
# SESSION_COOKIE_AGE = 1209600  # session cookie lifetime: two weeks (default)
# SESSION_EXPIRE_AT_BROWSER_CLOSE = False  # whether closing the browser expires the session (default)
# SESSION_SAVE_EVERY_REQUEST = False  # whether to save the session on every request; default saves only on change
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '%k3*q-^xjfzb!o&qc8n4f5h9is)%=k9v+h_(-(*j_af$$ca7lq'
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'App',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AXF.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AXF.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '',
'HOST': 'x.x.x.x',
'PORT': '3306',
'USER': 'root',
'PASSWORD': 'x',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/upload/icon')
# session 设置
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # 引擎(默认)
SESSION_COOKIE_AGE = 60 * 30 # 30分钟
SESSION_SAVE_EVERY_REQUEST = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True # 关闭浏览器,则COOKIE失效
# 查看 django 源代码 “django.contrib.sessions.middleware.SessionMiddleware” 得知
# SESSION_COOKIE_AGE 和 SESSION_EXPIRE_AT_BROWSER_CLOSE 这两个参数
# 只有在 SESSION_SAVE_EVERY_REQUEST 为 True 时才有效。
# SESSION_COOKIE_NAME = "sessionid" # Session的cookie保存在浏览器上时的key,即:sessionid=随机字符串(默认)
# SESSION_COOKIE_PATH = "/" # Session的cookie保存的路径(默认)
# SESSION_COOKIE_DOMAIN = None # Session的cookie保存的域名(默认)
# SESSION_COOKIE_SECURE = False # 是否Https传输cookie(默认)
# SESSION_COOKIE_HTTPONLY = True # 是否Session的cookie只支持http传输(默认)
# SESSION_COOKIE_AGE = 1209600 # Session的cookie失效日期(2周)(默认)
# SESSION_EXPIRE_AT_BROWSER_CLOSE = False # 是否关闭浏览器使得Session过期(默认)
# SESSION_SAVE_EVERY_REQUEST = False # 是否每次请求都保存Session,默认修改之后才保存(默认)
| true | true |
f7fe3a23a78b1a94c3414e3554c02a7be8a784bc | 1,924 | py | Python | api/autotest/testcodegen/actions/__init__.py | P-JIANGH/autonium | 99a7de401c378e5e546727e0f920e11ce7e24a15 | [
"Apache-2.0"
] | null | null | null | api/autotest/testcodegen/actions/__init__.py | P-JIANGH/autonium | 99a7de401c378e5e546727e0f920e11ce7e24a15 | [
"Apache-2.0"
] | null | null | null | api/autotest/testcodegen/actions/__init__.py | P-JIANGH/autonium | 99a7de401c378e5e546727e0f920e11ce7e24a15 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
__author__ = 'JIANGH'
__all__ = [
'judge', 'event', 'find', 'excel', 'database',
'ModeType', 'Args',
'get_action', 'get_actions_def'
]
class ModeType():
    """Selector-mode enumeration: supported element-locator strategies."""
    ID = 'id'                      # locate by element id
    CLASS = 'class'                # locate by class name
    CSS_SELECTOR = 'css'           # locate by CSS selector
    XPATH = 'xpath'                # locate by XPath expression
    NAME = 'name'                  # locate by the element's name attribute
    INNER_TEXT = 'text'            # locate by exact inner text
    PARTIAL_TEXT = 'partial_text'  # locate by partial inner text
class Args(object):
    """Decorator that records the argument specification of an action.

    The spec dict is attached to the wrapped function as ``.args`` so callers
    (e.g. get_actions_def) can inspect which parameters an action expects
    before executing it.

    Example::

        @Args({
            'arg': '{required type of this arg}',
            'mode': None,       # None for default (or select type)
            'index': 'number'   # for creating a validator
        })
        def some_action(arg, mode, index): ...
    """

    def __init__(self, args):
        self.__args = args

    def __call__(self, func):
        return self.decorator(func)

    def decorator(self, func):
        """Wrap ``func`` unchanged and attach the recorded spec as ``.args``."""
        import functools

        @functools.wraps(func)
        def wrapped(*call_args, **call_kwargs):
            return func(*call_args, **call_kwargs)

        wrapped.args = self.__args
        return wrapped
def get_action(action_name):
    """Resolve an action callable by name.

    Searches the event, find, judge, excel and database modules in order and
    returns the first matching attribute.

    Raises:
        Exception: if no module provides the requested action.
    """
    from . import event, find, judge, excel, database
    for module in (event, find, judge, excel, database):
        action = getattr(module, action_name, None)
        if action:
            return action
    raise Exception('Could not find the action of %s' % action_name)
def get_actions_def():
    """Return the signature definitions of all actions as a dict.

    Keys are action names; values are the arg-spec dicts recorded by the
    ``Args`` decorator (``None`` when a function was not decorated).
    """
    import types
    from . import event, find, judge, excel, database

    def is_action(module, attr_name):
        '''True when attr_name names a public function of the module.'''
        if not attr_name:
            return False
        return attr_name[0] != '_' and isinstance(getattr(module, attr_name, None), types.FunctionType)

    all_actions = {}
    for module in (event, find, judge, excel, database):
        for fn in dir(module):
            if is_action(module, fn):
                all_actions[fn] = getattr(getattr(module, fn), 'args', None)
    return all_actions
| 26 | 132 | 0.673597 |
__author__ = 'JIANGH'
__all__ = [
'judge', 'event', 'find', 'excel', 'database',
'ModeType', 'Args',
'get_action', 'get_actions_def'
]
class ModeType():
ID = 'id'
CLASS = 'class'
CSS_SELECTOR = 'css'
XPATH = 'xpath'
NAME = 'name'
INNER_TEXT = 'text'
PARTIAL_TEXT = 'partial_text'
class Args(object):
def __init__(self, args):
self.__args = args
def __call__(self, func):
return self.decorator(func)
def decorator(self, func):
import functools
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.args = self.__args
return wrapper
def get_action(action_name):
from . import event, find, judge, excel, database
package_group = [event, find, judge, excel, database]
for action_package in package_group:
if getattr(action_package, action_name, None):
return getattr(action_package, action_name, None)
raise Exception('Could not find the action of %s' % action_name)
def get_actions_def():
def is_action(module, action_name):
if len(action_name) < 1: return False
return action_name[0] != '_' and isinstance(getattr(module, action_name, None), types.FunctionType)
import types
from . import event, find, judge, excel, database
package_group = [event, find, judge, excel, database]
all_actions = {}
for module in package_group:
all_actions = {**all_actions, **{fn: getattr(getattr(module, fn), 'args', None) for fn in dir(module) if is_action(module, fn)}}
return all_actions
| true | true |
f7fe3ad7af2bbe7af74d29097983bfe987f4499b | 6,505 | py | Python | twitch/iterators.py | Fozar/twitch-tools | 9b9dac699082101d3e8c65cbbed8256d677d3d93 | [
"MIT"
] | null | null | null | twitch/iterators.py | Fozar/twitch-tools | 9b9dac699082101d3e8c65cbbed8256d677d3d93 | [
"MIT"
] | null | null | null | twitch/iterators.py | Fozar/twitch-tools | 9b9dac699082101d3e8c65cbbed8256d677d3d93 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Fozar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
from abc import abstractmethod
from collections import AsyncIterator
from itertools import zip_longest
from typing import Any, Optional, List
from .errors import NoMoreItems
from .game import Game
from .stream import Stream
from .user import User
from .utils import chunks
__all__ = ("GameIterator", "StreamIterator", "UserIterator")
class _AsyncIterator(AsyncIterator):
async def __anext__(self) -> Any:
try:
msg = await self.next()
except NoMoreItems:
raise StopAsyncIteration()
else:
return msg
@abstractmethod
async def next(self):
raise NotImplemented
class GameIterator(_AsyncIterator):
    """Yields Game objects, fetching them in batches of up to 100 ids/names."""

    def __init__(
        self, client, ids: Optional[List[str]] = None, names: Optional[List[str]] = None,
    ):
        if ids is None and names is None:
            raise TypeError("Missing one of positional arguments: 'ids', 'names'")
        self.client = client
        # De-duplicate the inputs, then split them into API-sized chunks.
        self.ids = chunks(list(set(ids)) if ids else [], 100)
        self.names = chunks(list(set(names)) if names else [], 100)
        self.get_games = self.client.http.get_games
        self.games = asyncio.Queue()

    async def next(self) -> Game:
        """Return the next Game, refilling the queue when it runs dry."""
        if self.games.empty():
            await self.fill_games()
        try:
            return self.games.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    async def fill_games(self):
        """Fetch the next batch of games and enqueue them."""
        batch_ids = next(self.ids, None)
        batch_names = next(self.names, None)
        if batch_ids is None and batch_names is None:
            return
        response = await self.get_games(batch_ids, batch_names)
        for payload in response["data"]:
            await self.games.put(Game(self.client, payload))
class StreamIterator(_AsyncIterator):
    """Yields live Stream objects, paging through the streams endpoint."""

    def __init__(self, client, limit: int = 100):
        self.client = client
        self.limit = limit    # total streams still wanted; None means unlimited
        self._cursor = None   # pagination cursor returned by the API
        self._filter = {}     # query parameters accumulated via filter()
        self.get_streams = self.client.http.get_streams
        self.streams = asyncio.Queue()

    async def next(self) -> Stream:
        """Return the next Stream, fetching a new page when the queue is empty."""
        if self.streams.empty():
            await self.fill_streams()
        try:
            return self.streams.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    def _get_retrieve(self):
        """Clamp the next page size to 100 and report whether more is wanted."""
        limit = self.limit
        if limit is None or limit > 100:
            retrieve = 100
        else:
            retrieve = limit
        self.retrieve = retrieve
        return retrieve > 0

    def filter(
        self,
        user_ids: Optional[List[str]] = None,
        user_logins: Optional[List[str]] = None,
        game_ids: Optional[List[str]] = None,
        languages: Optional[List[str]] = None,
    ):
        """Restrict the streams returned; chainable (returns self)."""
        if user_ids and len(user_ids) > 100:
            raise OverflowError("Too many user IDs. Maximum: 100")
        if user_logins and len(user_logins) > 100:
            raise OverflowError("Too many user logins. Maximum: 100")
        if game_ids and len(game_ids) > 10:
            raise OverflowError("Too many game IDs. Maximum: 10")
        if languages and len(languages) > 100:
            raise OverflowError("Too many languages. Maximum: 100")
        # Build the filter explicitly. The previous implementation scraped
        # locals(), which would silently absorb any future list-typed local
        # variable into the query parameters.
        candidates = {
            'user_ids': user_ids,
            'user_logins': user_logins,
            'game_ids': game_ids,
            'languages': languages,
        }
        self._filter.update(
            {name: value for name, value in candidates.items() if isinstance(value, list)})
        return self

    async def fill_streams(self):
        """Fetch the next page of streams and enqueue them."""
        if not self._get_retrieve():
            return
        resp = await self.get_streams(first=self.retrieve, after=self._cursor, **self._filter)
        data = resp["data"]
        # Treat a short page as the final page: stop fetching afterwards.
        if len(data) < 100:
            self.limit = 0
        elif self.limit is not None:
            self.limit -= len(data)
        if resp["pagination"].get("cursor"):
            self._cursor = resp["pagination"]["cursor"]
        for element in data:
            await self.streams.put(Stream(self.client, element))
class UserIterator(_AsyncIterator):
    """Asynchronously yields User objects for the given ids and/or logins."""

    def __init__(
        self, client, ids: Optional[List[str]] = None, logins: Optional[List[str]] = None,
    ):
        self.client = client
        # De-duplicated ids/logins, pre-split into API-sized chunks of 100.
        self.ids = chunks(list(set(ids)), 100) if ids else None
        self.logins = chunks(list(set(logins)), 100) if logins else None
        self.get_users = self.client.http.get_users
        self.users = asyncio.Queue()

    async def next(self) -> User:
        # Refill the queue lazily; NoMoreItems ends the async iteration.
        if self.users.empty():
            await self.fill_users()
        try:
            return self.users.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    async def fill_users(self):
        # NOTE(review): when neither ids nor logins were supplied this calls
        # the endpoint with (None, None) -- presumably "the authenticated
        # user"; confirm against the HTTP layer.
        if self.ids is None and self.logins is None:
            return await self._get_elements(self.ids, self.logins)
        ids = next(self.ids, None) if self.ids else None
        logins = next(self.logins, None) if self.logins else None
        if ids is None and logins is None:
            return
        # A single request may carry at most 100 ids+logins combined, so when
        # both are present the 100-sized batches are re-split into halves of 50.
        if ids and logins and len(ids) + len(logins) > 100:
            for ids, logins in zip_longest(chunks(ids, 50), chunks(logins, 50)):
                await self._get_elements(ids, logins)
            return
        await self._get_elements(ids, logins)

    async def _get_elements(self, ids, logins):
        # Fetch one batch and enqueue each returned user.
        resp = await self.get_users(ids, logins)
        data = resp["data"]
        if not data:
            return
        for element in data:
            await self.users.put(User(self.client, element))
| 33.358974 | 98 | 0.625365 |
import asyncio
from abc import abstractmethod
from collections import AsyncIterator
from itertools import zip_longest
from typing import Any, Optional, List
from .errors import NoMoreItems
from .game import Game
from .stream import Stream
from .user import User
from .utils import chunks
__all__ = ("GameIterator", "StreamIterator", "UserIterator")
class _AsyncIterator(AsyncIterator):
async def __anext__(self) -> Any:
try:
msg = await self.next()
except NoMoreItems:
raise StopAsyncIteration()
else:
return msg
@abstractmethod
async def next(self):
raise NotImplemented
class GameIterator(_AsyncIterator):
def __init__(
self, client, ids: Optional[List[str]] = None, names: Optional[List[str]] = None,
):
if ids is None and names is None:
raise TypeError("Missing one of positional arguments: 'ids', 'names'")
self.client = client
self.ids = chunks(list(set(ids)) if ids else [], 100)
self.names = chunks(list(set(names)) if names else [], 100)
self.get_games = self.client.http.get_games
self.games = asyncio.Queue()
async def next(self) -> Game:
if self.games.empty():
await self.fill_games()
try:
return self.games.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
async def fill_games(self):
ids = next(self.ids, None)
names = next(self.names, None)
if ids is None and names is None:
return
resp = await self.get_games(ids, names)
data = resp["data"]
if not data:
return
for element in data:
await self.games.put(Game(self.client, element))
class StreamIterator(_AsyncIterator):
def __init__(self, client, limit: int = 100):
self.client = client
self.limit = limit
self._cursor = None
self._filter = {}
self.get_streams = self.client.http.get_streams
self.streams = asyncio.Queue()
async def next(self) -> Stream:
if self.streams.empty():
await self.fill_streams()
try:
return self.streams.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
def _get_retrieve(self):
limit = self.limit
if limit is None or limit > 100:
retrieve = 100
else:
retrieve = limit
self.retrieve = retrieve
return retrieve > 0
def filter(
self,
user_ids: Optional[List[str]] = None,
user_logins: Optional[List[str]] = None,
game_ids: Optional[List[str]] = None,
languages: Optional[List[str]] = None,
):
if user_ids and len(user_ids) > 100:
raise OverflowError("Too many user IDs. Maximum: 100")
if user_logins and len(user_logins) > 100:
raise OverflowError("Too many user logins. Maximum: 100")
if game_ids and len(game_ids) > 10:
raise OverflowError("Too many game IDs. Maximum: 10")
if languages and len(languages) > 100:
raise OverflowError("Too many languages. Maximum: 100")
self._filter.update({k: v for k, v in locals().items() if isinstance(v, list)})
return self
async def fill_streams(self):
if self._get_retrieve():
resp = await self.get_streams(first=self.retrieve, after=self._cursor, **self._filter)
data = resp["data"]
if len(data) < 100:
self.limit = 0
elif self.limit is not None:
self.limit -= len(data)
if resp["pagination"].get("cursor"):
self._cursor = resp["pagination"]["cursor"]
for element in data:
await self.streams.put(Stream(self.client, element))
class UserIterator(_AsyncIterator):
def __init__(
self, client, ids: Optional[List[str]] = None, logins: Optional[List[str]] = None,
):
self.client = client
self.ids = chunks(list(set(ids)), 100) if ids else None
self.logins = chunks(list(set(logins)), 100) if logins else None
self.get_users = self.client.http.get_users
self.users = asyncio.Queue()
async def next(self) -> User:
if self.users.empty():
await self.fill_users()
try:
return self.users.get_nowait()
except asyncio.QueueEmpty:
raise NoMoreItems()
async def fill_users(self):
if self.ids is None and self.logins is None:
return await self._get_elements(self.ids, self.logins)
ids = next(self.ids, None) if self.ids else None
logins = next(self.logins, None) if self.logins else None
if ids is None and logins is None:
return
if ids and logins and len(ids) + len(logins) > 100:
for ids, logins in zip_longest(chunks(ids, 50), chunks(logins, 50)):
await self._get_elements(ids, logins)
return
await self._get_elements(ids, logins)
async def _get_elements(self, ids, logins):
resp = await self.get_users(ids, logins)
data = resp["data"]
if not data:
return
for element in data:
await self.users.put(User(self.client, element))
| true | true |
f7fe3d6562253ccf809ddee89d15a6f59dae2f68 | 295 | py | Python | users/admin.py | dhanupandey12/Blog | fcd274b7249c255786b46cf81d6e949a903e9a53 | [
"MIT"
] | null | null | null | users/admin.py | dhanupandey12/Blog | fcd274b7249c255786b46cf81d6e949a903e9a53 | [
"MIT"
] | null | null | null | users/admin.py | dhanupandey12/Blog | fcd274b7249c255786b46cf81d6e949a903e9a53 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Profile,MessageData,Notifications,Following,Followers
# Register your models here.
admin.site.register(Profile)
admin.site.register(MessageData)
admin.site.register(Notifications)
admin.site.register(Following)
admin.site.register(Followers) | 32.777778 | 73 | 0.844068 | from django.contrib import admin
from .models import Profile,MessageData,Notifications,Following,Followers
admin.site.register(Profile)
admin.site.register(MessageData)
admin.site.register(Notifications)
admin.site.register(Following)
admin.site.register(Followers) | true | true |
f7fe3de5dabdebe255210a7b6247809dbd70d10b | 10,560 | py | Python | tensorflow/contrib/autograph/impl/api.py | elielhojman/tensorflow | 163aae337c875efce2518c3cd0fecb61968fe408 | [
"Apache-2.0"
] | 4 | 2019-04-12T00:49:38.000Z | 2020-06-12T07:12:00.000Z | tensorflow/contrib/autograph/impl/api.py | AKIRA-MIYAKE/tensorflow | 89e06304aad35bfb019a8c10f39fc1ead83e0f99 | [
"Apache-2.0"
] | 4 | 2019-08-14T22:32:51.000Z | 2020-03-09T14:59:18.000Z | tensorflow/contrib/autograph/impl/api.py | AKIRA-MIYAKE/tensorflow | 89e06304aad35bfb019a8c10f39fc1ead83e0f99 | [
"Apache-2.0"
] | 1 | 2020-03-31T22:04:40.000Z | 2020-03-31T22:04:40.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import wraps
from enum import Enum
# pylint:disable=g-bad-import-order
import gast
import six
# pylint:enable=g-bad-import-order
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.impl import conversion
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import inspect_utils
from tensorflow.contrib.autograph.utils import builtins
from tensorflow.contrib.autograph.utils import py_func
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# TODO(mdan): Properly document the type hints.
# TODO(mdan): Reduce the type hint information to (module, type).
# (currently we require (module + class name, type))
def convert(recursive=False, verbose=False, arg_types=None):
  """Decorator that compiles a function to graph mode.

  Compilation happens dynamically, every time the decorated function is
  invoked, so the actual parameter values are available at conversion time.

  Args:
    recursive: Whether to recursively convert any functions that the decorated
        function may call.
    verbose: Whether to output the compiled code in the logs.
    arg_types: See to_graph.

  Returns:
    A decorator that compiles the given function to graph mode.

  Raises:
    ValueError: If any of the arguments are illegal.
  """
  arg_types = {} if arg_types is None else arg_types

  def decorator(f):
    """Decorator implementation."""

    @wraps(f)
    def wrapper(*args, **kwargs):
      return converted_call(f, recursive, verbose, arg_types, *args, **kwargs)

    wrapper = tf_decorator.make_decorator(f, wrapper)
    # Sometimes the decorator is just desugared, making it impossible to
    # detect. This attribute makes detection easier.
    wrapper.__pyct_is_compile_decorator = True
    return wrapper

  return decorator
class RunMode(Enum):
GRAPH = 1
PY_FUNC = 2
def do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):
"""Decorator that suppresses compilation of a function.
Args:
run_as: RunMode value. Whether to run the function as-is, or wrap it into
a py_func.
return_dtypes: See autograph.utils.py_func.wrap_py_func. Setting to None or
empty list or tuple will create a dummy return value that can be used
to set control dependencies.
Returns:
A decorator that wraps the original function.
"""
def decorator(f):
"""Decorator implementation."""
@wraps(f)
def graph_wrapper(*args, **kwargs):
return f(*args, **kwargs)
@wraps(f)
def py_func_wrapper(*args, **kwargs):
if kwargs:
raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')
# TODO(mdan): Add support for kwargs.
return py_func.wrap_py_func(
f, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)
if run_as == RunMode.GRAPH:
wrapper = graph_wrapper
elif run_as == RunMode.PY_FUNC:
wrapper = py_func_wrapper
else:
raise ValueError('unknown value for run_as: %s' % run_as)
# Sometimes the decorator is just desugared, making it impossible to detect.
# This attribute makes detection easier.
setattr(wrapper, '__pyct_is_compile_decorator', True)
return wrapper
return decorator
def converted_call(f, recursive, verbose, arg_types, *args, **kwargs):
"""Compiles a function call inline."""
# TODO(mdan): This needs cleanup.
# In particular, we may want to avoid renaming functions altogether.
if conversion.is_whitelisted_for_graph(f):
return f(*args, **kwargs)
unknown_arg_value = object() # Sentinel for arguments of unknown value
if inspect_utils.isbuiltin(f):
return builtins.dynamic_builtin(f, *args, **kwargs)
if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):
# Regular functions
target_entity = f
arg_map_target = f
effective_args = args
f_class = inspect_utils.getmethodclass(f)
if f_class is not None:
partial_types = (f_class,)
else:
partial_types = ()
elif tf_inspect.isclass(f):
# Constructors
target_entity = f
arg_map_target = f.__init__
effective_args = args
partial_types = ()
elif hasattr(f, '__call__') and hasattr(f, '__class__'):
# Callable objects
target_entity = f.__call__
arg_map_target = f.__call__
effective_args = (f,) + args
partial_types = (f.__class__,)
else:
NotImplementedError('unknown callable type "%s"' % type(f))
arg_values = tf_inspect.getcallargs(arg_map_target, *args, **kwargs)
for name, arg in arg_values.items():
if arg is unknown_arg_value:
continue
arg_class = arg.__class__
# If arg_value_hints specifies any name, use that instead.
if name not in arg_types:
arg_types[name] = (arg_class.__name__, arg_class)
# When called from within a decorator, this is the only indication that
# the function is a method - it appears that the decorator is applied
# before the method is bound.
if not partial_types:
if 'self' in arg_values:
if tf_inspect.isclass(arg_values['self'].__class__):
partial_types = (arg_values['self'].__class__,)
elif 'cls' in arg_values:
if tf_inspect.isclass(arg_values['cls']):
partial_types = (arg_values['cls'],)
converted_f = to_graph(
target_entity,
recursive=recursive,
verbose=verbose,
arg_values=arg_values,
arg_types=arg_types,
partial_types=partial_types)
return converted_f(*effective_args, **kwargs)
def to_graph(e,
recursive=True,
verbose=False,
arg_values=None,
arg_types=None,
partial_types=None):
"""Compile a Python entity into equivalent TensorFlow code.
Currently supported entities:
* functions
* classes
Classes are handled by converting all their methods into a new class.
Args:
e: A Python entity.
recursive: Whether to recursively convert any functions that the decorator
function may call.
verbose: Whether to output the compiled code in the logs.
arg_values: A dict containing value hints for symbols like function
parameters.
arg_types: A dict containing type hints for symbols like function
parameters.
partial_types: A set of types (e.g. classes) that will not be converted
entirely. Calls to member functions for these types will be renamed
independently.
Returns:
A function with a signature identical to `o`, but which when executed it
creates TF a graph that has the same functionality as the original entity.
Raises:
ValueError: If the converted function defines or refers to symbol names that
are reserved for AutoGraph.
"""
program_ctx = converter.ProgramContext(
recursive=recursive,
autograph_decorators=(convert, do_not_convert, converted_call),
partial_types=partial_types,
autograph_module=tf_inspect.getmodule(to_graph),
uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
_, name, namespace = conversion.entity_to_graph(e, program_ctx, arg_values,
arg_types)
module = gast.Module([])
for dep in reversed(program_ctx.dependency_cache.values()):
module.body.append(dep)
compiled_node, compiled_src = compiler.ast_to_object(
module, source_prefix=program_ctx.required_imports)
# The compiled code should see everything the entry entity saw.
# TODO(mdan): This might not work well if the call tree spans modules?
for key, val in namespace.items():
# Avoid overwriting entities that have been transformed.
if key not in compiled_node.__dict__:
compiled_node.__dict__[key] = val
compiled_fn = getattr(compiled_node, name)
# Need this so the source_mapping attribute is available for the context
# manager to access for runtime errors.
#
# Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
# symbol to the compiled module.
source_map_attribute_name = 'ag_source_map'
if getattr(compiled_fn, source_map_attribute_name, None) is not None:
raise ValueError('cannot convert %s because is has an attribute '
'"%s", which is reserved for AutoGraph.' %
(compiled_fn, source_map_attribute_name))
setattr(compiled_fn, source_map_attribute_name,
compiled_node.__dict__['ag_source_map__'])
if verbose:
logging.info('Compiled output of %s:\n\n%s\n', e, compiled_src)
return compiled_fn
def to_code(e,
recursive=True,
arg_values=None,
arg_types=None,
partial_types=None,
indentation=' '):
"""Return the equivalent of an entity in TensorFlow code.
See `to_graph` for more details.
Args:
e: A Python entity.
recursive: See to_graph.
arg_values: See to_graph.
arg_types: See to_graph.
partial_types: See to_graph.
indentation: String, when to use for each level of indentation.
Returns:
String.
"""
program_ctx = converter.ProgramContext(
recursive=recursive,
autograph_decorators=(convert, do_not_convert, converted_call),
partial_types=partial_types,
autograph_module=tf_inspect.getmodule(to_graph),
uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)
code = '\n'.join(
compiler.ast_to_source(dep, indentation)[0]
for dep in reversed(tuple(six.itervalues(program_ctx.dependency_cache))))
return program_ctx.required_imports + '\n\n' + code
| 33.52381 | 80 | 0.711269 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import wraps
from enum import Enum
import gast
import six
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.impl import conversion
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import inspect_utils
from tensorflow.contrib.autograph.utils import builtins
from tensorflow.contrib.autograph.utils import py_func
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def convert(recursive=False, verbose=False, arg_types=None):
if arg_types is None:
arg_types = {}
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
return converted_call(f, recursive, verbose, arg_types, *args, **kwargs)
wrapper = tf_decorator.make_decorator(f, wrapper)
setattr(wrapper, '__pyct_is_compile_decorator', True)
return wrapper
return decorator
class RunMode(Enum):
GRAPH = 1
PY_FUNC = 2
def do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):
def decorator(f):
@wraps(f)
def graph_wrapper(*args, **kwargs):
return f(*args, **kwargs)
@wraps(f)
def py_func_wrapper(*args, **kwargs):
if kwargs:
raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')
return py_func.wrap_py_func(
f, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)
if run_as == RunMode.GRAPH:
wrapper = graph_wrapper
elif run_as == RunMode.PY_FUNC:
wrapper = py_func_wrapper
else:
raise ValueError('unknown value for run_as: %s' % run_as)
setattr(wrapper, '__pyct_is_compile_decorator', True)
return wrapper
return decorator
def converted_call(f, recursive, verbose, arg_types, *args, **kwargs):
if conversion.is_whitelisted_for_graph(f):
return f(*args, **kwargs)
unknown_arg_value = object()
if inspect_utils.isbuiltin(f):
return builtins.dynamic_builtin(f, *args, **kwargs)
if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):
target_entity = f
arg_map_target = f
effective_args = args
f_class = inspect_utils.getmethodclass(f)
if f_class is not None:
partial_types = (f_class,)
else:
partial_types = ()
elif tf_inspect.isclass(f):
target_entity = f
arg_map_target = f.__init__
effective_args = args
partial_types = ()
elif hasattr(f, '__call__') and hasattr(f, '__class__'):
target_entity = f.__call__
arg_map_target = f.__call__
effective_args = (f,) + args
partial_types = (f.__class__,)
else:
NotImplementedError('unknown callable type "%s"' % type(f))
arg_values = tf_inspect.getcallargs(arg_map_target, *args, **kwargs)
for name, arg in arg_values.items():
if arg is unknown_arg_value:
continue
arg_class = arg.__class__
if name not in arg_types:
arg_types[name] = (arg_class.__name__, arg_class)
if not partial_types:
if 'self' in arg_values:
if tf_inspect.isclass(arg_values['self'].__class__):
partial_types = (arg_values['self'].__class__,)
elif 'cls' in arg_values:
if tf_inspect.isclass(arg_values['cls']):
partial_types = (arg_values['cls'],)
converted_f = to_graph(
target_entity,
recursive=recursive,
verbose=verbose,
arg_values=arg_values,
arg_types=arg_types,
partial_types=partial_types)
return converted_f(*effective_args, **kwargs)
def to_graph(e,
recursive=True,
verbose=False,
arg_values=None,
arg_types=None,
partial_types=None):
program_ctx = converter.ProgramContext(
recursive=recursive,
autograph_decorators=(convert, do_not_convert, converted_call),
partial_types=partial_types,
autograph_module=tf_inspect.getmodule(to_graph),
uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
_, name, namespace = conversion.entity_to_graph(e, program_ctx, arg_values,
arg_types)
module = gast.Module([])
for dep in reversed(program_ctx.dependency_cache.values()):
module.body.append(dep)
compiled_node, compiled_src = compiler.ast_to_object(
module, source_prefix=program_ctx.required_imports)
for key, val in namespace.items():
if key not in compiled_node.__dict__:
compiled_node.__dict__[key] = val
compiled_fn = getattr(compiled_node, name)
source_map_attribute_name = 'ag_source_map'
if getattr(compiled_fn, source_map_attribute_name, None) is not None:
raise ValueError('cannot convert %s because is has an attribute '
'"%s", which is reserved for AutoGraph.' %
(compiled_fn, source_map_attribute_name))
setattr(compiled_fn, source_map_attribute_name,
compiled_node.__dict__['ag_source_map__'])
if verbose:
logging.info('Compiled output of %s:\n\n%s\n', e, compiled_src)
return compiled_fn
def to_code(e,
recursive=True,
arg_values=None,
arg_types=None,
partial_types=None,
indentation=' '):
program_ctx = converter.ProgramContext(
recursive=recursive,
autograph_decorators=(convert, do_not_convert, converted_call),
partial_types=partial_types,
autograph_module=tf_inspect.getmodule(to_graph),
uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)
code = '\n'.join(
compiler.ast_to_source(dep, indentation)[0]
for dep in reversed(tuple(six.itervalues(program_ctx.dependency_cache))))
return program_ctx.required_imports + '\n\n' + code
| true | true |
f7fe3df397864ad714beb62212fe0d2ad1a5aaa3 | 5,937 | py | Python | src/ebay_rest/api/sell_analytics/models/metadata_header.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | [
"MIT"
] | 3 | 2021-12-12T04:28:03.000Z | 2022-03-10T03:29:18.000Z | src/ebay_rest/api/sell_analytics/models/metadata_header.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 33 | 2021-06-16T20:44:36.000Z | 2022-03-30T14:55:06.000Z | src/ebay_rest/api/sell_analytics/models/metadata_header.py | jdavv/ebay_rest | 20fc88c6aefdae9ab90f9c1330e79abddcd750cd | [
"MIT"
] | 7 | 2021-06-03T09:30:23.000Z | 2022-03-08T19:51:33.000Z | # coding: utf-8
"""
Seller Service Metrics API
The <i>Analytics API</i> provides data and information about a seller and their eBay business. <br><br>The resources and methods in this API let sellers review information on their listing performance, metrics on their customer service performance, and details on their eBay seller performance rating. <br><br>The three resources in the Analytics API provide the following data and information: <ul><li><b>Customer Service Metric</b> – Returns benchmark data and a metric rating pertaining to a seller's customer service performance as compared to other seller's in the same peer group.</li> <li><b>Traffic Report</b> – Returns data and information that shows how buyers are engaging with a seller's listings.</li> <li><b>Seller Standards Profile</b> – Returns information pertaining to a seller's profile rating.</li></ul> Sellers can use the data and information returned by the various Analytics API methods to determine where they can make improvements to increase sales and how they might improve their seller status as viewed by eBay buyers. <br><br>For details on using this API, see <a href=\"/api-docs/sell/static/performance/analyzing-performance.html\" title=\"Selling Integration Guide\">Analyzing seller performance</a>. # noqa: E501
OpenAPI spec version: 1.3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MetadataHeader(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'key': 'str',
'metadata_keys': 'list[Definition]'
}
attribute_map = {
'key': 'key',
'metadata_keys': 'metadataKeys'
}
def __init__(self, key=None, metadata_keys=None): # noqa: E501
"""MetadataHeader - a model defined in Swagger""" # noqa: E501
self._key = None
self._metadata_keys = None
self.discriminator = None
if key is not None:
self.key = key
if metadata_keys is not None:
self.metadata_keys = metadata_keys
@property
def key(self):
"""Gets the key of this MetadataHeader. # noqa: E501
The key value used for the report. <br><br>For example: <code>\"key\": \"LISTING_ID\"</code> # noqa: E501
:return: The key of this MetadataHeader. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this MetadataHeader.
The key value used for the report. <br><br>For example: <code>\"key\": \"LISTING_ID\"</code> # noqa: E501
:param key: The key of this MetadataHeader. # noqa: E501
:type: str
"""
self._key = key
@property
def metadata_keys(self):
"""Gets the metadata_keys of this MetadataHeader. # noqa: E501
The list of dimension key values used for the report header. Each list element contains the key name, its data type, and its localized name. <br><br>For example: <p><code>\"metadataKeys\": [<br> \"key\": \"LISTING_TITLE\",<br> \"localizedName\": \"Listing title\",<br> \"dataType\": \"STRING\"</code></p> # noqa: E501
:return: The metadata_keys of this MetadataHeader. # noqa: E501
:rtype: list[Definition]
"""
return self._metadata_keys
@metadata_keys.setter
def metadata_keys(self, metadata_keys):
"""Sets the metadata_keys of this MetadataHeader.
The list of dimension key values used for the report header. Each list element contains the key name, its data type, and its localized name. <br><br>For example: <p><code>\"metadataKeys\": [<br> \"key\": \"LISTING_TITLE\",<br> \"localizedName\": \"Listing title\",<br> \"dataType\": \"STRING\"</code></p> # noqa: E501
:param metadata_keys: The metadata_keys of this MetadataHeader. # noqa: E501
:type: list[Definition]
"""
self._metadata_keys = metadata_keys
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MetadataHeader, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MetadataHeader):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 42.106383 | 1,272 | 0.623547 |
import pprint
import re
import six
class MetadataHeader(object):
swagger_types = {
'key': 'str',
'metadata_keys': 'list[Definition]'
}
attribute_map = {
'key': 'key',
'metadata_keys': 'metadataKeys'
}
def __init__(self, key=None, metadata_keys=None):
self._key = None
self._metadata_keys = None
self.discriminator = None
if key is not None:
self.key = key
if metadata_keys is not None:
self.metadata_keys = metadata_keys
@property
def key(self):
return self._key
@key.setter
def key(self, key):
self._key = key
@property
def metadata_keys(self):
return self._metadata_keys
@metadata_keys.setter
def metadata_keys(self, metadata_keys):
self._metadata_keys = metadata_keys
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MetadataHeader, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, MetadataHeader):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7fe3e4ad42116f197de8e648d46269e05cf320b | 19,429 | py | Python | tools/Polygraphy/polygraphy/backend/trt/util.py | wjc852456/TensorRT | c7c15e7547022298ec3e586e8bc8a3d0cf7166fe | [
"Apache-2.0"
] | null | null | null | tools/Polygraphy/polygraphy/backend/trt/util.py | wjc852456/TensorRT | c7c15e7547022298ec3e586e8bc8a3d0cf7166fe | [
"Apache-2.0"
] | null | null | null | tools/Polygraphy/polygraphy/backend/trt/util.py | wjc852456/TensorRT | c7c15e7547022298ec3e586e8bc8a3d0cf7166fe | [
"Apache-2.0"
] | 1 | 2022-03-29T12:39:29.000Z | 2022-03-29T12:39:29.000Z | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
from polygraphy import config, mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER, LogMode
trt = mod.lazy_import("tensorrt")
np = mod.lazy_import("numpy")
TRT_LOGGER = None
@mod.export()
def get_trt_logger():
"""
Get the global TensorRT logger created by Polygraphy.
Returns:
trt.Logger: The TensorRT logger.
"""
global TRT_LOGGER
if TRT_LOGGER is None:
TRT_LOGGER = trt.Logger()
return TRT_LOGGER
def fail_unavailable(what):
G_LOGGER.backtrace()
G_LOGGER.critical("{:} is not available on TensorRT version {:}.".format(what, trt.__version__))
def check_onnx_parser_errors(parser, success):
if parser.num_errors > 0:
for index in range(parser.num_errors):
G_LOGGER.error(parser.get_error(index))
G_LOGGER.critical("Could not parse ONNX correctly")
if not success:
G_LOGGER.critical("Failed to parse ONNX model. Does the model file exist and contain a valid ONNX model?")
def get_layer_class_mapping():
layer_class_mapping = {}
def try_add(layer_type, layer_cls):
try:
layer_type = getattr(trt.LayerType, layer_type)
layer_cls = getattr(trt, layer_cls)
except AttributeError:
if config.INTERNAL_CORRECTNESS_CHECKS:
G_LOGGER.warning(
"Could not find one or more of layer type: {:} or layer class: {:}".format(layer_type, layer_cls)
)
else:
layer_class_mapping[layer_type] = layer_cls
try_add("CONVOLUTION", "IConvolutionLayer")
try_add("FULLY_CONNECTED", "IFullyConnectedLayer")
try_add("ACTIVATION", "IActivationLayer")
try_add("POOLING", "IPoolingLayer")
try_add("LRN", "ILRNLayer")
try_add("SCALE", "IScaleLayer")
try_add("SOFTMAX", "ISoftMaxLayer")
try_add("DECONVOLUTION", "IDeconvolutionLayer")
try_add("CONCATENATION", "IConcatenationLayer")
try_add("ELEMENTWISE", "IElementWiseLayer")
try_add("PLUGIN", "IPluginLayer")
try_add("UNARY", "IUnaryLayer")
try_add("PADDING", "IPaddingLayer")
try_add("SHUFFLE", "IShuffleLayer")
try_add("REDUCE", "IReduceLayer")
try_add("TOPK", "ITopKLayer")
try_add("GATHER", "IGatherLayer")
try_add("MATRIX_MULTIPLY", "IMatrixMultiplyLayer")
try_add("RAGGED_SOFTMAX", "IRaggedSoftMaxLayer")
try_add("CONSTANT", "IConstantLayer")
try_add("RNN", "IRNNLayer")
try_add("RNN_V2", "IRNNv2Layer")
try_add("IDENTITY", "IIdentityLayer")
try_add("PLUGIN_V2", "IPluginV2Layer")
try_add("SLICE", "ISliceLayer")
try_add("SHAPE", "IShapeLayer")
try_add("PARAMETRIC_RELU", "IParametricReLULayer")
try_add("RESIZE", "IResizeLayer")
try_add("TRIP_LIMIT", "ITripLimitLayer")
try_add("RECURRENCE", "IRecurrenceLayer")
try_add("ITERATOR", "IIteratorLayer")
try_add("LOOP_OUTPUT", "ILoopOutputLayer")
try_add("SELECT", "ISelectLayer")
try_add("FILL", "IFillLayer")
try_add("QUANTIZE", "IQuantizeLayer")
try_add("DEQUANTIZE", "IDequantizeLayer")
try_add("CONDITION", "IConditionLayer")
try_add("CONDITIONAL_INPUT", "IIfConditionalInputLayer")
try_add("CONDITIONAL_OUTPUT", "IIfConditionalOutputLayer")
return layer_class_mapping
def np_dtype_from_trt(trt_dtype):
_ = mod.has_mod(np) # Force numpy to be imported
return np.dtype(trt.nptype(trt_dtype))
def get_network_input_metadata(network):
inputs = TensorMetadata()
for i in range(network.num_inputs):
tensor = network.get_input(i)
inputs.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)
return inputs
def get_network_output_metadata(network):
outputs = TensorMetadata()
for i in range(network.num_outputs):
tensor = network.get_output(i)
outputs.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)
return outputs
def get_layer_input_metadata(layer):
meta = TensorMetadata()
for i in range(layer.num_inputs):
inp = layer.get_input(i)
if inp:
meta.add(inp.name, np_dtype_from_trt(inp.dtype), inp.shape)
return meta
def get_layer_output_metadata(layer):
meta = TensorMetadata()
for i in range(layer.num_outputs):
outp = layer.get_output(i)
if outp:
meta.add(outp.name, np_dtype_from_trt(outp.dtype), outp.shape)
return meta
def str_from_layer(layer, index):
input_info = get_layer_input_metadata(layer)
output_info = get_layer_output_metadata(layer)
return util.str_from_layer("Layer", index, layer.name, layer.type, input_info, output_info)
def get_layer_attribute_names(layer):
def is_special_attribute(attr):
return attr.startswith("__") and attr.endswith("__")
def is_valid_attribute(attr, layer):
if (
type(layer) == trt.IPoolingLayer
or type(layer) == trt.IConvolutionLayer
or type(layer) == trt.IDeconvolutionLayer
):
if len(layer.get_input(0).shape) > 4:
# 3D pooling uses padding_nd
return attr not in ["padding", "stride", "window_size"]
if type(layer) == trt.IResizeLayer:
if layer.num_inputs > 1:
return attr not in ["scales"]
if type(layer) == trt.ISliceLayer:
if layer.num_inputs > 1:
return attr not in ["shape", "start", "stride"]
return True
return [
attr
for attr in dir(layer)
if not is_special_attribute(attr) and not hasattr(trt.ILayer, attr) and is_valid_attribute(attr, layer)
]
def str_from_network(network, mode="full"):
"""
Converts a TensorRT network to a human-readable representation
Args:
network (trt.INetworkDefinition): The network.
mode (str): Controls what is displayed for each layer. Choices: ["none", "basic", "attrs", "full"]
Returns:
str
"""
LAYER_TYPE_CLASS_MAPPING = get_layer_class_mapping()
network_str = "Name: {:} | {:} Batch Network{:}\n".format(
network.name,
"Implicit"
if hasattr(network, "has_implicit_batch_dimension") and network.has_implicit_batch_dimension
else "Explicit",
" with Explicit Precision "
if hasattr(network, "has_explicit_precision") and network.has_explicit_precision
else "",
)
network_str += "\n"
input_metadata = get_network_input_metadata(network)
network_str += "---- {:} Network Input(s) ----\n{:}\n\n".format(len(input_metadata), input_metadata)
output_metadata = get_network_output_metadata(network)
network_str += "---- {:} Network Output(s) ----\n{:}\n\n".format(len(output_metadata), output_metadata)
network_str += "---- {:} Layer(s) ----\n".format(network.num_layers)
if mode != "none":
for index, layer in enumerate(network):
if layer.type in LAYER_TYPE_CLASS_MAPPING:
layer.__class__ = LAYER_TYPE_CLASS_MAPPING[layer.type]
network_str += str_from_layer(layer, index)
if mode in ["attrs", "full"]:
# Exclude special attributes, as well as any attributes of the base layer class (those can be displayed above).
attrs = get_layer_attribute_names(layer)
if attrs:
network_str += util.indent_block("---- Attributes ----") + "\n"
for attr in attrs:
with G_LOGGER.verbosity():
val = getattr(layer, attr)
if mode == "full" or not isinstance(val, np.ndarray):
attr_str = ""
if layer.name:
attr_str += "{:}.".format(layer.name)
network_str += util.indent_block("{:}{:} = {:}".format(attr_str, attr, val)) + "\n"
network_str += "\n"
return util.indent_block(network_str, level=0)
def _get_network_outputs(network):
return [network.get_output(index).name for index in range(network.num_outputs)]
def check_outputs_not_found(not_found, available_outputs):
if not_found:
available_outputs = util.unique_list(available_outputs)
G_LOGGER.critical(
"The following outputs were not found: {:}.\n"
"Note: Available tensors:\n\t{:}".format(not_found, "\n\t".join(available_outputs))
)
def mark_outputs(network, outputs):
"""
Mark the specified outputs as network outputs.
Args:
network (trt.INetworkDefinition): The network in which to mark outputs.
outputs (Sequence[str]): The names of tensors to mark as outputs.
"""
outputs = set(outputs)
all_outputs = []
for layer in network:
for index in range(layer.num_outputs):
tensor = layer.get_output(index)
all_outputs.append(tensor.name)
# Clear all old outputs
if tensor.is_network_output:
network.unmark_output(tensor)
if tensor.name in outputs:
if not tensor.is_network_output:
G_LOGGER.ultra_verbose("Marking {:} as an output".format(tensor.name))
network.mark_output(tensor)
marked_outputs = set(_get_network_outputs(network))
not_found = outputs - marked_outputs
check_outputs_not_found(not_found, all_outputs)
def mark_layerwise(network):
# Layers within loops cannot be marked as network outputs.
LOOP_START_NAMES = ["TRIP_LIMIT", "ITERATOR", "RECURRENCE"]
LOOP_END_NAMES = ["LOOP_OUTPUT"]
LOOP_START_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_START_NAMES if hasattr(trt.LayerType, attr)]
LOOP_END_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_END_NAMES if hasattr(trt.LayerType, attr)]
EXCLUDE_LAYERS = [trt.LayerType.SHAPE, trt.LayerType.CONSTANT]
outputs = []
in_loop = False
for layer in network:
if layer.type in LOOP_START_LAYERS:
G_LOGGER.warning(
"Loop detected. Please ensure the network is topologically sorted so that layers within "
"the loop body are not marked as network outputs in layerwise mode",
mode=LogMode.ONCE,
)
in_loop = True
elif layer.type in LOOP_END_LAYERS:
in_loop = False
should_mark_layer = not in_loop and layer.type not in EXCLUDE_LAYERS
if should_mark_layer:
for index in range(layer.num_outputs):
tensor = layer.get_output(index)
outputs.append(tensor.name)
G_LOGGER.verbose("Marking {:} tensors as outputs".format(len(outputs)))
mark_outputs(network, outputs)
def unmark_outputs(network, outputs):
outputs = set(outputs)
unmarked_outputs = set()
for layer in network:
for index in range(layer.num_outputs):
tensor = layer.get_output(index)
if tensor.is_network_output and tensor.name in outputs:
network.unmark_output(tensor)
unmarked_outputs.add(tensor.name)
not_found = outputs - unmarked_outputs
check_outputs_not_found(not_found, _get_network_outputs(network))
def str_from_config(config):
config_str = "{:20} | {:} bytes ({:.2f} MiB)\n".format(
"Workspace", config.max_workspace_size, config.max_workspace_size / (1024.0 ** 2)
)
config_str += "{:20} | ".format("Precision")
with contextlib.suppress(AttributeError):
config_str += "TF32: {:}, ".format(config.get_flag(trt.BuilderFlag.TF32))
config_str += "FP16: {:}, INT8: {:}, Strict Types: {:}\n".format(
config.get_flag(trt.BuilderFlag.FP16),
config.get_flag(trt.BuilderFlag.INT8),
config.get_flag(trt.BuilderFlag.STRICT_TYPES),
)
with contextlib.suppress(AttributeError):
source_vals = [
val.name for val in trt.TacticSource.__members__.values() if (1 << int(val)) & config.get_tactic_sources()
]
config_str += "{:20} | {:}\n".format("Tactic Sources", source_vals)
with contextlib.suppress(AttributeError):
config_str += "{:20} | {:}\n".format("Safety Restricted", config.get_flag(trt.BuilderFlag.SAFETY_SCOPE))
if config.int8_calibrator:
config_str += "{:20} | {:}\n".format("Calibrator", config.int8_calibrator)
config_str += "{:20} | {:} profile(s)".format("Profiles", config.num_optimization_profiles)
return config_str
def check_profile(profile):
if not bool(profile):
G_LOGGER.critical("Profile is not valid, please provide profile data.\nNote: profile was: {:}".format(profile))
return profile
def str_from_tensor(tensor, is_shape_tensor):
ret = "Input "
if is_shape_tensor:
ret += "shape-tensor"
else:
ret += "tensor"
ret += ": {:} (dtype={:}, shape={:})".format(tensor.name, tensor.dtype, tensor.shape)
return ret
def get_input_metadata_from_profile(profile, network):
    """
    Returns metadata about the inputs based on the OPT values set in a profile.

    Args:
        profile (trt.IOptimizationProfile):
            The profile from which to retrieve input metadata.
        network (trt.INetworkDefinition):
            The network the profile applies to.

    Returns:
        TensorMetadata:
            A mapping of input names to their types and shapes.
            Shapes are retrieved from the OPT values in the profile.
    """
    input_metadata = TensorMetadata()
    for index in range(network.num_inputs):
        tensor = network.get_input(index)
        # Shape tensors use a dedicated profile accessor.
        if tensor.is_shape_tensor:
            shapes = profile.get_shape_input(tensor.name)
        else:
            shapes = profile.get_shape(tensor.name)
        # shapes is (min, opt, max); warn when the profile is dynamic, since a
        # single fixed (opt) shape will be used regardless.
        if tuple(shapes[0]) != tuple(shapes[2]):
            G_LOGGER.warning(
                "Will use `opt` shapes from profile 0 for calibration. "
                "Note that even though `min` != `max` in this profile, calibration "
                "will use fixed input shapes (this is not necessarily an issue)."
            )
        # Always use opt shape
        input_metadata.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=shapes[1])
    return input_metadata
def add_binding_to_metadata(engine, binding, metadata, name_binding):
    """Record name/dtype/shape for one engine binding into *metadata*."""
    # name_binding always comes from profile 0, since that's where we
    # get all binding names in the runner
    metadata.add(
        name=engine[name_binding],
        dtype=np_dtype_from_trt(engine.get_binding_dtype(binding)),
        shape=list(engine.get_binding_shape(binding)),
    )
def get_input_metadata_from_engine(engine, start_binding, end_binding):
    """Collect metadata for all *input* bindings in [start_binding, end_binding)."""
    inputs = TensorMetadata()
    for index, binding in enumerate(range(start_binding, end_binding)):
        if engine.binding_is_input(binding):
            add_binding_to_metadata(engine, binding, inputs, name_binding=index)
    return inputs
def get_output_metadata_from_engine(engine, start_binding, end_binding):
    """Collect metadata for all *output* bindings in [start_binding, end_binding)."""
    outputs = TensorMetadata()
    for index, binding in enumerate(range(start_binding, end_binding)):
        if not engine.binding_is_input(binding):
            add_binding_to_metadata(engine, binding, outputs, name_binding=index)
    return outputs
def str_from_engine(engine):
    """Render a TensorRT engine (inputs, outputs, memory, profiles) as a string."""
    bindings_per_profile = get_bindings_per_profile(engine)
    engine_str = "Name: {:} | {:}{:} Batch Engine ({:} layers)\n".format(
        engine.name,
        "Refittable " if engine.refittable else "",
        "Implicit"
        if hasattr(engine, "has_implicit_batch_dimension") and engine.has_implicit_batch_dimension
        else "Explicit",
        engine.num_layers,
    )
    engine_str += "\n"
    # Show metadata for the first profile (i.e. the dynamic shapes)
    input_metadata = get_input_metadata_from_engine(engine, 0, bindings_per_profile)
    engine_str += "---- {:} Engine Input(s) ----\n{:}\n\n".format(len(input_metadata), input_metadata)
    output_metadata = get_output_metadata_from_engine(engine, 0, bindings_per_profile)
    engine_str += "---- {:} Engine Output(s) ----\n{:}\n\n".format(len(output_metadata), output_metadata)
    engine_str += "---- Memory ----\nDevice Memory: {:} bytes\n\n".format(engine.device_memory_size)
    engine_str += "---- {:} Profile(s) ({:} Binding(s) Each) ----\n".format(
        engine.num_optimization_profiles, bindings_per_profile
    )
    for profile_index in range(engine.num_optimization_profiles):
        engine_str += "- Profile: {:}\n".format(profile_index)
        # Pad names so the shape column lines up across bindings.
        max_width = max([len(binding) for binding in engine]) + 8
        for offset in range(bindings_per_profile):
            # Bindings are laid out profile-after-profile, so offset within the
            # profile maps to an absolute binding index like this:
            binding = profile_index * bindings_per_profile + offset
            name = "[Name: {:}]".format(engine.get_binding_name(binding))
            engine_str += util.indent_block(
                "Binding Index: {:} {:} {:<{max_width}}".format(
                    binding, "(Input) " if engine.binding_is_input(binding) else "(Output)", name, max_width=max_width
                )
            )
            if engine.binding_is_input(binding):
                # Inputs show the full (min, opt, max) range from the profile.
                if engine.is_shape_binding(binding):
                    min_shape, opt_shape, max_shape = engine.get_profile_shape_input(profile_index, binding)
                else:
                    min_shape, opt_shape, max_shape = engine.get_profile_shape(profile_index, binding)
                engine_str += " | Shapes: min={:}, opt={:}, max={:}\n".format(min_shape, opt_shape, max_shape)
            else:
                engine_str += " | Shape: {:}\n".format(engine.get_binding_shape(binding))
        engine_str += "\n"
    return util.indent_block(engine_str, level=0)
def get_bindings_per_profile(engine):
    """Return how many bindings belong to each optimization profile."""
    total_bindings = engine.num_bindings
    num_profiles = engine.num_optimization_profiles
    return total_bindings // num_profiles
def get_active_profile_bindings(context):
    """
    Gets the start and end binding indices for the active optimization profile.

    Args:
        context (trt.IExecutionContext):
            The context whose active optimization profile determines the
            binding range. (The engine is taken from ``context.engine``.)

    Returns:
        Tuple[int, int]: The start and end binding indices, in that order
    """
    active_profile = context.active_optimization_profile
    bindings_per_profile = get_bindings_per_profile(context.engine)
    # Bindings are laid out contiguously, profile after profile, so the active
    # profile's bindings occupy one contiguous slice of the binding list.
    start_binding = bindings_per_profile * active_profile
    end_binding = start_binding + bindings_per_profile
    G_LOGGER.ultra_verbose(
        "Total # of Profiles: {:}, Bindings Per Profile: {:}, Active Profile: {:}, "
        "Start Binding: {:}, End Binding: {:}".format(
            context.engine.num_optimization_profiles, bindings_per_profile, active_profile, start_binding, end_binding
        )
    )
    return start_binding, end_binding
| 38.246063 | 127 | 0.656081 |
import contextlib
from polygraphy import config, mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER, LogMode
trt = mod.lazy_import("tensorrt")
np = mod.lazy_import("numpy")
TRT_LOGGER = None
@mod.export()
def get_trt_logger():
    """Return the process-wide TensorRT logger, creating it lazily on first use."""
    global TRT_LOGGER
    if TRT_LOGGER is not None:
        return TRT_LOGGER
    TRT_LOGGER = trt.Logger()
    return TRT_LOGGER
def fail_unavailable(what):
    """Log a backtrace and exit fatally: *what* is unsupported on this TRT version."""
    G_LOGGER.backtrace()
    G_LOGGER.critical("{:} is not available on TensorRT version {:}.".format(what, trt.__version__))
def check_onnx_parser_errors(parser, success):
    """Report all errors recorded by the ONNX parser and fail if parsing failed."""
    if parser.num_errors > 0:
        for index in range(parser.num_errors):
            G_LOGGER.error(parser.get_error(index))
        G_LOGGER.critical("Could not parse ONNX correctly")
    # `success` is the parser's return value; it can be False even with no
    # recorded errors, so it is checked separately.
    if not success:
        G_LOGGER.critical("Failed to parse ONNX model. Does the model file exist and contain a valid ONNX model?")
def get_layer_class_mapping():
    """Build a mapping from trt.LayerType values to their concrete layer classes.

    Entries whose layer type or class is missing from the installed TensorRT
    version are skipped (with a warning when internal correctness checks are
    enabled), so the mapping always reflects what this TRT build provides.
    """
    # (LayerType attribute name, layer class name); availability varies by TRT version.
    name_pairs = [
        ("CONVOLUTION", "IConvolutionLayer"),
        ("FULLY_CONNECTED", "IFullyConnectedLayer"),
        ("ACTIVATION", "IActivationLayer"),
        ("POOLING", "IPoolingLayer"),
        ("LRN", "ILRNLayer"),
        ("SCALE", "IScaleLayer"),
        ("SOFTMAX", "ISoftMaxLayer"),
        ("DECONVOLUTION", "IDeconvolutionLayer"),
        ("CONCATENATION", "IConcatenationLayer"),
        ("ELEMENTWISE", "IElementWiseLayer"),
        ("PLUGIN", "IPluginLayer"),
        ("UNARY", "IUnaryLayer"),
        ("PADDING", "IPaddingLayer"),
        ("SHUFFLE", "IShuffleLayer"),
        ("REDUCE", "IReduceLayer"),
        ("TOPK", "ITopKLayer"),
        ("GATHER", "IGatherLayer"),
        ("MATRIX_MULTIPLY", "IMatrixMultiplyLayer"),
        ("RAGGED_SOFTMAX", "IRaggedSoftMaxLayer"),
        ("CONSTANT", "IConstantLayer"),
        ("RNN", "IRNNLayer"),
        ("RNN_V2", "IRNNv2Layer"),
        ("IDENTITY", "IIdentityLayer"),
        ("PLUGIN_V2", "IPluginV2Layer"),
        ("SLICE", "ISliceLayer"),
        ("SHAPE", "IShapeLayer"),
        ("PARAMETRIC_RELU", "IParametricReLULayer"),
        ("RESIZE", "IResizeLayer"),
        ("TRIP_LIMIT", "ITripLimitLayer"),
        ("RECURRENCE", "IRecurrenceLayer"),
        ("ITERATOR", "IIteratorLayer"),
        ("LOOP_OUTPUT", "ILoopOutputLayer"),
        ("SELECT", "ISelectLayer"),
        ("FILL", "IFillLayer"),
        ("QUANTIZE", "IQuantizeLayer"),
        ("DEQUANTIZE", "IDequantizeLayer"),
        ("CONDITION", "IConditionLayer"),
        ("CONDITIONAL_INPUT", "IIfConditionalInputLayer"),
        ("CONDITIONAL_OUTPUT", "IIfConditionalOutputLayer"),
    ]
    layer_class_mapping = {}
    for layer_type, layer_cls in name_pairs:
        try:
            # Rebind in place so the warning below reports whichever lookup
            # succeeded (enum) and whichever failed (string), as before.
            layer_type = getattr(trt.LayerType, layer_type)
            layer_cls = getattr(trt, layer_cls)
        except AttributeError:
            if config.INTERNAL_CORRECTNESS_CHECKS:
                G_LOGGER.warning(
                    "Could not find one or more of layer type: {:} or layer class: {:}".format(layer_type, layer_cls)
                )
        else:
            layer_class_mapping[layer_type] = layer_cls
    return layer_class_mapping
def np_dtype_from_trt(trt_dtype):
    """Convert a TensorRT DataType to the corresponding NumPy dtype."""
    # Touch the lazily-imported numpy module so it is loaded before use.
    _ = mod.has_mod(np)
    return np.dtype(trt.nptype(trt_dtype))
def get_network_input_metadata(network):
    """Return TensorMetadata (name -> dtype/shape) for all network inputs."""
    inputs = TensorMetadata()
    for i in range(network.num_inputs):
        tensor = network.get_input(i)
        inputs.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)
    return inputs
def get_network_output_metadata(network):
    """Return TensorMetadata (name -> dtype/shape) for all network outputs."""
    outputs = TensorMetadata()
    for i in range(network.num_outputs):
        tensor = network.get_output(i)
        outputs.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=tensor.shape)
    return outputs
def get_layer_input_metadata(layer):
    """Return TensorMetadata for a layer's inputs; falsy (absent) inputs are skipped."""
    meta = TensorMetadata()
    for i in range(layer.num_inputs):
        inp = layer.get_input(i)
        # get_input may yield nothing for optional input slots — skip those.
        if inp:
            meta.add(inp.name, np_dtype_from_trt(inp.dtype), inp.shape)
    return meta
def get_layer_output_metadata(layer):
    """Return TensorMetadata for a layer's outputs; falsy (absent) outputs are skipped."""
    meta = TensorMetadata()
    for i in range(layer.num_outputs):
        outp = layer.get_output(i)
        if outp:
            meta.add(outp.name, np_dtype_from_trt(outp.dtype), outp.shape)
    return meta
def str_from_layer(layer, index):
    """Return a one-layer summary string (name, type, inputs, outputs)."""
    input_info = get_layer_input_metadata(layer)
    output_info = get_layer_output_metadata(layer)
    return util.str_from_layer("Layer", index, layer.name, layer.type, input_info, output_info)
def get_layer_attribute_names(layer):
    """Return the attribute names that are safe and meaningful to read for *layer*.

    Excludes dunders, attributes inherited from the ILayer base class, and
    attributes that are invalid for the layer's current configuration.
    """
    def is_special_attribute(attr):
        # Dunder attributes are implementation details, not layer parameters.
        return attr.startswith("__") and attr.endswith("__")
    def is_valid_attribute(attr, layer):
        # Certain attributes are not readable in specific configurations, so
        # they are excluded case-by-case below.
        if (
            type(layer) == trt.IPoolingLayer
            or type(layer) == trt.IConvolutionLayer
            or type(layer) == trt.IDeconvolutionLayer
        ):
            # 2D-only attributes do not apply when the layer operates on >4D tensors.
            if len(layer.get_input(0).shape) > 4:
                return attr not in ["padding", "stride", "window_size"]
        if type(layer) == trt.IResizeLayer:
            # When scales come from a second input, the `scales` attribute is unusable.
            if layer.num_inputs > 1:
                return attr not in ["scales"]
        if type(layer) == trt.ISliceLayer:
            # When slice parameters come from extra inputs, these attributes are unusable.
            if layer.num_inputs > 1:
                return attr not in ["shape", "start", "stride"]
        return True
    return [
        attr
        for attr in dir(layer)
        if not is_special_attribute(attr) and not hasattr(trt.ILayer, attr) and is_valid_attribute(attr, layer)
    ]
def str_from_network(network, mode="full"):
    """Render a TensorRT network as a human-readable string.

    mode: "none" omits layers; "attrs"/"full" also print layer attributes
    ("full" additionally prints weight arrays).
    """
    LAYER_TYPE_CLASS_MAPPING = get_layer_class_mapping()
    network_str = "Name: {:} | {:} Batch Network{:}\n".format(
        network.name,
        "Implicit"
        if hasattr(network, "has_implicit_batch_dimension") and network.has_implicit_batch_dimension
        else "Explicit",
        " with Explicit Precision "
        if hasattr(network, "has_explicit_precision") and network.has_explicit_precision
        else "",
    )
    network_str += "\n"
    input_metadata = get_network_input_metadata(network)
    network_str += "---- {:} Network Input(s) ----\n{:}\n\n".format(len(input_metadata), input_metadata)
    output_metadata = get_network_output_metadata(network)
    network_str += "---- {:} Network Output(s) ----\n{:}\n\n".format(len(output_metadata), output_metadata)
    network_str += "---- {:} Layer(s) ----\n".format(network.num_layers)
    if mode != "none":
        for index, layer in enumerate(network):
            # Downcast to the concrete layer class so type-specific attributes
            # become visible on the object.
            if layer.type in LAYER_TYPE_CLASS_MAPPING:
                layer.__class__ = LAYER_TYPE_CLASS_MAPPING[layer.type]
            network_str += str_from_layer(layer, index)
            if mode in ["attrs", "full"]:
                attrs = get_layer_attribute_names(layer)
                if attrs:
                    network_str += util.indent_block("---- Attributes ----") + "\n"
                for attr in attrs:
                    with G_LOGGER.verbosity():
                        val = getattr(layer, attr)
                    # Skip (potentially huge) weight arrays unless mode is "full".
                    if mode == "full" or not isinstance(val, np.ndarray):
                        attr_str = ""
                        if layer.name:
                            attr_str += "{:}.".format(layer.name)
                        network_str += util.indent_block("{:}{:} = {:}".format(attr_str, attr, val)) + "\n"
            network_str += "\n"
    return util.indent_block(network_str, level=0)
def _get_network_outputs(network):
    # Names of all tensors currently marked as network outputs.
    return [network.get_output(index).name for index in range(network.num_outputs)]
def check_outputs_not_found(not_found, available_outputs):
    """Fatal error if any requested outputs were missing; lists what is available."""
    if not_found:
        available_outputs = util.unique_list(available_outputs)
        G_LOGGER.critical(
            "The following outputs were not found: {:}.\n"
            "Note: Available tensors:\n\t{:}".format(not_found, "\n\t".join(available_outputs))
        )
def mark_outputs(network, outputs):
    """Mark exactly the named tensors as network outputs, unmarking all others."""
    outputs = set(outputs)
    all_outputs = []
    for layer in network:
        for index in range(layer.num_outputs):
            tensor = layer.get_output(index)
            all_outputs.append(tensor.name)
            # Reset first: clear any previously marked output so that only
            # tensors in `outputs` remain marked afterwards.
            if tensor.is_network_output:
                network.unmark_output(tensor)
            if tensor.name in outputs:
                if not tensor.is_network_output:
                    G_LOGGER.ultra_verbose("Marking {:} as an output".format(tensor.name))
                    network.mark_output(tensor)
    marked_outputs = set(_get_network_outputs(network))
    not_found = outputs - marked_outputs
    check_outputs_not_found(not_found, all_outputs)
def mark_layerwise(network):
    """Mark the outputs of (nearly) every layer as network outputs.

    Shape/constant layers and layers inside loop bodies are excluded.
    """
    LOOP_START_NAMES = ["TRIP_LIMIT", "ITERATOR", "RECURRENCE"]
    LOOP_END_NAMES = ["LOOP_OUTPUT"]
    # getattr/hasattr-guarded: loop layer types only exist in newer TRT versions.
    LOOP_START_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_START_NAMES if hasattr(trt.LayerType, attr)]
    LOOP_END_LAYERS = [getattr(trt.LayerType, attr) for attr in LOOP_END_NAMES if hasattr(trt.LayerType, attr)]
    EXCLUDE_LAYERS = [trt.LayerType.SHAPE, trt.LayerType.CONSTANT]
    outputs = []
    in_loop = False
    for layer in network:
        if layer.type in LOOP_START_LAYERS:
            G_LOGGER.warning(
                "Loop detected. Please ensure the network is topologically sorted so that layers within "
                "the loop body are not marked as network outputs in layerwise mode",
                mode=LogMode.ONCE,
            )
            in_loop = True
        elif layer.type in LOOP_END_LAYERS:
            in_loop = False
        should_mark_layer = not in_loop and layer.type not in EXCLUDE_LAYERS
        if should_mark_layer:
            for index in range(layer.num_outputs):
                tensor = layer.get_output(index)
                outputs.append(tensor.name)
    G_LOGGER.verbose("Marking {:} tensors as outputs".format(len(outputs)))
    mark_outputs(network, outputs)
def unmark_outputs(network, outputs):
    """Unmark the named tensors so they are no longer network outputs."""
    outputs = set(outputs)
    unmarked_outputs = set()
    for layer in network:
        for index in range(layer.num_outputs):
            tensor = layer.get_output(index)
            if tensor.is_network_output and tensor.name in outputs:
                network.unmark_output(tensor)
                unmarked_outputs.add(tensor.name)
    # Any requested name that was not unmarked is an error.
    not_found = outputs - unmarked_outputs
    check_outputs_not_found(not_found, _get_network_outputs(network))
def str_from_config(config):
    """Return a human-readable, multi-line summary of a TensorRT builder config."""
    config_str = "{:20} | {:} bytes ({:.2f} MiB)\n".format(
        "Workspace", config.max_workspace_size, config.max_workspace_size / (1024.0 ** 2)
    )
    config_str += "{:20} | ".format("Precision")
    # Some flags/APIs only exist in newer TensorRT versions; suppress
    # AttributeError so the summary degrades gracefully on older releases.
    with contextlib.suppress(AttributeError):
        config_str += "TF32: {:}, ".format(config.get_flag(trt.BuilderFlag.TF32))
    config_str += "FP16: {:}, INT8: {:}, Strict Types: {:}\n".format(
        config.get_flag(trt.BuilderFlag.FP16),
        config.get_flag(trt.BuilderFlag.INT8),
        config.get_flag(trt.BuilderFlag.STRICT_TYPES),
    )
    with contextlib.suppress(AttributeError):
        # Decode the tactic-source bitmask into the names of enabled sources.
        source_vals = [
            val.name for val in trt.TacticSource.__members__.values() if (1 << int(val)) & config.get_tactic_sources()
        ]
        config_str += "{:20} | {:}\n".format("Tactic Sources", source_vals)
    with contextlib.suppress(AttributeError):
        config_str += "{:20} | {:}\n".format("Safety Restricted", config.get_flag(trt.BuilderFlag.SAFETY_SCOPE))
    if config.int8_calibrator:
        config_str += "{:20} | {:}\n".format("Calibrator", config.int8_calibrator)
    config_str += "{:20} | {:} profile(s)".format("Profiles", config.num_optimization_profiles)
    return config_str
def check_profile(profile):
    """Fatal error if *profile* is empty/falsy; otherwise return it unchanged."""
    if not bool(profile):
        G_LOGGER.critical("Profile is not valid, please provide profile data.\nNote: profile was: {:}".format(profile))
    return profile
def str_from_tensor(tensor, is_shape_tensor):
    """Return a one-line, human-readable description of a network input tensor."""
    ret = "Input "
    if is_shape_tensor:
        ret += "shape-tensor"
    else:
        ret += "tensor"
    ret += ": {:} (dtype={:}, shape={:})".format(tensor.name, tensor.dtype, tensor.shape)
    return ret
def get_input_metadata_from_profile(profile, network):
    """Return input metadata using the OPT shapes recorded in *profile*."""
    input_metadata = TensorMetadata()
    for index in range(network.num_inputs):
        tensor = network.get_input(index)
        # Shape tensors use a dedicated profile accessor.
        if tensor.is_shape_tensor:
            shapes = profile.get_shape_input(tensor.name)
        else:
            shapes = profile.get_shape(tensor.name)
        # shapes is (min, opt, max); warn when the profile is dynamic, since a
        # single fixed (opt) shape will be used regardless.
        if tuple(shapes[0]) != tuple(shapes[2]):
            G_LOGGER.warning(
                "Will use `opt` shapes from profile 0 for calibration. "
                "Note that even though `min` != `max` in this profile, calibration "
                "will use fixed input shapes (this is not necessarily an issue)."
            )
        # shapes[1] is the OPT shape.
        input_metadata.add(name=tensor.name, dtype=np_dtype_from_trt(tensor.dtype), shape=shapes[1])
    return input_metadata
def add_binding_to_metadata(engine, binding, metadata, name_binding):
    """Record name/dtype/shape for one engine binding into *metadata*."""
    # name_binding always comes from profile 0, since that's where we
    # get all binding names in the runner
    metadata.add(
        name=engine[name_binding],
        dtype=np_dtype_from_trt(engine.get_binding_dtype(binding)),
        shape=list(engine.get_binding_shape(binding)),
    )
def get_input_metadata_from_engine(engine, start_binding, end_binding):
    """Collect metadata for all *input* bindings in [start_binding, end_binding)."""
    inputs = TensorMetadata()
    for index, binding in enumerate(range(start_binding, end_binding)):
        if engine.binding_is_input(binding):
            add_binding_to_metadata(engine, binding, inputs, name_binding=index)
    return inputs
def get_output_metadata_from_engine(engine, start_binding, end_binding):
    """Collect metadata for all *output* bindings in [start_binding, end_binding)."""
    outputs = TensorMetadata()
    for index, binding in enumerate(range(start_binding, end_binding)):
        if not engine.binding_is_input(binding):
            add_binding_to_metadata(engine, binding, outputs, name_binding=index)
    return outputs
def str_from_engine(engine):
    """Render a TensorRT engine (inputs, outputs, memory, profiles) as a string."""
    bindings_per_profile = get_bindings_per_profile(engine)
    engine_str = "Name: {:} | {:}{:} Batch Engine ({:} layers)\n".format(
        engine.name,
        "Refittable " if engine.refittable else "",
        "Implicit"
        if hasattr(engine, "has_implicit_batch_dimension") and engine.has_implicit_batch_dimension
        else "Explicit",
        engine.num_layers,
    )
    engine_str += "\n"
    # Show metadata for the first profile (i.e. the dynamic shapes)
    input_metadata = get_input_metadata_from_engine(engine, 0, bindings_per_profile)
    engine_str += "---- {:} Engine Input(s) ----\n{:}\n\n".format(len(input_metadata), input_metadata)
    output_metadata = get_output_metadata_from_engine(engine, 0, bindings_per_profile)
    engine_str += "---- {:} Engine Output(s) ----\n{:}\n\n".format(len(output_metadata), output_metadata)
    engine_str += "---- Memory ----\nDevice Memory: {:} bytes\n\n".format(engine.device_memory_size)
    engine_str += "---- {:} Profile(s) ({:} Binding(s) Each) ----\n".format(
        engine.num_optimization_profiles, bindings_per_profile
    )
    for profile_index in range(engine.num_optimization_profiles):
        engine_str += "- Profile: {:}\n".format(profile_index)
        # Pad names so the shape column lines up across bindings.
        max_width = max([len(binding) for binding in engine]) + 8
        for offset in range(bindings_per_profile):
            # Bindings are laid out profile-after-profile.
            binding = profile_index * bindings_per_profile + offset
            name = "[Name: {:}]".format(engine.get_binding_name(binding))
            engine_str += util.indent_block(
                "Binding Index: {:} {:} {:<{max_width}}".format(
                    binding, "(Input) " if engine.binding_is_input(binding) else "(Output)", name, max_width=max_width
                )
            )
            if engine.binding_is_input(binding):
                # Inputs show the full (min, opt, max) range from the profile.
                if engine.is_shape_binding(binding):
                    min_shape, opt_shape, max_shape = engine.get_profile_shape_input(profile_index, binding)
                else:
                    min_shape, opt_shape, max_shape = engine.get_profile_shape(profile_index, binding)
                engine_str += " | Shapes: min={:}, opt={:}, max={:}\n".format(min_shape, opt_shape, max_shape)
            else:
                engine_str += " | Shape: {:}\n".format(engine.get_binding_shape(binding))
        engine_str += "\n"
    return util.indent_block(engine_str, level=0)
def get_bindings_per_profile(engine):
    """Return how many bindings belong to each optimization profile."""
    return engine.num_bindings // engine.num_optimization_profiles
def get_active_profile_bindings(context):
    """
    Gets the start and end binding indices for the active optimization profile.

    Args:
        context (trt.IExecutionContext):
            The context whose active optimization profile determines the
            binding range. (The engine is taken from ``context.engine``.)

    Returns:
        Tuple[int, int]: The start and end binding indices, in that order.
    """
    active_profile = context.active_optimization_profile
    bindings_per_profile = get_bindings_per_profile(context.engine)
    # Bindings are laid out contiguously, profile after profile.
    start_binding = bindings_per_profile * active_profile
    end_binding = start_binding + bindings_per_profile
    G_LOGGER.ultra_verbose(
        "Total # of Profiles: {:}, Bindings Per Profile: {:}, Active Profile: {:}, "
        "Start Binding: {:}, End Binding: {:}".format(
            context.engine.num_optimization_profiles, bindings_per_profile, active_profile, start_binding, end_binding
        )
    )
    return start_binding, end_binding
| true | true |
f7fe3f38008fbe8da34a6149bbdff2bab6ef6e4d | 102 | py | Python | erica/domain/DomainModule.py | punknoir101/erica-1 | 675a6280d38ca5b56946af6f3ed7e295ba896db0 | [
"MIT"
] | 3 | 2022-01-31T15:17:17.000Z | 2022-03-01T16:15:47.000Z | erica/domain/DomainModule.py | punknoir101/erica-1 | 675a6280d38ca5b56946af6f3ed7e295ba896db0 | [
"MIT"
] | 59 | 2022-01-31T14:04:20.000Z | 2022-03-31T20:08:47.000Z | erica/domain/DomainModule.py | punknoir101/erica-1 | 675a6280d38ca5b56946af6f3ed7e295ba896db0 | [
"MIT"
] | 1 | 2022-03-10T09:24:28.000Z | 2022-03-10T09:24:28.000Z | from opyoid import Module
class DomainModule(Module):
    """Opyoid dependency-injection module for the domain layer."""
    def configure(self) -> None:
        # No bindings yet; placeholder so the injector can install this module.
        pass
| 14.571429 | 32 | 0.676471 | from opyoid import Module
class DomainModule(Module):
    """Opyoid dependency-injection module for the domain layer."""
    def configure(self) -> None:
        # No bindings yet; placeholder so the injector can install this module.
        pass
| true | true |
f7fe3f61e2cb49a84cc03e59dba550b1fe4a85de | 10,793 | py | Python | tests/attacks/inference/test_membership_inference.py | meghana-sesetti/adversarial-robustness-toolbox | 6a5ce9e4142734ad9004e5c093ef8fa754ea6b39 | [
"MIT"
] | 1 | 2020-12-26T10:02:05.000Z | 2020-12-26T10:02:05.000Z | tests/attacks/inference/test_membership_inference.py | Tikquuss/adversarial-robustness-toolbox | 62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7 | [
"MIT"
] | 33 | 2021-01-18T08:30:34.000Z | 2022-03-11T07:05:13.000Z | tests/attacks/inference/test_membership_inference.py | Tikquuss/adversarial-robustness-toolbox | 62ffe7c951d8a60d49a9ea6ac7b04aa4432a3fb7 | [
"MIT"
] | 1 | 2020-09-28T12:58:01.000Z | 2020-09-28T12:58:01.000Z | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pytest
import keras
from art.attacks.inference.membership_inference.black_box import MembershipInferenceBlackBox
from art.attacks.inference.membership_inference.black_box_rule_based import MembershipInferenceBlackBoxRuleBased
from art.estimators.classification.keras import KerasClassifier
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin
from tests.attacks.utils import backend_test_classifier_type_check_fail
logger = logging.getLogger(__name__)
attack_train_ratio = 0.5
num_classes_iris = 3
num_classes_mnist = 10
def test_rule_based_image(get_default_mnist_subset, image_dl_estimator_for_attack):
    """Rule-based membership inference on image (MNIST) classifiers; no attack-model fit."""
    classifier_list = image_dl_estimator_for_attack(MembershipInferenceBlackBoxRuleBased)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBoxRuleBased(classifier)
        # 0.8 is the tolerance passed to pytest.approx in the accuracy check.
        backend_check_membership_accuracy_no_fit(attack, get_default_mnist_subset, 0.8)
def test_rule_based_tabular(get_iris_dataset, get_tabular_classifier_list):
    """Rule-based membership inference on tabular (Iris) classifiers; no attack-model fit."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBoxRuleBased)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBoxRuleBased(classifier)
        backend_check_membership_accuracy_no_fit(attack, get_iris_dataset, 0.06)
def test_black_box_image(get_default_mnist_subset, image_dl_estimator_for_attack):
    """Black-box membership inference on image (MNIST) classifiers (default attack model)."""
    classifier_list = image_dl_estimator_for_attack(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBox(classifier)
        backend_check_membership_accuracy(attack, get_default_mnist_subset, attack_train_ratio, 0.03)
@pytest.mark.parametrize("model_type", ["nn", "rf", "gb"])
def test_black_box_tabular(model_type, get_tabular_classifier_list, get_iris_dataset):
    """Black-box membership inference on tabular classifiers for each attack-model type."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.08)
@pytest.mark.parametrize("model_type", ["nn", "rf", "gb"])
def test_black_box_loss_tabular(model_type, get_tabular_classifier_list, get_iris_dataset):
    """Loss-based black-box attack; only PyTorch/TF2 classifiers are exercised."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        # Only these framework classifiers are tested with input_type="loss" here.
        if type(classifier).__name__ == "PyTorchClassifier" or type(classifier).__name__ == "TensorFlowV2Classifier":
            attack = MembershipInferenceBlackBox(classifier, input_type="loss", attack_model_type=model_type)
            backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.15)
@pytest.mark.only_with_platform("keras")
@pytest.mark.skipif(keras.__version__.startswith("2.2"), reason="requires Keras 2.3.0 or higher")
def test_black_box_keras_loss(get_iris_dataset):
    """Loss-based black-box attack on Keras models with both string and class losses."""
    (x_train, y_train), (_, _) = get_iris_dataset
    # This test creates a framework-specific (keras) model because it needs to check both the case of a string-based
    # loss and a class-based loss, and therefore cannot use the generic fixture get_tabular_classifier_list
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(8, input_dim=4, activation="relu"))
    model.add(keras.layers.Dense(3, activation="softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    model.fit(x_train, y_train, epochs=150, batch_size=10)
    classifier = KerasClassifier(model)
    attack = MembershipInferenceBlackBox(classifier, input_type="loss")
    backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.15)
    # Repeat with a loss *object* instead of a loss name (requires Keras >= 2.3).
    model2 = keras.models.Sequential()
    model2.add(keras.layers.Dense(12, input_dim=4, activation="relu"))
    model2.add(keras.layers.Dense(3, activation="softmax"))
    model2.compile(loss=keras.losses.CategoricalCrossentropy(), optimizer="adam", metrics=["accuracy"])
    model2.fit(x_train, y_train, epochs=150, batch_size=10)
    classifier = KerasClassifier(model2)
    attack = MembershipInferenceBlackBox(classifier, input_type="loss")
    backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.15)
def test_black_box_tabular_rf(get_tabular_classifier_list, get_iris_dataset):
    """Black-box attack using a random-forest attack model."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBox(classifier, attack_model_type="rf")
        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.1)
def test_black_box_tabular_gb(get_tabular_classifier_list, get_iris_dataset):
    """Black-box attack using a gradient-boosting attack model."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBox(classifier, attack_model_type="gb")
        # train attack model using only attack_train_ratio of data
        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.03)
@pytest.mark.only_with_platform("pytorch")
def test_black_box_with_model(get_tabular_classifier_list, get_attack_classifier_list, get_iris_dataset):
    """Black-box attack using externally supplied attack models."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    # 2 * num_classes features — presumably model output plus one-hot label;
    # TODO(review): confirm against the fixture's expected input layout.
    attack_model_list = get_attack_classifier_list(num_features=2 * num_classes_iris)
    if not attack_model_list:
        logging.warning("Couldn't perform this test because no attack model is defined")
        return
    for classifier in classifier_list:
        for attack_model in attack_model_list:
            print(type(attack_model).__name__)
            attack = MembershipInferenceBlackBox(classifier, attack_model=attack_model)
            backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.03)
def test_errors(get_tabular_classifier_list, get_iris_dataset):
    """Invalid constructor arguments and mismatched x/y data must raise ValueError."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    (x_train, y_train), (x_test, y_test) = get_iris_dataset
    # Unknown attack-model type / input type.
    with pytest.raises(ValueError):
        MembershipInferenceBlackBox(classifier_list[0], attack_model_type="a")
    with pytest.raises(ValueError):
        MembershipInferenceBlackBox(classifier_list[0], input_type="a")
    attack = MembershipInferenceBlackBox(classifier_list[0])
    # Deliberately mismatched x/y pairs in fit() and infer().
    with pytest.raises(ValueError):
        attack.fit(x_train, y_test, x_test, y_test)
    with pytest.raises(ValueError):
        attack.fit(x_train, y_train, x_test, y_train)
    with pytest.raises(ValueError):
        attack.infer(x_train, y_test)
def test_classifier_type_check_fail():
    """Both attacks must reject estimators lacking (BaseEstimator, ClassifierMixin)."""
    backend_test_classifier_type_check_fail(MembershipInferenceBlackBoxRuleBased, [BaseEstimator, ClassifierMixin])
    backend_test_classifier_type_check_fail(MembershipInferenceBlackBox, [BaseEstimator, ClassifierMixin])
def backend_check_membership_accuracy_no_fit(attack, dataset, approx):
    """Infer membership on train/test sets without fitting, then check accuracy."""
    (x_train, y_train), (x_test, y_test) = dataset
    # infer attacked feature
    inferred_train = attack.infer(x_train, y_train)
    inferred_test = attack.infer(x_test, y_test)
    # check accuracy
    backend_check_accuracy(inferred_train, inferred_test, approx)
def backend_check_membership_accuracy(attack, dataset, attack_train_ratio, approx):
    """Fit the attack on a fraction of the data, then check accuracy on the rest."""
    (x_train, y_train), (x_test, y_test) = dataset
    attack_train_size = int(len(x_train) * attack_train_ratio)
    attack_test_size = int(len(x_test) * attack_train_ratio)
    # train attack model using only attack_train_ratio of data
    attack.fit(
        x_train[:attack_train_size], y_train[:attack_train_size], x_test[:attack_test_size], y_test[:attack_test_size]
    )
    # infer attacked feature on remainder of data
    inferred_train = attack.infer(x_train[attack_train_size:], y_train[attack_train_size:])
    inferred_test = attack.infer(x_test[attack_test_size:], y_test[attack_test_size:])
    # check accuracy
    backend_check_accuracy(inferred_train, inferred_test, approx)
def backend_check_accuracy(inferred_train, inferred_test, approx):
    """Training members should be flagged at least as often as non-members (within approx)."""
    train_pos = sum(inferred_train) / len(inferred_train)
    test_pos = sum(inferred_test) / len(inferred_test)
    assert train_pos > test_pos or train_pos == pytest.approx(test_pos, abs=approx) or test_pos == 1
if __name__ == "__main__":
    pytest.cmdline.main("-q -s {} --mlFramework=tensorflow --durations=0".format(__file__).split(" "))
| 47.337719 | 120 | 0.77995 |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pytest
import keras
from art.attacks.inference.membership_inference.black_box import MembershipInferenceBlackBox
from art.attacks.inference.membership_inference.black_box_rule_based import MembershipInferenceBlackBoxRuleBased
from art.estimators.classification.keras import KerasClassifier
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin
from tests.attacks.utils import backend_test_classifier_type_check_fail
logger = logging.getLogger(__name__)
attack_train_ratio = 0.5
num_classes_iris = 3
num_classes_mnist = 10
def test_rule_based_image(get_default_mnist_subset, image_dl_estimator_for_attack):
classifier_list = image_dl_estimator_for_attack(MembershipInferenceBlackBoxRuleBased)
if not classifier_list:
logging.warning("Couldn't perform this test because no classifier is defined")
return
for classifier in classifier_list:
attack = MembershipInferenceBlackBoxRuleBased(classifier)
backend_check_membership_accuracy_no_fit(attack, get_default_mnist_subset, 0.8)
def test_rule_based_tabular(get_iris_dataset, get_tabular_classifier_list):
    """Rule-based black-box membership inference on Iris tabular classifiers."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBoxRuleBased)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBoxRuleBased(classifier)
        backend_check_membership_accuracy_no_fit(attack, get_iris_dataset, 0.06)
def test_black_box_image(get_default_mnist_subset, image_dl_estimator_for_attack):
    """Learned black-box membership inference on MNIST image classifiers."""
    classifier_list = image_dl_estimator_for_attack(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBox(classifier)
        backend_check_membership_accuracy(attack, get_default_mnist_subset, attack_train_ratio, 0.03)
@pytest.mark.parametrize("model_type", ["nn", "rf", "gb"])
def test_black_box_tabular(model_type, get_tabular_classifier_list, get_iris_dataset):
    """Learned black-box membership inference on Iris, one attack model type per run."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBox(classifier, attack_model_type=model_type)
        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.08)
@pytest.mark.parametrize("model_type", ["nn", "rf", "gb"])
def test_black_box_loss_tabular(model_type, get_tabular_classifier_list, get_iris_dataset):
    """Loss-based black-box inference on Iris, restricted to frameworks that expose losses."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        # input_type="loss" is only supported by these framework classifiers
        if type(classifier).__name__ == "PyTorchClassifier" or type(classifier).__name__ == "TensorFlowV2Classifier":
            attack = MembershipInferenceBlackBox(classifier, input_type="loss", attack_model_type=model_type)
            backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.15)
@pytest.mark.only_with_platform("keras")
@pytest.mark.skipif(keras.__version__.startswith("2.2"), reason="requires Keras 2.3.0 or higher")
def test_black_box_keras_loss(get_iris_dataset):
    """Loss-based attack on Keras models compiled with string- and class-based losses."""
    (x_train, y_train), (_, _) = get_iris_dataset
    # This test creates a framework-specific (keras) model because it needs to check both the case of a string-based
    # loss and a class-based loss, and therefore cannot use the generic fixture get_tabular_classifier_list
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(8, input_dim=4, activation="relu"))
    model.add(keras.layers.Dense(3, activation="softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    model.fit(x_train, y_train, epochs=150, batch_size=10)
    classifier = KerasClassifier(model)
    attack = MembershipInferenceBlackBox(classifier, input_type="loss")
    backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.15)
    # same attack, but against a model compiled with a class-based loss
    model2 = keras.models.Sequential()
    model2.add(keras.layers.Dense(12, input_dim=4, activation="relu"))
    model2.add(keras.layers.Dense(3, activation="softmax"))
    model2.compile(loss=keras.losses.CategoricalCrossentropy(), optimizer="adam", metrics=["accuracy"])
    model2.fit(x_train, y_train, epochs=150, batch_size=10)
    classifier = KerasClassifier(model2)
    attack = MembershipInferenceBlackBox(classifier, input_type="loss")
    backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.15)
def test_black_box_tabular_rf(get_tabular_classifier_list, get_iris_dataset):
    """Black-box membership inference on Iris with a random-forest attack model."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBox(classifier, attack_model_type="rf")
        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.1)
def test_black_box_tabular_gb(get_tabular_classifier_list, get_iris_dataset):
    """Black-box membership inference on Iris with a gradient-boosting attack model."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        attack = MembershipInferenceBlackBox(classifier, attack_model_type="gb")
        backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.03)
@pytest.mark.only_with_platform("pytorch")
def test_black_box_with_model(get_tabular_classifier_list, get_attack_classifier_list, get_iris_dataset):
    """Black-box membership inference on Iris with user-supplied attack models."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    # attack model input is predictions concatenated with one-hot labels
    attack_model_list = get_attack_classifier_list(num_features=2 * num_classes_iris)
    if not attack_model_list:
        logging.warning("Couldn't perform this test because no attack model is defined")
        return
    for classifier in classifier_list:
        for attack_model in attack_model_list:
            print(type(attack_model).__name__)
            attack = MembershipInferenceBlackBox(classifier, attack_model=attack_model)
            backend_check_membership_accuracy(attack, get_iris_dataset, attack_train_ratio, 0.03)
def test_errors(get_tabular_classifier_list, get_iris_dataset):
    """Invalid constructor arguments and mismatched data shapes raise ValueError."""
    classifier_list = get_tabular_classifier_list(MembershipInferenceBlackBox)
    if not classifier_list:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return
    (x_train, y_train), (x_test, y_test) = get_iris_dataset
    # unknown attack model type / input type
    with pytest.raises(ValueError):
        MembershipInferenceBlackBox(classifier_list[0], attack_model_type="a")
    with pytest.raises(ValueError):
        MembershipInferenceBlackBox(classifier_list[0], input_type="a")
    attack = MembershipInferenceBlackBox(classifier_list[0])
    # x/y length mismatches in fit and infer
    with pytest.raises(ValueError):
        attack.fit(x_train, y_test, x_test, y_test)
    with pytest.raises(ValueError):
        attack.fit(x_train, y_train, x_test, y_train)
    with pytest.raises(ValueError):
        attack.infer(x_train, y_test)
def test_classifier_type_check_fail():
    """Both attacks reject estimators lacking the required base/mixin types."""
    backend_test_classifier_type_check_fail(MembershipInferenceBlackBoxRuleBased, [BaseEstimator, ClassifierMixin])
    backend_test_classifier_type_check_fail(MembershipInferenceBlackBox, [BaseEstimator, ClassifierMixin])
def backend_check_membership_accuracy_no_fit(attack, dataset, approx):
    """Infer membership on both splits (no training step) and check accuracy."""
    (x_train, y_train), (x_test, y_test) = dataset
    inferred_train = attack.infer(x_train, y_train)
    inferred_test = attack.infer(x_test, y_test)
    backend_check_accuracy(inferred_train, inferred_test, approx)
def backend_check_membership_accuracy(attack, dataset, attack_train_ratio, approx):
    """Fit the attack on part of the data, infer on the remainder, check accuracy."""
    (x_train, y_train), (x_test, y_test) = dataset
    attack_train_size = int(len(x_train) * attack_train_ratio)
    attack_test_size = int(len(x_test) * attack_train_ratio)
    # train attack model using only attack_train_ratio of data
    attack.fit(
        x_train[:attack_train_size], y_train[:attack_train_size], x_test[:attack_test_size], y_test[:attack_test_size]
    )
    # infer attacked feature on remainder of data
    inferred_train = attack.infer(x_train[attack_train_size:], y_train[attack_train_size:])
    inferred_test = attack.infer(x_test[attack_test_size:], y_test[attack_test_size:])
    # check accuracy
    backend_check_accuracy(inferred_train, inferred_test, approx)
def backend_check_accuracy(inferred_train, inferred_test, approx):
    """Assert the attack labels training data as members at least as often
    as test data, within an absolute tolerance of ``approx`` (or when the
    test set is labelled entirely as members)."""
    member_rate = sum(inferred_train) / len(inferred_train)
    non_member_rate = sum(inferred_test) / len(inferred_test)
    acceptable = (
        member_rate > non_member_rate
        or member_rate == pytest.approx(non_member_rate, abs=approx)
        or non_member_rate == 1
    )
    assert acceptable
if __name__ == "__main__":
pytest.cmdline.main("-q -s {} --mlFramework=tensorflow --durations=0".format(__file__).split(" "))
| true | true |
f7fe3f625de60e920278fa9877a8710f2d833150 | 1,390 | py | Python | ktrade/client.py | krosstrading/ktrade | 13ef132772f22acfac05ed60a2fcfbb1f34c2615 | [
"Apache-2.0"
] | null | null | null | ktrade/client.py | krosstrading/ktrade | 13ef132772f22acfac05ed60a2fcfbb1f34c2615 | [
"Apache-2.0"
] | null | null | null | ktrade/client.py | krosstrading/ktrade | 13ef132772f22acfac05ed60a2fcfbb1f34c2615 | [
"Apache-2.0"
] | null | null | null | import asyncio
from .protocol import stock_pb2
from . import providerdict
class KrossClient:
    """Relays protobuf stock queries from a socket to registered providers.

    Wraps a message socket and a node descriptor; outbound writes are
    serialised through an asyncio lock so concurrent senders do not
    interleave frames on the socket.
    """
    def __init__(self, socket, node):
        self.sock = socket
        self.node = node
        self.write_lock = asyncio.Lock()
    async def send_object(self, data):
        """Serialise ``data`` (a protobuf message) and send it on the socket."""
        serialized = data.SerializeToString()
        async with self.write_lock:
            await self.sock.send(serialized)
    async def send_response(self, query, result_code):
        """Reply to ``query`` with a StockResult carrying ``result_code``."""
        reply = stock_pb2.StockResult()
        reply.uuid_h = query.uuid_h
        reply.uuid_l = query.uuid_l
        reply.code = result_code
        await self.send_object(reply)
    async def listen(self):
        """Receive StockQuery messages and dispatch each to its provider.

        Runs until receiving or dispatching raises, then exits the loop.
        """
        while True:
            try:
                raw = await self.sock.recv()
                print('recv msg', len(raw))
                query = stock_pb2.StockQuery()
                query.ParseFromString(raw)
                provider = await providerdict.find_provider(
                    self.node.vendor, self.node.channel_type, self.node.service)
                print(provider)
                if provider is None:
                    await self.send_response(query, stock_pb2.NO_PROVIDER)
                else:
                    await provider.send_request(query)
            except Exception as exc:
                print(exc)
                break
        print('quit listening...')
| 30.888889 | 77 | 0.560432 | import asyncio
from .protocol import stock_pb2
from . import providerdict
class KrossClient:
def __init__(self, socket, node):
self.sock = socket
self.node = node
self.write_lock = asyncio.Lock()
async def send_object(self, data):
async with self.write_lock:
await self.sock.send(data.SerializeToString())
async def send_response(self, query, result_code):
result = stock_pb2.StockResult()
result.uuid_h = query.uuid_h
result.uuid_l = query.uuid_l
result.code = result_code
await self.send_object(result)
async def listen(self):
while True:
try:
recv_data = await self.sock.recv()
print('recv msg', len(recv_data))
query = stock_pb2.StockQuery()
query.ParseFromString(recv_data)
provider = await providerdict.find_provider(self.node.vendor,
self.node.channel_type,
self.node.service)
print(provider)
if provider is None:
await self.send_response(query, stock_pb2.NO_PROVIDER)
else:
await provider.send_request(query)
except Exception as e:
print(e)
break
print('quit listening...')
| true | true |
f7fe3f64ab7d9812e1dfd30423018d4e5b38e582 | 11,395 | py | Python | sdf/analytics.py | drgmk/sdf | a44e66a82f876dda079686b32c767370276c38a1 | [
"MIT"
] | 1 | 2020-07-01T15:55:16.000Z | 2020-07-01T15:55:16.000Z | sdf/analytics.py | drgmk/sdf | a44e66a82f876dda079686b32c767370276c38a1 | [
"MIT"
] | 4 | 2017-03-28T19:18:09.000Z | 2021-09-21T08:17:45.000Z | sdf/analytics.py | drgmk/sdf | a44e66a82f876dda079686b32c767370276c38a1 | [
"MIT"
] | 1 | 2020-07-13T19:39:15.000Z | 2020-07-13T19:39:15.000Z | '''Analytic routines for debris disks.'''
import numpy as np
from . import photometry
from . import filter
from . import utils
class BB_Disk(object):
'''A blackbody disk class.
Takes multiple temperatures, the purpose being for use to show
disk properties in parameter spaces such as fractional luminosity
vs. temperature.
Parameters
----------
lstar : float
Stellar luminosity in Solar units.
tstar : float
Stellar effective temperature in Kelvin.
distance : float
Stellar distance in parsec.
wavelengths : 1-D array, optional
Vector of wavelengths.
temperatures : 1-D array, optional
Vector of temperatures.
.. todo:: distance not actually needed for calibration limited, fix.
.. todo:: don't use a for loop over temperatures,
fix utils.bnu_wav_micron instead.
'''
def __init__(self,wavelengths=None,temperatures=None,
lstar=None,tstar=None,distance=None):
'''Initialise, default T=100K, Omega=1.0'''
if wavelengths is None:
self.wavelengths = 10**np.linspace(-1,4,1000)
else:
self.wavelengths = wavelengths
if temperatures is None:
self.temperatures = 10**np.linspace(1,3,1000)
else:
self.temperatures = temperatures
self.lstar = lstar
self.tstar = tstar
self.distance = distance
def blackbody_radii(self):
'''Return the blackbody radii.'''
return (278.3/self.temperatures)**2 * self.lstar**0.5
def radiance(self):
'''Return radiance, in W / m^2 / sr.'''
return 5.67e-8 * self.temperatures**4 / np.pi
def f_limits(self,lim_waves,flux_limits=None,r_limits=None,
stellar_flux=None,fwhm=None,lstar_1pc=None):
'''Return fractional luminosity limits.
This routine implements Wyatt (2008) equations 8 and 11.
Parameters
----------
lim_waves : numpy.ndarray
Array of wavelengths at which limits apply.
flux_limits : numpy.ndarray, optional
Array of flux limits.
r_limits : numpy.ndarray, optional
Array of calibration limits (F_disk/F_star).
stellar_flux : numpy.ndarray, optional
Array of stellar fluxes at lim_waves.
fwhm : numpy.ndarray, optional
Array of spatial resolutions at lim_waves, affects flux
limited observations if disk is resolved.
lstar_1pc : float
L_star at 1pc, used for flux limits when distance unknown.
One of flux_limits or r_limits must be given. If both, they must
have the same length, and correspond to the wavelengths given.
Likewise for stellar_flux and fwhm.
'''
if flux_limits is not None and r_limits is not None:
if len(flux_limits) != len(r_limits):
raise RuntimeError(
'flux_limits must be same length as r_limits')
# sensitivity limit
if flux_limits is not None:
slims = np.zeros((len(self.temperatures),len(flux_limits)))
for i,temp in enumerate(self.temperatures):
if self.distance is not None:
slims[i,:] = 3.4e9 * flux_limits * self.distance**2 / \
self.blackbody_radii()[i]**2 / \
utils.bnu_wav_micron(lim_waves,temp)
else:
# distance independent calculation, 2487305. is
# pc^2/Lsun, haven't tracked down the 4 yet
ldisk_1pc = 4 * 5.6704e-8 * flux_limits * 2487305. * \
temp**4 / utils.bnu_wav_micron(lim_waves,temp)
slims[i,:] = ldisk_1pc / lstar_1pc
# apply correction for resolved disks
if self.distance is not None and fwhm is not None:
fwhm_fact = 2 * self.blackbody_radii()[i] / self.distance / fwhm
resolved = fwhm_fact > 1.0
slims[i,resolved] *= fwhm_fact[resolved]
# calibration limit, use actual stellar flux if given
if r_limits is not None:
if stellar_flux is not None:
if len(stellar_flux) != len(r_limits):
raise RuntimeError(
'Stellar flux ({}) must have same '
'length as r_limits ({})'.format(
len(stellar_flux),
len(r_limits)
)
)
fstar = stellar_flux
else:
fstar = 1.77 * utils.bnu_wav_micron(lim_waves,self.tstar) * \
self.lstar / self.tstar**4 / self.distance**2
clims = np.zeros((len(self.temperatures),len(r_limits)))
for i,temp in enumerate(self.temperatures):
clims[i,:] = 6e9/1.77 * r_limits * fstar / \
utils.bnu_wav_micron(lim_waves,temp) * \
(self.distance/self.blackbody_radii()[i])**2
if flux_limits is not None and r_limits is not None:
return np.minimum(slims,clims)
elif flux_limits is not None:
return slims
elif r_limits is not None:
return clims
else:
raise RuntimeError('Need to pass flux_limits or r_limits')
def f_limits_from_result(self,r,min_wavelength=8.0, sn=3,
x={}, x_det={},
skip_filters=[],keep_filters=None):
'''Derive fractional luminosity limits from an sdf result object.
Also derive fractional luminosities and signal to noise of excess
detections. Return low and high limits, expect to plot these
with pyplot.fill_between and something like:
ax.fill_between(temps, det_lo[:,i], det_hi[:,i],
where=(det_lo[:,i]<det_hi[:,i]), alpha=0.25)
Account for long wavelength grain inefficiency with X factor,
used per filter, e.g. {'WAV850':4}.
Rather than worry about flux vs. calibration limited, just do
the calculation assuming flux limited by calculating the flux
limit for each observed filter (whether it was an upper limit
or not).
Parameters
----------
r : sdf.result.Result
Result object with photometry.
min_wavelength : float, optional
Exclude filters with a mean wavelength shorter than this.
sn : float, optional
S/N at which detection significant, used only for detections.
x : dict
X factor to increase limits by: {filter,X}
x_det : dict
X factor to increase upper detection limit by: {filter,X}
skip_filters : list, optional
List of filters to skip.
keep_filters : list, optional
List of filters to keep, applied after skip_filters.
'''
waves = np.array([])
filters = np.array([])
f_lim = np.array([])
f_det = np.array([])
e_det = np.array([])
f_star = np.array([])
# get stellar luminosity at 1pc if no distance
lstar = None
if self.distance is None:
lstar = 0.0
if hasattr(r,'star'):
for s in r.star:
lstar += s['lstar_1pc']
if lstar == 0.0:
raise utils.SdfError('dont have lstar_1pc or distance')
for p in r.obs:
if not isinstance(p,photometry.Photometry):
continue
ok = np.invert(p.ignore)
# loop to grab correct stellar photometry
for i,filt in enumerate(p.filters[ok]):
new_wave = p.mean_wavelength()[ok][i]
if (filter.iscolour(filt) or
new_wave < min_wavelength or
filt in skip_filters):
continue
if keep_filters is not None:
if filt not in keep_filters:
continue
waves = np.append(waves,new_wave)
filters = np.append(filters,filt)
filt_i = np.where(filt == np.array(r.all_filters))[0]
f_star = np.append(f_star,r.all_star_phot[filt_i])
fac = 1
if filt in x.keys():
fac = x[filt]
if p.upperlim[ok][i]:
f_lim = np.append(f_lim,p.fnujy[ok][i]*fac)
f_det = np.append(f_det, 0)
e_det = np.append(e_det, 0)
else:
# 1sigma uncertainty, observed and star in quadrature
unc = np.sqrt(
p.e_fnujy[ok][i]**2 + \
0.25*(r.all_star_phot_1sig_lo[filt_i] + r.all_star_phot_1sig_hi[filt_i])**2
)
f_lim = np.append(f_lim,3*unc*fac)
f_det = np.append(f_det, p.fnujy[ok][i] - f_star[-1])
e_det = np.append(e_det, unc)
lims = self.f_limits(waves,flux_limits=f_lim,
stellar_flux=f_star,lstar_1pc=lstar)
dets = self.f_limits(waves,flux_limits=f_det,
stellar_flux=f_star,lstar_1pc=lstar)
ok = e_det > 0
sn_dets = np.zeros(lims.shape[1])
sn_dets[ok] = f_det[ok] / e_det[ok]
# now compute limit ranges for detections, first get ranges
det_lo = np.zeros(lims.shape)
det_hi = lims.copy()
both_hi = lims.copy()
for i in range(lims.shape[1]):
if sn_dets[i]>sn:
fac = 1
if filters[i] in x_det.keys():
fac = x_det[filters[i]]
det_lo[:,i] = dets[:,i]*(1-sn/sn_dets[i])
det_hi[:,i] = dets[:,i]*(fac+sn/sn_dets[i])
both_hi[:,i] = np.max([[det_hi[:,i]],[lims[:,i]]], axis=0)
# now adjust high limit based on other limits
for i in range(lims.shape[1]):
other = np.arange(lims.shape[1]) != i
det_hi[:,i] = np.min( np.hstack((both_hi[:,other],det_hi[:,i].reshape((-1,1)))), axis=1 )
return lims, det_lo, det_hi, sn_dets, filters
def f_limits_togrid(self, lims, f=None):
'''Return boolean grid in f - r/T space indicating detectability.
Sum multiple of these to get the grid that shows how many of the
systems it was possible to detect a disk for.
Parameters
----------
lims : array
Array of f limits (i.e. n_temperatures x n_lim).
f : array, optional
Array of f to use in grid.
'''
if f is None:
f = 10**np.linspace(-7,-1,100)
fs, _ = np.meshgrid(f, self.temperatures)
return fs > np.min(lim, axis=1), f
| 37.731788 | 101 | 0.526196 |
import numpy as np
from . import photometry
from . import filter
from . import utils
class BB_Disk(object):
def __init__(self,wavelengths=None,temperatures=None,
lstar=None,tstar=None,distance=None):
if wavelengths is None:
self.wavelengths = 10**np.linspace(-1,4,1000)
else:
self.wavelengths = wavelengths
if temperatures is None:
self.temperatures = 10**np.linspace(1,3,1000)
else:
self.temperatures = temperatures
self.lstar = lstar
self.tstar = tstar
self.distance = distance
def blackbody_radii(self):
return (278.3/self.temperatures)**2 * self.lstar**0.5
def radiance(self):
return 5.67e-8 * self.temperatures**4 / np.pi
def f_limits(self,lim_waves,flux_limits=None,r_limits=None,
stellar_flux=None,fwhm=None,lstar_1pc=None):
if flux_limits is not None and r_limits is not None:
if len(flux_limits) != len(r_limits):
raise RuntimeError(
'flux_limits must be same length as r_limits')
if flux_limits is not None:
slims = np.zeros((len(self.temperatures),len(flux_limits)))
for i,temp in enumerate(self.temperatures):
if self.distance is not None:
slims[i,:] = 3.4e9 * flux_limits * self.distance**2 / \
self.blackbody_radii()[i]**2 / \
utils.bnu_wav_micron(lim_waves,temp)
else:
ldisk_1pc = 4 * 5.6704e-8 * flux_limits * 2487305. * \
temp**4 / utils.bnu_wav_micron(lim_waves,temp)
slims[i,:] = ldisk_1pc / lstar_1pc
# apply correction for resolved disks
if self.distance is not None and fwhm is not None:
fwhm_fact = 2 * self.blackbody_radii()[i] / self.distance / fwhm
resolved = fwhm_fact > 1.0
slims[i,resolved] *= fwhm_fact[resolved]
# calibration limit, use actual stellar flux if given
if r_limits is not None:
if stellar_flux is not None:
if len(stellar_flux) != len(r_limits):
raise RuntimeError(
'Stellar flux ({}) must have same '
'length as r_limits ({})'.format(
len(stellar_flux),
len(r_limits)
)
)
fstar = stellar_flux
else:
fstar = 1.77 * utils.bnu_wav_micron(lim_waves,self.tstar) * \
self.lstar / self.tstar**4 / self.distance**2
clims = np.zeros((len(self.temperatures),len(r_limits)))
for i,temp in enumerate(self.temperatures):
clims[i,:] = 6e9/1.77 * r_limits * fstar / \
utils.bnu_wav_micron(lim_waves,temp) * \
(self.distance/self.blackbody_radii()[i])**2
if flux_limits is not None and r_limits is not None:
return np.minimum(slims,clims)
elif flux_limits is not None:
return slims
elif r_limits is not None:
return clims
else:
raise RuntimeError('Need to pass flux_limits or r_limits')
def f_limits_from_result(self,r,min_wavelength=8.0, sn=3,
x={}, x_det={},
skip_filters=[],keep_filters=None):
waves = np.array([])
filters = np.array([])
f_lim = np.array([])
f_det = np.array([])
e_det = np.array([])
f_star = np.array([])
# get stellar luminosity at 1pc if no distance
lstar = None
if self.distance is None:
lstar = 0.0
if hasattr(r,'star'):
for s in r.star:
lstar += s['lstar_1pc']
if lstar == 0.0:
raise utils.SdfError('dont have lstar_1pc or distance')
for p in r.obs:
if not isinstance(p,photometry.Photometry):
continue
ok = np.invert(p.ignore)
# loop to grab correct stellar photometry
for i,filt in enumerate(p.filters[ok]):
new_wave = p.mean_wavelength()[ok][i]
if (filter.iscolour(filt) or
new_wave < min_wavelength or
filt in skip_filters):
continue
if keep_filters is not None:
if filt not in keep_filters:
continue
waves = np.append(waves,new_wave)
filters = np.append(filters,filt)
filt_i = np.where(filt == np.array(r.all_filters))[0]
f_star = np.append(f_star,r.all_star_phot[filt_i])
fac = 1
if filt in x.keys():
fac = x[filt]
if p.upperlim[ok][i]:
f_lim = np.append(f_lim,p.fnujy[ok][i]*fac)
f_det = np.append(f_det, 0)
e_det = np.append(e_det, 0)
else:
# 1sigma uncertainty, observed and star in quadrature
unc = np.sqrt(
p.e_fnujy[ok][i]**2 + \
0.25*(r.all_star_phot_1sig_lo[filt_i] + r.all_star_phot_1sig_hi[filt_i])**2
)
f_lim = np.append(f_lim,3*unc*fac)
f_det = np.append(f_det, p.fnujy[ok][i] - f_star[-1])
e_det = np.append(e_det, unc)
lims = self.f_limits(waves,flux_limits=f_lim,
stellar_flux=f_star,lstar_1pc=lstar)
dets = self.f_limits(waves,flux_limits=f_det,
stellar_flux=f_star,lstar_1pc=lstar)
ok = e_det > 0
sn_dets = np.zeros(lims.shape[1])
sn_dets[ok] = f_det[ok] / e_det[ok]
# now compute limit ranges for detections, first get ranges
det_lo = np.zeros(lims.shape)
det_hi = lims.copy()
both_hi = lims.copy()
for i in range(lims.shape[1]):
if sn_dets[i]>sn:
fac = 1
if filters[i] in x_det.keys():
fac = x_det[filters[i]]
det_lo[:,i] = dets[:,i]*(1-sn/sn_dets[i])
det_hi[:,i] = dets[:,i]*(fac+sn/sn_dets[i])
both_hi[:,i] = np.max([[det_hi[:,i]],[lims[:,i]]], axis=0)
# now adjust high limit based on other limits
for i in range(lims.shape[1]):
other = np.arange(lims.shape[1]) != i
det_hi[:,i] = np.min( np.hstack((both_hi[:,other],det_hi[:,i].reshape((-1,1)))), axis=1 )
return lims, det_lo, det_hi, sn_dets, filters
def f_limits_togrid(self, lims, f=None):
if f is None:
f = 10**np.linspace(-7,-1,100)
fs, _ = np.meshgrid(f, self.temperatures)
return fs > np.min(lim, axis=1), f
| true | true |
f7fe3f9bc200f644573ec804033d199944028ccc | 920 | py | Python | pcstools/compat.py | DIKU-PCS/pcstools | cba37914f566b3a848de6de785ada66a41c7be4d | [
"MIT"
] | 1 | 2019-01-03T15:41:33.000Z | 2019-01-03T15:41:33.000Z | pcstools/compat.py | DIKU-PCS/pcstools | cba37914f566b3a848de6de785ada66a41c7be4d | [
"MIT"
] | 1 | 2019-01-07T12:12:26.000Z | 2019-01-10T14:06:03.000Z | pcstools/compat.py | DIKU-PCS/pcstools | cba37914f566b3a848de6de785ada66a41c7be4d | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, print_function, absolute_import, division
import sys
import collections
if sys.version_info < (3,):
integer_types = (int, long,)
bytes_type = bytes # "bytes" == "str" == a slice of bytes
unicode_type = unicode # "unicode" == a valid unicode string
bytearray_type = bytearray
iterable_type = collections.Iterable
else:
integer_types = (int,)
bytes_type = bytes # "bytes" == a slice of bytes
unicode_type = str # "str" == a valid unicode string, "unicode" is undefined
bytearray_type = bytearray
iterable_type = collections.abc.Iterable
def is_int(n):
return isinstance(n, integer_types)
def is_bytes(s):
return isinstance(s, bytes_type)
def is_unicode(s):
return isinstance(s, unicode_type)
def is_bytearray(s):
return isinstance(s, bytearray_type)
def is_iterable(s):
return isinstance(s, iterable_type)
| 24.864865 | 82 | 0.709783 | from __future__ import unicode_literals, print_function, absolute_import, division
import sys
import collections
if sys.version_info < (3,):
integer_types = (int, long,)
bytes_type = bytes
unicode_type = unicode
bytearray_type = bytearray
iterable_type = collections.Iterable
else:
integer_types = (int,)
bytes_type = bytes
unicode_type = str
bytearray_type = bytearray
iterable_type = collections.abc.Iterable
def is_int(n):
return isinstance(n, integer_types)
def is_bytes(s):
return isinstance(s, bytes_type)
def is_unicode(s):
return isinstance(s, unicode_type)
def is_bytearray(s):
return isinstance(s, bytearray_type)
def is_iterable(s):
return isinstance(s, iterable_type)
| true | true |
f7fe3fcde652c902f9e6ec03eec61d9066243d33 | 1,799 | py | Python | pytouch/main.py | aboutNisblee/PyTouch | 0098aa07ac78ec1868e0a155c92342512d07613a | [
"MIT"
] | 3 | 2016-08-19T16:17:53.000Z | 2017-07-19T18:55:37.000Z | pytouch/main.py | mNisblee/PyTouch | 0098aa07ac78ec1868e0a155c92342512d07613a | [
"MIT"
] | null | null | null | pytouch/main.py | mNisblee/PyTouch | 0098aa07ac78ec1868e0a155c92342512d07613a | [
"MIT"
] | null | null | null | import logging
import argparse
from pytouch.model import get_engine, Session, reset_db
from pytouch.gui.tk import window
def init_db(args):
db_uri = getattr(args, 'database')
logging.debug('Database URI: {}'.format(db_uri))
engine = get_engine({'sqlalchemy.url': db_uri})
Session.configure(bind=engine)
def reset_database(args):
from pytouch.service import CourseService
init_db(args)
reset_db()
CourseService.init_courses()
def run(args=None):
init_db(args)
window.MainWindow().show()
def manage():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument('--verbose', '-v', action='count', default=0, help='Increase verbosity')
verbosity_group.add_argument('--quiet', '-q', action='store_true', help='Reduce verbosity')
# FIXME: Database path incorrect! Depends on installation path!
parser.add_argument('--database', type=str, default='sqlite:///tests.sqlite', help='Change the default database')
parser.set_defaults(fun=run)
parser_setup = subparsers.add_parser('reset-database')
parser_setup.set_defaults(fun=reset_database)
args = parser.parse_args()
lut_verbosity = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
level = logging.FATAL if getattr(args, 'quiet') else lut_verbosity.get(getattr(args, 'verbose', 0), logging.DEBUG)
# logging.basicConfig(level=level, format='%(asctime)s - %(levelname)s - %(message)s')
logging.basicConfig(level=level, format='%(name)s: %(levelname)s: %(message)s')
logging.info('Configuration:\n\t{}'.format('\n\t'.join(['{}: {}'.format(k, getattr(v, '__name__', v)) for k, v in sorted(args.__dict__.items())])))
args.fun(args)
| 33.943396 | 151 | 0.704836 | import logging
import argparse
from pytouch.model import get_engine, Session, reset_db
from pytouch.gui.tk import window
def init_db(args):
db_uri = getattr(args, 'database')
logging.debug('Database URI: {}'.format(db_uri))
engine = get_engine({'sqlalchemy.url': db_uri})
Session.configure(bind=engine)
def reset_database(args):
from pytouch.service import CourseService
init_db(args)
reset_db()
CourseService.init_courses()
def run(args=None):
init_db(args)
window.MainWindow().show()
def manage():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument('--verbose', '-v', action='count', default=0, help='Increase verbosity')
verbosity_group.add_argument('--quiet', '-q', action='store_true', help='Reduce verbosity')
parser.add_argument('--database', type=str, default='sqlite:///tests.sqlite', help='Change the default database')
parser.set_defaults(fun=run)
parser_setup = subparsers.add_parser('reset-database')
parser_setup.set_defaults(fun=reset_database)
args = parser.parse_args()
lut_verbosity = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
level = logging.FATAL if getattr(args, 'quiet') else lut_verbosity.get(getattr(args, 'verbose', 0), logging.DEBUG)
logging.basicConfig(level=level, format='%(name)s: %(levelname)s: %(message)s')
logging.info('Configuration:\n\t{}'.format('\n\t'.join(['{}: {}'.format(k, getattr(v, '__name__', v)) for k, v in sorted(args.__dict__.items())])))
args.fun(args)
| true | true |
f7fe4058ec82de1e42ecdc36092e184e8c69410d | 286 | py | Python | labgraph/zmq_node/zmq_message.py | Yunusbcr/labgraph | a00ae7098b7b0e0eda8ce2e7e62dae86854616fb | [
"MIT"
] | 124 | 2021-07-14T21:25:59.000Z | 2022-03-08T20:40:16.000Z | labgraph/zmq_node/zmq_message.py | VanEdward/labgraph | 9488feac59f9ef86091befdeaddb69d84e4d6fb3 | [
"MIT"
] | 46 | 2021-07-16T18:41:11.000Z | 2022-03-31T20:53:00.000Z | labgraph/zmq_node/zmq_message.py | VanEdward/labgraph | 9488feac59f9ef86091befdeaddb69d84e4d6fb3 | [
"MIT"
] | 22 | 2021-07-16T18:34:56.000Z | 2022-03-31T15:12:06.000Z | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from ..messages import Message
from ..messages.types import BytesType
class ZMQMessage(Message):
    """
    A message representing data that was/will be communicated
    to ZMQ.
    """

    # raw message payload as bytes
    data: bytes
| 19.066667 | 61 | 0.70979 |
from ..messages import Message
from ..messages.types import BytesType
class ZMQMessage(Message):
data: bytes
| true | true |
f7fe4083e0a07eb9bc7331a5d85aaf33088a38c8 | 1,938 | py | Python | official/vision/beta/projects/volumetric_models/modeling/factory_test.py | VidyaKamath1089/models | 09459d2afa4cae31fc85cd6108d7559aa2ddd521 | [
"Apache-2.0"
] | 1 | 2021-07-12T07:27:47.000Z | 2021-07-12T07:27:47.000Z | official/vision/beta/projects/volumetric_models/modeling/factory_test.py | Zed1014/1 | 0e74158f72160a5d25b977de7f6f2cf4d1908dba | [
"Apache-2.0"
] | null | null | null | official/vision/beta/projects/volumetric_models/modeling/factory_test.py | Zed1014/1 | 0e74158f72160a5d25b977de7f6f2cf4d1908dba | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory.py."""
from absl.testing import parameterized
import tensorflow as tf
# pylint: disable=unused-import
from official.vision.beta.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
from official.vision.beta.projects.volumetric_models.modeling import backbones
from official.vision.beta.projects.volumetric_models.modeling import decoders
from official.vision.beta.projects.volumetric_models.modeling import factory
class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """Checks that the 3D segmentation factory produces a Keras model."""

  @parameterized.parameters(((128, 128, 128), 5e-5), ((64, 64, 64), None))
  def test_unet3d_builder(self, input_size, weight_decay):
    depth, height, width = input_size
    # Batch dimension is left unspecified; channels fixed at 3.
    specs = tf.keras.layers.InputSpec(shape=[None, depth, height, width, 3])
    config = exp_cfg.SemanticSegmentationModel3D(num_classes=3)
    if weight_decay:
      regularizer = tf.keras.regularizers.l2(weight_decay)
    else:
      regularizer = None
    model = factory.build_segmentation_model_3d(
        input_specs=specs,
        model_config=config,
        l2_regularizer=regularizer)
    self.assertIsInstance(
        model, tf.keras.Model,
        'Output should be a tf.keras.Model instance but got %s' % type(model))
if __name__ == '__main__':
  # Discover and run the test cases defined in this module.
  tf.test.main()
| 40.375 | 103 | 0.762126 |
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.projects.volumetric_models.configs import semantic_segmentation_3d as exp_cfg
from official.vision.beta.projects.volumetric_models.modeling import backbones
from official.vision.beta.projects.volumetric_models.modeling import decoders
from official.vision.beta.projects.volumetric_models.modeling import factory
class SegmentationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(((128, 128, 128), 5e-5), ((64, 64, 64), None))
def test_unet3d_builder(self, input_size, weight_decay):
num_classes = 3
input_specs = tf.keras.layers.InputSpec(
shape=[None, input_size[0], input_size[1], input_size[2], 3])
model_config = exp_cfg.SemanticSegmentationModel3D(num_classes=num_classes)
l2_regularizer = (
tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
model = factory.build_segmentation_model_3d(
input_specs=input_specs,
model_config=model_config,
l2_regularizer=l2_regularizer)
self.assertIsInstance(
model, tf.keras.Model,
'Output should be a tf.keras.Model instance but got %s' % type(model))
if __name__ == '__main__':
tf.test.main()
| true | true |
f7fe41870dedf643cef5f5c5ef6085322163adde | 14,370 | py | Python | kubernetes/client/models/v1_node_system_info.py | fsduser/python | 2b20069ebc05283352fbdc95bbdca2b6133a4175 | [
"Apache-2.0"
] | 1 | 2021-10-15T13:05:45.000Z | 2021-10-15T13:05:45.000Z | kubernetes/client/models/v1_node_system_info.py | belajarqywok/python | b15bea16a87ad03136a4627941ac437582ea4657 | [
"Apache-2.0"
] | 10 | 2020-10-01T03:15:01.000Z | 2022-03-01T03:06:31.000Z | kubernetes/client/models/v1_node_system_info.py | belajarqywok/python | b15bea16a87ad03136a4627941ac437582ea4657 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.19
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1NodeSystemInfo(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of each python attribute (all plain strings here).
    openapi_types = {
        'architecture': 'str',
        'boot_id': 'str',
        'container_runtime_version': 'str',
        'kernel_version': 'str',
        'kube_proxy_version': 'str',
        'kubelet_version': 'str',
        'machine_id': 'str',
        'operating_system': 'str',
        'os_image': 'str',
        'system_uuid': 'str'
    }

    # Python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'architecture': 'architecture',
        'boot_id': 'bootID',
        'container_runtime_version': 'containerRuntimeVersion',
        'kernel_version': 'kernelVersion',
        'kube_proxy_version': 'kubeProxyVersion',
        'kubelet_version': 'kubeletVersion',
        'machine_id': 'machineID',
        'operating_system': 'operatingSystem',
        'os_image': 'osImage',
        'system_uuid': 'systemUUID'
    }

    def __init__(self, architecture=None, boot_id=None, container_runtime_version=None, kernel_version=None, kube_proxy_version=None, kubelet_version=None, machine_id=None, operating_system=None, os_image=None, system_uuid=None, local_vars_configuration=None):  # noqa: E501
        """V1NodeSystemInfo - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._architecture = None
        self._boot_id = None
        self._container_runtime_version = None
        self._kernel_version = None
        self._kube_proxy_version = None
        self._kubelet_version = None
        self._machine_id = None
        self._operating_system = None
        self._os_image = None
        self._system_uuid = None
        self.discriminator = None

        # Assign through the public properties so each setter's
        # client-side validation (non-None checks) runs.
        self.architecture = architecture
        self.boot_id = boot_id
        self.container_runtime_version = container_runtime_version
        self.kernel_version = kernel_version
        self.kube_proxy_version = kube_proxy_version
        self.kubelet_version = kubelet_version
        self.machine_id = machine_id
        self.operating_system = operating_system
        self.os_image = os_image
        self.system_uuid = system_uuid

    @property
    def architecture(self):
        """Gets the architecture of this V1NodeSystemInfo.  # noqa: E501

        The Architecture reported by the node  # noqa: E501

        :return: The architecture of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._architecture

    @architecture.setter
    def architecture(self, architecture):
        """Sets the architecture of this V1NodeSystemInfo.

        The Architecture reported by the node  # noqa: E501

        :param architecture: The architecture of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and architecture is None:  # noqa: E501
            raise ValueError("Invalid value for `architecture`, must not be `None`")  # noqa: E501

        self._architecture = architecture

    @property
    def boot_id(self):
        """Gets the boot_id of this V1NodeSystemInfo.  # noqa: E501

        Boot ID reported by the node.  # noqa: E501

        :return: The boot_id of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._boot_id

    @boot_id.setter
    def boot_id(self, boot_id):
        """Sets the boot_id of this V1NodeSystemInfo.

        Boot ID reported by the node.  # noqa: E501

        :param boot_id: The boot_id of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and boot_id is None:  # noqa: E501
            raise ValueError("Invalid value for `boot_id`, must not be `None`")  # noqa: E501

        self._boot_id = boot_id

    @property
    def container_runtime_version(self):
        """Gets the container_runtime_version of this V1NodeSystemInfo.  # noqa: E501

        ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).  # noqa: E501

        :return: The container_runtime_version of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._container_runtime_version

    @container_runtime_version.setter
    def container_runtime_version(self, container_runtime_version):
        """Sets the container_runtime_version of this V1NodeSystemInfo.

        ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).  # noqa: E501

        :param container_runtime_version: The container_runtime_version of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and container_runtime_version is None:  # noqa: E501
            raise ValueError("Invalid value for `container_runtime_version`, must not be `None`")  # noqa: E501

        self._container_runtime_version = container_runtime_version

    @property
    def kernel_version(self):
        """Gets the kernel_version of this V1NodeSystemInfo.  # noqa: E501

        Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).  # noqa: E501

        :return: The kernel_version of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._kernel_version

    @kernel_version.setter
    def kernel_version(self, kernel_version):
        """Sets the kernel_version of this V1NodeSystemInfo.

        Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).  # noqa: E501

        :param kernel_version: The kernel_version of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and kernel_version is None:  # noqa: E501
            raise ValueError("Invalid value for `kernel_version`, must not be `None`")  # noqa: E501

        self._kernel_version = kernel_version

    @property
    def kube_proxy_version(self):
        """Gets the kube_proxy_version of this V1NodeSystemInfo.  # noqa: E501

        KubeProxy Version reported by the node.  # noqa: E501

        :return: The kube_proxy_version of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._kube_proxy_version

    @kube_proxy_version.setter
    def kube_proxy_version(self, kube_proxy_version):
        """Sets the kube_proxy_version of this V1NodeSystemInfo.

        KubeProxy Version reported by the node.  # noqa: E501

        :param kube_proxy_version: The kube_proxy_version of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and kube_proxy_version is None:  # noqa: E501
            raise ValueError("Invalid value for `kube_proxy_version`, must not be `None`")  # noqa: E501

        self._kube_proxy_version = kube_proxy_version

    @property
    def kubelet_version(self):
        """Gets the kubelet_version of this V1NodeSystemInfo.  # noqa: E501

        Kubelet Version reported by the node.  # noqa: E501

        :return: The kubelet_version of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._kubelet_version

    @kubelet_version.setter
    def kubelet_version(self, kubelet_version):
        """Sets the kubelet_version of this V1NodeSystemInfo.

        Kubelet Version reported by the node.  # noqa: E501

        :param kubelet_version: The kubelet_version of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and kubelet_version is None:  # noqa: E501
            raise ValueError("Invalid value for `kubelet_version`, must not be `None`")  # noqa: E501

        self._kubelet_version = kubelet_version

    @property
    def machine_id(self):
        """Gets the machine_id of this V1NodeSystemInfo.  # noqa: E501

        MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html  # noqa: E501

        :return: The machine_id of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._machine_id

    @machine_id.setter
    def machine_id(self, machine_id):
        """Sets the machine_id of this V1NodeSystemInfo.

        MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html  # noqa: E501

        :param machine_id: The machine_id of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and machine_id is None:  # noqa: E501
            raise ValueError("Invalid value for `machine_id`, must not be `None`")  # noqa: E501

        self._machine_id = machine_id

    @property
    def operating_system(self):
        """Gets the operating_system of this V1NodeSystemInfo.  # noqa: E501

        The Operating System reported by the node  # noqa: E501

        :return: The operating_system of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._operating_system

    @operating_system.setter
    def operating_system(self, operating_system):
        """Sets the operating_system of this V1NodeSystemInfo.

        The Operating System reported by the node  # noqa: E501

        :param operating_system: The operating_system of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and operating_system is None:  # noqa: E501
            raise ValueError("Invalid value for `operating_system`, must not be `None`")  # noqa: E501

        self._operating_system = operating_system

    @property
    def os_image(self):
        """Gets the os_image of this V1NodeSystemInfo.  # noqa: E501

        OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).  # noqa: E501

        :return: The os_image of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._os_image

    @os_image.setter
    def os_image(self, os_image):
        """Sets the os_image of this V1NodeSystemInfo.

        OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).  # noqa: E501

        :param os_image: The os_image of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and os_image is None:  # noqa: E501
            raise ValueError("Invalid value for `os_image`, must not be `None`")  # noqa: E501

        self._os_image = os_image

    @property
    def system_uuid(self):
        """Gets the system_uuid of this V1NodeSystemInfo.  # noqa: E501

        SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid  # noqa: E501

        :return: The system_uuid of this V1NodeSystemInfo.  # noqa: E501
        :rtype: str
        """
        return self._system_uuid

    @system_uuid.setter
    def system_uuid(self, system_uuid):
        """Sets the system_uuid of this V1NodeSystemInfo.

        SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid  # noqa: E501

        :param system_uuid: The system_uuid of this V1NodeSystemInfo.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and system_uuid is None:  # noqa: E501
            raise ValueError("Invalid value for `system_uuid`, must not be `None`")  # noqa: E501

        self._system_uuid = system_uuid

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists and dicts so the result
        # contains only plain python values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1NodeSystemInfo):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1NodeSystemInfo):
            return True

        return self.to_dict() != other.to_dict()
| 37.324675 | 274 | 0.653236 |
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class V1NodeSystemInfo(object):
openapi_types = {
'architecture': 'str',
'boot_id': 'str',
'container_runtime_version': 'str',
'kernel_version': 'str',
'kube_proxy_version': 'str',
'kubelet_version': 'str',
'machine_id': 'str',
'operating_system': 'str',
'os_image': 'str',
'system_uuid': 'str'
}
attribute_map = {
'architecture': 'architecture',
'boot_id': 'bootID',
'container_runtime_version': 'containerRuntimeVersion',
'kernel_version': 'kernelVersion',
'kube_proxy_version': 'kubeProxyVersion',
'kubelet_version': 'kubeletVersion',
'machine_id': 'machineID',
'operating_system': 'operatingSystem',
'os_image': 'osImage',
'system_uuid': 'systemUUID'
}
def __init__(self, architecture=None, boot_id=None, container_runtime_version=None, kernel_version=None, kube_proxy_version=None, kubelet_version=None, machine_id=None, operating_system=None, os_image=None, system_uuid=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._architecture = None
self._boot_id = None
self._container_runtime_version = None
self._kernel_version = None
self._kube_proxy_version = None
self._kubelet_version = None
self._machine_id = None
self._operating_system = None
self._os_image = None
self._system_uuid = None
self.discriminator = None
self.architecture = architecture
self.boot_id = boot_id
self.container_runtime_version = container_runtime_version
self.kernel_version = kernel_version
self.kube_proxy_version = kube_proxy_version
self.kubelet_version = kubelet_version
self.machine_id = machine_id
self.operating_system = operating_system
self.os_image = os_image
self.system_uuid = system_uuid
@property
def architecture(self):
return self._architecture
@architecture.setter
def architecture(self, architecture):
if self.local_vars_configuration.client_side_validation and architecture is None:
raise ValueError("Invalid value for `architecture`, must not be `None`")
self._architecture = architecture
@property
def boot_id(self):
return self._boot_id
@boot_id.setter
def boot_id(self, boot_id):
if self.local_vars_configuration.client_side_validation and boot_id is None:
raise ValueError("Invalid value for `boot_id`, must not be `None`")
self._boot_id = boot_id
@property
def container_runtime_version(self):
return self._container_runtime_version
@container_runtime_version.setter
def container_runtime_version(self, container_runtime_version):
if self.local_vars_configuration.client_side_validation and container_runtime_version is None:
raise ValueError("Invalid value for `container_runtime_version`, must not be `None`")
self._container_runtime_version = container_runtime_version
@property
def kernel_version(self):
return self._kernel_version
@kernel_version.setter
def kernel_version(self, kernel_version):
if self.local_vars_configuration.client_side_validation and kernel_version is None:
raise ValueError("Invalid value for `kernel_version`, must not be `None`")
self._kernel_version = kernel_version
@property
def kube_proxy_version(self):
return self._kube_proxy_version
@kube_proxy_version.setter
def kube_proxy_version(self, kube_proxy_version):
if self.local_vars_configuration.client_side_validation and kube_proxy_version is None:
raise ValueError("Invalid value for `kube_proxy_version`, must not be `None`")
self._kube_proxy_version = kube_proxy_version
@property
def kubelet_version(self):
return self._kubelet_version
@kubelet_version.setter
def kubelet_version(self, kubelet_version):
if self.local_vars_configuration.client_side_validation and kubelet_version is None:
raise ValueError("Invalid value for `kubelet_version`, must not be `None`")
self._kubelet_version = kubelet_version
@property
def machine_id(self):
return self._machine_id
@machine_id.setter
def machine_id(self, machine_id):
if self.local_vars_configuration.client_side_validation and machine_id is None:
raise ValueError("Invalid value for `machine_id`, must not be `None`")
self._machine_id = machine_id
@property
def operating_system(self):
return self._operating_system
@operating_system.setter
def operating_system(self, operating_system):
if self.local_vars_configuration.client_side_validation and operating_system is None:
raise ValueError("Invalid value for `operating_system`, must not be `None`")
self._operating_system = operating_system
@property
def os_image(self):
return self._os_image
@os_image.setter
def os_image(self, os_image):
if self.local_vars_configuration.client_side_validation and os_image is None:
raise ValueError("Invalid value for `os_image`, must not be `None`")
self._os_image = os_image
@property
def system_uuid(self):
return self._system_uuid
@system_uuid.setter
def system_uuid(self, system_uuid):
if self.local_vars_configuration.client_side_validation and system_uuid is None:
raise ValueError("Invalid value for `system_uuid`, must not be `None`")
self._system_uuid = system_uuid
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1NodeSystemInfo):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, V1NodeSystemInfo):
return True
return self.to_dict() != other.to_dict()
| true | true |
f7fe41f49752828ebc760e8651e0f1f355762ceb | 3,447 | py | Python | insta/settings.py | alerin345/Instagram | 082e4a64042ae94f3eacfc10144f925e3dfc2492 | [
"MIT"
] | 1 | 2020-12-31T00:28:24.000Z | 2020-12-31T00:28:24.000Z | insta/settings.py | alerin345/Instagram | 082e4a64042ae94f3eacfc10144f925e3dfc2492 | [
"MIT"
] | null | null | null | insta/settings.py | alerin345/Instagram | 082e4a64042ae94f3eacfc10144f925e3dfc2492 | [
"MIT"
] | null | null | null | """
Django settings for insta project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=om6hirndn-h)u5kft^nozqr)_!jn)50*z&j5=kbv0u6d(#c+2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
'main',
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'insta.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'insta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#STATIC_ROOT = (os.path.join(SITE_ROOT, 'static_files/'))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL ='/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'/static/'),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images')
ALLOWED_HOSTS = ['*']
# ALLOWED_HOSTS = ['192.168.1.132','192.168.1.186','127.0.0.1']
| 25.917293 | 91 | 0.691036 |
import os
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '=om6hirndn-h)u5kft^nozqr)_!jn)50*z&j5=kbv0u6d(#c+2'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
'main',
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'insta.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'insta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#STATIC_ROOT = (os.path.join(SITE_ROOT, 'static_files/'))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL ='/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'/static/'),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images')
ALLOWED_HOSTS = ['*']
# ALLOWED_HOSTS = ['192.168.1.132','192.168.1.186','127.0.0.1']
| true | true |
f7fe4217b716ceb186ca33d9ef2ad0751688a9fc | 1,413 | py | Python | tools/build/v2/util/sequence.py | jmuskaan72/Boost | 047e36c01841a8cd6a5c74d4e3034da46e327bc1 | [
"BSL-1.0"
] | 198 | 2015-01-13T05:47:18.000Z | 2022-03-09T04:46:46.000Z | libs/boost/tools/build/src/util/sequence.py | flingone/frameworks_base_cmds_remoted | 4509d9f0468137ed7fd8d100179160d167e7d943 | [
"Apache-2.0"
] | 9 | 2015-01-28T16:33:19.000Z | 2020-04-12T23:03:28.000Z | libs/boost/tools/build/src/util/sequence.py | flingone/frameworks_base_cmds_remoted | 4509d9f0468137ed7fd8d100179160d167e7d943 | [
"Apache-2.0"
] | 139 | 2015-01-15T20:09:31.000Z | 2022-01-31T15:21:16.000Z | # (C) Copyright David Abrahams 2002. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
import operator
def unique (values, stable=False):
    """ Return the distinct elements of 'values'.

    With stable=True the first-occurrence order of 'values' is preserved;
    otherwise the order of the result is unspecified.
    """
    if not stable:
        return list(set(values))
    seen = set()
    ordered_result = []
    for value in values:
        if value not in seen:
            seen.add(value)
            ordered_result.append(value)
    return ordered_result
def max_element (elements, ordered = None):
    """ Return the largest element of 'elements' under the strict
    ordering 'ordered' ('<' if none is provided).
    """
    compare = ordered or operator.lt
    best = elements[0]
    for candidate in elements[1:]:
        if compare(best, candidate):
            best = candidate
    return best
def select_highest_ranked (elements, ranks):
    """ Return all of 'elements' whose corresponding entry in the
    parallel list 'ranks' equals the maximum rank.
    """
    if not elements:
        return []
    top_rank = max(ranks)
    return [element for position, element in enumerate(elements)
            if ranks[position] == top_rank]
| 27.705882 | 82 | 0.578202 |
import operator
def unique (values, stable=False):
if stable:
s = set()
r = []
for v in values:
if not v in s:
r.append(v)
s.add(v)
return r
else:
return list(set(values))
def max_element (elements, ordered = None):
if not ordered: ordered = operator.lt
max = elements [0]
for e in elements [1:]:
if ordered (max, e):
max = e
return max
def select_highest_ranked (elements, ranks):
if not elements:
return []
max_rank = max_element (ranks)
result = []
while elements:
if ranks [0] == max_rank:
result.append (elements [0])
elements = elements [1:]
ranks = ranks [1:]
return result
| true | true |
f7fe42203cc4e6e4be9c60491fd33d46beae5fce | 2,368 | py | Python | train/Detrac_data_preprocess/params_init_Detrac_shulffnetv2_yolov3.py | yuanliangxie/YOLOv3_simple_baseline | 325e2963ae770e6f45912f3142941d3bddaf9d6e | [
"Apache-2.0"
] | 1 | 2022-02-26T10:13:08.000Z | 2022-02-26T10:13:08.000Z | train/Detrac_data_preprocess/params_init_Detrac_shulffnetv2_yolov3.py | yuanliangxie/YOLOv3_simple_baseline | 325e2963ae770e6f45912f3142941d3bddaf9d6e | [
"Apache-2.0"
] | null | null | null | train/Detrac_data_preprocess/params_init_Detrac_shulffnetv2_yolov3.py | yuanliangxie/YOLOv3_simple_baseline | 325e2963ae770e6f45912f3142941d3bddaf9d6e | [
"Apache-2.0"
] | null | null | null | # Training configuration for YOLOv3 with a ShuffleNetV2 backbone on the
# UA-DETRAC vehicle-detection dataset.
TRAINING_PARAMS = \
	{
		"model_params": {
			"backbone_name": "darknet53",
			"backbone_weight": "",
		},
		"model": {
			"anchors": [[[116, 90], [156, 198], [373, 326]],
						[[30, 61], [62, 45], [59, 119]],
						[[10, 13], [16, 30], [33, 23]]],
			"classes": 1,
			#"classes_category": ['car', 'bus', 'van', 'others']
			"classes_category": ["car"]
		},
		"lr": {
			"backbone_lr": 1e-4,
			"other_lr": 1e-4,
			"LR_INIT": 1e-4,
			"LR_END": 1e-6,
			"WARMUP_EPOCHS": 1,
			"freeze_backbone": False,  # freeze backbone weights to finetune
			"decay_step": [60, 80],
			"decay_gamma": 0.1
		},
		"optimizer": {
			"type": "sgd",
			"weight_decay": 5e-04,
		},
		"data_path": "/media/xyl/6418a039-786d-4cd8-b0bb-1ed36a649668/Datasets/UA-DETRAC",
		"batch_size": 8,
		"train_path": "../data/detrac/train.txt",  #../data/coco/vehecal/vehecal_train.txt",
		"train_ignore_region": "../data/detrac/train_ignore_region.txt",
		"train_labels_path": "../data/detrac/labels",
		"epochs": 32,
		"Multi-scale training": True,  # TODO: add multi-scale training!
		"img_h": 640,  # single-scale size, used when "Multi-scale training" is False
		"img_w": 640,
		"parallels": [0],  # config GPU device
		"working_dir": "/home/xyl/PycharmProjects/YOLOV3_SUPER",  # replace with your working dir
		# restore_model_weight:
		"pretrain_snapshot": "",
		"self_train_weight": True,
		"resume_start_epoch": 0,
		# train_eval:
		"start_eval": 31,
		"interval_epoch_eval": 1,  # run evaluation every this many epochs
		"epoch_eval_times": 1,  # number of evaluation rounds per evaluating epoch
		# Meaning of the train_eval parameters: evaluation begins at epoch
		# "start_eval"; an evaluating epoch runs "epoch_eval_times" rounds of
		# validation, and evaluating epochs are spaced "interval_epoch_eval"
		# epochs apart.
		# tricks
		"try": '_yolov3_shulffnetv2_SSDaug_coco_pretrain_loss_div_all_objects_test_UA_detrac',
		"scheduler_way": "Cosdecay",
		"label_smooth": False,  # label_smooth still has issues interoperating with the ce loss
		"GIOU": False,
		"mix_up": False,
		"ce": False,
		"bce": True,
		"accumulate": 1
	}

Eval = {
	"PROJECT_PATH": "/home/xyl/PycharmProjects/YOLOV3_SUPER",  # absolute path of this project
	"TEST_IMG_SIZE": 640,
	"BATCH_SIZE": 32,
	"NUMBER_WORKERS": 0,
	"CONF_THRESH": 0.01,
	"NMS_THRESH": 0.5,
	"MULTI_SCALE_TEST": False,
	"FLIP_TEST": False,
	"test_path": "../data/detrac/test.txt",  #../data/coco/vehecal/vehecal_train.txt",
	"test_ignore_region": "../data/detrac/test_ignore_region.txt",
	"test_labels_path": "../data/detrac/labels_test",
	# do not generate result-analysis figures
	"generate_analyze_figure": False,
}
| 28.878049 | 104 | 0.65625 | TRAINING_PARAMS = \
{
"model_params": {
"backbone_name": "darknet53",
"backbone_weight": "",
},
"model": {
"anchors": [[[116, 90], [156, 198], [373, 326]],
[[30, 61], [62, 45], [59, 119]],
[[10, 13], [16, 30], [33, 23]]],
"classes": 1,
"classes_category": ["car"]
},
"lr": {
"backbone_lr": 1e-4,
"other_lr": 1e-4,
"LR_INIT": 1e-4,
"LR_END": 1e-6,
"WARMUP_EPOCHS": 1,
"freeze_backbone": False,
"decay_step": [60, 80],
"decay_gamma": 0.1
},
"optimizer": {
"type": "sgd",
"weight_decay": 5e-04,
},
"data_path":"/media/xyl/6418a039-786d-4cd8-b0bb-1ed36a649668/Datasets/UA-DETRAC",
"batch_size": 8,
"train_path": "../data/detrac/train.txt",
"train_ignore_region": "../data/detrac/train_ignore_region.txt",
"train_labels_path": "../data/detrac/labels",
"epochs": 32,
"Multi-scale training": True, #要增加多尺度训练!
"img_h": 640,#如果Multi-scale training是False,则使用此单尺度训练
"img_w": 640,
"parallels": [0], # config GPU device
"working_dir": "/home/xyl/PycharmProjects/YOLOV3_SUPER", # replace with your working dir
# restore_model_weight:
"pretrain_snapshot": "",
"self_train_weight": True,
"resume_start_epoch": 0,
# train_eval:
"start_eval": 31,
"interval_epoch_eval": 1, #每隔多少个epoch进行验证
"epoch_eval_times": 1, #每个epoch验证多少轮
#train_eval参数的含义为:从"start_eval"第2个epoch开始进行验证,此时"epoch_eval_times"第2个epoch总共
# 会验证两次,然后间隔"interval_epoch_eval"2个epoch会再次进行验证
#tricks
"try": '_yolov3_shulffnetv2_SSDaug_coco_pretrain_loss_div_all_objects_test_UA_detrac',
"scheduler_way": "Cosdecay",
"label_smooth": False, #label_smooth还有一些问题要跟ce适应
"GIOU": False,
"mix_up": False,
"ce": False,
"bce": True,
"accumulate":1
}
Eval = {
"PROJECT_PATH": "/home/xyl/PycharmProjects/YOLOV3_SUPER", #即本项目的地址
"TEST_IMG_SIZE":640,
"BATCH_SIZE":32,
"NUMBER_WORKERS":0,
"CONF_THRESH":0.01,
"NMS_THRESH":0.5,
"MULTI_SCALE_TEST":False,
"FLIP_TEST":False,
"test_path": "../data/detrac/test.txt",#../data/coco/vehecal/vehecal_train.txt",
"test_ignore_region": "../data/detrac/test_ignore_region.txt",
"test_labels_path": "../data/detrac/labels_test",
"generate_analyze_figure": False,
}
| true | true |
f7fe428801f26fbcc418e80b37e066c5da680594 | 1,281 | py | Python | examples/bsa.py | KenjiHarada/FSS-tools | e45f4ac19cad1843f8efba420fd46e3e5210a77a | [
"MIT"
] | 2 | 2022-03-15T03:02:15.000Z | 2022-03-15T03:03:16.000Z | examples/bsa.py | KenjiHarada/FSS-tools | e45f4ac19cad1843f8efba420fd46e3e5210a77a | [
"MIT"
] | null | null | null | examples/bsa.py | KenjiHarada/FSS-tools | e45f4ac19cad1843f8efba420fd46e3e5210a77a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Example code of FSS by GP with fss_torch.

Runs a finite-size-scaling (FSS) analysis with Gaussian-process regression
on square-lattice Ising data, then prints the fitted scaling parameters
next to the initial guesses.

Copyright 2022 Kenji Harada
"""
import sys, os
import math
import torch.optim as optim
# Make the in-repo package importable when this script is run from examples/.
sys.path.append(os.path.abspath("../src"))
import fss_torch
# The square-lattice Ising model: tc_true is the exact critical temperature,
# Tc = (1/2) ln(1 + sqrt(2)).
tc_true = 0.5 * math.log(1 + math.sqrt(2))
# Alternative observables (uncomment one pair and comment out the active one):
# c1_true, c2_true = (1.0, 0)
# fname = "Data/Ising2D/ising-square-B.dat"
# Active choice: magnetization data; c2 = 0.125 corresponds to the 2D Ising
# exponent ratio beta/nu = 1/8 (presumably — confirm against the dataset docs).
c1_true, c2_true = (1.0, 0.125)
fname = "./Data/Ising2D/ising-square-M.dat"
# c1_true, c2_true = (1.0, -1.75)
# fname = "Data/Ising2D/ising-square-X.dat"
# Dataset: load the FSS data file.
dataset = fss_torch.fss.Dataset.fromFile(fname=fname)
# Transformer: start the fit deliberately offset from the exact values
# (scale factors 0.97 / 0.9 / 0.9) so the optimization has work to do.
rtc, rc1, rc2 = 0.97, 0.9, 0.9
initial_values = [dataset.transform_t(tc_true * rtc), c1_true * rc1, c2_true * rc2]
transform = fss_torch.fss.Transform(initial_values)
# Model: the GP regressor used for the Bayesian scaling analysis.
model = fss_torch.bsa_util.GP()
# Optimizer: Adam, with a smaller learning rate for the scaling parameters
# than for the GP hyperparameters.
optimizer = optim.Adam(params=model.parameters(), lr=0.1)
optimizer.add_param_group({"params": transform.parameters(), "lr": 0.01})
# Doing FSS by GP
tc, c1, c2 = fss_torch.bsa_util.do_fss(dataset, model, optimizer, transform)
# Results: fitted (Tc, c1, c2) followed by the initial guesses; Tc values are
# mapped back to original temperature units via inv_transform_t.
print(
    "%g %g %g %g %g %g"
    % (
        dataset.inv_transform_t(tc),
        c1,
        c2,
        dataset.inv_transform_t(initial_values[0]),
        initial_values[1],
        initial_values[2],
    ),
    flush=True,
)
| 26.6875 | 83 | 0.672131 |
import sys, os
import math
import torch.optim as optim
sys.path.append(os.path.abspath("../src"))
import fss_torch
tc_true = 0.5 * math.log(1 + math.sqrt(2))
c1_true, c2_true = (1.0, 0.125)
fname = "./Data/Ising2D/ising-square-M.dat"
dataset = fss_torch.fss.Dataset.fromFile(fname=fname)
rtc, rc1, rc2 = 0.97, 0.9, 0.9
initial_values = [dataset.transform_t(tc_true * rtc), c1_true * rc1, c2_true * rc2]
transform = fss_torch.fss.Transform(initial_values)
model = fss_torch.bsa_util.GP()
optimizer = optim.Adam(params=model.parameters(), lr=0.1)
optimizer.add_param_group({"params": transform.parameters(), "lr": 0.01})
tc, c1, c2 = fss_torch.bsa_util.do_fss(dataset, model, optimizer, transform)
print(
"%g %g %g %g %g %g"
% (
dataset.inv_transform_t(tc),
c1,
c2,
dataset.inv_transform_t(initial_values[0]),
initial_values[1],
initial_values[2],
),
flush=True,
)
| true | true |
f7fe42b7a332321fc6a1ab8da30c276e6825a104 | 92,355 | py | Python | kubernetes/client/apis/batch_v2alpha1_api.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | 3 | 2019-05-19T05:05:37.000Z | 2020-03-20T04:56:20.000Z | kubernetes/client/apis/batch_v2alpha1_api.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/apis/batch_v2alpha1_api.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV2alpha1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_cron_job(self, namespace, body, **kwargs):
"""
create a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cron_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2alpha1CronJob body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_cron_job_with_http_info(self, namespace, body, **kwargs):
"""
create a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cron_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2alpha1CronJob body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_cron_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_cron_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_cron_job(self, namespace, **kwargs):
"""
delete collection of CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_cron_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_cron_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_cron_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_cron_job(self, name, namespace, **kwargs):
"""
delete a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_cron_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_cron_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_cron_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v2alpha1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_cron_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cron_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cron_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_cron_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v2alpha1/cronjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_cron_job(self, namespace, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_cron_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_cron_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_cron_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs):
"""
partially update the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_cron_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_cron_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V2alpha1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_cron_job(self, name, namespace, **kwargs):
    """
    Read the specified CronJob.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the deserialized object.
    >>> thread = api.read_namespaced_cron_job(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V2alpha1CronJob (or the request thread when async_req is set)
    """
    # This convenience wrapper always reduces the response to the payload.
    kwargs['_return_http_data_only'] = True
    # The sync and async branches of the generated code both returned the
    # delegate's result unchanged, so a single call covers both cases.
    return self.read_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
def read_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):
    """
    read the specified CronJob
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_cron_job_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V2alpha1CronJob
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals so positional arguments and validated keyword
    # arguments can be looked up uniformly below; unknown kwargs are
    # rejected, accepted ones merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_cron_job`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_cron_job`")
    collection_formats = {}
    # Path templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters (snake_case locals map to the
    # camelCase wire names where they differ).
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'exact' in params:
        query_params.append(('exact', params['exact']))
    if 'export' in params:
        query_params.append(('export', params['export']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJob',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def read_namespaced_cron_job_status(self, name, namespace, **kwargs):
    """
    Read the status subresource of the specified CronJob.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the deserialized object.
    >>> thread = api.read_namespaced_cron_job_status(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V2alpha1CronJob (or the request thread when async_req is set)
    """
    # Always strip the response down to the payload for this wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async callers both receive the delegate's return value.
    return self.read_namespaced_cron_job_status_with_http_info(name, namespace, **kwargs)
def read_namespaced_cron_job_status_with_http_info(self, name, namespace, **kwargs):
    """
    read status of the specified CronJob
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_cron_job_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V2alpha1CronJob
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['name', 'namespace', 'pretty']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_cron_job_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_cron_job_status`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_cron_job_status`")
    collection_formats = {}
    # Path templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJob',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def replace_namespaced_cron_job(self, name, namespace, body, **kwargs):
    """
    Replace the specified CronJob.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the deserialized object.
    >>> thread = api.replace_namespaced_cron_job(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V2alpha1CronJob body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V2alpha1CronJob (or the request thread when async_req is set)
    """
    # Always strip the response down to the payload for this wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async callers both receive the delegate's return value.
    return self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
def replace_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace the specified CronJob
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_cron_job_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V2alpha1CronJob body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V2alpha1CronJob
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_cron_job`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cron_job`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_cron_job`")
    collection_formats = {}
    # Path templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters (dry_run maps to wire name dryRun).
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJob',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def replace_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
    """
    Replace the status subresource of the specified CronJob.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the deserialized object.
    >>> thread = api.replace_namespaced_cron_job_status(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V2alpha1CronJob body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V2alpha1CronJob (or the request thread when async_req is set)
    """
    # Always strip the response down to the payload for this wrapper.
    kwargs['_return_http_data_only'] = True
    # Sync and async callers both receive the delegate's return value.
    return self.replace_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs)
def replace_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace status of the specified CronJob
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_cron_job_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V2alpha1CronJob body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V2alpha1CronJob
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_cron_job_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_cron_job_status`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cron_job_status`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_cron_job_status`")
    collection_formats = {}
    # Path templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters (dry_run maps to wire name dryRun).
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJob',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| 63.693103 | 1,390 | 0.649851 |
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..api_client import ApiClient
class BatchV2alpha1Api(object):
def __init__(self, api_client=None):
    """Bind this API wrapper to *api_client*, creating a default ApiClient when none is given."""
    self.api_client = api_client if api_client is not None else ApiClient()
def create_namespaced_cron_job(self, namespace, body, **kwargs):
    """Create a CronJob in *namespace* and return only the response payload.

    Delegates to create_namespaced_cron_job_with_http_info; when
    async_req is truthy the delegate's (asynchronous) result is
    returned unchanged.
    """
    # This convenience wrapper always reduces the response to the payload.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches of the generated code returned the
    # delegate's result, so a single call covers both cases.
    return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs)
def create_namespaced_cron_job_with_http_info(self, namespace, body, **kwargs):
    """Create a CronJob in *namespace* via the shared ApiClient.

    :param str namespace: target namespace (required)
    :param body: the CronJob object to send as the request body (required)
    :param include_uninitialized: optional; sent as query param ``includeUninitialized``
    :param pretty: optional; sent as query param ``pretty``
    :param dry_run: optional; sent as query param ``dryRun``
    :return: result of ``ApiClient.call_api`` with response_type 'V2alpha1CronJob'
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when ``namespace`` or ``body`` is missing/None
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # Required-parameter validation.
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_cron_job`")
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `create_namespaced_cron_job`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters (camelCase wire names).
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting.
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJob',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_collection_namespaced_cron_job(self, namespace, **kwargs):
    """Delete the collection of CronJobs in *namespace*, returning only the payload.

    Delegates to delete_collection_namespaced_cron_job_with_http_info;
    when async_req is truthy the delegate's (asynchronous) result is
    returned unchanged.
    """
    # This convenience wrapper always reduces the response to the payload.
    kwargs['_return_http_data_only'] = True
    # A single delegate call covers both the sync and async branches.
    return self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs)
def delete_collection_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
    """Delete the collection of CronJobs in *namespace* via the shared ApiClient.

    :param str namespace: target namespace (required)
    Optional list-style filters forwarded as query parameters:
    include_uninitialized, pretty, _continue, field_selector,
    label_selector, limit, resource_version, timeout_seconds, watch.
    :return: result of ``ApiClient.call_api`` with response_type 'V1Status'
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when ``namespace`` is missing/None
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # Required-parameter validation.
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters (snake_case locals map to the
    # camelCase wire names; `_continue` avoids the Python keyword).
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting.
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_namespaced_cron_job(self, name, namespace, **kwargs):
    """Delete the named CronJob, returning only the response payload.

    Delegates to delete_namespaced_cron_job_with_http_info; when
    async_req is truthy the delegate's (asynchronous) result is
    returned unchanged.
    """
    # This convenience wrapper always reduces the response to the payload.
    kwargs['_return_http_data_only'] = True
    # A single delegate call covers both the sync and async branches.
    return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
def delete_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):
    """Delete the named CronJob via the shared ApiClient.

    :param str name: name of the CronJob (required)
    :param str namespace: target namespace (required)
    Optional keywords: pretty, body (delete options sent as request
    body), dry_run, grace_period_seconds, orphan_dependents,
    propagation_policy.
    :return: result of ``ApiClient.call_api`` with response_type 'V1Status'
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when ``name`` or ``namespace`` is missing/None
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # Required-parameter validation.
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`")
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`")
    collection_formats = {}
    # Path templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters (camelCase wire names).
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    if 'grace_period_seconds' in params:
        query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
    if 'orphan_dependents' in params:
        query_params.append(('orphanDependents', params['orphan_dependents']))
    if 'propagation_policy' in params:
        query_params.append(('propagationPolicy', params['propagation_policy']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # The optional delete-options object travels in the request body.
    if 'body' in params:
        body_params = params['body']
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting.
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
    """Fetch the available resources of the batch/v2alpha1 API group.

    Delegates to get_api_resources_with_http_info; when async_req is
    truthy the delegate's (asynchronous) result is returned unchanged.
    """
    # This convenience wrapper always reduces the response to the payload.
    kwargs['_return_http_data_only'] = True
    # A single delegate call covers both the sync and async branches.
    return self.get_api_resources_with_http_info(**kwargs)
def get_api_resources_with_http_info(self, **kwargs):
    """GET /apis/batch/v2alpha1/ via the shared ApiClient.

    Accepts only the client-level control keywords (async_req,
    _return_http_data_only, _preload_content, _request_timeout).
    :return: result of ``ApiClient.call_api`` with response_type 'V1APIResourceList'
    :raises TypeError: on an unexpected keyword argument
    """
    # This endpoint takes no API parameters; only control keywords.
    all_params = []
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # Authentication setting.
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1APIResourceList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_cron_job_for_all_namespaces(self, **kwargs):
    """List CronJobs across all namespaces, returning only the payload.

    Delegates to list_cron_job_for_all_namespaces_with_http_info; when
    async_req is truthy the delegate's (asynchronous) result is
    returned unchanged.
    """
    # This convenience wrapper always reduces the response to the payload.
    kwargs['_return_http_data_only'] = True
    # A single delegate call covers both the sync and async branches.
    return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs):
    """GET /apis/batch/v2alpha1/cronjobs (all namespaces) via the shared ApiClient.

    Optional list-style filters forwarded as query parameters:
    _continue, field_selector, include_uninitialized, label_selector,
    limit, pretty, resource_version, timeout_seconds, watch.
    :return: result of ``ApiClient.call_api`` with response_type 'V2alpha1CronJobList'
    :raises TypeError: on an unexpected keyword argument
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_cron_job_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    # Optional query-string parameters (snake_case locals map to the
    # camelCase wire names; `_continue` avoids the Python keyword).
    query_params = []
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Accept list includes the watch-stream media types for this endpoint.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting.
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/cronjobs', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJobList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_namespaced_cron_job(self, namespace, **kwargs):
    """List CronJobs in *namespace*, returning only the response payload.

    Delegates to list_namespaced_cron_job_with_http_info; when
    async_req is truthy the delegate's (asynchronous) result is
    returned unchanged.
    """
    # This convenience wrapper always reduces the response to the payload.
    kwargs['_return_http_data_only'] = True
    # A single delegate call covers both the sync and async branches.
    return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
    """GET the CronJobs of *namespace* via the shared ApiClient.

    :param str namespace: target namespace (required)
    Optional list-style filters forwarded as query parameters:
    include_uninitialized, pretty, _continue, field_selector,
    label_selector, limit, resource_version, timeout_seconds, watch.
    :return: result of ``ApiClient.call_api`` with response_type 'V2alpha1CronJobList'
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when ``namespace`` is missing/None
    """
    # Endpoint parameters plus the client-level control keywords.
    all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot the locals; unknown kwargs are rejected, accepted ones
    # merged in, and the raw 'kwargs' entry dropped.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # Required-parameter validation.
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_cron_job`")
    collection_formats = {}
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters (snake_case locals map to the
    # camelCase wire names; `_continue` avoids the Python keyword).
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Accept list includes the watch-stream media types for this endpoint.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting.
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJobList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs):
    """Partially update the specified CronJob.

    Thin sync/async front-end over patch_namespaced_cron_job_with_http_info.
    With async_req=True the underlying call hands back the request thread,
    otherwise the deserialized V2alpha1CronJob; either way its result can be
    returned unchanged, so no branching is required here.
    """
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_cron_job_with_http_info(
        name, namespace, body, **kwargs)
def patch_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
    """PATCH /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}

    Partially update the specified CronJob.

    :param name: name of the CronJob (required)
    :param namespace: object name and auth scope (required)
    :param body: the patch to apply (required)
    :param pretty: if 'true', then the output is pretty printed
    :param dry_run: when present, modifications are not persisted
    :return: V2alpha1CronJob (wrapped per the generic client flags)
    """
    # Keywords understood by this endpoint plus the generic client knobs.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    # Merge **kwargs into the params snapshot, rejecting anything unknown.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # All required parameters must be present and non-None.
    for required in ('name', 'namespace', 'body'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `patch_namespaced_cron_job`" % required
            )
    collection_formats = {}
    path_params = {arg: params[arg] for arg in ('name', 'namespace') if arg in params}
    query_params = []
    # python_name -> wire name for the supported query arguments.
    for py_name, wire_name in (('pretty', 'pretty'), ('dry_run', 'dryRun')):
        if py_name in params:
            query_params.append((wire_name, params[py_name]))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json',
             'application/strategic-merge-patch+json']),
    }
    form_params = []
    local_var_files = {}
    body_params = params.get('body')
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
    """Partially update status of the specified CronJob.

    Sync/async front-end over patch_namespaced_cron_job_status_with_http_info;
    both the async (request thread) and the sync (deserialized object) result
    are returned as-is, so a single call covers both paths.
    """
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_cron_job_status_with_http_info(
        name, namespace, body, **kwargs)
def patch_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):
    """PATCH /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status

    Partially update status of the specified CronJob.

    :param name: name of the CronJob (required)
    :param namespace: object name and auth scope (required)
    :param body: the patch to apply (required)
    :param pretty: if 'true', then the output is pretty printed
    :param dry_run: when present, modifications are not persisted
    :return: V2alpha1CronJob (wrapped per the generic client flags)
    """
    # Keywords understood by this endpoint plus the generic client knobs.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    # Merge **kwargs into the params snapshot, rejecting anything unknown.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_cron_job_status" % key
            )
        params[key] = val
    del params['kwargs']
    # All required parameters must be present and non-None.
    for required in ('name', 'namespace', 'body'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `patch_namespaced_cron_job_status`" % required
            )
    collection_formats = {}
    path_params = {arg: params[arg] for arg in ('name', 'namespace') if arg in params}
    query_params = []
    # python_name -> wire name for the supported query arguments.
    for py_name, wire_name in (('pretty', 'pretty'), ('dry_run', 'dryRun')):
        if py_name in params:
            query_params.append((wire_name, params[py_name]))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json',
             'application/strategic-merge-patch+json']),
    }
    form_params = []
    local_var_files = {}
    body_params = params.get('body')
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_cron_job(self, name, namespace, **kwargs):
    """Read the specified CronJob.

    Sync/async front-end over read_namespaced_cron_job_with_http_info; the
    helper's result (request thread when async_req=True, deserialized
    V2alpha1CronJob otherwise) is returned unchanged in both cases.
    """
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
def read_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):
    """GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}

    Read the specified CronJob.

    :param name: name of the CronJob (required)
    :param namespace: object name and auth scope (required)
    :param pretty: if 'true', then the output is pretty printed
    :param exact: export should be exact
    :param export: strip fields the user cannot specify
    :return: V2alpha1CronJob (wrapped per the generic client flags)
    """
    # Keywords understood by this endpoint plus the generic client knobs.
    all_params = ['name', 'namespace', 'pretty', 'exact', 'export',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    # Merge **kwargs into the params snapshot, rejecting anything unknown.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # All required parameters must be present and non-None.
    for required in ('name', 'namespace'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `read_namespaced_cron_job`" % required
            )
    collection_formats = {}
    path_params = {arg: params[arg] for arg in ('name', 'namespace') if arg in params}
    query_params = []
    # python_name -> wire name for the supported query arguments.
    for py_name, wire_name in (('pretty', 'pretty'), ('exact', 'exact'),
                               ('export', 'export')):
        if py_name in params:
            query_params.append((wire_name, params[py_name]))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None  # GET carries no request body
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_cron_job_status(self, name, namespace, **kwargs):
    """Read status of the specified CronJob.

    Sync/async front-end over read_namespaced_cron_job_status_with_http_info;
    the helper's result is returned unchanged for both the async (thread)
    and the sync (deserialized object) path.
    """
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_cron_job_status_with_http_info(
        name, namespace, **kwargs)
def read_namespaced_cron_job_status_with_http_info(self, name, namespace, **kwargs):
    """GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status

    Read status of the specified CronJob.

    :param name: name of the CronJob (required)
    :param namespace: object name and auth scope (required)
    :param pretty: if 'true', then the output is pretty printed
    :return: V2alpha1CronJob (wrapped per the generic client flags)
    """
    # Keywords understood by this endpoint plus the generic client knobs.
    all_params = ['name', 'namespace', 'pretty',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    # Merge **kwargs into the params snapshot, rejecting anything unknown.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_cron_job_status" % key
            )
        params[key] = val
    del params['kwargs']
    # All required parameters must be present and non-None.
    for required in ('name', 'namespace'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `read_namespaced_cron_job_status`" % required
            )
    collection_formats = {}
    path_params = {arg: params[arg] for arg in ('name', 'namespace') if arg in params}
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = None  # GET carries no request body
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_cron_job(self, name, namespace, body, **kwargs):
    """Replace the specified CronJob.

    Sync/async front-end over replace_namespaced_cron_job_with_http_info;
    its result (request thread when async_req=True, deserialized
    V2alpha1CronJob otherwise) is returned unchanged either way.
    """
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_cron_job_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
    """PUT /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}

    Replace the specified CronJob.

    :param name: name of the CronJob (required)
    :param namespace: object name and auth scope (required)
    :param body: the replacement V2alpha1CronJob (required)
    :param pretty: if 'true', then the output is pretty printed
    :param dry_run: when present, modifications are not persisted
    :return: V2alpha1CronJob (wrapped per the generic client flags)
    """
    # Keywords understood by this endpoint plus the generic client knobs.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    # Merge **kwargs into the params snapshot, rejecting anything unknown.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # All required parameters must be present and non-None.
    for required in ('name', 'namespace', 'body'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `replace_namespaced_cron_job`" % required
            )
    collection_formats = {}
    path_params = {arg: params[arg] for arg in ('name', 'namespace') if arg in params}
    query_params = []
    # python_name -> wire name for the supported query arguments.
    for py_name, wire_name in (('pretty', 'pretty'), ('dry_run', 'dryRun')):
        if py_name in params:
            query_params.append((wire_name, params[py_name]))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = params.get('body')
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
    """Replace status of the specified CronJob.

    Sync/async front-end over replace_namespaced_cron_job_status_with_http_info;
    the helper's result is returned unchanged on both the async and the
    sync path, so no branching is needed.
    """
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_cron_job_status_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):
    """PUT /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status

    Replace status of the specified CronJob.

    :param name: name of the CronJob (required)
    :param namespace: object name and auth scope (required)
    :param body: the replacement V2alpha1CronJob (required)
    :param pretty: if 'true', then the output is pretty printed
    :param dry_run: when present, modifications are not persisted
    :return: V2alpha1CronJob (wrapped per the generic client flags)
    """
    # Keywords understood by this endpoint plus the generic client knobs.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    params = locals()
    # Merge **kwargs into the params snapshot, rejecting anything unknown.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_cron_job_status" % key
            )
        params[key] = val
    del params['kwargs']
    # All required parameters must be present and non-None.
    for required in ('name', 'namespace', 'body'):
        if params.get(required) is None:
            raise ValueError(
                "Missing the required parameter `%s` "
                "when calling `replace_namespaced_cron_job_status`" % required
            )
    collection_formats = {}
    path_params = {arg: params[arg] for arg in ('name', 'namespace') if arg in params}
    query_params = []
    # python_name -> wire name for the supported query arguments.
    for py_name, wire_name in (('pretty', 'pretty'), ('dry_run', 'dryRun')):
        if py_name in params:
            query_params.append((wire_name, params[py_name]))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    form_params = []
    local_var_files = {}
    body_params = params.get('body')
    auth_settings = ['BearerToken']
    return self.api_client.call_api(
        '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V2alpha1CronJob',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| true | true |
f7fe42f0af33a41033a59be00494647cd2ccc3ac | 6,562 | py | Python | snip-tensorflow/main.py | isabuster/snip | 8e7644edd1f4dcca0f833666cf54474bcacf2aea | [
"MIT"
] | 1 | 2020-09-13T09:18:13.000Z | 2020-09-13T09:18:13.000Z | snip-tensorflow/main.py | isabuster/snip | 8e7644edd1f4dcca0f833666cf54474bcacf2aea | [
"MIT"
] | null | null | null | snip-tensorflow/main.py | isabuster/snip | 8e7644edd1f4dcca0f833666cf54474bcacf2aea | [
"MIT"
] | null | null | null | import os
import sys
import argparse
import tensorflow as tf
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
from dataset import Dataset
from model import Model
import prune
import train
import test
# from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
def parse_arguments():
    """Parse command-line options for a SNIP run.

    Returns an argparse.Namespace augmented with the derived log locations
    `path_summary`, `path_model` and `path_assess` (all rooted at --logdir).
    """
    ap = argparse.ArgumentParser()
    # Data options
    ap.add_argument('--datasource', type=str, default='mnist', help='dataset to use')
    ap.add_argument('--path_data', type=str, default='./data', help='location to dataset')
    ap.add_argument('--aug_kinds', nargs='+', type=str, default=[], help='augmentations to perform')
    # Model options
    ap.add_argument('--arch', type=str, default='lenet5', help='network architecture to use')
    ap.add_argument('--target_sparsity', type=float, default=0.9, help='level of sparsity to achieve')
    # Train options
    ap.add_argument('--batch_size', type=int, default=100, help='number of examples per mini-batch')
    ap.add_argument('--train_iterations', type=int, default=10000, help='number of training iterations')
    ap.add_argument('--optimizer', type=str, default='sgd', help='optimizer of choice')
    ap.add_argument('--lr_decay_type', type=str, default='constant', help='learning rate decay type')
    ap.add_argument('--lr', type=float, default=1e-2, help='initial learning rate')
    ap.add_argument('--decay_boundaries', nargs='+', type=int, default=[], help='boundaries for piecewise_constant decay')
    ap.add_argument('--decay_values', nargs='+', type=float, default=[], help='values for piecewise_constant decay')
    # Initialization
    ap.add_argument('--initializer_w_bp', type=str, default='vs', help='initializer for w before pruning')
    ap.add_argument('--initializer_b_bp', type=str, default='zeros', help='initializer for b before pruning')
    ap.add_argument('--initializer_w_ap', type=str, default='vs', help='initializer for w after pruning')
    ap.add_argument('--initializer_b_ap', type=str, default='zeros', help='initializer for b after pruning')
    # Logging, saving, options
    ap.add_argument('--logdir', type=str, default='logs', help='location for summaries and checkpoints')
    ap.add_argument('--check_interval', type=int, default=100, help='check interval during training')
    ap.add_argument('--save_interval', type=int, default=1000, help='save interval during training')
    opts = ap.parse_args()
    # Derived output locations under the log directory.
    for attr, sub in (('path_summary', 'summary'),
                      ('path_model', 'model'),
                      ('path_assess', 'assess')):
        setattr(opts, attr, os.path.join(opts.logdir, sub))
    return opts
def plot_distribution(sess, layers, pruned=False):
    """Save one weight histogram (with a Gaussian-KDE overlay) per layer.

    With pruned=False each entry of `layers` is a TF variable evaluated in
    `sess`; with pruned=True each entry is a numpy array whose zero
    (pruned) weights are dropped before plotting. One PNG is written per
    layer, named after its 1-based index and the pruning stage.
    """
    for layer_no, layer_var in enumerate(layers, start=1):
        # Collect the weights to plot as a flat 1-D array.
        if pruned:
            flat = layer_var.flatten()
            weights = flat[flat != 0]  # drop pruned (zeroed) weights
        else:
            weights = np.array(sess.run(layer_var)).flatten()
        axes = plt.axes()
        axes.set_axisbelow(True)  # keep the grid behind the bars
        plt.hist(weights, bins=30, label="Weights", density=True, edgecolor='white')
        plt.grid(ls='--')
        # Overlay a kernel-density estimate across the histogram's x-range.
        x_lo, x_hi = plt.xlim()
        kde_xs = np.linspace(x_lo, x_hi)
        plt.plot(kde_xs, st.gaussian_kde(weights).pdf(kde_xs), label="PDF")
        plt.legend(loc="upper left")
        plt.ylabel('Density')
        plt.xlabel('Weights')
        if pruned:
            plt.title("Histogram of Weights for layer{} after Pruning".format(layer_no))
            plt.savefig('layer{} after pruning.png'.format(layer_no))
        else:
            plt.title("Histogram of Weights for layer{} before Pruning".format(layer_no))
            plt.savefig('layer{} before pruning.png'.format(layer_no))
        plt.close()
def main():
    """Run the full SNIP + weight-rewinding experiment pipeline.

    Flow: compute per-layer sparsity budgets with SNIP, train the dense
    network (taking weight snapshots at iterations 0, 60k and 120k),
    magnitude-prune to the SNIP budgets, then repeatedly rewind to earlier
    snapshots and retrain/evaluate the sparse network.
    """
    args = parse_arguments()

    # Dataset
    dataset = Dataset(**vars(args))

    # TF2 executes eagerly by default, but this code base relies on
    # graph-mode placeholders, so switch back to the v1 behaviour.
    tf.compat.v1.disable_eager_execution()
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.set_random_seed(9)

    # Model
    model = Model(num_classes=dataset.num_classes, **vars(args))
    model.construct_model()

    # Session
    sess = tf.compat.v1.InteractiveSession()
    # Saver is kept for the optional checkpoint restore commented below.
    saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables())
    tf.compat.v1.global_variables_initializer().run()
    tf.compat.v1.local_variables_initializer().run()
    # saver.restore(sess, "/data1/liyilin/vgg/model0/itr-0")

    def _train_and_eval_sparse():
        # One sparse-training round (lr=1e-1) followed by a test pass.
        train.train(args, model, sess, dataset, lr=1e-1)
        print('|========= FINISH TRAINING SPARSE NETWORK =========|')
        test.test(args, model, sess, dataset)

    # Calculate sparsity per layer using SNIP, but do not prune yet.
    num_weights, kappa = prune.prune_snip(args, model, sess, dataset)
    sparsity_fraction = {k: 1 - kappa[k] / num_weights[k] for k in num_weights}
    print('sparsity per layer:')
    print(sparsity_fraction)

    # Iteration-0 weight snapshot for the final rewind.
    rewinding_weights0 = sess.run(model.weights, {model.pruned: True})

    # Train the dense network; train.train returns the weight snapshots
    # taken at its rewinding_itr1/rewinding_itr2 iterations.
    rewinding_weights1, rewinding_weights2 = train.train(
        args, model, sess, dataset, lr=args.lr,
        rewinding_itr1=60000, rewinding_itr2=120000)
    print('|========= FINISH TRAINING DENSE NETWORK =========|')
    test.test(args, model, sess, dataset)

    # Prune each layer by weight magnitude down to the SNIP-derived
    # per-layer budget, then train/evaluate the sparse network once.
    prune.prune_magnitude(args, model, sess, dataset, kappa)
    _train_and_eval_sparse()

    # Rewind to progressively earlier snapshots, retraining each time.
    for weights, itr in ((rewinding_weights2, 120000),
                        (rewinding_weights1, 60000),
                        (rewinding_weights0, 0)):
        prune.rewind(args, model, sess, dataset, weights, rewinding_itr=itr)
        _train_and_eval_sparse()

    sess.close()
    sys.exit()
| 43.456954 | 141 | 0.685005 | import os
import sys
import argparse
import tensorflow as tf
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
from dataset import Dataset
from model import Model
import prune
import train
import test
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--datasource', type=str, default='mnist', help='dataset to use')
parser.add_argument('--path_data', type=str, default='./data', help='location to dataset')
parser.add_argument('--aug_kinds', nargs='+', type=str, default=[], help='augmentations to perform')
parser.add_argument('--arch', type=str, default='lenet5', help='network architecture to use')
parser.add_argument('--target_sparsity', type=float, default=0.9, help='level of sparsity to achieve')
parser.add_argument('--batch_size', type=int, default=100, help='number of examples per mini-batch')
parser.add_argument('--train_iterations', type=int, default=10000, help='number of training iterations')
parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer of choice')
parser.add_argument('--lr_decay_type', type=str, default='constant', help='learning rate decay type')
parser.add_argument('--lr', type=float, default=1e-2, help='initial learning rate')
parser.add_argument('--decay_boundaries', nargs='+', type=int, default=[], help='boundaries for piecewise_constant decay')
parser.add_argument('--decay_values', nargs='+', type=float, default=[], help='values for piecewise_constant decay')
parser.add_argument('--initializer_w_bp', type=str, default='vs', help='initializer for w before pruning')
parser.add_argument('--initializer_b_bp', type=str, default='zeros', help='initializer for b before pruning')
parser.add_argument('--initializer_w_ap', type=str, default='vs', help='initializer for w after pruning')
parser.add_argument('--initializer_b_ap', type=str, default='zeros', help='initializer for b after pruning')
parser.add_argument('--logdir', type=str, default='logs', help='location for summaries and checkpoints')
parser.add_argument('--check_interval', type=int, default=100, help='check interval during training')
parser.add_argument('--save_interval', type=int, default=1000, help='save interval during training')
args = parser.parse_args()
args.path_summary = os.path.join(args.logdir, 'summary')
args.path_model = os.path.join(args.logdir, 'model')
args.path_assess = os.path.join(args.logdir, 'assess')
return args
def plot_distribution(sess, layers, pruned=False):
for idx, var in enumerate(layers):
if pruned == False:
layer = np.array(sess.run(var)).flatten()
else:
layer = var.flatten()[var.flatten() != 0]
ax = plt.axes()
ax.set_axisbelow(True)
plt.hist(layer, bins=30, label="Weights", density=True, edgecolor='white')
plt.grid(ls='--')
left, right = plt.xlim()
kde_xs = np.linspace(left, right)
kde = st.gaussian_kde(layer)
plt.plot(kde_xs, kde.pdf(kde_xs), label="PDF")
plt.legend(loc="upper left")
plt.ylabel('Density')
plt.xlabel('Weights')
if pruned == False:
plt.title("Histogram of Weights for layer{} before Pruning".format(idx+1))
plt.savefig('layer{} before pruning.png'.format(idx+1))
else:
plt.title("Histogram of Weights for layer{} after Pruning".format(idx+1))
plt.savefig('layer{} after pruning.png'.format(idx+1))
plt.close()
def main():
args = parse_arguments()
dataset = Dataset(**vars(args))
tf.compat.v1.disable_eager_execution()
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(9)
model = Model(num_classes=dataset.num_classes, **vars(args))
model.construct_model()
sess = tf.compat.v1.InteractiveSession()
saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.trainable_variables())
tf.compat.v1.global_variables_initializer().run()
tf.compat.v1.local_variables_initializer().run()
num_weights, kappa = prune.prune_snip(args, model, sess, dataset)
sparsity_fraction = {k: 1 - kappa[k] / num_weights[k] for k in num_weights}
print('sparsity per layer:')
print(sparsity_fraction)
rewinding_weights0 = sess.run(model.weights, {model.pruned: True})
rewinding_weights1, rewinding_weights2 = train.train(args, model, sess, dataset, lr=args.lr, rewinding_itr1=60000, rewinding_itr2=120000)
print('|========= FINISH TRAINING DENSE NETWORK =========|')
test.test(args, model, sess, dataset)
prune.prune_magnitude(args, model, sess, dataset, kappa)
train.train(args, model, sess, dataset, lr=1e-1)
print('|========= FINISH TRAINING SPARSE NETWORK =========|')
test.test(args, model, sess, dataset)
prune.rewind(args, model, sess, dataset, rewinding_weights2, rewinding_itr=120000)
train.train(args, model, sess, dataset, lr=1e-1)
print('|========= FINISH TRAINING SPARSE NETWORK =========|')
test.test(args, model, sess, dataset)
prune.rewind(args, model, sess, dataset, rewinding_weights1, rewinding_itr=60000)
train.train(args, model, sess, dataset, lr=1e-1)
print('|========= FINISH TRAINING SPARSE NETWORK =========|')
test.test(args, model, sess, dataset)
prune.rewind(args, model, sess, dataset, rewinding_weights0, rewinding_itr=0)
train.train(args, model, sess, dataset, lr=1e-1)
print('|========= FINISH TRAINING SPARSE NETWORK =========|')
test.test(args, model, sess, dataset)
sess.close()
sys.exit()
if __name__ == "__main__":
main()
| true | true |
f7fe43863559be980753001b2a85fea9c0ee5159 | 2,186 | py | Python | ab.py | sshanuraj/alpha-beta-prune | 779a25496f5a52f29771622baf6226c0798f216b | [
"MIT"
] | null | null | null | ab.py | sshanuraj/alpha-beta-prune | 779a25496f5a52f29771622baf6226c0798f216b | [
"MIT"
] | null | null | null | ab.py | sshanuraj/alpha-beta-prune | 779a25496f5a52f29771622baf6226c0798f216b | [
"MIT"
] | null | null | null | import numpy as np
import random
import pdb
#pdb.set_trace()
# Player identifiers and sentinel "infinities" for the alpha-beta window.
MIN = 0        # minimizing agent
MAX = 1        # maximizing agent
INF = 10000    # stands in for +infinity
NINF = -10000  # stands in for -infinity
class Node:
	"""
	A node of the game tree used by the alpha-beta search.

	`val` stays at NINF until the node's minimax value has been decided;
	`alpha`/`beta` hold the node's current search window.
	"""
	def __init__(self, parent, m):
		self.val = NINF
		self.alpha = NINF
		self.beta = INF
		self.children = []
		self.agent = m          # MIN or MAX: whose move it is at this node
		self.parent = parent
		self.ind = 0            # bugfix: previously only set by resetValues()
	def resetValues(self):
		"""Reset the node to a fresh MAX-agent root state."""
		self.val = NINF
		self.alpha = NINF
		self.beta = INF
		self.children = []
		self.agent = MAX
		self.parent = None
		self.ind = 0
	def evaluate(self):
		"""Score the node with a random static evaluation in [0, 20]."""
		self.val = random.randint(0, 20)
class GameTree:
	"""
	Iterative alpha-beta search over an implicitly generated game tree.

	The tree is not built up front: children are created lazily (up to a
	fixed branching factor) while the search walks down, and leaves are
	scored with Node.evaluate() (a random static evaluation).
	"""
	def __init__(self, root):
		# `root` is the Node the search starts from.
		self.root = root
	def getOptimumValue(self, dep):
		"""
		Run non-recursive alpha-beta pruning to depth `dep`.

		Returns (root minimax value, indices of root children achieving it,
		number of prunes performed, values of all expanded root children).
		"""
		depth = 0
		k = dep
		bf = 25
		newVal = NINF
		# self.root.resetValues()
		curr = self.root
		bestIndArr = []
		pruneCount = 0
		# NINF on the root's `val` means "not decided yet"; the loop runs
		# until the root's minimax value has been fixed.
		while self.root.val == NINF:
			if depth == k:
				# Leaf depth reached: score it and hand the value back up.
				curr.evaluate()
				newVal = curr.val
				depth -= 1
				curr = curr.parent
				continue
			if newVal > NINF:
				# A child's value just came back: tighten alpha/beta here.
				if curr.agent == MIN:
					if (newVal < curr.beta and len(curr.children) > 1) or len(curr.children) == 1:
						curr.beta = newVal
				else:
					if (newVal >= curr.alpha and len(curr.children) > 1) or len(curr.children) == 1:
						if curr == self.root:
							# Track which root moves attain the best value so far.
							if curr.alpha < newVal:
								bestIndArr = []
								bestIndArr.append(len(curr.children) - 1)
							if curr.alpha == newVal:
								bestIndArr.append(len(curr.children) - 1)
						curr.alpha = newVal
				newVal = NINF
			if curr.alpha >= curr.beta:
				# Alpha-beta cutoff: fix this node's value and back up.
				pruneCount += 1
				if curr.agent == MIN:
					curr.val = curr.beta
				else:
					curr.val = curr.alpha
				depth -= 1
				newVal = curr.val
				curr = curr.parent
			else:
				l = len(curr.children)
				if l < bf:
					# Expand the next child (agents alternate MIN/MAX) and
					# descend with the parent's current alpha/beta window.
					curr.children.append(Node(curr, 1-curr.agent))
					curr = curr.children[l]
					curr.alpha = curr.parent.alpha
					curr.beta = curr.parent.beta
					depth += 1
				else:
					# All children explored: finalize this node and back up.
					if curr.agent == MIN:
						curr.val = curr.beta
					else:
						curr.val = curr.alpha
					newVal = curr.val
					curr = curr.parent; depth -= 1
		return self.root.val, bestIndArr, pruneCount, [i.val for i in self.root.children]
# Demo: build a MAX-rooted tree and run alpha-beta to depth 6, printing
# (optimum value, best root-move indices, prune count, root child values).
root = Node(None, MAX)
gt = GameTree(root)
print(gt.getOptimumValue(6))
| 20.240741 | 85 | 0.586002 | import numpy as np
import random
import pdb
MIN = 0
MAX = 1
INF = 10000
NINF = -10000
class Node:
def __init__(self, parent, m):
self.val = NINF
self.alpha = NINF
self.beta = INF
self.children = []
self.agent = m
self.parent = parent
def resetValues(self):
self.val = NINF
self.alpha = NINF
self.beta = INF
self.children = []
self.agent = MAX
self.parent = None
self.ind = 0
def evaluate(self):
self.val = random.randint(0, 20)
class GameTree:
def __init__(self, root):
self.root = root
def getOptimumValue(self, dep):
depth = 0
k = dep
bf = 25
newVal = NINF
curr = self.root
bestIndArr = []
pruneCount = 0
while self.root.val == NINF:
if depth == k:
curr.evaluate()
newVal = curr.val
depth -= 1
curr = curr.parent
continue
if newVal > NINF:
if curr.agent == MIN:
if (newVal < curr.beta and len(curr.children) > 1) or len(curr.children) == 1:
curr.beta = newVal
else:
if (newVal >= curr.alpha and len(curr.children) > 1) or len(curr.children) == 1:
if curr == self.root:
if curr.alpha < newVal:
bestIndArr = []
bestIndArr.append(len(curr.children) - 1)
if curr.alpha == newVal:
bestIndArr.append(len(curr.children) - 1)
curr.alpha = newVal
newVal = NINF
if curr.alpha >= curr.beta:
pruneCount += 1
if curr.agent == MIN:
curr.val = curr.beta
else:
curr.val = curr.alpha
depth -= 1
newVal = curr.val
curr = curr.parent
else:
l = len(curr.children)
if l < bf:
curr.children.append(Node(curr, 1-curr.agent))
curr = curr.children[l]
curr.alpha = curr.parent.alpha
curr.beta = curr.parent.beta
depth += 1
else:
if curr.agent == MIN:
curr.val = curr.beta
else:
curr.val = curr.alpha
newVal = curr.val
curr = curr.parent; depth -= 1
return self.root.val, bestIndArr, pruneCount, [i.val for i in self.root.children]
root = Node(None, MAX)
gt = GameTree(root)
print(gt.getOptimumValue(6))
| true | true |
f7fe440a5f232a4a64a15984d9fc0eac3e56057b | 3,602 | py | Python | bspump/declarative/expression/arithmetic.py | LibertyAces/BitSwanPump | 02301bfd4e807836403ce6a22030ad47058541d6 | [
"BSD-3-Clause"
] | 17 | 2019-02-14T09:26:03.000Z | 2022-03-11T09:23:52.000Z | bspump/declarative/expression/arithmetic.py | LibertyAces/BitSwanPump | 02301bfd4e807836403ce6a22030ad47058541d6 | [
"BSD-3-Clause"
] | 91 | 2019-05-06T18:59:02.000Z | 2022-01-11T06:22:32.000Z | bspump/declarative/expression/arithmetic.py | LibertyAces/BitSwanPump | 02301bfd4e807836403ce6a22030ad47058541d6 | [
"BSD-3-Clause"
] | 10 | 2019-04-23T08:48:58.000Z | 2022-02-13T14:24:28.000Z | import operator
from ..abc import SequenceExpression
class ADD(SequenceExpression):
"""
Add all values from expressions.
"""
Attributes = {
"Items": [
'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
'fp64', 'fp16', 'fp32', 'fp128',
'str',
]
}
Category = "Arithmetic"
def __call__(self, context, event, *args, **kwargs):
return self.reduce(operator.add, context, event, *args, **kwargs)
def get_outlet_type(self):
return _get_outlet_type_from_first(self.Items)
def get_items_inlet_type(self):
# TODO: This is maybe not true for integer additions
return _get_outlet_type_from_first(self.Items)
class DIV(SequenceExpression):
    """
    Divides values in expression.

    Evaluates every item in ``Items`` and folds the results together with
    true division (``/``), left to right.
    """

    # Declarative item-type constraints consumed by the expression framework.
    Attributes = {
        "Items": [
            'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
            'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
            'fp64', 'fp16', 'fp32', 'fp128',
        ]
    }

    Category = "Arithmetic"

    def __call__(self, context, event, *args, **kwargs):
        # operator.truediv yields a float even for integer operands.
        return self.reduce(operator.truediv, context, event, *args, **kwargs)

    def get_outlet_type(self):
        # TODO: Check if there is float among integers
        return _get_outlet_type_from_first(self.Items)

    def get_items_inlet_type(self):
        # TODO: Check if there is float among integers
        return _get_outlet_type_from_first(self.Items)
class MUL(SequenceExpression):
    """
    Multiplies values in expression.

    Evaluates every item in ``Items`` and folds the results together with
    the ``*`` operator, left to right.
    """

    # Declarative item-type constraints consumed by the expression framework.
    Attributes = {
        "Items": [
            'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
            'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
            'fp64', 'fp16', 'fp32', 'fp128',
        ]
    }

    Category = "Arithmetic"

    def __call__(self, context, event, *args, **kwargs):
        # Left-fold all evaluated items with operator.mul.
        return self.reduce(operator.mul, context, event, *args, **kwargs)

    def get_outlet_type(self):
        # TODO: Check if there is float among integers
        return _get_outlet_type_from_first(self.Items)

    def get_items_inlet_type(self):
        # TODO: Check if there is float among integers
        return _get_outlet_type_from_first(self.Items)
class SUB(SequenceExpression):
    """
    Subtracts values in expression.

    Evaluates every item in ``Items`` and folds the results together with
    the ``-`` operator, left to right.
    """

    # Declarative item-type constraints consumed by the expression framework.
    Attributes = {
        "Items": [
            'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
            'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
            'fp64', 'fp16', 'fp32', 'fp128',
        ]
    }

    Category = "Arithmetic"

    def __call__(self, context, event, *args, **kwargs):
        # Left-fold all evaluated items with operator.sub.
        return self.reduce(operator.sub, context, event, *args, **kwargs)

    def get_outlet_type(self):
        # TODO: Check if there is float among integers
        return _get_outlet_type_from_first(self.Items)

    def get_items_inlet_type(self):
        # TODO: Check if there is float among integers
        return _get_outlet_type_from_first(self.Items)
class MOD(SequenceExpression):
    """
    Apply the modulo operator to values in expression.

    Evaluates every item in ``Items`` and folds the results together with
    the ``%`` operator, left to right.  Only integer item types are
    accepted.
    """

    # Declarative item-type constraints consumed by the expression
    # framework.  Fix: 'si64' was listed twice in the original declaration.
    Attributes = {
        "Items": [
            'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
            'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
        ]
    }

    Category = "Arithmetic"

    def __call__(self, context, event, *args, **kwargs):
        # Left-fold all evaluated items with operator.mod.
        return self.reduce(operator.mod, context, event, *args, **kwargs)

    def get_outlet_type(self):
        # The result type follows the type of the first declared item.
        return _get_outlet_type_from_first(self.Items)

    def get_items_inlet_type(self):
        return _get_outlet_type_from_first(self.Items)
class POW(SequenceExpression):
    """
    Raise values in expression to a power.

    Evaluates every item in ``Items`` and folds the results together with
    the ``**`` operator, left to right.
    """

    # Declarative item-type constraints consumed by the expression framework.
    Attributes = {
        "Items": [
            'si64'
        ]
    }

    Category = "Arithmetic"

    def __call__(self, context, event, *args, **kwargs):
        # Left-fold all evaluated items with operator.pow.
        return self.reduce(operator.pow, context, event, *args, **kwargs)

    # Added for consistency with the other arithmetic expressions in this
    # module, which all derive their outlet/inlet types from the first
    # declared item.
    def get_outlet_type(self):
        return _get_outlet_type_from_first(self.Items)

    def get_items_inlet_type(self):
        return _get_outlet_type_from_first(self.Items)
def _get_outlet_type_from_first(items):
if len(items) == 0:
return int.__name__
# Take the type of the first item in the list
return items[0].get_outlet_type()
| 22.09816 | 71 | 0.673515 | import operator
from ..abc import SequenceExpression
class ADD(SequenceExpression):
Attributes = {
"Items": [
'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
'fp64', 'fp16', 'fp32', 'fp128',
'str',
]
}
Category = "Arithmetic"
def __call__(self, context, event, *args, **kwargs):
return self.reduce(operator.add, context, event, *args, **kwargs)
def get_outlet_type(self):
return _get_outlet_type_from_first(self.Items)
def get_items_inlet_type(self):
return _get_outlet_type_from_first(self.Items)
class DIV(SequenceExpression):
Attributes = {
"Items": [
'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
'fp64', 'fp16', 'fp32', 'fp128',
]
}
Category = "Arithmetic"
def __call__(self, context, event, *args, **kwargs):
return self.reduce(operator.truediv, context, event, *args, **kwargs)
def get_outlet_type(self):
return _get_outlet_type_from_first(self.Items)
def get_items_inlet_type(self):
return _get_outlet_type_from_first(self.Items)
class MUL(SequenceExpression):
Attributes = {
"Items": [
'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
'fp64', 'fp16', 'fp32', 'fp128',
]
}
Category = "Arithmetic"
def __call__(self, context, event, *args, **kwargs):
return self.reduce(operator.mul, context, event, *args, **kwargs)
def get_outlet_type(self):
return _get_outlet_type_from_first(self.Items)
def get_items_inlet_type(self):
return _get_outlet_type_from_first(self.Items)
class SUB(SequenceExpression):
Attributes = {
"Items": [
'si64', 'si8', 'si16', 'si32', 'si128', 'si256',
'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
'fp64', 'fp16', 'fp32', 'fp128',
]
}
Category = "Arithmetic"
def __call__(self, context, event, *args, **kwargs):
return self.reduce(operator.sub, context, event, *args, **kwargs)
def get_outlet_type(self):
return _get_outlet_type_from_first(self.Items)
def get_items_inlet_type(self):
return _get_outlet_type_from_first(self.Items)
class MOD(SequenceExpression):
Attributes = {
"Items": [
'si64', 'si8', 'si16', 'si32', 'si64', 'si128', 'si256',
'ui8', 'ui16', 'ui32', 'ui64', 'ui128', 'ui256',
]
}
Category = "Arithmetic"
def __call__(self, context, event, *args, **kwargs):
return self.reduce(operator.mod, context, event, *args, **kwargs)
def get_outlet_type(self):
return _get_outlet_type_from_first(self.Items)
def get_items_inlet_type(self):
return _get_outlet_type_from_first(self.Items)
class POW(SequenceExpression):
Attributes = {
"Items": [
'si64'
]
}
Category = "Arithmetic"
def __call__(self, context, event, *args, **kwargs):
return self.reduce(operator.pow, context, event, *args, **kwargs)
def _get_outlet_type_from_first(items):
if len(items) == 0:
return int.__name__
return items[0].get_outlet_type()
| true | true |
f7fe441ef09aff85ea028ddd19931fcfbe5a8d7a | 4,360 | py | Python | ranger/colorschemes/solarized.py | bryanwills/dotfiles | ebc885d51ab7849a953f1e447b87f172ab686b25 | [
"MIT"
] | null | null | null | ranger/colorschemes/solarized.py | bryanwills/dotfiles | ebc885d51ab7849a953f1e447b87f172ab686b25 | [
"MIT"
] | null | null | null | ranger/colorschemes/solarized.py | bryanwills/dotfiles | ebc885d51ab7849a953f1e447b87f172ab686b25 | [
"MIT"
] | null | null | null | # This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
# Author: Joseph Tannhuber <sepp.tannhuber@yahoo.de>, 2013
# Solarized like colorscheme, similar to solarized-dircolors
# from https://github.com/seebi/dircolors-solarized.
# This is a modification of Roman Zimbelmann's default colorscheme.
from __future__ import (absolute_import, division, print_function)
from ranger.gui.colorscheme import ColorScheme
from ranger.gui.color import (
cyan, magenta, red, white, default,
normal, bold, reverse,
default_colors,
)
class Solarized(ColorScheme):
    """Solarized-like colorscheme for ranger.

    Modelled on the default colorscheme's branch structure, with 256-color
    palette indices chosen to resemble solarized-dircolors.

    Fix: the final ``return fg, bg, attr`` line had dataset-extraction
    residue (``| 30.48951 | 84 | 0.452752 |``) fused onto it, which made
    the module unparsable; the line is restored here.
    """

    # 256-color index used for loader/progress indication throughout.
    progress_bar_color = 33

    def use(self, context):  # pylint: disable=too-many-branches,too-many-statements
        """Return the (fg, bg, attr) triple for the given drawing context.

        Branch order matters: later checks intentionally override earlier
        color assignments within the same context group.
        """
        fg, bg, attr = default_colors

        if context.reset:
            return default_colors

        elif context.in_browser:
            fg = 244
            if context.selected:
                attr = reverse
            else:
                attr = normal
            if context.empty or context.error:
                fg = 235
                bg = 160
            if context.border:
                fg = default
            if context.media:
                if context.image:
                    fg = 136
                else:
                    fg = 166
            if context.container:
                fg = 61
            if context.directory:
                fg = 33
            elif context.executable and not \
                    any((context.media, context.container,
                         context.fifo, context.socket)):
                fg = 64
                attr |= bold
            if context.socket:
                fg = 136
                bg = 230
                attr |= bold
            if context.fifo:
                fg = 136
                bg = 230
                attr |= bold
            if context.device:
                fg = 244
                bg = 230
                attr |= bold
            if context.link:
                # Valid links vs. broken links get different foregrounds.
                fg = 37 if context.good else 160
                attr |= bold
                if context.bad:
                    bg = 235
            if context.tag_marker and not context.selected:
                attr |= bold
                if fg in (red, magenta):
                    fg = white
                else:
                    fg = red
            if not context.selected and (context.cut or context.copied):
                fg = 234
                attr |= bold
            if context.main_column:
                if context.selected:
                    attr |= bold
                if context.marked:
                    attr |= bold
                    bg = 237
            if context.badinfo:
                if attr & reverse:
                    bg = magenta
                else:
                    fg = magenta
            if context.inactive_pane:
                fg = 241

        elif context.in_titlebar:
            attr |= bold
            if context.hostname:
                fg = 16 if context.bad else 255
                if context.bad:
                    bg = 166
            elif context.directory:
                fg = 33
            elif context.tab:
                fg = 47 if context.good else 33
                bg = 239
            elif context.link:
                fg = cyan

        elif context.in_statusbar:
            if context.permissions:
                if context.good:
                    fg = 93
                elif context.bad:
                    fg = 160
                    bg = 235
            if context.marked:
                attr |= bold | reverse
                fg = 237
                bg = 47
            if context.message:
                if context.bad:
                    attr |= bold
                    fg = 160
                    bg = 235
            if context.loaded:
                bg = self.progress_bar_color

        if context.text:
            if context.highlight:
                attr |= reverse

        if context.in_taskview:
            if context.title:
                fg = 93
            if context.selected:
                attr |= reverse
            if context.loaded:
                if context.selected:
                    fg = self.progress_bar_color
                else:
                    bg = self.progress_bar_color

        return fg, bg, attr
from __future__ import (absolute_import, division, print_function)
from ranger.gui.colorscheme import ColorScheme
from ranger.gui.color import (
cyan, magenta, red, white, default,
normal, bold, reverse,
default_colors,
)
class Solarized(ColorScheme):
progress_bar_color = 33
def use(self, context): # pylint: disable=too-many-branches,too-many-statements
fg, bg, attr = default_colors
if context.reset:
return default_colors
elif context.in_browser:
fg = 244
if context.selected:
attr = reverse
else:
attr = normal
if context.empty or context.error:
fg = 235
bg = 160
if context.border:
fg = default
if context.media:
if context.image:
fg = 136
else:
fg = 166
if context.container:
fg = 61
if context.directory:
fg = 33
elif context.executable and not \
any((context.media, context.container,
context.fifo, context.socket)):
fg = 64
attr |= bold
if context.socket:
fg = 136
bg = 230
attr |= bold
if context.fifo:
fg = 136
bg = 230
attr |= bold
if context.device:
fg = 244
bg = 230
attr |= bold
if context.link:
fg = 37 if context.good else 160
attr |= bold
if context.bad:
bg = 235
if context.tag_marker and not context.selected:
attr |= bold
if fg in (red, magenta):
fg = white
else:
fg = red
if not context.selected and (context.cut or context.copied):
fg = 234
attr |= bold
if context.main_column:
if context.selected:
attr |= bold
if context.marked:
attr |= bold
bg = 237
if context.badinfo:
if attr & reverse:
bg = magenta
else:
fg = magenta
if context.inactive_pane:
fg = 241
elif context.in_titlebar:
attr |= bold
if context.hostname:
fg = 16 if context.bad else 255
if context.bad:
bg = 166
elif context.directory:
fg = 33
elif context.tab:
fg = 47 if context.good else 33
bg = 239
elif context.link:
fg = cyan
elif context.in_statusbar:
if context.permissions:
if context.good:
fg = 93
elif context.bad:
fg = 160
bg = 235
if context.marked:
attr |= bold | reverse
fg = 237
bg = 47
if context.message:
if context.bad:
attr |= bold
fg = 160
bg = 235
if context.loaded:
bg = self.progress_bar_color
if context.text:
if context.highlight:
attr |= reverse
if context.in_taskview:
if context.title:
fg = 93
if context.selected:
attr |= reverse
if context.loaded:
if context.selected:
fg = self.progress_bar_color
else:
bg = self.progress_bar_color
return fg, bg, attr | true | true |
f7fe44ba34d8e1302c7841680e9f10366332e077 | 2,699 | py | Python | deployment/config.prod.py | hemangandhi/lcs | 9dc96ae51b6389a72ee36cb205b4a2372858df1e | [
"MIT"
] | 7 | 2018-07-09T01:54:02.000Z | 2022-02-25T21:10:14.000Z | deployment/config.prod.py | hemangandhi/lcs | 9dc96ae51b6389a72ee36cb205b4a2372858df1e | [
"MIT"
] | 101 | 2018-06-25T03:57:03.000Z | 2022-01-13T01:40:23.000Z | deployment/config.prod.py | hemangandhi/lcs | 9dc96ae51b6389a72ee36cb205b4a2372858df1e | [
"MIT"
from datetime import datetime, timezone, timedelta
import os

# --- Database ---------------------------------------------------------------
# uri should contain auth and default database
DB_URI = os.getenv("PRODUCTION_DB_URI", "")
# Logical collection names -> MongoDB collection names.
DB_COLLECTIONS = {
    "users": "users",
    "magic links": "magicLinks",
    "slack messages": "slackMessages"
}

# --- Third-party service credentials ---------------------------------------
SPARKPOST_KEY = os.getenv("PRODUCTION_SPARKPOST_KEY", "")
# NOTE(review): unlike most settings here, these two have no "" fallback,
# so they are None when the env vars are unset -- confirm that is intended.
SLACK_KEYS = {
    'token': os.getenv("PRODUCTION_SLACK_API_TOKEN"),
    'channel': os.getenv("PRODUCTION_SLACK_CHANNEL_ID")
}

# Namespace class grouping the Google Calendar credentials.
class GOOGLE_CAL:
    CAL_ID = os.getenv("PRODUCTION_GOOGLE_CAL_ID", "")
    CAL_API_KEY = os.getenv("PRODUCTION_GOOGLE_CAL_API_KEY", "")

MAPS_API_KEY = os.getenv("PRODUCTION_MAPS_API_KEY", "")

# Currently not used as reimbursement is disabled
# class TRAVEL:
#     HACKRU_LOCATION = "New Brunswick, NJ, USA"
#     MAX_REIMBURSE = float(os.getenv("PRODUCTION_MAX_REIMBURSE", ""))
#     BUDGET = float(os.getenv("PRODUCTION_TRAVEL_BUDGET", ""))
#     MULTIPLIERS = {
#         "car": float(os.getenv("PRODUCTION_CAR_REIMBURSE", )),
#         "train": float(os.getenv("PRODUCTION_TRAIN_REIMBURSE", )),
#         "bus": float(os.getenv("PRODUCTION_BUS_REIMBURSE", )),
#         "plane": float(os.getenv("PRODUCTION_PLANE_REIMBURSE", ))
#     }

# edt
TIMEZONE = timezone(timedelta(hours=-4))

# first is open second is close, currently registration dates are not open information
# NOTE(review): every `os.getenv("X", )` below passes NO default (the
# trailing comma is not a value), so getenv returns None when the env var
# is unset and int(None) raises TypeError at import time.  All of these
# variables must therefore be set in production -- confirm, or add real
# defaults.
REGISTRATION_DATES = [
    # open for registration
    [datetime(int(os.getenv("PRODUCTION_START_YEAR", )), int(os.getenv("PRODUCTION_START_MONTH", )), int(os.getenv("PRODUCTION_START_DAY", )), tzinfo=TIMEZONE),
     datetime(int(os.getenv("PRODUCTION_END_YEAR", )), int(os.getenv("PRODUCTION_END_MONTH", )), int(os.getenv("PRODUCTION_END_DAY", )), int(os.getenv("PRODUCTION_END_HOUR", )), tzinfo=TIMEZONE)],
    # reopen during the event of
    [datetime(int(os.getenv("PRODUCTION_DAY_OF_START_YEAR", )), int(os.getenv("PRODUCTION_DAY_OF_START_MONTH", )), int(os.getenv("PRODUCTION_DAY_OF_START_DAY", )), int(os.getenv("PRODUCTION_DAY_OF_START_HOUR", )), tzinfo=TIMEZONE),
     datetime(int(os.getenv("PRODUCTION_DAY_OF_END_YEAR", )), int(os.getenv("PRODUCTION_DAY_OF_END_MONTH", )), int(os.getenv("PRODUCTION_DAY_OF_END_DAY", )), int(os.getenv("PRODUCTION_DAY_OF_END_HOUR", )), tzinfo=TIMEZONE)]
]

# --- AWS --------------------------------------------------------------------
AWS = {
    "aws_access_key_id": os.environ.get("PRODUCTION_AWS_ACCESS_KEY_ID"),
    "aws_secret_access_key": os.environ.get("PRODUCTION_AWS_SECRET_ACCESS_KEY"),
    "region_name": os.environ.get("PRODUCTION_REGION_NAME"),
}
RESUME_BUCKET = os.getenv("PRODUCTION_RESUME_BUCKET", "")
WAIVER_BUCKET = os.getenv("PRODUCTION_WAIVER_BUCKET", "")

# Json webtoken
JWT_SECRET = os.getenv("PRODUCTION_JWT_SECRET", "")
JWT_ALGO = os.getenv("PRODUCTION_JWT_ALGO", "")
| 42.171875 | 231 | 0.720267 | from datetime import datetime, timezone, timedelta
import os
DB_URI = os.getenv("PRODUCTION_DB_URI", "")
DB_COLLECTIONS = {
"users": "users",
"magic links": "magicLinks",
"slack messages": "slackMessages"
}
SPARKPOST_KEY = os.getenv("PRODUCTION_SPARKPOST_KEY", "")
SLACK_KEYS = {
'token': os.getenv("PRODUCTION_SLACK_API_TOKEN"),
'channel': os.getenv("PRODUCTION_SLACK_CHANNEL_ID")
}
class GOOGLE_CAL:
CAL_ID = os.getenv("PRODUCTION_GOOGLE_CAL_ID", "")
CAL_API_KEY = os.getenv("PRODUCTION_GOOGLE_CAL_API_KEY", "")
MAPS_API_KEY = os.getenv("PRODUCTION_MAPS_API_KEY", "")
TIMEZONE = timezone(timedelta(hours=-4))
REGISTRATION_DATES = [
[datetime(int(os.getenv("PRODUCTION_START_YEAR", )), int(os.getenv("PRODUCTION_START_MONTH", )), int(os.getenv("PRODUCTION_START_DAY", )), tzinfo=TIMEZONE),
datetime(int(os.getenv("PRODUCTION_END_YEAR", )), int(os.getenv("PRODUCTION_END_MONTH", )), int(os.getenv("PRODUCTION_END_DAY", )), int(os.getenv("PRODUCTION_END_HOUR", )), tzinfo=TIMEZONE)],
[datetime(int(os.getenv("PRODUCTION_DAY_OF_START_YEAR", )), int(os.getenv("PRODUCTION_DAY_OF_START_MONTH", )), int(os.getenv("PRODUCTION_DAY_OF_START_DAY", )), int(os.getenv("PRODUCTION_DAY_OF_START_HOUR", )), tzinfo=TIMEZONE),
datetime(int(os.getenv("PRODUCTION_DAY_OF_END_YEAR", )), int(os.getenv("PRODUCTION_DAY_OF_END_MONTH", )), int(os.getenv("PRODUCTION_DAY_OF_END_DAY", )), int(os.getenv("PRODUCTION_DAY_OF_END_HOUR", )), tzinfo=TIMEZONE)]
]
AWS = {
"aws_access_key_id": os.environ.get("PRODUCTION_AWS_ACCESS_KEY_ID"),
"aws_secret_access_key": os.environ.get("PRODUCTION_AWS_SECRET_ACCESS_KEY"),
"region_name": os.environ.get("PRODUCTION_REGION_NAME"),
}
RESUME_BUCKET = os.getenv("PRODUCTION_RESUME_BUCKET", "")
WAIVER_BUCKET = os.getenv("PRODUCTION_WAIVER_BUCKET", "")
JWT_SECRET = os.getenv("PRODUCTION_JWT_SECRET", "")
JWT_ALGO = os.getenv("PRODUCTION_JWT_ALGO", "")
| true | true |
f7fe45a5e11f988cd525106c769bc19a28f266e8 | 2,447 | py | Python | app/models.py | billowbashir/MaNeighba | 84ee7f86ac471c5449d94bd592adf004b3288823 | [
"Unlicense"
] | null | null | null | app/models.py | billowbashir/MaNeighba | 84ee7f86ac471c5449d94bd592adf004b3288823 | [
"Unlicense"
] | 4 | 2020-06-05T19:26:19.000Z | 2021-09-08T00:33:53.000Z | app/models.py | billowbashir/MaNeighba | 84ee7f86ac471c5449d94bd592adf004b3288823 | [
"Unlicense"
from django.db import models
from django.contrib.auth.models import User
class Neighbourhood(models.Model):
    """A residential neighbourhood administered by a single user."""

    name = models.CharField(max_length=60)
    location = models.CharField(max_length=60)
    population = models.IntegerField()
    admin = models.ForeignKey(User, on_delete=models.CASCADE)

    def create_neigborhood(self):
        """Persist this neighbourhood.  (Historical spelling kept for API compatibility.)"""
        self.save()

    def delete_neigborhood(self):
        """Remove this neighbourhood from the database."""
        self.delete()

    @classmethod
    def find_neigborhood(cls, neighborhood_id):
        """Return a queryset of neighbourhoods matching the given id."""
        neighborhood = cls.objects.filter(id=neighborhood_id)
        return neighborhood

    @classmethod
    def update_neighborhood(cls, neighborhood_id, name=None, population=None):
        """Update the matched neighbourhood's fields and return the queryset.

        Fixes two defects in the original code: ``update_neighborhood``
        was defined twice (the second definition silently shadowed the
        first) and both bodies referenced an undefined name ``Value``, so
        every call raised NameError; they also assigned attributes on a
        queryset, which never persists.  The variants are merged into one
        backward-compatible method: pass the new ``name`` and/or
        ``population``; omitted fields are left untouched.
        """
        neighborhood = cls.objects.filter(id=neighborhood_id)
        updates = {}
        if name is not None:
            updates["name"] = name
        if population is not None:
            updates["population"] = population
        if updates:
            # QuerySet.update() persists the change in a single query.
            neighborhood.update(**updates)
        return neighborhood

    @classmethod
    def search_by_name(cls, search_term):
        """Case-insensitive substring search on the neighbourhood name."""
        neighborhood = cls.objects.filter(name__icontains=search_term)
        return neighborhood
class Profile(models.Model):
    """A user's public profile (avatar and short biography)."""

    # Uploaded avatar, stored under MEDIA_ROOT/profile_photos/.
    profile_pic=models.ImageField(upload_to='profile_photos/')
    bio=models.CharField(max_length=300)
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    # neighborhood=models.ForeignKey(Neighbourhood,on_delete=models.CASCADE)

    def create_profile(self):
        """Persist this profile."""
        self.save()

    def delete_profile(self):
        """Remove this profile from the database."""
        self.delete()
class Business(models.Model):
    """A business listed by a user within a neighbourhood."""

    name = models.CharField(max_length=60)
    description = models.CharField(max_length=200)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    neighborhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE)
    email = models.EmailField()

    def create_business(self):
        """Persist this business."""
        self.save()

    def delete_business(self):
        """Remove this business from the database."""
        self.delete()

    @classmethod
    def find_business(cls, business_id):
        """Return a queryset of businesses matching the given id."""
        business = cls.objects.filter(id=business_id)
        return business

    @classmethod
    def update_business(cls, business_id, name=None):
        """Rename the matched business and return the queryset.

        Fixes the original implementation, which referenced an undefined
        name ``Value`` (NameError on every call) and assigned an attribute
        on a queryset, which never persists.  ``name`` is now an explicit,
        optional parameter applied with QuerySet.update().
        """
        business = cls.objects.filter(id=business_id)
        if name is not None:
            business.update(name=name)
        return business
class Post(models.Model):
    """A short text post made by a user in a neighbourhood feed."""

    post=models.CharField(max_length=200)
    user=models.ForeignKey(User,on_delete=models.CASCADE)
    neighborhood=models.ForeignKey(Neighbourhood,on_delete=models.CASCADE)
| 31.371795 | 78 | 0.7217 | from django.db import models
from django.contrib.auth.models import User
class Neighbourhood(models.Model):
name=models.CharField(max_length=60)
location=models.CharField(max_length=60)
population=models.IntegerField()
admin=models.ForeignKey(User,on_delete=models.CASCADE)
def create_neigborhood(self):
self.save()
def delete_neigborhood(self):
self.delete()
@classmethod
def find_neigborhood(cls,neighborhood_id):
neighborhood=cls.objects.filter(id=neighborhood_id)
return neighborhood
@classmethod
def update_neighborhood(cls,neighborhood_id):
neighborhood=cls.objects.filter(id=neighborhood_id)
neighborhood.name=Value
neighborhood.save()
return neighborhood
@classmethod
def update_neighborhood(cls,neighborhood_id):
neighborhood=cls.objects.filter(id=neighborhood_id)
neighborhood.occupants=Value
neighborhood.save()
return neighborhood
@classmethod
def search_by_name(cls,search_term):
neighborhood=cls.objects.filter(name__icontains=search_term)
return neighborhood
class Profile(models.Model):
profile_pic=models.ImageField(upload_to='profile_photos/')
bio=models.CharField(max_length=300)
user = models.ForeignKey(User,on_delete=models.CASCADE)
def create_profile(self):
self.save()
def delete_profile(self):
self.delete()
class Business(models.Model):
name=models.CharField(max_length=60)
description=models.CharField(max_length=200)
user=models.ForeignKey(User,on_delete=models.CASCADE)
neighborhood=models.ForeignKey(Neighbourhood,on_delete=models.CASCADE)
email=models.EmailField()
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls,business_id):
business=cls.objects.filter(id=business_id)
return business
@classmethod
def update_business(cls,business_id):
business=cls.objects.filter(id=business_id)
business.name=Value
business.save()
return business
class Post(models.Model):
post=models.CharField(max_length=200)
user=models.ForeignKey(User,on_delete=models.CASCADE)
neighborhood=models.ForeignKey(Neighbourhood,on_delete=models.CASCADE)
| true | true |
f7fe47a4927438bfc838f1831fa8c68f9d1deb1a | 1,873 | py | Python | ginga/mockw/ImageViewCanvasMock.py | Cadair/ginga | 5afdd8824f27c7ae7d8d82b5013b0ff0068bd8b8 | [
"BSD-3-Clause"
] | null | null | null | ginga/mockw/ImageViewCanvasMock.py | Cadair/ginga | 5afdd8824f27c7ae7d8d82b5013b0ff0068bd8b8 | [
"BSD-3-Clause"
] | null | null | null | ginga/mockw/ImageViewCanvasMock.py | Cadair/ginga | 5afdd8824f27c7ae7d8d82b5013b0ff0068bd8b8 | [
"BSD-3-Clause"
] | null | null | null | #
# ImageViewCanvasMock.py -- A Ginga image widget with canvas drawing in mock
# widget set
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.mockw import ImageViewMock
from ginga.canvas.mixins import DrawingMixin, CanvasMixin, CompoundMixin
class ImageViewCanvasError(ImageViewMock.ImageViewMockError):
    """Exception raised for errors specific to this canvas-enabled viewer."""
    pass
class ImageViewCanvas(ImageViewMock.ImageViewZoom,
                      DrawingMixin, CanvasMixin, CompoundMixin):
    """A Ginga image viewer (mock widget set) that also acts as a canvas.

    Combines the zoomable mock viewer with the compound/canvas/drawing
    mixins so the object serves simultaneously as the display surface and
    the root drawing canvas.
    """

    def __init__(self, logger=None, settings=None,
                 rgbmap=None, bindmap=None, bindings=None):
        # Initialize the viewer base first, then the mixins in
        # Compound -> Canvas -> Drawing order (each layers on the previous).
        ImageViewMock.ImageViewZoom.__init__(self, logger=logger,
                                             settings=settings,
                                             rgbmap=rgbmap,
                                             bindmap=bindmap,
                                             bindings=bindings)
        CompoundMixin.__init__(self)
        CanvasMixin.__init__(self)
        DrawingMixin.__init__(self)

        # we are both a viewer and a canvas
        self.set_canvas(self, private_canvas=self)

    # METHODS THAT WERE IN IPG

    def add_canvas(self, tag=None):
        """Create a new drawing-enabled canvas, attach it to this view, and return it."""
        # add a canvas to the view
        DrawingCanvas = self.getDrawClass('drawingcanvas')
        canvas = DrawingCanvas()
        # enable drawing on the canvas
        canvas.enable_draw(True)
        canvas.ui_setActive(True)
        canvas.setSurface(self)
        # add the canvas to the view.
        self.add(canvas, tag=tag)
        return canvas

    def show(self):
        """Render the current RGB image as an inline PNG for IPython display.

        NOTE(review): imports IPython lazily, so IPython is only required
        when this method is actually called.
        """
        from IPython.display import Image
        return Image(data=bytes(self.get_rgb_image_as_bytes(format='png')),
                     format='png', embed=True)
#END
| 34.054545 | 76 | 0.610785 |
from ginga.mockw import ImageViewMock
from ginga.canvas.mixins import DrawingMixin, CanvasMixin, CompoundMixin
class ImageViewCanvasError(ImageViewMock.ImageViewMockError):
pass
class ImageViewCanvas(ImageViewMock.ImageViewZoom,
DrawingMixin, CanvasMixin, CompoundMixin):
def __init__(self, logger=None, settings=None,
rgbmap=None, bindmap=None, bindings=None):
ImageViewMock.ImageViewZoom.__init__(self, logger=logger,
settings=settings,
rgbmap=rgbmap,
bindmap=bindmap,
bindings=bindings)
CompoundMixin.__init__(self)
CanvasMixin.__init__(self)
DrawingMixin.__init__(self)
self.set_canvas(self, private_canvas=self)
def add_canvas(self, tag=None):
DrawingCanvas = self.getDrawClass('drawingcanvas')
canvas = DrawingCanvas()
canvas.enable_draw(True)
canvas.ui_setActive(True)
canvas.setSurface(self)
self.add(canvas, tag=tag)
return canvas
def show(self):
from IPython.display import Image
return Image(data=bytes(self.get_rgb_image_as_bytes(format='png')),
format='png', embed=True)
| true | true |
f7fe47edb7673a8f735c3e787ec10a0751ea2d7a | 13,015 | py | Python | ML_venv/Lib/site-packages/jupyter_client/ssh/tunnel.py | ashokjohn/ML_RealWorld | 8508c8cd6a9fd0467ee68954850179ab2506bc04 | [
"MIT"
] | null | null | null | ML_venv/Lib/site-packages/jupyter_client/ssh/tunnel.py | ashokjohn/ML_RealWorld | 8508c8cd6a9fd0467ee68954850179ab2506bc04 | [
"MIT"
] | null | null | null | ML_venv/Lib/site-packages/jupyter_client/ssh/tunnel.py | ashokjohn/ML_RealWorld | 8508c8cd6a9fd0467ee68954850179ab2506bc04 | [
"MIT"
] | null | null | null | """Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
"""
# Copyright (C) 2010-2011 IPython Development Team
# Copyright (C) 2011- PyZMQ Developers
#
# Redistributed from IPython under the terms of the BSD License.
import atexit
import os
import re
import signal
import socket
import sys
import warnings
from getpass import getpass
from getpass import getuser
from multiprocessing import Process
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
import paramiko
SSHException = paramiko.ssh_exception.SSHException
except ImportError:
paramiko = None # type: ignore
class SSHException(Exception): # type: ignore
pass
else:
from .forward import forward_tunnel
try:
import pexpect # type: ignore
except ImportError:
pexpect = None
from zmq.utils.strtypes import b
def select_random_ports(n):
    """Select and return n random ports that are available."""
    # Bind n sockets to port 0 so the OS hands out distinct free ports;
    # keep them all bound while collecting the numbers, then release.
    bound = [socket.socket() for _ in range(n)]
    for sock in bound:
        sock.bind(("", 0))
    ports = [sock.getsockname()[1] for sock in bound]
    for sock in bound:
        sock.close()
    return ports
# -----------------------------------------------------------------------------
# Check for passwordless login
# -----------------------------------------------------------------------------
_password_pat = re.compile(b(r"pass(word|phrase):"), re.IGNORECASE)
def try_passwordless_ssh(server, keyfile, paramiko=None):
    """Attempt to make an ssh connection without a password.

    This is mainly used for requiring password input only once
    when many tunnels may be connected to the same server.

    If paramiko is None, the default for the platform is chosen.
    """
    # Windows defaults to paramiko; everywhere else shell OpenSSH is used.
    use_paramiko = sys.platform == "win32" if paramiko is None else paramiko
    checker = _try_passwordless_paramiko if use_paramiko else _try_passwordless_openssh
    return checker(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
    """Try passwordless login with shell ssh command.

    Spawns ``ssh -f <server> exit`` under pexpect and watches the output:
    reaching EOF without a password prompt means key-based login works.
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko")
    cmd = "ssh -f " + server
    if keyfile:
        cmd += " -i " + keyfile
    cmd += " exit"

    # pop SSH_ASKPASS from env
    # (otherwise ssh may pop up a GUI password dialog instead of prompting)
    env = os.environ.copy()
    env.pop("SSH_ASKPASS", None)

    ssh_newkey = "Are you sure you want to continue connecting"
    p = pexpect.spawn(cmd, env=env)
    while True:
        try:
            i = p.expect([ssh_newkey, _password_pat], timeout=0.1)
            if i == 0:
                # Unknown host key: refuse to answer the prompt ourselves.
                raise SSHException("The authenticity of the host can't be established.")
        except pexpect.TIMEOUT:
            # No output yet; keep polling.
            continue
        except pexpect.EOF:
            # ssh finished without ever asking for a password.
            return True
        else:
            # expect() matched the password/passphrase prompt (i == 1):
            # passwordless login is not available.
            return False
def _try_passwordless_paramiko(server, keyfile):
    """Try passwordless login with paramiko.

    Returns True when a key-based connection to ``server`` succeeds,
    False when paramiko raises AuthenticationException.
    """
    if paramiko is None:
        msg = "Paramiko unavailable, "
        if sys.platform == "win32":
            msg += "Paramiko is required for ssh tunneled connections on Windows."
        else:
            msg += "use OpenSSH."
        raise ImportError(msg)
    # Parse "user@host:port" into its components.
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # Warn (rather than fail) on unknown host keys.
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True)
    except paramiko.AuthenticationException:
        return False
    else:
        client.close()
        return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Connect a socket to an address via an ssh tunnel.

    This is a wrapper for socket.connect(addr), when addr is not accessible
    from the local machine.  It creates an ssh tunnel using the remaining
    arguments and connects the socket to the randomly selected local end
    of that tunnel instead.
    """
    tunneled_url, tunnel = open_tunnel(
        addr, server, keyfile=keyfile, password=password,
        paramiko=paramiko, timeout=timeout,
    )
    # Connect to the near (local) end of the tunnel.
    socket.connect(tunneled_url)
    return tunnel
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Open a tunneled connection from a 0MQ url.

    For use inside tunnel_connection.

    Returns
    -------
    (url, tunnel) : (str, object)
        The 0MQ url that has been forwarded, and the tunnel object
    """
    # Pick a free local port for the near end of the tunnel.
    (lport,) = select_random_ports(1)
    # Split e.g. "tcp://1.2.3.4:5555" into its pieces.
    transport, addr = addr.split("://")
    ip, rport = addr.split(":")
    rport = int(rport)
    # Default to paramiko on Windows, shell ssh elsewhere.
    if paramiko is None:
        paramiko = sys.platform == "win32"
    tunnelf = paramiko_tunnel if paramiko else openssh_tunnel

    tunnel = tunnelf(
        lport, rport, server,
        remoteip=ip, keyfile=keyfile, password=password, timeout=timeout,
    )
    return "tcp://127.0.0.1:%i" % lport, tunnel
def openssh_tunnel(
    lport, rport, server, remoteip="127.0.0.1", keyfile=None, password=None, timeout=60
):
    """Create an ssh tunnel using command-line ssh that connects port lport
    on this machine to localhost:rport on server. The tunnel
    will automatically close when not in use, remaining open
    for a minimum of timeout seconds for an initial connection.

    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.

    keyfile and password may be specified, but ssh config is checked for defaults.

    Parameters
    ----------
    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.
    keyfile : str; path to public key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing. This prevents orphaned tunnels from running forever.
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko_tunnel")
    ssh = "ssh "
    if keyfile:
        ssh += "-i " + keyfile

    if ":" in server:
        server, port = server.split(":")
        ssh += " -p %s" % port

    # First, try to reuse an existing ssh ControlMaster connection:
    # "-O check" succeeds (exit status 0) when a master is already running.
    cmd = "%s -O check %s" % (ssh, server)
    (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
    if not exitstatus:
        # Parse the master's pid out of its "Master running (pid=NNN)" reply.
        pid = int(output[output.find(b"(pid=") + 5 : output.find(b")")])  # noqa
        # Ask the existing master to add our port forward.
        cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
            ssh,
            lport,
            remoteip,
            rport,
            server,
        )
        (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
        if not exitstatus:
            # Cancel the forward again when this process exits.
            atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
            return pid
    # No usable master: spawn a dedicated backgrounded ssh ("-f") with the
    # forward; "sleep timeout" keeps it alive long enough for the first use.
    cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
        ssh,
        lport,
        remoteip,
        rport,
        server,
        timeout,
    )

    # pop SSH_ASKPASS from env
    # (otherwise ssh may pop up a GUI password dialog instead of prompting)
    env = os.environ.copy()
    env.pop("SSH_ASKPASS", None)

    ssh_newkey = "Are you sure you want to continue connecting"
    tunnel = pexpect.spawn(cmd, env=env)
    failed = False
    # Poll the spawned ssh: EOF means it backgrounded itself (success or
    # failure, distinguished by exit status); a password prompt is answered
    # with the supplied or interactively collected password.
    while True:
        try:
            i = tunnel.expect([ssh_newkey, _password_pat], timeout=0.1)
            if i == 0:
                # Unknown host key: refuse to answer the prompt ourselves.
                raise SSHException("The authenticity of the host can't be established.")
        except pexpect.TIMEOUT:
            continue
        except pexpect.EOF as e:
            if tunnel.exitstatus:
                print(tunnel.exitstatus)
                print(tunnel.before)
                print(tunnel.after)
                raise RuntimeError("tunnel '%s' failed to start" % (cmd)) from e
            else:
                return tunnel.pid
        else:
            # Password/passphrase prompt matched (i == 1).
            if failed:
                print("Password rejected, try again")
                password = None
            if password is None:
                password = getpass("%s's password: " % (server))
            tunnel.sendline(password)
            failed = True
def _stop_tunnel(cmd):
    # atexit hook: run the "ssh -O cancel ..." command prepared by
    # openssh_tunnel to tear down a forward added to a ControlMaster.
    pexpect.run(cmd)
def _split_server(server):
if "@" in server:
username, server = server.split("@", 1)
else:
username = getuser()
if ":" in server:
server, port = server.split(":")
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(
    lport, rport, server, remoteip="127.0.0.1", keyfile=None, password=None, timeout=60
):
    """launch a tunnel with paramiko in a subprocess. This should only be used
    when shell ssh is unavailable (e.g. Windows).
    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
    as seen from `server`.
    If you are familiar with ssh tunnels, this creates the tunnel:
    ssh server -L localhost:lport:remoteip:rport
    keyfile and password may be specified, but ssh config is checked for defaults.
    Parameters
    ----------
    lport : int
        local port for connecting to the tunnel from this machine.
    rport : int
        port on the remote machine to connect to.
    server : str
        The ssh server to connect to. The full ssh server string will be parsed.
        user@server:port
    remoteip : str [Default: 127.0.0.1]
        The remote ip, specifying the destination of the tunnel.
        Default is localhost, which means that the tunnel would redirect
        localhost:lport on this machine to localhost:rport on the *server*.
    keyfile : str; path to public key file
        This specifies a key to be used in ssh login, default None.
        Regular default ssh keys will be used without specifying this argument.
    password : str;
        Your ssh password to the ssh server. Note that if this is left None,
        you will be prompted for it if passwordless key based login is unavailable.
    timeout : int [default: 60]
        The time (in seconds) after which no activity will result in the tunnel
        closing. This prevents orphaned tunnels from running forever.
    """
    if paramiko is None:
        raise ImportError("Paramiko not available")
    # Only prompt for a password when key-based (passwordless) login fails.
    if password is None:
        if not _try_passwordless_paramiko(server, keyfile):
            password = getpass("%s's password: " % (server))
    # Run the tunnel in a daemonic child process so it dies with the parent.
    p = Process(
        target=_paramiko_tunnel,
        args=(lport, rport, server, remoteip),
        kwargs=dict(keyfile=keyfile, password=password),
    )
    p.daemon = True
    p.start()
    return p
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
    """Function for actually starting a paramiko tunnel, to be passed
    to multiprocessing.Process(target=this), and not called directly.
    """
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # Warn (rather than fail) on unknown host keys.
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect(
            server,
            port,
            username=username,
            key_filename=keyfile,
            look_for_keys=True,
            password=password,
        )
    # except paramiko.AuthenticationException:
    #     if password is None:
    #         password = getpass("%s@%s's password: "%(username, server))
    #         client.connect(server, port, username=username, password=password)
    #     else:
    #         raise
    except Exception as e:
        # This runs in a child process: report and exit instead of raising.
        print("*** Failed to connect to %s:%d: %r" % (server, port, e))
        sys.exit(1)
    # Don't let SIGINT kill the tunnel subprocess
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        forward_tunnel(lport, remoteip, rport, client.get_transport())
    except KeyboardInterrupt:
        print("SIGINT: Port forwarding stopped cleanly")
        sys.exit(0)
    except Exception as e:
        print("Port forwarding stopped uncleanly: %s" % e)
        sys.exit(255)
# Choose the default tunnel backend for the current platform: the paramiko
# implementation on Windows, the OpenSSH/pexpect wrapper elsewhere.
if sys.platform == "win32":
    ssh_tunnel = paramiko_tunnel
else:
    ssh_tunnel = openssh_tunnel
# Names exported by `from <module> import *`.
__all__ = [
    "tunnel_connection",
    "ssh_tunnel",
    "openssh_tunnel",
    "paramiko_tunnel",
    "try_passwordless_ssh",
]
| 31.062053 | 100 | 0.625279 |
import atexit
import os
import re
import signal
import socket
import sys
import warnings
from getpass import getpass
from getpass import getuser
from multiprocessing import Process
# paramiko is optional: silence its DeprecationWarnings during import and
# provide fallbacks so the OpenSSH/pexpect backend still works without it.
try:
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        import paramiko
    SSHException = paramiko.ssh_exception.SSHException
except ImportError:
    paramiko = None
    class SSHException(Exception):
        """Stand-in so ``except SSHException`` works even without paramiko."""
        pass
else:
    from .forward import forward_tunnel
# pexpect is optional too; it drives the command-line ssh client backend.
try:
    import pexpect
except ImportError:
    pexpect = None
from zmq.utils.strtypes import b  # str -> bytes helper (py2/py3 compat)
def select_random_ports(n):
    """Return ``n`` OS-assigned TCP port numbers.

    Every socket is held open until all ports have been collected, so the
    kernel cannot hand the same port out twice; all sockets are then closed.
    """
    holders = [socket.socket() for _ in range(n)]
    try:
        for sock in holders:
            sock.bind(("", 0))
        return [sock.getsockname()[1] for sock in holders]
    finally:
        for sock in holders:
            sock.close()
# Bytes regex matching ssh "password:" / "passphrase:" prompts, case-insensitively.
_password_pat = re.compile(b(r"pass(word|phrase):"), re.IGNORECASE)
def try_passwordless_ssh(server, keyfile, paramiko=None):
    """Check whether key-based (password-free) ssh login to *server* works.

    When *paramiko* is None, the paramiko backend is chosen on Windows and
    the OpenSSH backend everywhere else.
    """
    use_paramiko = (sys.platform == "win32") if paramiko is None else paramiko
    checker = _try_passwordless_paramiko if use_paramiko else _try_passwordless_openssh
    return checker(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
    """Try passwordless login with the shell ssh command; return True on success."""
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko")
    cmd = "ssh -f " + server
    if keyfile:
        cmd += " -i " + keyfile
    cmd += " exit"
    # Drop SSH_ASKPASS so a password request shows up on the pty we are
    # watching instead of being routed to a GUI helper.
    env = os.environ.copy()
    env.pop("SSH_ASKPASS", None)
    ssh_newkey = "Are you sure you want to continue connecting"
    p = pexpect.spawn(cmd, env=env)
    while True:
        try:
            i = p.expect([ssh_newkey, _password_pat], timeout=0.1)
            if i == 0:
                raise SSHException("The authenticity of the host can't be established.")
        except pexpect.TIMEOUT:
            continue
        except pexpect.EOF:
            # ssh exited without asking for a password: login succeeded.
            return True
        else:
            # A password/passphrase prompt appeared: not passwordless.
            return False
def _try_passwordless_paramiko(server, keyfile):
    """Try passwordless login with paramiko; return True on success."""
    if paramiko is None:
        msg = "Paramiko unavailable, "
        if sys.platform == "win32":
            msg += "Paramiko is required for ssh tunneled connections on Windows."
        else:
            msg += "use OpenSSH."
        raise ImportError(msg)
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # Warn (rather than fail) on unknown host keys.
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True)
    except paramiko.AuthenticationException:
        return False
    else:
        client.close()
        return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Connect a zmq *socket* to *addr* through an ssh tunnel via *server*.

    Returns the tunnel handle (backend-specific: a pid or a Process).
    """
    local_url, tunnel = open_tunnel(
        addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout
    )
    socket.connect(local_url)
    return tunnel
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
    """Open an ssh tunnel from a fresh local port to the zmq url *addr*.

    Returns ``(new_url, tunnel)`` where *new_url* points at the local end of
    the tunnel and *tunnel* is the backend-specific tunnel handle.
    """
    (lport,) = select_random_ports(1)
    transport, endpoint = addr.split("://")
    ip, rport_text = endpoint.split(":")
    rport = int(rport_text)
    if paramiko is None:
        paramiko = sys.platform == "win32"
    tunnel_factory = paramiko_tunnel if paramiko else openssh_tunnel
    tunnel = tunnel_factory(
        lport,
        rport,
        server,
        remoteip=ip,
        keyfile=keyfile,
        password=password,
        timeout=timeout,
    )
    return "tcp://127.0.0.1:%i" % lport, tunnel
def openssh_tunnel(
    lport, rport, server, remoteip="127.0.0.1", keyfile=None, password=None, timeout=60
):
    """Create an ssh tunnel with the command-line ssh client, driven by pexpect.

    Redirects `localhost:lport` to `remoteip:rport` as seen from `server`
    (equivalent to ``ssh server -L 127.0.0.1:lport:remoteip:rport``).
    `ssh -O check` first probes for an existing connection-multiplexing
    master for `server`; if one exists the forward is added to it via
    ``-O forward``. Otherwise a fresh background ssh is spawned that exits
    after `timeout` seconds of the bundled ``sleep``. Returns the pid of
    the ssh process; prompts for a password if one is required.
    """
    if pexpect is None:
        raise ImportError("pexpect unavailable, use paramiko_tunnel")
    ssh = "ssh "
    if keyfile:
        ssh += "-i " + keyfile
    if ":" in server:
        server, port = server.split(":")
        ssh += " -p %s" % port
    # Probe for an already-running ssh control master for this server.
    cmd = "%s -O check %s" % (ssh, server)
    (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
    if not exitstatus:
        # Master found: parse its pid out of "Master running (pid=NNN)".
        pid = int(output[output.find(b"(pid=") + 5 : output.find(b")")])  # noqa
        cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
            ssh,
            lport,
            remoteip,
            rport,
            server,
        )
        (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
        if not exitstatus:
            atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
            return pid
    # No reusable master: spawn a background ssh of our own.
    cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
        ssh,
        lport,
        remoteip,
        rport,
        server,
        timeout,
    )
    # pop SSH_ASKPASS from env
    env = os.environ.copy()
    env.pop("SSH_ASKPASS", None)
    ssh_newkey = "Are you sure you want to continue connecting"
    tunnel = pexpect.spawn(cmd, env=env)
    failed = False
    while True:
        try:
            i = tunnel.expect([ssh_newkey, _password_pat], timeout=0.1)
            if i == 0:
                raise SSHException("The authenticity of the host can't be established.")
        except pexpect.TIMEOUT:
            continue
        except pexpect.EOF as e:
            # ssh exited: nonzero status means the tunnel failed to start.
            if tunnel.exitstatus:
                print(tunnel.exitstatus)
                print(tunnel.before)
                print(tunnel.after)
                raise RuntimeError("tunnel '%s' failed to start" % (cmd)) from e
            else:
                return tunnel.pid
        else:
            # Matched the password prompt: (re-)prompt the user and send it.
            if failed:
                print("Password rejected, try again")
                password = None
            if password is None:
                password = getpass("%s's password: " % (server))
            tunnel.sendline(password)
            failed = True
def _stop_tunnel(cmd):
    """Run *cmd* (an ``ssh -O cancel`` line registered via atexit) to tear down a forwarded tunnel."""
    pexpect.run(cmd)
def _split_server(server):
if "@" in server:
username, server = server.split("@", 1)
else:
username = getuser()
if ":" in server:
server, port = server.split(":")
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(
    lport, rport, server, remoteip="127.0.0.1", keyfile=None, password=None, timeout=60
):
    """Launch an ssh tunnel with paramiko in a daemonic subprocess.

    Redirects `localhost:lport` to `remoteip:rport` as seen from `server`.
    Prompts for a password only when passwordless (key-based) login fails.
    Returns the multiprocessing.Process running the tunnel.
    """
    if paramiko is None:
        raise ImportError("Paramiko not available")
    if password is None:
        if not _try_passwordless_paramiko(server, keyfile):
            password = getpass("%s's password: " % (server))
    p = Process(
        target=_paramiko_tunnel,
        args=(lport, rport, server, remoteip),
        kwargs=dict(keyfile=keyfile, password=password),
    )
    # Daemonize so the tunnel process dies with the parent.
    p.daemon = True
    p.start()
    return p
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
    """Start a paramiko tunnel; meant as a multiprocessing.Process target,
    not to be called directly.
    """
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # Warn (rather than fail) on unknown host keys.
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect(
            server,
            port,
            username=username,
            key_filename=keyfile,
            look_for_keys=True,
            password=password,
        )
    except Exception as e:
        # This runs in a child process: report and exit instead of raising.
        print("*** Failed to connect to %s:%d: %r" % (server, port, e))
        sys.exit(1)
    # Don't let SIGINT kill the tunnel subprocess
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        forward_tunnel(lport, remoteip, rport, client.get_transport())
    except KeyboardInterrupt:
        print("SIGINT: Port forwarding stopped cleanly")
        sys.exit(0)
    except Exception as e:
        print("Port forwarding stopped uncleanly: %s" % e)
        sys.exit(255)
# Choose the default tunnel backend for the current platform: the paramiko
# implementation on Windows, the OpenSSH/pexpect wrapper elsewhere.
if sys.platform == "win32":
    ssh_tunnel = paramiko_tunnel
else:
    ssh_tunnel = openssh_tunnel
# Names exported by `from <module> import *`.
__all__ = [
    "tunnel_connection",
    "ssh_tunnel",
    "openssh_tunnel",
    "paramiko_tunnel",
    "try_passwordless_ssh",
]
| true | true |
f7fe4938f843c5bb0fd2a660961b2113f91fec79 | 85 | py | Python | app/apps.py | MexsonFernandes/uploadDownloadFiles-Django | 60d01fd4301a82ae6f34cf8ae1b14476809e22a5 | [
"Apache-2.0"
] | 3 | 2021-06-27T13:25:19.000Z | 2021-07-16T07:49:07.000Z | app/apps.py | MexsonFernandes/upload-download-files-django | 60d01fd4301a82ae6f34cf8ae1b14476809e22a5 | [
"Apache-2.0"
] | null | null | null | app/apps.py | MexsonFernandes/upload-download-files-django | 60d01fd4301a82ae6f34cf8ae1b14476809e22a5 | [
"Apache-2.0"
] | 2 | 2021-06-27T13:25:20.000Z | 2021-07-06T12:11:58.000Z | from django.apps import AppConfig
class MainAppConfig(AppConfig):
    """Django application configuration; `name` is the app's Python path ('app')."""
    name = 'app'
| 14.166667 | 33 | 0.741176 | from django.apps import AppConfig
class MainAppConfig(AppConfig):
    """Django application configuration; `name` is the app's Python path ('app')."""
    name = 'app'
| true | true |
f7fe4a181670a2ed4ab959c5d3f11521856cdafa | 3,138 | py | Python | examples/ch05/snippets_py/05_17.py | germanngc/PythonFundamentals | 14d22baa30d7c3c5404fc11362709669e92474b8 | [
"Apache-2.0"
] | 4 | 2019-05-04T00:33:25.000Z | 2021-05-29T20:37:59.000Z | examples/ch05/snippets_py/05_17.py | germanngc/PythonFundamentals | 14d22baa30d7c3c5404fc11362709669e92474b8 | [
"Apache-2.0"
] | null | null | null | examples/ch05/snippets_py/05_17.py | germanngc/PythonFundamentals | 14d22baa30d7c3c5404fc11362709669e92474b8 | [
"Apache-2.0"
] | 3 | 2020-05-05T13:14:28.000Z | 2022-02-03T16:18:37.000Z | # Section 5.17 snippets
# 5.17.1 Sample Graphs for 600, 60,000 and 6,000,000 Die Rolls
# 5.17.2 Visualizing Die-Roll Frequencies and Percentages
# Launching IPython for Interactive Matplotlib Development
# Importing the Libraries
import matplotlib.pyplot as plt
import numpy as np
import random
import seaborn as sns
# Rolling the Die and Calculating Die Frequencies
rolls = [random.randrange(1, 7) for i in range(600)]
values, frequencies = np.unique(rolls, return_counts=True)
# Creating the Initial Bar Plot
title = f'Rolling a Six-Sided Die {len(rolls):,} Times'
sns.set_style('whitegrid')
axes = sns.barplot(x=values, y=frequencies, palette='bright')
# Setting the Window Title and Labeling the x- and y-Axes
axes.set_title(title)
axes.set(xlabel='Die Value', ylabel='Frequency')
# Finalizing the Bar Plot
axes.set_ylim(top=max(frequencies) * 1.10)
for bar, frequency in zip(axes.patches, frequencies):
text_x = bar.get_x() + bar.get_width() / 2.0
text_y = bar.get_height()
text = f'{frequency:,}\n{frequency / len(rolls):.3%}'
axes.text(text_x, text_y, text,
fontsize=11, ha='center', va='bottom')
# Rolling Again and Updating the Bar Plot—Introducing IPython Magics
plt.cla()
%recall 5
rolls = [random.randrange(1, 7) for i in range(600)]
rolls = [random.randrange(1, 7) for i in range(60000)]
%recall 6-13
values, frequencies = np.unique(rolls, return_counts=True)
title = f'Rolling a Six-Sided Die {len(rolls):,} Times'
sns.set_style('whitegrid')
axes = sns.barplot(x=values, y=frequencies, palette='bright')
axes.set_title(title)
axes.set(xlabel='Die Value', ylabel='Frequency')
axes.set_ylim(top=max(frequencies) * 1.10)
for bar, frequency in zip(axes.patches, frequencies):
text_x = bar.get_x() + bar.get_width() / 2.0
text_y = bar.get_height()
text = f'{frequency:,}\n{frequency / len(rolls):.3%}'
axes.text(text_x, text_y, text,
fontsize=11, ha='center', va='bottom')
# Saving Snippets to a File with the %save Magic
%save RollDie.py 1-13
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
| 33.382979 | 74 | 0.635437 |
# NOTE: IPython session transcript -- the %recall/%save lines are IPython
# magics and will not run under plain Python.
import matplotlib.pyplot as plt
import numpy as np
import random
import seaborn as sns
# Roll a six-sided die 600 times and tally each face's frequency.
rolls = [random.randrange(1, 7) for i in range(600)]
values, frequencies = np.unique(rolls, return_counts=True)
# Build the bar plot of frequencies per die value.
title = f'Rolling a Six-Sided Die {len(rolls):,} Times'
sns.set_style('whitegrid')
axes = sns.barplot(x=values, y=frequencies, palette='bright')
axes.set_title(title)
axes.set(xlabel='Die Value', ylabel='Frequency')
# 10% headroom so the frequency/percentage labels fit above the bars.
axes.set_ylim(top=max(frequencies) * 1.10)
for bar, frequency in zip(axes.patches, frequencies):
    text_x = bar.get_x() + bar.get_width() / 2.0
    text_y = bar.get_height()
    text = f'{frequency:,}\n{frequency / len(rolls):.3%}'
    axes.text(text_x, text_y, text,
              fontsize=11, ha='center', va='bottom')
# Clear the axes, re-roll (600 then 60,000 rolls), and redraw via %recall.
plt.cla()
%recall 5
rolls = [random.randrange(1, 7) for i in range(600)]
rolls = [random.randrange(1, 7) for i in range(60000)]
%recall 6-13
values, frequencies = np.unique(rolls, return_counts=True)
title = f'Rolling a Six-Sided Die {len(rolls):,} Times'
sns.set_style('whitegrid')
axes = sns.barplot(x=values, y=frequencies, palette='bright')
axes.set_title(title)
axes.set(xlabel='Die Value', ylabel='Frequency')
axes.set_ylim(top=max(frequencies) * 1.10)
for bar, frequency in zip(axes.patches, frequencies):
    text_x = bar.get_x() + bar.get_width() / 2.0
    text_y = bar.get_height()
    text = f'{frequency:,}\n{frequency / len(rolls):.3%}'
    axes.text(text_x, text_y, text,
              fontsize=11, ha='center', va='bottom')
# Save session snippets 1-13 to RollDie.py.
%save RollDie.py 1-13
| false | true |
f7fe4a2403b84b78ebf1be11ce616fd1a1dea40d | 8,365 | py | Python | CommandRunner.py | lukud/raccoon- | 52bcc8667ec754a6b25908f2a6426aa913eed13f | [
"MIT"
] | null | null | null | CommandRunner.py | lukud/raccoon- | 52bcc8667ec754a6b25908f2a6426aa913eed13f | [
"MIT"
] | 1 | 2015-09-10T15:37:07.000Z | 2015-09-10T15:37:07.000Z | CommandRunner.py | lukud/raccoon- | 52bcc8667ec754a6b25908f2a6426aa913eed13f | [
"MIT"
] | null | null | null | """
This module was taken from the PBSUITE (v 14.9.9) available at http://sourceforge.net/projects/pb-jelly/
It has been published with the follwing licencsing:
##################################################
Copyright (c) '2013 Baylor College of Medicine
Contributors: Adam English (english@bcm.edu)
Affiliation: Human Genome Sequencing Center
URL: http://www.hgsc.bcm.tmc.edu/
https://sourceforge.net/projects/pb-jelly/
http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0047768
http://www.biomedcentral.com/1471-2105/15/180
Citation: English, Adam C., Stephen Richards, Yi Han, Min Wang,
Vanesa Vee, Jiaxin Qu, Xiang Qin, et al. "Mind the
Gap: Upgrading Genomes with Pacific Biosciences RS
Long-Read Sequencing Technology." PLoS ONE 7, no. 11
(November 21, 2012): e47768.
doi:10.1371/journal.pone.0047768.
Citation: English, Adam C., William J. Salerno, Jeffery G.
Reid. "PBHoney: identyfying genomic variants via
long-read discordance and interrupted mapping."
BMC Bioinformatics 2014, 15:180 (June 10, 2014).
doi:10.1186/1471-2105-15-180
'
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
##################################################
"""
from string import Template
import tempfile
import subprocess, signal, logging, os, stat, sys
class Alarm(Exception):
    """Raised by alarm_handler when a command exceeds its SIGALRM timeout."""
def alarm_handler(signum, frame):
    """SIGALRM handler: convert the signal into an Alarm exception."""
    raise Alarm
def exe(cmd, timeout=-1):
    """
    Executes a command through the shell.
    timeout in minutes! so 1440 mean is 24 hours.
    -1 means never

    Returns (retCode, stdoutVal, stderrVal).  stderr is merged into stdout,
    so stderrVal is always None.  On timeout the child is killed and
    (214, None, None) is returned.
    """
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, close_fds=True)
    # Only install the SIGALRM handler when a timeout was actually requested,
    # so callers without a timeout keep whatever handler was already set.
    if timeout > 0:
        signal.signal(signal.SIGALRM, alarm_handler)
        signal.alarm(int(timeout * 60))
    try:
        stdoutVal, stderrVal = proc.communicate()
        logging.debug("Executing {}".format(cmd))
        logging.debug("STDERR:\n{}".format(stderrVal))
        logging.debug("STDOUT:\n{}".format(stdoutVal))
    except Alarm:
        logging.error(("Command was taking too long. "
                       "Automatic Timeout Initiated after %d minutes")
                      % (timeout))
        proc.kill()
        proc.wait()  # reap the killed child so it doesn't linger as a zombie
        return 214, None, None
    finally:
        if timeout > 0:
            signal.alarm(0)  # always clear any pending alarm
    retCode = proc.returncode
    return retCode, stdoutVal, stderrVal
class Command():
    """A single shell command plus the metadata needed to template it.

    The attributes map onto the ${CMD}/${JOBNAME}/${STDOUT}/${STDERR}
    placeholders used by CommandRunner templates.
    """

    def __init__(self, cmd, jobname, stdout, stderr):
        self.cmd = cmd          # shell command line to execute
        self.jobname = jobname  # identifier used for cluster job names
        self.stdout = stdout    # path that will receive stdout
        self.stderr = stderr    # path that will receive stderr

    def asDict(self):
        """Return the template-substitution mapping for this command."""
        return dict(CMD=self.cmd, JOBNAME=self.jobname,
                    STDOUT=self.stdout, STDERR=self.stderr)
class CommandRunner():
    """
    Uses a command template to run stuff. This is helpful for cluster commands
    and chunking several commands together
    """
    def __init__(self, template=None, njobs=0):
        """
        template: a string (or string.Template source) that wraps each
            command, e.g. a cluster submission line.
            Default is to not submit to your cluster:
            ${CMD} > ${STDOUT} 2> ${STDERR}
        njobs: (0)
            for clumping commands together and submitting them in a script
        """
        if template is None:
            template = "${CMD} > ${STDOUT} 2> ${STDERR}"
            self.runType = "Running"
        else:
            self.runType = "Submitting"
        self.template = Template(template)
        self.njobs = njobs

    def __call__(self, cmds, wDir=None, id=None):
        """
        Executes Commands - can either be a list or a single Command.
        wDir is the working directory where chunk scripts will be written.
        if id is None a random identifier will be applied when chunking.
        """
        if wDir is None:
            wDir = "./"
        # Single command: run it directly.
        if type(cmds) != list:
            cmd = self.buildCommand(cmds)
            return exe(cmd)
        # No chunking requested: run each command one after another.
        if self.njobs == 0:
            outRet = []
            for c in cmds:
                outRet.append(exe(self.buildCommand(c)))
            return outRet
        if id is None:
            id = tempfile.mkstemp(dir=wDir)[1]
        outputRet = []
        # Clump the commands into njobs shell scripts and submit each one.
        for chunk, commands in enumerate(partition(cmds, self.njobs)):
            outScript = open(os.path.join(wDir, "%s_chunk%d.sh" % (id, chunk)), 'w')
            outScript.write("#!/bin/bash\n\n")
            for c in commands:
                outScript.write(c.cmd + "\n")
            outScript.close()
            # Make the chunk script executable for its owner.
            existing_permissions = stat.S_IMODE(os.stat(outScript.name).st_mode)
            if not os.access(outScript.name, os.X_OK):
                new_permissions = existing_permissions | stat.S_IXUSR
                os.chmod(outScript.name, new_permissions)
            submit = Command(outScript.name,
                             id + "_chunk%d" % chunk,
                             os.path.join(wDir, id + ("_chunk%d.out" % chunk)),
                             os.path.join(wDir, id + ("_chunk%d.err" % chunk)))
            cmd = self.buildCommand(submit)
            outputRet.append(exe(cmd))
        return outputRet

    def checkTemplate(self):
        """
        Checks that the template only uses the supported placeholders
        (CMD/STDOUT/STDERR/JOBNAME); logs and exits otherwise.
        """
        # Bug fix: `temp` was previously referenced without ever being
        # defined, so this method always raised NameError when called.
        temp = {"CMD": "test",
                "STDOUT": "testo",
                "STDERR": "teste",
                "JOBNAME": "testn"}
        try:
            self.template.substitute(temp)
        except KeyError:
            logging.error("Your submission template is invalid ")
            sys.exit(1)

    def buildCommand(self, cmdSetup):
        """
        substitutes the template with a Command's asDict() mapping
        """
        return self.template.substitute(cmdSetup.asDict())
def partition(n, m):
    """
    Helper function. Round-robin splits iterable *n* into at most *m*
    non-empty partitions and returns them as a list of lists.
    """
    # Guard against m <= 0 (previously raised IndexError on non-empty input).
    if m <= 0:
        return []
    buckets = [[] for _ in range(m)]
    for index, item in enumerate(n):
        buckets[index % m].append(item)
    # Return a concrete list; the previous version returned a lazy `filter`
    # object that could only be consumed once, contradicting its docstring.
    return [bucket for bucket in buckets if bucket]
| 40.023923 | 105 | 0.564854 |
from string import Template
import tempfile
import subprocess, signal, logging, os, stat, sys
class Alarm(Exception):
    """Raised by alarm_handler when a command exceeds its SIGALRM timeout."""
    pass
def alarm_handler(signum, frame):
    """SIGALRM handler: convert the signal into an Alarm exception."""
    raise Alarm
def exe(cmd, timeout=-1):
    """Execute *cmd* through the shell.

    `timeout` is in minutes (1440 = 24 hours); -1 means wait forever.
    Returns (retCode, stdoutVal, stderrVal); stderr is merged into stdout,
    so stderrVal is always None.  On timeout returns (214, None, None).
    """
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, \
        stderr=subprocess.STDOUT, close_fds=True)
    # NOTE(review): the SIGALRM handler is installed even when no timeout is
    # requested, replacing any handler the caller had set.
    signal.signal(signal.SIGALRM, alarm_handler)
    if timeout > 0:
        signal.alarm(int(timeout*60))
    try:
        stdoutVal, stderrVal = proc.communicate()
        print(cmd)  # TODO(review): looks like leftover debug output (duplicates logging below)
        logging.debug("Executing {}".format(cmd))
        logging.debug("STDERR:\n{}".format(stderrVal))
        logging.debug("STDOUT:\n{}".format(stdoutVal))
        signal.alarm(0)  # clear the pending alarm on success
    except Alarm:
        logging.error(("Command was taking too long. "
                       "Automatic Timeout Initiated after %d minutes") \
                      % (timeout))
        proc.kill()  # NOTE(review): no proc.wait() here; the child may linger as a zombie
        return 214,None,None
    retCode = proc.returncode
    return retCode,stdoutVal,stderrVal
class Command():
    """A shell command plus the metadata (${CMD}/${JOBNAME}/${STDOUT}/${STDERR})
    needed by CommandRunner templates."""
    def __init__(self, cmd, jobname, stdout, stderr):
        self.cmd = cmd          # shell command line to execute
        self.jobname = jobname  # identifier used for cluster job names
        self.stdout = stdout    # path that will receive stdout
        self.stderr = stderr    # path that will receive stderr
    def asDict(self):
        """Return the template-substitution mapping for this command."""
        return {"CMD":self.cmd, "JOBNAME":self.jobname, \
                "STDOUT":self.stdout, "STDERR":self.stderr}
class CommandRunner():
    """Wraps commands in a (cluster-submission) template and optionally clumps
    them into njobs chunk scripts before executing them via exe()."""
    def __init__(self, template=None, njobs=0):
        """template defaults to running locally: "${CMD} > ${STDOUT} 2> ${STDERR}";
        njobs=0 runs each command directly instead of chunking."""
        if template is None:
            template = "${CMD} > ${STDOUT} 2> ${STDERR}"
            self.runType = "Running"
        else:
            self.runType = "Submitting"
        self.template = Template(template)
        self.njobs = njobs
    def __call__(self, cmds, wDir = None, id = None):
        """Execute a single Command or a list of Commands; chunk scripts are
        written into wDir, named with `id` (random tempfile name if None)."""
        if wDir is None:
            wDir = "./"
        # Single command: run it directly.
        if type(cmds) != list:
            cmd = self.buildCommand(cmds)
            return exe(cmd)
        # No chunking requested: run each command one after another.
        if self.njobs == 0:
            outRet = []
            for c in cmds:
                outRet.append(exe(self.buildCommand(c)))
            return outRet
        if id is None:
            id = tempfile.mkstemp(dir=wDir)[1]
        outputRet =[]
        # Clump the commands into njobs shell scripts and submit each one.
        for chunk, commands in enumerate( partition(cmds, self.njobs) ):
            outScript = open(os.path.join(wDir, "%s_chunk%d.sh" % (id, chunk)),'w')
            outScript.write("#!/bin/bash\n\n")
            for c in commands:
                outScript.write(c.cmd+"\n")
            outScript.close()
            # Make the chunk script executable for its owner.
            existing_permissions = stat.S_IMODE(os.stat(outScript.name).st_mode)
            if not os.access(outScript.name, os.X_OK):
                new_permissions = existing_permissions | stat.S_IXUSR
                os.chmod(outScript.name, new_permissions)
            submit = Command(outScript.name, \
                    id + "_chunk%d" % chunk, \
                    os.path.join(wDir, id + ("_chunk%d.out" % chunk)), \
                    os.path.join(wDir, id + ("_chunk%d.err" % chunk)))
            cmd = self.buildCommand(submit)
            outputRet.append(exe(cmd))
        return outputRet
    def checkTemplate(self):
        """Check the template only uses supported placeholders; exits if not."""
        # FIXME: `temp` is never defined before this call, so this method
        # always raises NameError when invoked.
        temp.update({"CMD":"test", \
                "STDOUT":"testo", \
                "STDERR":"teste", \
                "JOBNAME":"testn"})
        try:
            w = self.template.substitute(temp)
        except KeyError:
            logging.error("Your submission template is invalid ")
            sys.exit(1)
    def buildCommand(self, cmdSetup):
        """Substitute the template with a Command's asDict() mapping."""
        return self.template.substitute(cmdSetup.asDict())
def partition(n, m):
    """Round-robin split iterable *n* into *m* buckets; empty buckets are
    filtered out of the (lazy) result."""
    buckets = [[] for _ in range(m)]
    slot = 0
    for item in n:
        buckets[slot].append(item)
        slot = slot + 1 if slot < m - 1 else 0
    return filter(lambda bucket: len(bucket) > 0, buckets)
| true | true |
f7fe4adf952708994eea70778baf0af38b493f75 | 8,214 | py | Python | knn.py | bergr7/KNN_from_scratch | 8c1e40f89b078a46b27c347d2faa3d3491a47958 | [
"MIT",
"Unlicense"
] | null | null | null | knn.py | bergr7/KNN_from_scratch | 8c1e40f89b078a46b27c347d2faa3d3491a47958 | [
"MIT",
"Unlicense"
] | null | null | null | knn.py | bergr7/KNN_from_scratch | 8c1e40f89b078a46b27c347d2faa3d3491a47958 | [
"MIT",
"Unlicense"
] | null | null | null | import numpy as np
from sklearn.metrics import confusion_matrix
class Knn:
    """Classifier implementing the k-nearest neighbors vote.

    Parameters
    ----------
    n_neighbors : int
        Number of neighbors to use.
    metric : {'manhattan', 'euclidean', 'minkowski'}, default='minkowski'
        Distance metric used to rank neighbors; 'minkowski' with p=2 is
        equivalent to the standard euclidean metric.
    p : int, default=2
        Power parameter for the Minkowski metric (p=1 -> manhattan,
        p=2 -> euclidean).
    weights : {'uniform', 'distance'}, default='uniform'
        'uniform' weights every neighbor equally; 'distance' weights each
        neighbor by the inverse of its distance, so closer neighbors have
        greater influence.
    """

    def __init__(self, n_neighbors, metric='minkowski', p=2, weights='uniform'):
        if p < 0:
            raise ValueError("p should be larger than 0.")
        if metric not in ['minkowski', 'manhattan', 'euclidean']:
            raise ValueError("Distance method not supported. Must be {'manhattan', 'euclidean', 'minkowski'}")
        if weights not in ['uniform', 'distance']:
            raise ValueError(
                "Weights can be only assigned uniformly or based on distance. Must be {'uniform', 'distance'}")
        self.n_neighbors = n_neighbors
        self.metric = metric
        self.p = p
        self.weights = weights

    def fit(self, X, y):
        """Fit the model using X as training data and y as target values.

        :param X: array-like of shape [n_samples, n_features], training data.
        :param y: array-like of shape [n_samples], target values.
        :return: the (X, y) pair that was stored.
        :raises ValueError: if X and y have a different number of rows.
        :raises TypeError: if the data contains missing values (NaN).
        """
        if X.shape[0] != y.shape[0]:
            raise ValueError("Dimensional mismatch: Number of rows in X must be equal to the number of rows in y")
        # Validate before storing so a failed fit leaves the model untouched.
        if np.isnan(X).any() or np.isnan(y).any():
            raise TypeError("There are missing values in the dataset. Consider removing samples with missing values"
                            "or imputation methods.")
        self.X = X
        self.y = y
        return X, y

    def _manhattan_distance(self, point):
        """Manhattan (l1) distances from *point* to every training sample."""
        return np.sum(abs(self.X - point), axis=1)

    def _euclidean_distance(self, point):
        """Euclidean (l2) distances from *point* to every training sample."""
        return np.sqrt(np.sum((self.X - point) ** 2, axis=1))

    def _minkowski_distance(self, point):
        """Minkowski (order p) distances from *point* to every training sample."""
        return np.sum(abs(self.X - point) ** self.p, axis=1) ** (1 / self.p)

    def _uniform_weights(self, distances):
        """Weight-distance pairs assigning equal weight (1) to every sample."""
        return np.array([(1, d) for _, d in enumerate(distances)])

    def _distance_weights(self, distances):
        """Weight-distance pairs weighting each sample by 1/distance (1 at d=0)."""
        return np.array([(1 / d, d) if d > 0 else (1, d) for _, d in enumerate(distances)])

    def _predict_point(self, point):
        """Predict the class label of a single data point via a (weighted) vote.

        :param point: array-like of shape [n_features].
        :return: the winning class label from the training targets.
        """
        if self.metric == 'manhattan':
            distances = self._manhattan_distance(point)
        elif self.metric == 'euclidean':
            distances = self._euclidean_distance(point)
        elif self.metric == 'minkowski':
            distances = self._minkowski_distance(point)
        else:
            # Bug fix: the error was previously constructed but never raised,
            # which left `distances` undefined on this (unreachable) path.
            raise AttributeError("Distance method not supported. Must be {'manhattan', 'euclidean', 'minkowski'}")
        # Indices of the n_neighbors nearest training samples.
        nearest_idxs = distances.argsort()[:self.n_neighbors]
        # Vote: accumulate per-class counts (uniform) or inverse-distance weights.
        class_count = {}
        if self.weights == 'uniform':
            for idx in nearest_idxs:
                label = self.y[idx]
                class_count[label] = class_count.get(label, 0) + 1
        else:
            weights = self._distance_weights(distances)
            for idx in nearest_idxs:
                label = self.y[idx]
                class_count[label] = class_count.get(label, 0) + weights[idx][0]
        # Descending sort by (count, label); ties break toward the larger label.
        sorted_class_count = sorted(class_count.items(),
                                    key=lambda item: (item[1], item[0]),
                                    reverse=True)
        return sorted_class_count[0][0]

    def predict(self, x):
        """Predict a class label for each data point in *x*.

        :param x: array-like of shape [n_points, n_features].
        :return: list of predicted class labels, one per point.
        """
        return [self._predict_point(point) for point in x]

    def display_results(self, y_test, y_pred):
        """Print the labels, confusion matrix and accuracy for predictions."""
        labels = np.unique(y_pred)
        confusion_mat = confusion_matrix(y_test, y_pred, labels=labels)
        accuracy = (y_pred == y_test).mean()
        print("Labels:", labels)
        print("Confusion Matrix:\n", confusion_mat)
        print("Accuracy:", accuracy)

    def __repr__(self):
        # Bug fix: n_neighbors and p are ints; the previous str concatenation
        # of "str + int" raised TypeError.
        return f"<n_neighbors:{self.n_neighbors}, metric:{self.metric}, p:{self.p}, weights:{self.weights}>"

    def __str__(self):
        return f"Knn(n_neighbors={self.n_neighbors}, metric={self.metric}, p={self.p}, weights={self.weights})"
| 40.264706 | 124 | 0.6271 | import numpy as np
from sklearn.metrics import confusion_matrix
class Knn:
def __init__(self, n_neighbors, metric='minkowski', p=2, weights='uniform'):
if p < 0:
raise ValueError("p should be larger than 0.")
if metric not in ['minkowski', 'manhattan', 'euclidean']:
raise ValueError("Distance method not supported. Must be {'manhattan', 'euclidean', 'minkowski'}")
if weights not in ['uniform', 'distance']:
raise ValueError(
"Weights can be only assigned uniformly or based on distance. Must be {'uniform', 'distance'}")
self.n_neighbors = n_neighbors
self.metric = metric
self.p = p
self.weights = weights
def fit(self, X, y):
if X.shape[0] == y.shape[0]:
self.X = X
self.y = y
else:
raise ValueError("Dimensional mismatch: Number of rows in X must be equal to the number of rows in y")
if np.isnan(X).any() or np.isnan(y).any():
raise TypeError("There are missing values in the dataset. Consider removing samples with missing values"
"or imputation methods.")
return X, y
def _manhattan_distance(self, point):
return np.sum(abs(self.X - point), axis=1)
def _euclidean_distance(self, point):
return np.sqrt(np.sum((self.X - point) ** 2, axis=1))
def _minkowski_distance(self, point):
return np.sum(abs(self.X - point) ** self.p, axis=1) ** (1 / self.p)
def _uniform_weights(self, distances):
return np.array([(1, d) for _, d in enumerate(distances)])
def _distance_weights(self, distances):
return np.array([(1 / d, d) if d > 0 else (1, d) for _, d in enumerate(distances)])
def _predict_point(self, point):
if self.metric == 'manhattan':
distances = self._manhattan_distance(point)
elif self.metric == 'euclidean':
distances = self._euclidean_distance(point)
elif self.metric == 'minkowski':
distances = self._minkowski_distance(point)
else:
AttributeError("Distance method not supported. Must be {'manhattan', 'euclidean', 'minkowski'}")
if self.weights == 'uniform':
weights = self._uniform_weights(distances)
else:
weights = self._distance_weights(distances)
sorted_distances_idxs = distances.argsort()[:self.n_neighbors]
class_count = {}
if self.weights == 'uniform':
for idx in sorted_distances_idxs:
vote_label = self.y[idx]
class_count[vote_label] = class_count.get(vote_label, 0) + 1
else:
for idx in sorted_distances_idxs:
vote_label = self.y[idx]
class_count[vote_label] = class_count.get(vote_label, 0) + weights[idx][0]
sorted_class_count = sorted(class_count.items(),
key=lambda item: (item[1], item[0]),
reverse=True)
return sorted_class_count[0][0]
def predict(self, x):
return [self._predict_point(point) for point in x]
def display_results(self, y_test, y_pred):
labels = np.unique(y_pred)
confusion_mat = confusion_matrix(y_test, y_pred, labels=labels)
accuracy = (y_pred == y_test).mean()
print("Labels:", labels)
print("Confusion Matrix:\n", confusion_mat)
print("Accuracy:", accuracy)
def __repr__(self):
return "<n_neighbors:"+self.n_neighbors+", metric:" +self.metric+", p:"+str(self.p)+", weights:"+self.weights+">"
def __str__(self):
return "Knn(n_neighbors="+self.n_neighbors+", metric=" +self.metric+", p="+str(self.p)+", weights="+self.weights+")"
| true | true |
f7fe4aec16aa13407640895438d55a42bbe968af | 4,816 | py | Python | src/programy/storage/stores/nosql/mongo/store/lookups.py | cdoebler1/AIML2 | ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a | [
"MIT"
] | 345 | 2016-11-23T22:37:04.000Z | 2022-03-30T20:44:44.000Z | src/programy/storage/stores/nosql/mongo/store/lookups.py | MikeyBeez/program-y | 00d7a0c7d50062f18f0ab6f4a041068e119ef7f0 | [
"MIT"
] | 275 | 2016-12-07T10:30:28.000Z | 2022-02-08T21:28:33.000Z | src/programy/storage/stores/nosql/mongo/store/lookups.py | VProgramMist/modified-program-y | f32efcafafd773683b3fe30054d5485fe9002b7d | [
"MIT"
] | 159 | 2016-11-28T18:59:30.000Z | 2022-03-20T18:02:44.000Z | """
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.storage.stores.nosql.mongo.store.mongostore import MongoStore
from programy.storage.entities.lookups import LookupsStore
from programy.storage.stores.nosql.mongo.dao.lookup import Lookup
from programy.mappings.base import DoubleStringPatternSplitCollection
class MongoLookupStore(MongoStore, LookupsStore):
    """Mongo-backed base class for simple key/value lookup collections.
    Concrete subclasses only need to provide ``collection_name``; all CRUD
    and loading behaviour is shared here.
    """
    def __init__(self, storage_engine):
        MongoStore.__init__(self, storage_engine)
        LookupsStore.__init__(self)
    def collection_name(self):
        # Subclasses must name the Mongo collection they persist to.
        raise NotImplementedError()  # pragma: no cover
    def add_to_lookup(self, key, value, overwrite_existing=False):
        """Store ``value`` under ``key``.
        Returns True when a document was inserted or replaced; False when
        the key already exists and ``overwrite_existing`` is False, or when
        the replace modified nothing.
        """
        collection = self.collection()
        lookup = collection.find_one({'key': key})
        if lookup is not None:
            if overwrite_existing is True:
                YLogger.info(self, "Updating lookup in Mongo [%s] [%s]", key, value)
                lookup['value'] = value
                result = collection.replace_one({'key': key}, lookup)
                return bool(result.modified_count > 0)
            else:
                YLogger.error(self, "Existing value in Mongo lookup [%s] = [%s]", key, value)
        else:
            YLogger.debug(self, "Adding lookup to Mongo [%s] = [%s]", key, value)
            lookup = Lookup(key, value)
            return self.add_document(lookup)
        return False
    def remove_lookup(self):
        """Delete every document in this lookup collection."""
        YLogger.debug(self, "Removing lookup from Mongo [%s]", self.collection_name())
        collection = self.collection()
        collection.delete_many({})
    def remove_lookup_key(self, key):
        """Delete the single document stored under ``key``."""
        YLogger.debug(self, "Removing lookup key [%s] from [%s] in Mongo", key, self.collection_name())
        collection = self.collection()
        collection.delete_one({'key': key})
    def get_lookup(self):
        """Return the whole collection as a plain ``{key: value}`` dict."""
        collection = self.collection()
        lookups = collection.find()
        data = {}
        for lookup in lookups:
            data[lookup['key']] = lookup['value']
        return data
    def load_all(self, collector):
        """Load every stored lookup entry into ``collector``."""
        self.load(collector)
    def load(self, collector, name=None):
        """Feed every stored key/value pair through the pattern splitter
        and into ``collector``. Always returns True.
        """
        YLogger.debug(self, "Loading lookup from Mongo [%s]", self.collection_name())
        collection = self.collection()
        lookups = collection.find()
        for lookup in lookups:
            key, value = DoubleStringPatternSplitCollection.process_key_value(lookup['key'], lookup['value'])
            collector.add_to_lookup(key, value)
        return True
    def process_line(self, name, fields, verbose=False):
        """Store one parsed key/value line; returns True when stored.
        Bug fix: lines with fewer than two fields are now ignored —
        previously a single-field line raised IndexError on ``fields[1]``.
        """
        if fields and len(fields) > 1:
            key = fields[0].upper()
            value = fields[1]
            return self.add_to_lookup(key, value)
        return False
class MongoDenormalStore(MongoLookupStore):
    """Lookup store persisted in the Mongo "denormals" collection."""
    def __init__(self, storage_engine):
        super().__init__(storage_engine)
    def collection_name(self):
        return "denormals"
class MongoNormalStore(MongoLookupStore):
    """Lookup store persisted in the Mongo "normals" collection."""
    def __init__(self, storage_engine):
        super().__init__(storage_engine)
    def collection_name(self):
        return "normals"
class MongoGenderStore(MongoLookupStore):
    """Lookup store persisted in the Mongo "genders" collection."""
    def __init__(self, storage_engine):
        super().__init__(storage_engine)
    def collection_name(self):
        return "genders"
class MongoPersonStore(MongoLookupStore):
    """Lookup store persisted in the Mongo "persons" collection."""
    def __init__(self, storage_engine):
        super().__init__(storage_engine)
    def collection_name(self):
        return "persons"
class MongoPerson2Store(MongoLookupStore):
    """Lookup store persisted in the Mongo "person2s" collection."""
    def __init__(self, storage_engine):
        super().__init__(storage_engine)
    def collection_name(self):
        return "person2s"
| 35.411765 | 120 | 0.69186 | from programy.utils.logging.ylogger import YLogger
from programy.storage.stores.nosql.mongo.store.mongostore import MongoStore
from programy.storage.entities.lookups import LookupsStore
from programy.storage.stores.nosql.mongo.dao.lookup import Lookup
from programy.mappings.base import DoubleStringPatternSplitCollection
class MongoLookupStore(MongoStore, LookupsStore):
def __init__(self, storage_engine):
MongoStore.__init__(self, storage_engine)
LookupsStore.__init__(self)
def collection_name(self):
raise NotImplementedError()
def add_to_lookup(self, key, value, overwrite_existing=False):
collection = self.collection()
lookup = collection.find_one({'key': key})
if lookup is not None:
if overwrite_existing is True:
YLogger.info(self, "Updating lookup in Mongo [%s] [%s]", key, value)
lookup['value'] = value
result = collection.replace_one({'key': key}, lookup)
return bool(result.modified_count > 0)
else:
YLogger.error(self, "Existing value in Mongo lookup [%s] = [%s]", key, value)
else:
YLogger.debug(self, "Adding lookup to Mongo [%s] = [%s]", key, value)
lookup = Lookup(key, value)
return self.add_document(lookup)
return False
def remove_lookup(self):
YLogger.debug(self, "Removing lookup from Mongo [%s]", self.collection_name())
collection = self.collection()
collection.delete_many({})
def remove_lookup_key(self, key):
YLogger.debug(self, "Removing lookup key [%s] from [%s] in Mongo", key, self.collection_name())
collection = self.collection()
collection.delete_one({'key': key})
def get_lookup(self):
collection = self.collection()
lookups = collection.find()
data = {}
for lookup in lookups:
data[lookup['key']] = lookup['value']
return data
def load_all(self, collector):
self.load(collector)
def load(self, collector, name=None):
YLogger.debug(self, "Loading lookup from Mongo [%s]", self.collection_name())
collection = self.collection()
lookups = collection.find()
for lookup in lookups:
key, value = DoubleStringPatternSplitCollection.process_key_value(lookup['key'], lookup['value'])
collector.add_to_lookup(key, value)
return True
def process_line(self, name, fields, verbose=False):
if fields:
key = fields[0].upper()
value = fields[1]
return self.add_to_lookup(key, value)
return False
class MongoDenormalStore(MongoLookupStore):
def __init__(self, storage_engine):
MongoLookupStore.__init__(self, storage_engine)
def collection_name(self):
return "denormals"
class MongoNormalStore(MongoLookupStore):
def __init__(self, storage_engine):
MongoLookupStore.__init__(self, storage_engine)
def collection_name(self):
return "normals"
class MongoGenderStore(MongoLookupStore):
def __init__(self, storage_engine):
MongoLookupStore.__init__(self, storage_engine)
def collection_name(self):
return "genders"
class MongoPersonStore(MongoLookupStore):
def __init__(self, storage_engine):
MongoLookupStore.__init__(self, storage_engine)
def collection_name(self):
return "persons"
class MongoPerson2Store(MongoLookupStore):
def __init__(self, storage_engine):
MongoLookupStore.__init__(self, storage_engine)
def collection_name(self):
return "person2s"
| true | true |
f7fe4b1b43e9516b0f744f723b691f4863a20ce6 | 6,365 | py | Python | signjoey/initialization.py | ChenYutongTHU/slt | 2fb617feccccedb008446d34dcf5b3527b004ce6 | [
"Apache-2.0"
] | 129 | 2020-04-01T20:12:22.000Z | 2022-03-28T15:24:18.000Z | signjoey/initialization.py | ChenYutongTHU/slt | 2fb617feccccedb008446d34dcf5b3527b004ce6 | [
"Apache-2.0"
] | 10 | 2020-11-13T19:01:29.000Z | 2022-03-12T00:45:45.000Z | signjoey/initialization.py | ChenYutongTHU/slt | 2fb617feccccedb008446d34dcf5b3527b004ce6 | [
"Apache-2.0"
] | 66 | 2020-06-18T10:38:17.000Z | 2022-03-28T21:41:03.000Z | # coding: utf-8
"""
Implements custom initialization
"""
import math
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.init import _calculate_fan_in_and_fan_out
def orthogonal_rnn_init_(cell: nn.RNNBase, gain: float = 1.0):
    """
    Orthogonal initialization of recurrent (hidden-to-hidden) weights.
    RNN parameters pack several gate matrices into one tensor, so each
    ``hidden_size``-row slice is orthogonalized independently.
    """
    size = cell.hidden_size
    with torch.no_grad():
        for layer_weights in cell.all_weights:
            hh = layer_weights[1]  # weight_hh for this layer
            row = 0
            while row < hh.size(0):
                nn.init.orthogonal_(hh.data[row : row + size], gain=gain)
                row += size
def lstm_forget_gate_init_(cell: nn.RNNBase, value: float = 1.0) -> None:
    """
    Initialize LSTM forget gates with `value`.
    The bias vectors pack the [input, forget, cell, output] gates, so the
    second quarter of each bias is the forget gate.
    :param cell: LSTM cell
    :param value: initial value, default: 1
    """
    with torch.no_grad():
        for layer_weights in cell.all_weights:
            ih_bias, hh_bias = layer_weights[2], layer_weights[3]
            n = len(ih_bias)
            ih_bias.data[n // 4 : n // 2].fill_(value)
            hh_bias.data[n // 4 : n // 2].fill_(value)
def xavier_uniform_n_(w: Tensor, gain: float = 1.0, n: int = 4) -> None:
    """
    Xavier initializer for parameters that pack `n` matrices into a single
    tensor for efficiency (e.g. the gate matrices of GRU/LSTM layers).
    The effective fan-out is the per-matrix fan-out, i.e. the tensor's
    fan-out divided by `n`.
    :param w: parameter
    :param gain: default 1
    :param n: default 4
    """
    with torch.no_grad():
        fan_in, fan_out = _calculate_fan_in_and_fan_out(w)
        assert fan_out % n == 0, "fan_out should be divisible by n"
        per_matrix_fan_out = fan_out // n
        bound = gain * math.sqrt(3.0) * math.sqrt(2.0 / (fan_in + per_matrix_fan_out))
        nn.init.uniform_(w, -bound, bound)
# pylint: disable=too-many-branches
def initialize_model(model: nn.Module, cfg: dict, txt_padding_idx: int) -> None:
    """
    This initializes a model based on the provided config.
    All initializer configuration is part of the `model` section of the
    configuration file.
    For an example, see e.g. `https://github.com/joeynmt/joeynmt/
    blob/master/configs/iwslt_envi_xnmt.yaml#L47`
    The main initializer is set using the `initializer` key.
    Possible values are `xavier`, `uniform`, `normal` or `zeros`.
    (`xavier` is the default).
    When an initializer is set to `uniform`, then `init_weight` sets the
    range for the values (-init_weight, init_weight).
    When an initializer is set to `normal`, then `init_weight` sets the
    standard deviation for the weights (with mean 0).
    The word embedding initializer is set using `embed_initializer` and takes
    the same values. The default is `normal` with `embed_init_weight = 0.01`.
    Biases are initialized separately using `bias_initializer`.
    The default is `zeros`, but you can use the same initializers as
    the main initializer.
    Set `init_rnn_orthogonal` to True if you want RNN orthogonal initialization
    (for recurrent matrices). Default is False.
    `lstm_forget_gate` controls how the LSTM forget gate is initialized.
    Default is `1`.
    :param model: model to initialize
    :param cfg: the model configuration
    :param txt_padding_idx: index of spoken language text padding token
    """
    # defaults: xavier, embeddings: normal 0.01, biases: zeros, no orthogonal
    gain = float(cfg.get("init_gain", 1.0))  # for xavier
    init = cfg.get("initializer", "xavier")
    init_weight = float(cfg.get("init_weight", 0.01))
    embed_init = cfg.get("embed_initializer", "normal")
    embed_init_weight = float(cfg.get("embed_init_weight", 0.01))
    embed_gain = float(cfg.get("embed_init_gain", 1.0))  # for xavier
    bias_init = cfg.get("bias_initializer", "zeros")
    bias_init_weight = float(cfg.get("bias_init_weight", 0.01))
    # pylint: disable=unnecessary-lambda, no-else-return
    def _parse_init(s, scale, _gain):
        # Map an initializer name to a callable applied to one parameter.
        scale = float(scale)
        assert scale > 0.0, "incorrect init_weight"
        if s.lower() == "xavier":
            return lambda p: nn.init.xavier_uniform_(p, gain=_gain)
        elif s.lower() == "uniform":
            return lambda p: nn.init.uniform_(p, a=-scale, b=scale)
        elif s.lower() == "normal":
            return lambda p: nn.init.normal_(p, mean=0.0, std=scale)
        elif s.lower() == "zeros":
            return lambda p: nn.init.zeros_(p)
        else:
            raise ValueError("unknown initializer")
    # One initializer each for generic weights, text embeddings, and biases.
    init_fn_ = _parse_init(init, init_weight, gain)
    embed_init_fn_ = _parse_init(embed_init, embed_init_weight, embed_gain)
    bias_init_fn_ = _parse_init(bias_init, bias_init_weight, gain)
    with torch.no_grad():
        for name, p in model.named_parameters():
            if "txt_embed" in name:
                # Only the lookup table weights of the text embedding get the
                # embedding initializer; its other parameters are left as-is.
                if "lut" in name:
                    embed_init_fn_(p)
            elif "bias" in name:
                bias_init_fn_(p)
            elif len(p.size()) > 1:
                # RNNs combine multiple matrices is one, which messes up
                # xavier initialization
                if init == "xavier" and "rnn" in name:
                    n = 1
                    if "encoder" in name:
                        n = 4 if isinstance(model.encoder.rnn, nn.LSTM) else 3
                    elif "decoder" in name:
                        n = 4 if isinstance(model.decoder.rnn, nn.LSTM) else 3
                    xavier_uniform_n_(p.data, gain=gain, n=n)
                else:
                    init_fn_(p)
        # zero out paddings
        if model.txt_embed is not None:
            model.txt_embed.lut.weight.data[txt_padding_idx].zero_()
        orthogonal = cfg.get("init_rnn_orthogonal", False)
        lstm_forget_gate = cfg.get("lstm_forget_gate", 1.0)
        # encoder rnn orthogonal initialization & LSTM forget gate
        if hasattr(model.encoder, "rnn"):
            if orthogonal:
                orthogonal_rnn_init_(model.encoder.rnn)
            if isinstance(model.encoder.rnn, nn.LSTM):
                lstm_forget_gate_init_(model.encoder.rnn, lstm_forget_gate)
        # decoder rnn orthogonal initialization & LSTM forget gate
        if hasattr(model.decoder, "rnn"):
            if orthogonal:
                orthogonal_rnn_init_(model.decoder.rnn)
            if isinstance(model.decoder.rnn, nn.LSTM):
                lstm_forget_gate_init_(model.decoder.rnn, lstm_forget_gate)
| 36.164773 | 81 | 0.631736 |
import math
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.init import _calculate_fan_in_and_fan_out
def orthogonal_rnn_init_(cell: nn.RNNBase, gain: float = 1.0):
with torch.no_grad():
for _, hh, _, _ in cell.all_weights:
for i in range(0, hh.size(0), cell.hidden_size):
nn.init.orthogonal_(hh.data[i : i + cell.hidden_size], gain=gain)
def lstm_forget_gate_init_(cell: nn.RNNBase, value: float = 1.0) -> None:
with torch.no_grad():
for _, _, ih_b, hh_b in cell.all_weights:
l = len(ih_b)
ih_b.data[l // 4 : l // 2].fill_(value)
hh_b.data[l // 4 : l // 2].fill_(value)
def xavier_uniform_n_(w: Tensor, gain: float = 1.0, n: int = 4) -> None:
with torch.no_grad():
fan_in, fan_out = _calculate_fan_in_and_fan_out(w)
assert fan_out % n == 0, "fan_out should be divisible by n"
fan_out //= n
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
a = math.sqrt(3.0) * std
nn.init.uniform_(w, -a, a)
def initialize_model(model: nn.Module, cfg: dict, txt_padding_idx: int) -> None:
gain = float(cfg.get("init_gain", 1.0))
init = cfg.get("initializer", "xavier")
init_weight = float(cfg.get("init_weight", 0.01))
embed_init = cfg.get("embed_initializer", "normal")
embed_init_weight = float(cfg.get("embed_init_weight", 0.01))
embed_gain = float(cfg.get("embed_init_gain", 1.0))
bias_init = cfg.get("bias_initializer", "zeros")
bias_init_weight = float(cfg.get("bias_init_weight", 0.01))
def _parse_init(s, scale, _gain):
scale = float(scale)
assert scale > 0.0, "incorrect init_weight"
if s.lower() == "xavier":
return lambda p: nn.init.xavier_uniform_(p, gain=_gain)
elif s.lower() == "uniform":
return lambda p: nn.init.uniform_(p, a=-scale, b=scale)
elif s.lower() == "normal":
return lambda p: nn.init.normal_(p, mean=0.0, std=scale)
elif s.lower() == "zeros":
return lambda p: nn.init.zeros_(p)
else:
raise ValueError("unknown initializer")
init_fn_ = _parse_init(init, init_weight, gain)
embed_init_fn_ = _parse_init(embed_init, embed_init_weight, embed_gain)
bias_init_fn_ = _parse_init(bias_init, bias_init_weight, gain)
with torch.no_grad():
for name, p in model.named_parameters():
if "txt_embed" in name:
if "lut" in name:
embed_init_fn_(p)
elif "bias" in name:
bias_init_fn_(p)
elif len(p.size()) > 1:
if init == "xavier" and "rnn" in name:
n = 1
if "encoder" in name:
n = 4 if isinstance(model.encoder.rnn, nn.LSTM) else 3
elif "decoder" in name:
n = 4 if isinstance(model.decoder.rnn, nn.LSTM) else 3
xavier_uniform_n_(p.data, gain=gain, n=n)
else:
init_fn_(p)
if model.txt_embed is not None:
model.txt_embed.lut.weight.data[txt_padding_idx].zero_()
orthogonal = cfg.get("init_rnn_orthogonal", False)
lstm_forget_gate = cfg.get("lstm_forget_gate", 1.0)
if hasattr(model.encoder, "rnn"):
if orthogonal:
orthogonal_rnn_init_(model.encoder.rnn)
if isinstance(model.encoder.rnn, nn.LSTM):
lstm_forget_gate_init_(model.encoder.rnn, lstm_forget_gate)
if hasattr(model.decoder, "rnn"):
if orthogonal:
orthogonal_rnn_init_(model.decoder.rnn)
if isinstance(model.decoder.rnn, nn.LSTM):
lstm_forget_gate_init_(model.decoder.rnn, lstm_forget_gate)
| true | true |
f7fe4b5a45252e412b20a4a9a774a31967ea2ce9 | 9,009 | py | Python | python/tests/expressions/test_expressions_base.py | x-malet/iceberg | 04871b79e9aba02e9534498fa035b627bcb9af6e | [
"Apache-2.0"
] | 1 | 2020-12-24T07:48:54.000Z | 2020-12-24T07:48:54.000Z | python/tests/expressions/test_expressions_base.py | x-malet/iceberg | 04871b79e9aba02e9534498fa035b627bcb9af6e | [
"Apache-2.0"
] | null | null | null | python/tests/expressions/test_expressions_base.py | x-malet/iceberg | 04871b79e9aba02e9534498fa035b627bcb9af6e | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import uuid
from decimal import Decimal
import pytest
from iceberg.expressions import base
from iceberg.types import NestedField, Singleton, StringType
@pytest.mark.parametrize(
    "operation,opposite_operation",
    [
        (base.Operation.TRUE, base.Operation.FALSE),
        (base.Operation.FALSE, base.Operation.TRUE),
        (base.Operation.IS_NULL, base.Operation.NOT_NULL),
        (base.Operation.NOT_NULL, base.Operation.IS_NULL),
        (base.Operation.IS_NAN, base.Operation.NOT_NAN),
        (base.Operation.NOT_NAN, base.Operation.IS_NAN),
        (base.Operation.LT, base.Operation.GT_EQ),
        (base.Operation.LT_EQ, base.Operation.GT),
        (base.Operation.GT, base.Operation.LT_EQ),
        (base.Operation.GT_EQ, base.Operation.LT),
        (base.Operation.EQ, base.Operation.NOT_EQ),
        (base.Operation.NOT_EQ, base.Operation.EQ),
        (base.Operation.IN, base.Operation.NOT_IN),
        (base.Operation.NOT_IN, base.Operation.IN),
    ],
)
def test_negation_of_operations(operation, opposite_operation):
    """negate() maps every predicate operation to its logical opposite."""
    assert operation.negate() == opposite_operation
@pytest.mark.parametrize(
    "operation",
    [
        base.Operation.NOT,
        base.Operation.AND,
        base.Operation.OR,
    ],
)
def test_raise_on_no_negation_for_operation(operation):
    """Logical-connective operations have no defined negation and must raise."""
    with pytest.raises(ValueError) as exc_info:
        operation.negate()
    assert str(exc_info.value) == f"No negation defined for operation {operation}"
class TestExpressionA(base.BooleanExpression, Singleton):
    """Minimal boolean-expression stub; inverting it yields TestExpressionB."""
    def __invert__(self):
        return TestExpressionB()
    def __repr__(self):
        return "TestExpressionA()"
    def __str__(self):
        return "testexpra"
class TestExpressionB(base.BooleanExpression, Singleton):
    """Minimal boolean-expression stub; inverting it yields TestExpressionA."""
    def __invert__(self):
        return TestExpressionA()
    def __repr__(self):
        return "TestExpressionB()"
    def __str__(self):
        return "testexprb"
@pytest.mark.parametrize(
    "op, rep",
    [
        (
            base.And(TestExpressionA(), TestExpressionB()),
            "And(TestExpressionA(), TestExpressionB())",
        ),
        (
            base.Or(TestExpressionA(), TestExpressionB()),
            "Or(TestExpressionA(), TestExpressionB())",
        ),
        (base.Not(TestExpressionA()), "Not(TestExpressionA())"),
    ],
)
def test_reprs(op, rep):
    """repr() of composite expressions nests the reprs of their children."""
    assert repr(op) == rep
@pytest.mark.parametrize(
    "op, string",
    [
        (base.And(TestExpressionA(), TestExpressionB()), "(testexpra and testexprb)"),
        (base.Or(TestExpressionA(), TestExpressionB()), "(testexpra or testexprb)"),
        (base.Not(TestExpressionA()), "(not testexpra)"),
    ],
)
def test_strs(op, string):
    """str() of composite expressions renders an infix/prefix textual form."""
    assert str(op) == string
@pytest.mark.parametrize(
    "exp, testexpra, testexprb",
    [
        (
            base.And(TestExpressionA(), TestExpressionB()),
            base.And(TestExpressionA(), TestExpressionB()),
            base.Or(TestExpressionA(), TestExpressionB()),
        ),
        (
            base.Or(TestExpressionA(), TestExpressionB()),
            base.Or(TestExpressionA(), TestExpressionB()),
            base.And(TestExpressionA(), TestExpressionB()),
        ),
        (base.Not(TestExpressionA()), base.Not(TestExpressionA()), TestExpressionB()),
        (TestExpressionA(), TestExpressionA(), TestExpressionB()),
        (TestExpressionB(), TestExpressionB(), TestExpressionA()),
    ],
)
def test_eq(exp, testexpra, testexprb):
    """Expressions equal structurally identical ones and differ otherwise."""
    assert exp == testexpra and exp != testexprb
@pytest.mark.parametrize(
    "lhs, rhs",
    [
        (
            base.And(TestExpressionA(), TestExpressionB()),
            base.Or(TestExpressionB(), TestExpressionA()),
        ),
        (
            base.Or(TestExpressionA(), TestExpressionB()),
            base.And(TestExpressionB(), TestExpressionA()),
        ),
        (base.Not(TestExpressionA()), TestExpressionA()),
        (TestExpressionA(), TestExpressionB()),
    ],
)
def test_negate(lhs, rhs):
    """~expression applies De Morgan-style negation to composite expressions."""
    assert ~lhs == rhs
@pytest.mark.parametrize(
    "lhs, rhs",
    [
        (
            base.And(TestExpressionA(), TestExpressionB(), TestExpressionA()),
            base.And(base.And(TestExpressionA(), TestExpressionB()), TestExpressionA()),
        ),
        (
            base.Or(TestExpressionA(), TestExpressionB(), TestExpressionA()),
            base.Or(base.Or(TestExpressionA(), TestExpressionB()), TestExpressionA()),
        ),
        (base.Not(base.Not(TestExpressionA())), TestExpressionA()),
    ],
)
def test_reduce(lhs, rhs):
    """Variadic And/Or reduce to nested binary forms; double Not cancels."""
    assert lhs == rhs
@pytest.mark.parametrize(
    "lhs, rhs",
    [
        (base.And(base.AlwaysTrue(), TestExpressionB()), TestExpressionB()),
        (base.And(base.AlwaysFalse(), TestExpressionB()), base.AlwaysFalse()),
        (base.Or(base.AlwaysTrue(), TestExpressionB()), base.AlwaysTrue()),
        (base.Or(base.AlwaysFalse(), TestExpressionB()), TestExpressionB()),
        (base.Not(base.Not(TestExpressionA())), TestExpressionA()),
    ],
)
def test_base_AlwaysTrue_base_AlwaysFalse(lhs, rhs):
    """AlwaysTrue/AlwaysFalse operands short-circuit And/Or at construction."""
    assert lhs == rhs
def test_accessor_base_class(foo_struct):
    """Test retrieving a value at a position of a container using an accessor"""
    uuid_value = uuid.uuid4()
    foo_struct.set(0, "foo")
    foo_struct.set(1, "bar")
    foo_struct.set(2, "baz")
    foo_struct.set(3, 1)
    foo_struct.set(4, 2)
    foo_struct.set(5, 3)
    foo_struct.set(6, 1.234)
    foo_struct.set(7, Decimal("1.234"))
    foo_struct.set(8, uuid_value)
    foo_struct.set(9, True)
    foo_struct.set(10, False)
    foo_struct.set(11, b"\x19\x04\x9e?")
    assert base.Accessor(position=0).get(foo_struct) == "foo"
    assert base.Accessor(position=1).get(foo_struct) == "bar"
    assert base.Accessor(position=2).get(foo_struct) == "baz"
    assert base.Accessor(position=3).get(foo_struct) == 1
    assert base.Accessor(position=4).get(foo_struct) == 2
    assert base.Accessor(position=5).get(foo_struct) == 3
    assert base.Accessor(position=6).get(foo_struct) == 1.234
    assert base.Accessor(position=7).get(foo_struct) == Decimal("1.234")
    assert base.Accessor(position=8).get(foo_struct) == uuid_value
    # Identity checks (E712 fix): the accessor must return the stored bool
    # object itself, not merely something truthy/falsy.
    assert base.Accessor(position=9).get(foo_struct) is True
    assert base.Accessor(position=10).get(foo_struct) is False
    assert base.Accessor(position=11).get(foo_struct) == b"\x19\x04\x9e?"
def test_bound_reference_str_and_repr():
    """Test str and repr of BoundReference"""
    field = NestedField(field_id=1, name="foo", field_type=StringType(), is_optional=False)
    position1_accessor = base.Accessor(position=1)
    bound_ref = base.BoundReference(field=field, accessor=position1_accessor)
    # str and repr are expected to render the identical canonical form.
    expected = f"BoundReference(field={repr(field)}, accessor={repr(position1_accessor)})"
    assert str(bound_ref) == expected
    assert repr(bound_ref) == expected
def test_bound_reference_field_property():
    """Test that the field property of BoundReference returns the bound NestedField"""
    field = NestedField(field_id=1, name="foo", field_type=StringType(), is_optional=False)
    position1_accessor = base.Accessor(position=1)
    bound_ref = base.BoundReference(field=field, accessor=position1_accessor)
    assert bound_ref.field == NestedField(field_id=1, name="foo", field_type=StringType(), is_optional=False)
def test_bound_reference(table_schema_simple, foo_struct):
    """Test creating a BoundReference and evaluating it on a StructProtocol"""
    foo_struct.set(pos=1, value="foovalue")
    foo_struct.set(pos=2, value=123)
    foo_struct.set(pos=3, value=True)
    position1_accessor = base.Accessor(position=1)
    position2_accessor = base.Accessor(position=2)
    position3_accessor = base.Accessor(position=3)
    field1 = table_schema_simple.find_field(1)
    field2 = table_schema_simple.find_field(2)
    field3 = table_schema_simple.find_field(3)
    bound_ref1 = base.BoundReference(field=field1, accessor=position1_accessor)
    bound_ref2 = base.BoundReference(field=field2, accessor=position2_accessor)
    bound_ref3 = base.BoundReference(field=field3, accessor=position3_accessor)
    assert bound_ref1.eval(foo_struct) == "foovalue"
    assert bound_ref2.eval(foo_struct) == 123
    # Identity check (E712 fix): eval must return the stored bool itself.
    assert bound_ref3.eval(foo_struct) is True
| 34.65 | 109 | 0.675214 |
import uuid
from decimal import Decimal
import pytest
from iceberg.expressions import base
from iceberg.types import NestedField, Singleton, StringType
@pytest.mark.parametrize(
"operation,opposite_operation",
[
(base.Operation.TRUE, base.Operation.FALSE),
(base.Operation.FALSE, base.Operation.TRUE),
(base.Operation.IS_NULL, base.Operation.NOT_NULL),
(base.Operation.NOT_NULL, base.Operation.IS_NULL),
(base.Operation.IS_NAN, base.Operation.NOT_NAN),
(base.Operation.NOT_NAN, base.Operation.IS_NAN),
(base.Operation.LT, base.Operation.GT_EQ),
(base.Operation.LT_EQ, base.Operation.GT),
(base.Operation.GT, base.Operation.LT_EQ),
(base.Operation.GT_EQ, base.Operation.LT),
(base.Operation.EQ, base.Operation.NOT_EQ),
(base.Operation.NOT_EQ, base.Operation.EQ),
(base.Operation.IN, base.Operation.NOT_IN),
(base.Operation.NOT_IN, base.Operation.IN),
],
)
def test_negation_of_operations(operation, opposite_operation):
assert operation.negate() == opposite_operation
@pytest.mark.parametrize(
"operation",
[
base.Operation.NOT,
base.Operation.AND,
base.Operation.OR,
],
)
def test_raise_on_no_negation_for_operation(operation):
with pytest.raises(ValueError) as exc_info:
operation.negate()
assert str(exc_info.value) == f"No negation defined for operation {operation}"
class TestExpressionA(base.BooleanExpression, Singleton):
def __invert__(self):
return TestExpressionB()
def __repr__(self):
return "TestExpressionA()"
def __str__(self):
return "testexpra"
class TestExpressionB(base.BooleanExpression, Singleton):
def __invert__(self):
return TestExpressionA()
def __repr__(self):
return "TestExpressionB()"
def __str__(self):
return "testexprb"
@pytest.mark.parametrize(
"op, rep",
[
(
base.And(TestExpressionA(), TestExpressionB()),
"And(TestExpressionA(), TestExpressionB())",
),
(
base.Or(TestExpressionA(), TestExpressionB()),
"Or(TestExpressionA(), TestExpressionB())",
),
(base.Not(TestExpressionA()), "Not(TestExpressionA())"),
],
)
def test_reprs(op, rep):
assert repr(op) == rep
@pytest.mark.parametrize(
"op, string",
[
(base.And(TestExpressionA(), TestExpressionB()), "(testexpra and testexprb)"),
(base.Or(TestExpressionA(), TestExpressionB()), "(testexpra or testexprb)"),
(base.Not(TestExpressionA()), "(not testexpra)"),
],
)
def test_strs(op, string):
assert str(op) == string
@pytest.mark.parametrize(
    "exp, testexpra, testexprb",
    [
        (
            base.And(TestExpressionA(), TestExpressionB()),
            base.And(TestExpressionA(), TestExpressionB()),
            base.Or(TestExpressionA(), TestExpressionB()),
        ),
        (
            base.Or(TestExpressionA(), TestExpressionB()),
            base.Or(TestExpressionA(), TestExpressionB()),
            base.And(TestExpressionA(), TestExpressionB()),
        ),
        (base.Not(TestExpressionA()), base.Not(TestExpressionA()), TestExpressionB()),
        (TestExpressionA(), TestExpressionA(), TestExpressionB()),
        (TestExpressionB(), TestExpressionB(), TestExpressionA()),
    ],
)
def test_eq(exp, testexpra, testexprb):
    """Expressions equal structurally identical ones and differ from distinct ones."""
    assert exp == testexpra and exp != testexprb
@pytest.mark.parametrize(
    "lhs, rhs",
    [
        (
            base.And(TestExpressionA(), TestExpressionB()),
            base.Or(TestExpressionB(), TestExpressionA()),
        ),
        (
            base.Or(TestExpressionA(), TestExpressionB()),
            base.And(TestExpressionB(), TestExpressionA()),
        ),
        (base.Not(TestExpressionA()), TestExpressionA()),
        (TestExpressionA(), TestExpressionB()),
    ],
)
def test_negate(lhs, rhs):
    """~expr applies De Morgan's laws; operands negate too (~A is B by fixture)."""
    assert ~lhs == rhs
@pytest.mark.parametrize(
    "lhs, rhs",
    [
        (
            base.And(TestExpressionA(), TestExpressionB(), TestExpressionA()),
            base.And(base.And(TestExpressionA(), TestExpressionB()), TestExpressionA()),
        ),
        (
            base.Or(TestExpressionA(), TestExpressionB(), TestExpressionA()),
            base.Or(base.Or(TestExpressionA(), TestExpressionB()), TestExpressionA()),
        ),
        (base.Not(base.Not(TestExpressionA())), TestExpressionA()),
    ],
)
def test_reduce(lhs, rhs):
    """Variadic And/Or left-fold into nested binary nodes; double Not collapses."""
    assert lhs == rhs
@pytest.mark.parametrize(
    "lhs, rhs",
    [
        (base.And(base.AlwaysTrue(), TestExpressionB()), TestExpressionB()),
        (base.And(base.AlwaysFalse(), TestExpressionB()), base.AlwaysFalse()),
        (base.Or(base.AlwaysTrue(), TestExpressionB()), base.AlwaysTrue()),
        (base.Or(base.AlwaysFalse(), TestExpressionB()), TestExpressionB()),
        (base.Not(base.Not(TestExpressionA())), TestExpressionA()),
    ],
)
def test_base_AlwaysTrue_base_AlwaysFalse(lhs, rhs):
    """AlwaysTrue/AlwaysFalse act as identity or absorbing elements under And/Or."""
    assert lhs == rhs
def test_accessor_base_class(foo_struct):
    """base.Accessor.get reads back every positional value set on the struct."""
    uuid_value = uuid.uuid4()
    foo_struct.set(0, "foo")
    foo_struct.set(1, "bar")
    foo_struct.set(2, "baz")
    foo_struct.set(3, 1)
    foo_struct.set(4, 2)
    foo_struct.set(5, 3)
    foo_struct.set(6, 1.234)
    foo_struct.set(7, Decimal("1.234"))
    foo_struct.set(8, uuid_value)
    foo_struct.set(9, True)
    foo_struct.set(10, False)
    foo_struct.set(11, b"\x19\x04\x9e?")
    assert base.Accessor(position=0).get(foo_struct) == "foo"
    assert base.Accessor(position=1).get(foo_struct) == "bar"
    assert base.Accessor(position=2).get(foo_struct) == "baz"
    assert base.Accessor(position=3).get(foo_struct) == 1
    assert base.Accessor(position=4).get(foo_struct) == 2
    assert base.Accessor(position=5).get(foo_struct) == 3
    assert base.Accessor(position=6).get(foo_struct) == 1.234
    assert base.Accessor(position=7).get(foo_struct) == Decimal("1.234")
    assert base.Accessor(position=8).get(foo_struct) == uuid_value
    # Identity checks (`is`) instead of `== True`/`== False` (flake8 E712);
    # positions 9 and 10 were set to the actual bool singletons above.
    assert base.Accessor(position=9).get(foo_struct) is True
    assert base.Accessor(position=10).get(foo_struct) is False
    assert base.Accessor(position=11).get(foo_struct) == b"\x19\x04\x9e?"
def test_bound_reference_str_and_repr():
    """str() and repr() of a BoundReference produce the same rendering."""
    field = NestedField(field_id=1, name="foo", field_type=StringType(), is_optional=False)
    accessor = base.Accessor(position=1)
    ref = base.BoundReference(field=field, accessor=accessor)
    expected = f"BoundReference(field={field!r}, accessor={accessor!r})"
    assert str(ref) == expected
    assert repr(ref) == expected
def test_bound_reference_field_property():
    """The ``field`` property returns a value equal to the wrapped field."""
    expected_field = NestedField(field_id=1, name="foo", field_type=StringType(), is_optional=False)
    ref = base.BoundReference(
        field=NestedField(field_id=1, name="foo", field_type=StringType(), is_optional=False),
        accessor=base.Accessor(position=1),
    )
    assert ref.field == expected_field
def test_bound_reference(table_schema_simple, foo_struct):
    """BoundReference.eval returns the struct value at its accessor's position."""
    foo_struct.set(pos=1, value="foovalue")
    foo_struct.set(pos=2, value=123)
    foo_struct.set(pos=3, value=True)
    position1_accessor = base.Accessor(position=1)
    position2_accessor = base.Accessor(position=2)
    position3_accessor = base.Accessor(position=3)
    field1 = table_schema_simple.find_field(1)
    field2 = table_schema_simple.find_field(2)
    field3 = table_schema_simple.find_field(3)
    bound_ref1 = base.BoundReference(field=field1, accessor=position1_accessor)
    bound_ref2 = base.BoundReference(field=field2, accessor=position2_accessor)
    bound_ref3 = base.BoundReference(field=field3, accessor=position3_accessor)
    assert bound_ref1.eval(foo_struct) == "foovalue"
    assert bound_ref2.eval(foo_struct) == 123
    # `is True` instead of `== True` (flake8 E712); position 3 stores the bool singleton.
    assert bound_ref3.eval(foo_struct) is True
| true | true |
f7fe4bfe1f462944fb31ee9a6c93aa914fa7f0fc | 93 | py | Python | mlopen/mlopenapp/apps.py | AUTH-MINT/MLOpen | 17ce5a3a4bae683612d3e46ea0fb9d07414c9904 | [
"MIT"
] | null | null | null | mlopen/mlopenapp/apps.py | AUTH-MINT/MLOpen | 17ce5a3a4bae683612d3e46ea0fb9d07414c9904 | [
"MIT"
] | null | null | null | mlopen/mlopenapp/apps.py | AUTH-MINT/MLOpen | 17ce5a3a4bae683612d3e46ea0fb9d07414c9904 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class MlopenappConfig(AppConfig):
    """Django AppConfig registering the ``mlopenapp`` application."""
    # Dotted module path Django uses to locate and register this app.
    name = 'mlopenapp'
| 15.5 | 33 | 0.763441 | from django.apps import AppConfig
class MlopenappConfig(AppConfig):
    """Django AppConfig registering the ``mlopenapp`` application."""
    # Dotted module path Django uses to locate and register this app.
    name = 'mlopenapp'
| true | true |
f7fe4d46008763b887c9dcd70f81c378c721f39b | 1,334 | py | Python | bloomfilter.py | sonicskye/smart-stamp-duty-UI | 9cde6ce98acde28ed0df34bb1149896600cc840b | [
"MIT"
] | 2 | 2018-11-26T14:19:23.000Z | 2018-12-20T22:09:41.000Z | bloomfilter.py | sonicskye/smart-stamp-duty-UI | 9cde6ce98acde28ed0df34bb1149896600cc840b | [
"MIT"
] | null | null | null | bloomfilter.py | sonicskye/smart-stamp-duty-UI | 9cde6ce98acde28ed0df34bb1149896600cc840b | [
"MIT"
] | null | null | null | '''
sonicskye
bloomfilter.py
Requirements:
bitarray (https://github.com/ilanschnell/bitarray)
pybloof (https://github.com/jhgg/pybloof)
Pybloof library is used due to its built-in export and import features
These features are convenient for storing the bloom filter information to the smart contract
and import them if needed
'''
import pybloof
import utilities as u
def createstringbloomfilter(wordlist):
    """Build a string Bloom filter over the unique words and return it base64-encoded.

    Capacity is padded by 500 above the unique-word count so the filter is not
    undersized (an undersized filter gives inaccurate results); false positives
    are still possible.
    """
    unique_words = set(wordlist)
    bf = pybloof.StringBloomFilter(size=len(unique_words) + 500, hashes=9)
    # add() just sets bits, so adding each unique word once yields the same
    # final bit pattern as the original membership-checked loop.
    for word in unique_words:
        bf.add(word)
    return bf.to_base64().decode('utf-8')
def teststringbloomfilter(bfValue, wordlist):
    """Return the rounded percentage (0-100) of unique words present in the filter.

    *bfValue* is the base64 serialization produced by ``createstringbloomfilter``.
    """
    bf = pybloof.StringBloomFilter.from_base64(bfValue.encode('utf-8'))
    unique_words = set(wordlist)
    hits = sum(1 for word in unique_words if word in bf)
    return round(hits / len(unique_words) * 100)
##################################### test ################################
| 27.791667 | 92 | 0.667166 |
import pybloof
import utilities as u
def createstringbloomfilter(wordlist):
    """Build a string Bloom filter over the words and return it base64-encoded.

    Capacity is padded by 500 above the unique-word count; false positives
    remain possible.
    """
    setwordlist = set(wordlist)
    sz = len(setwordlist) + 500
    bf = pybloof.StringBloomFilter(size=sz, hashes=9)
    for word in wordlist:
        # Pre-check only skips redundant adds; a false positive here means the
        # same bits are already set, so the final filter state is unaffected.
        if word not in bf:
            bf.add(word)
    return bf.to_base64().decode('utf-8')
def teststringbloomfilter(bfValue, wordlist):
    """Return the rounded percentage of unique words found in the encoded filter."""
    # Deserialize the base64 filter produced by createstringbloomfilter().
    bf = pybloof.StringBloomFilter.from_base64(bfValue.encode('utf-8'))
    setwordlist = set(wordlist)
    wlength = len(setwordlist)
    positive = 0
    for word in setwordlist:
        if word in bf:
            positive += 1;
    # Percentage of unique words reported present, rounded to an int.
    res = round(positive/wlength *100)
    return res
| true | true |
f7fe4e1736c5b1b484ff68e6225ccd406608f75e | 5,110 | py | Python | decision_tree.py | hcbh96/Random-Forest-FYP | 4af2c85a4f4d998f616751f9c366329bdc559b13 | [
"MIT"
] | null | null | null | decision_tree.py | hcbh96/Random-Forest-FYP | 4af2c85a4f4d998f616751f9c366329bdc559b13 | [
"MIT"
] | null | null | null | decision_tree.py | hcbh96/Random-Forest-FYP | 4af2c85a4f4d998f616751f9c366329bdc559b13 | [
"MIT"
] | null | null | null | """
In this file I want to:
create DT
Train DT
Test DT
Analyse Accurancy
Analyse Sensitivity
Analyse Precision
Check Feature Importance
"""
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from evaluate_model import evaluate_model, performance_assessor
from confusion_matrix import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
import graphviz
# Set random seed to ensure reproducible runs
RSEED = 30
dtfm=pd.read_excel('cleaned_data.xlsx', sheet_name='Sheet1')
#Remove columns not to be used in modelling
dtfm = dtfm.drop(columns=['ORDEM','DATA','AMOSTRA','REPLICATA','ANIMAL','PARTIDA','CLIV','CELLS_COUNT'])
print("Describe Output Vars: \n {}".format(dtfm["BLAST_D8"].describe()))
"""
One of the thigns i need to do is categorise the output data
Where:
- 0 is bad quality 0 - 50%
- 1 is good quality 50 - 100%
I will use the following statistics to make the decsion:
Statistics for each column after outlier removal
CLIV BLAST_D8 CELLS_COUNT
count 313.000000 313.000000 180.000000
mean 72.070374 21.475320 171.115891
std 8.942164 11.093061 42.876076
min 49.350649 0.000000 57.000000
25% 65.079365 12.121212 144.875000
50% 72.151899 20.312500 169.875000
75% 79.487179 29.629630 195.437500
max 90.140845 53.623188 269.000000
For BLAST_D8:
0 < 21.475320
1 >= 21.475320
"""
# Update Labels in Blast_D8 and CLIV
dtfm['BLAST_D8'] = dtfm['BLAST_D8'].where(dtfm['BLAST_D8'] >= 21.475320, other=0)
dtfm['BLAST_D8'] = dtfm['BLAST_D8'].where(dtfm['BLAST_D8'] < 21.475320, other=1)
# Make a copy for dtfm blast
print("Blast_D8 value counts:\n {}".format(dtfm['BLAST_D8'].value_counts()))
# Extract the labels
labels = np.array(dtfm.pop('BLAST_D8'))
# 30% examples in test data
train, test, train_labels, test_labels = train_test_split(dtfm, labels, stratify = labels, test_size = 0.3, random_state = RSEED)
#imputation of missing values
train = train.fillna(train.mean())
test = test.fillna(test.mean())
# Features for feature importances
features = list(train.columns)
print("Train Shape: {}".format(train.shape))
print("Test Shape: {}".format(test.shape))
"""
Train decision tree on data with unlimited depth to check for overfitting
"""
# Make a decision tree and train
tree = DecisionTreeClassifier(random_state=RSEED)
# Train tree
tree.fit(train, train_labels)
print('Decision tree has {} nodes with maximum depth {}.'.format(tree.tree_.node_count, tree.tree_.max_depth))
"""
Assess decision tree performance
I would expect this to overfit but we want to make sure
"""
# Make probability predictions
train_probs = tree.predict_proba(train)[:, 1]
probs = tree.predict_proba(test)[:, 1]
train_predictions = tree.predict(train)
predictions = tree.predict(test)
# evaluate model
evaluate_model(predictions, probs, train_predictions, train_probs, test_labels, train_labels, title='Tree ROC Curve')
# print other metrics
performance_assessor(predictions, probs, train_predictions, train_probs, test_labels, train_labels, logger=True)
# display example decision tree
export_graphviz(tree, out_file='tree.dot',
filled=True, rounded=True,
special_characters=True)
print('\033[94m' + "To view decision tree example run the following command in terminal:\ndot -Tpng tree.dot -o tree.png" + '\033[0m')
# Plot confusion matrix
cm = confusion_matrix(test_labels, predictions)
plot_confusion_matrix(cm, classes = ['Poor Health', 'Good Health'],
title = 'Tree Confusion Matrix')
"""
Confusion Matrix:
[[35 9]
[10 35]]
Classification Accuracy: 0.7865168539325843
Classification Sensitivity: 0.7865168539325843
From a single with a confusion matrix we can see above Accuracy and Sesitivity
These should form our base projection or possibly projections from Mayra?
Should we instead maybe take two classes as this would allow the plotting of
ROC curves etc -
Mayra mentioned that
**The idea with this project more than predict embryo production is to see if there is any variables from sperm analysis that can predict these production.
That's why we used so many bulls. Ore research is based on these ideas, the bull effect, which sperm analysis can we do to predict embryo production. **
Consider this when deciding whether to use binary or non binary classification
Let check out feature importance in the decision tree
"""
fi = pd.DataFrame({'feature': features,
'importance': tree.feature_importances_}).\
sort_values('importance', ascending = False)
print("Features of most importance in decision tree: \n{}".format(fi.head()))
"""
This porucdes the following results
Features of most importance in decision tree:
feature importance
17 ALH 0.151271
3 SUB_3_LS 0.145387
8 FRAG_CRO 0.079971
18 BCF 0.077984
20 LIN 0.065810
I want to at some point check co-linearity between the above variables.
"""
# Train and evaluate a decision-tree classifier for embryo production quality
# (BLAST_D8) from sperm-analysis features.
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from evaluate_model import evaluate_model, performance_assessor
from confusion_matrix import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
import graphviz
# Fixed seed for reproducible splits and tree construction.
RSEED = 30
dtfm=pd.read_excel('cleaned_data.xlsx', sheet_name='Sheet1')
# Drop identifiers and alternative outcome columns not used for modelling.
dtfm = dtfm.drop(columns=['ORDEM','DATA','AMOSTRA','REPLICATA','ANIMAL','PARTIDA','CLIV','CELLS_COUNT'])
print("Describe Output Vars: \n {}".format(dtfm["BLAST_D8"].describe()))
# Binarize BLAST_D8 around 21.475320 (its sample mean): below mean -> 0 (poor
# quality), at/above mean -> 1 (good quality).
dtfm['BLAST_D8'] = dtfm['BLAST_D8'].where(dtfm['BLAST_D8'] >= 21.475320, other=0)
dtfm['BLAST_D8'] = dtfm['BLAST_D8'].where(dtfm['BLAST_D8'] < 21.475320, other=1)
print("Blast_D8 value counts:\n {}".format(dtfm['BLAST_D8'].value_counts()))
# Extract the labels; dtfm now holds only the features.
labels = np.array(dtfm.pop('BLAST_D8'))
# Stratified split: 30% of examples held out for testing.
train, test, train_labels, test_labels = train_test_split(dtfm, labels, stratify = labels, test_size = 0.3, random_state = RSEED)
# Impute missing values with each split's own column means.
train = train.fillna(train.mean())
test = test.fillna(test.mean())
# Feature names, kept for the importance report below.
features = list(train.columns)
print("Train Shape: {}".format(train.shape))
print("Test Shape: {}".format(test.shape))
# Unlimited-depth tree: expected to overfit; used as a baseline check.
tree = DecisionTreeClassifier(random_state=RSEED)
tree.fit(train, train_labels)
print('Decision tree has {} nodes with maximum depth {}.'.format(tree.tree_.node_count, tree.tree_.max_depth))
# Probability of the positive class plus hard predictions, for both splits.
train_probs = tree.predict_proba(train)[:, 1]
probs = tree.predict_proba(test)[:, 1]
train_predictions = tree.predict(train)
predictions = tree.predict(test)
# ROC curve and other metrics (accuracy, sensitivity, ...).
evaluate_model(predictions, probs, train_predictions, train_probs, test_labels, train_labels, title='Tree ROC Curve')
performance_assessor(predictions, probs, train_predictions, train_probs, test_labels, train_labels, logger=True)
# Export the tree for visualisation with graphviz.
export_graphviz(tree, out_file='tree.dot',
                filled=True, rounded=True,
                special_characters=True)
print('\033[94m' + "To view decision tree example run the following command in terminal:\ndot -Tpng tree.dot -o tree.png" + '\033[0m')
cm = confusion_matrix(test_labels, predictions)
plot_confusion_matrix(cm, classes = ['Poor Health', 'Good Health'],
                      title = 'Tree Confusion Matrix')
# Rank features by the tree's impurity-based importance.
fi = pd.DataFrame({'feature': features,
                   'importance': tree.feature_importances_}).\
    sort_values('importance', ascending = False)
print("Features of most importance in decision tree: \n{}".format(fi.head()))
| true | true |
f7fe4e211472caf271200cc3d30b7f679222a4f4 | 1,407 | py | Python | examples/text-classification/config_for_attention_visualization_for_loading_lex_model_hpc.py | mithunpaul08/transformers | 55d5e0a1d88f0922dc2af3be140e077850c66fee | [
"Apache-2.0"
] | null | null | null | examples/text-classification/config_for_attention_visualization_for_loading_lex_model_hpc.py | mithunpaul08/transformers | 55d5e0a1d88f0922dc2af3be140e077850c66fee | [
"Apache-2.0"
] | null | null | null | examples/text-classification/config_for_attention_visualization_for_loading_lex_model_hpc.py | mithunpaul08/transformers | 55d5e0a1d88f0922dc2af3be140e077850c66fee | [
"Apache-2.0"
] | null | null | null | [BERT]
model_name_or_path="bert-base-cased"
task_name="fevercrossdomain"
do_train=True
do_eval=True
do_predict=True
#do_train_1student_1teacher=True
data_dir="/home/u11/mithunpaul/xdisk/huggingface_bert_fever_to_fnc_load_lex/data/fever/fevercrossdomain/lex/figerspecific/"
max_seq_length="128"
per_device_eval_batch_size="16"
per_device_train_batch_size="16"
learning_rate="1e-5"
# Note: use num_train_epochs=1 only. For testing purposes, in trainery.py we are as of now, sep 2020 we are returning the dev and test partition evaluation results for epoch1
# if you want to test for more than 1 eopch, return best_dev and best_test values
num_train_epochs="1"
output_dir="/home/u11/mithunpaul/xdisk/huggingface_bert_fever_to_fnc_load_lex/output/fever/fevercrossdomain/lex/figerspecific/bert-base-cased/128/"
overwrite_output_dir=True
weight_decay="0.01"
adam_epsilon="1e-6"
evaluate_during_training=True
task_type="lex"
subtask_type="figerspecific"
machine_to_run_on="hpc"
toy_data_dir_path="/Users/mordor/research/huggingface/src/transformers/data/datasets/fever/fevercrossdomain/lex/figerspecific/toydata/"
#what are the scores that you should assert against. i.e the ones we got when we ran the toy data alone
fever_in_domain_accuracy_on_toy_data_17_datapoints=0.9375
fever_cross_domain_accuracy_on_toy_data_17_datapoints=0.875
fever_cross_domain_fncscore_on_toy_data_17_datapoints=0.9
overwrite_cache=True | 48.517241 | 174 | 0.851457 | [BERT]
model_name_or_path="bert-base-cased"
task_name="fevercrossdomain"
do_train=True
do_eval=True
do_predict=True
data_dir="/home/u11/mithunpaul/xdisk/huggingface_bert_fever_to_fnc_load_lex/data/fever/fevercrossdomain/lex/figerspecific/"
max_seq_length="128"
per_device_eval_batch_size="16"
per_device_train_batch_size="16"
learning_rate="1e-5"
num_train_epochs="1"
output_dir="/home/u11/mithunpaul/xdisk/huggingface_bert_fever_to_fnc_load_lex/output/fever/fevercrossdomain/lex/figerspecific/bert-base-cased/128/"
overwrite_output_dir=True
weight_decay="0.01"
adam_epsilon="1e-6"
evaluate_during_training=True
task_type="lex"
subtask_type="figerspecific"
machine_to_run_on="hpc"
toy_data_dir_path="/Users/mordor/research/huggingface/src/transformers/data/datasets/fever/fevercrossdomain/lex/figerspecific/toydata/"
fever_in_domain_accuracy_on_toy_data_17_datapoints=0.9375
fever_cross_domain_accuracy_on_toy_data_17_datapoints=0.875
fever_cross_domain_fncscore_on_toy_data_17_datapoints=0.9
overwrite_cache=True | true | true |
f7fe4e7d10080f1d2252b5fa851aef8e2f9eb15f | 9,762 | py | Python | test/functional/p2p_blockfilters.py | XbitCC/xbitcoin | 2c71b1adc311680e8f3aa977b42029b1a26164f1 | [
"MIT"
] | 2 | 2021-10-16T06:16:02.000Z | 2022-03-26T21:48:38.000Z | test/functional/p2p_blockfilters.py | XbitCC/xbitcoin | 2c71b1adc311680e8f3aa977b42029b1a26164f1 | [
"MIT"
] | null | null | null | test/functional/p2p_blockfilters.py | XbitCC/xbitcoin | 2c71b1adc311680e8f3aa977b42029b1a26164f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The XBit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_COMPACT_FILTERS (BIP 157/158).
Tests that a node configured with -blockfilterindex and -peerblockfilters signals
NODE_COMPACT_FILTERS and can serve cfilters, cfheaders and cfcheckpts.
"""
from test_framework.messages import (
FILTER_TYPE_BASIC,
NODE_COMPACT_FILTERS,
hash256,
msg_getcfcheckpt,
msg_getcfheaders,
msg_getcfilters,
ser_uint256,
uint256_from_str,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import XBitTestFramework
from test_framework.util import (
assert_equal,
)
class CFiltersClient(P2PInterface):
    """P2P test client that records every cfilter message it receives."""

    def __init__(self):
        super().__init__()
        self.cfilters = []  # cfilter messages received, in arrival order

    def pop_cfilters(self):
        """Return all stored cfilters and clear the internal buffer."""
        collected, self.cfilters = self.cfilters, []
        return collected

    def on_cfilter(self, message):
        """Message hook: append each received cfilter to the buffer."""
        self.cfilters.append(message)
class CompactFiltersTest(XBitTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.rpc_timeout = 480
self.num_nodes = 2
self.extra_args = [
["-blockfilterindex", "-peerblockfilters"],
["-blockfilterindex"],
]
def run_test(self):
# Node 0 supports COMPACT_FILTERS, node 1 does not.
node0 = self.nodes[0].add_p2p_connection(CFiltersClient())
node1 = self.nodes[1].add_p2p_connection(CFiltersClient())
# Nodes 0 & 1 share the same first 999 blocks in the chain.
self.nodes[0].generate(999)
self.sync_blocks(timeout=600)
# Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
self.disconnect_nodes(0, 1)
self.nodes[0].generate(1)
self.wait_until(lambda: self.nodes[0].getblockcount() == 1000)
stale_block_hash = self.nodes[0].getblockhash(1000)
self.nodes[1].generate(1001)
self.wait_until(lambda: self.nodes[1].getblockcount() == 2000)
# Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
assert node0.nServices & NODE_COMPACT_FILTERS != 0
assert node1.nServices & NODE_COMPACT_FILTERS == 0
# Check that the localservices is as expected.
assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0
assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0
self.log.info("get cfcheckpt on chain to be re-orged out.")
request = msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(stale_block_hash, 16)
)
node0.send_and_ping(message=request)
response = node0.last_message['cfcheckpt']
assert_equal(response.filter_type, request.filter_type)
assert_equal(response.stop_hash, request.stop_hash)
assert_equal(len(response.headers), 1)
self.log.info("Reorg node 0 to a new chain.")
self.connect_nodes(0, 1)
self.sync_blocks(timeout=600)
main_block_hash = self.nodes[0].getblockhash(1000)
assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"
self.log.info("Check that peers can fetch cfcheckpt on active chain.")
tip_hash = self.nodes[0].getbestblockhash()
request = msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(tip_hash, 16)
)
node0.send_and_ping(request)
response = node0.last_message['cfcheckpt']
assert_equal(response.filter_type, request.filter_type)
assert_equal(response.stop_hash, request.stop_hash)
main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header']
tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header']
assert_equal(
response.headers,
[int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)]
)
self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
request = msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(stale_block_hash, 16)
)
node0.send_and_ping(request)
response = node0.last_message['cfcheckpt']
stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header']
assert_equal(
response.headers,
[int(header, 16) for header in (stale_cfcheckpt,)]
)
self.log.info("Check that peers can fetch cfheaders on active chain.")
request = msg_getcfheaders(
filter_type=FILTER_TYPE_BASIC,
start_height=1,
stop_hash=int(main_block_hash, 16)
)
node0.send_and_ping(request)
response = node0.last_message['cfheaders']
main_cfhashes = response.hashes
assert_equal(len(main_cfhashes), 1000)
assert_equal(
compute_last_header(response.prev_header, response.hashes),
int(main_cfcheckpt, 16)
)
self.log.info("Check that peers can fetch cfheaders on stale chain.")
request = msg_getcfheaders(
filter_type=FILTER_TYPE_BASIC,
start_height=1,
stop_hash=int(stale_block_hash, 16)
)
node0.send_and_ping(request)
response = node0.last_message['cfheaders']
stale_cfhashes = response.hashes
assert_equal(len(stale_cfhashes), 1000)
assert_equal(
compute_last_header(response.prev_header, response.hashes),
int(stale_cfcheckpt, 16)
)
self.log.info("Check that peers can fetch cfilters.")
stop_hash = self.nodes[0].getblockhash(10)
request = msg_getcfilters(
filter_type=FILTER_TYPE_BASIC,
start_height=1,
stop_hash=int(stop_hash, 16)
)
node0.send_message(request)
node0.sync_with_ping()
response = node0.pop_cfilters()
assert_equal(len(response), 10)
self.log.info("Check that cfilter responses are correct.")
for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)):
block_hash = self.nodes[0].getblockhash(height)
assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
assert_equal(cfilter.block_hash, int(block_hash, 16))
computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
assert_equal(computed_cfhash, cfhash)
self.log.info("Check that peers can fetch cfilters for stale blocks.")
request = msg_getcfilters(
filter_type=FILTER_TYPE_BASIC,
start_height=1000,
stop_hash=int(stale_block_hash, 16)
)
node0.send_message(request)
node0.sync_with_ping()
response = node0.pop_cfilters()
assert_equal(len(response), 1)
cfilter = response[0]
assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
assert_equal(computed_cfhash, stale_cfhashes[999])
self.log.info("Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.")
requests = [
msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=int(main_block_hash, 16)
),
msg_getcfheaders(
filter_type=FILTER_TYPE_BASIC,
start_height=1000,
stop_hash=int(main_block_hash, 16)
),
msg_getcfilters(
filter_type=FILTER_TYPE_BASIC,
start_height=1000,
stop_hash=int(main_block_hash, 16)
),
]
for request in requests:
node1 = self.nodes[1].add_p2p_connection(P2PInterface())
node1.send_message(request)
node1.wait_for_disconnect()
self.log.info("Check that invalid requests result in disconnection.")
requests = [
# Requesting too many filters results in disconnection.
msg_getcfilters(
filter_type=FILTER_TYPE_BASIC,
start_height=0,
stop_hash=int(main_block_hash, 16)
),
# Requesting too many filter headers results in disconnection.
msg_getcfheaders(
filter_type=FILTER_TYPE_BASIC,
start_height=0,
stop_hash=int(tip_hash, 16)
),
# Requesting unknown filter type results in disconnection.
msg_getcfcheckpt(
filter_type=255,
stop_hash=int(main_block_hash, 16)
),
# Requesting unknown hash results in disconnection.
msg_getcfcheckpt(
filter_type=FILTER_TYPE_BASIC,
stop_hash=123456789,
),
]
for request in requests:
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
node0.send_message(request)
node0.wait_for_disconnect()
def compute_last_header(prev_header, hashes):
    """Fold a sequence of filter hashes into the final filter header.

    Starting from *prev_header*, each filter hash is serialized and chained
    through hash256(serialized_hash + running_header).
    """
    acc = ser_uint256(prev_header)
    for h in hashes:
        acc = hash256(ser_uint256(h) + acc)
    return uint256_from_str(acc)
if __name__ == '__main__':
CompactFiltersTest().main()
| 37.984436 | 99 | 0.641262 |
from test_framework.messages import (
FILTER_TYPE_BASIC,
NODE_COMPACT_FILTERS,
hash256,
msg_getcfcheckpt,
msg_getcfheaders,
msg_getcfilters,
ser_uint256,
uint256_from_str,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import XBitTestFramework
from test_framework.util import (
assert_equal,
)
class CFiltersClient(P2PInterface):
    """P2P test client that records every cfilter message it receives."""
    def __init__(self):
        super().__init__()
        # Store the cfilters received.
        self.cfilters = []
    def pop_cfilters(self):
        """Return all stored cfilters and clear the internal list."""
        cfilters = self.cfilters
        self.cfilters = []
        return cfilters
    def on_cfilter(self, message):
        """Store cfilters received in a list."""
        self.cfilters.append(message)
class CompactFiltersTest(XBitTestFramework):
    """BIP 157/158 functional test: a -peerblockfilters node must signal
    NODE_COMPACT_FILTERS and serve cfilters, cfheaders and cfcheckpts."""
    def set_test_params(self):
        # Node 0 serves compact filters; node 1 only indexes them.
        self.setup_clean_chain = True
        self.rpc_timeout = 480
        self.num_nodes = 2
        self.extra_args = [
            ["-blockfilterindex", "-peerblockfilters"],
            ["-blockfilterindex"],
        ]
    def run_test(self):
        """Exercise getcfcheckpt/getcfheaders/getcfilters on active and stale chains."""
        # Node 0 supports COMPACT_FILTERS, node 1 does not.
        node0 = self.nodes[0].add_p2p_connection(CFiltersClient())
        node1 = self.nodes[1].add_p2p_connection(CFiltersClient())
        # Nodes 0 & 1 share the same first 999 blocks in the chain.
        self.nodes[0].generate(999)
        self.sync_blocks(timeout=600)
        # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting
        self.disconnect_nodes(0, 1)
        self.nodes[0].generate(1)
        self.wait_until(lambda: self.nodes[0].getblockcount() == 1000)
        stale_block_hash = self.nodes[0].getblockhash(1000)
        self.nodes[1].generate(1001)
        self.wait_until(lambda: self.nodes[1].getblockcount() == 2000)
        # Check that nodes have signalled NODE_COMPACT_FILTERS correctly.
        assert node0.nServices & NODE_COMPACT_FILTERS != 0
        assert node1.nServices & NODE_COMPACT_FILTERS == 0
        # Check that the localservices is as expected.
        assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0
        assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0
        self.log.info("get cfcheckpt on chain to be re-orged out.")
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(stale_block_hash, 16)
        )
        node0.send_and_ping(message=request)
        response = node0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)
        assert_equal(len(response.headers), 1)
        self.log.info("Reorg node 0 to a new chain.")
        self.connect_nodes(0, 1)
        self.sync_blocks(timeout=600)
        main_block_hash = self.nodes[0].getblockhash(1000)
        assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize"
        self.log.info("Check that peers can fetch cfcheckpt on active chain.")
        tip_hash = self.nodes[0].getbestblockhash()
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(tip_hash, 16)
        )
        node0.send_and_ping(request)
        response = node0.last_message['cfcheckpt']
        assert_equal(response.filter_type, request.filter_type)
        assert_equal(response.stop_hash, request.stop_hash)
        main_cfcheckpt = self.nodes[0].getblockfilter(main_block_hash, 'basic')['header']
        tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header']
        assert_equal(
            response.headers,
            [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)]
        )
        self.log.info("Check that peers can fetch cfcheckpt on stale chain.")
        request = msg_getcfcheckpt(
            filter_type=FILTER_TYPE_BASIC,
            stop_hash=int(stale_block_hash, 16)
        )
        node0.send_and_ping(request)
        response = node0.last_message['cfcheckpt']
        stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header']
        assert_equal(
            response.headers,
            [int(header, 16) for header in (stale_cfcheckpt,)]
        )
        self.log.info("Check that peers can fetch cfheaders on active chain.")
        request = msg_getcfheaders(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(main_block_hash, 16)
        )
        node0.send_and_ping(request)
        response = node0.last_message['cfheaders']
        main_cfhashes = response.hashes
        assert_equal(len(main_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(main_cfcheckpt, 16)
        )
        self.log.info("Check that peers can fetch cfheaders on stale chain.")
        request = msg_getcfheaders(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(stale_block_hash, 16)
        )
        node0.send_and_ping(request)
        response = node0.last_message['cfheaders']
        stale_cfhashes = response.hashes
        assert_equal(len(stale_cfhashes), 1000)
        assert_equal(
            compute_last_header(response.prev_header, response.hashes),
            int(stale_cfcheckpt, 16)
        )
        self.log.info("Check that peers can fetch cfilters.")
        stop_hash = self.nodes[0].getblockhash(10)
        request = msg_getcfilters(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1,
            stop_hash=int(stop_hash, 16)
        )
        node0.send_message(request)
        node0.sync_with_ping()
        response = node0.pop_cfilters()
        assert_equal(len(response), 10)
        self.log.info("Check that cfilter responses are correct.")
        for cfilter, cfhash, height in zip(response, main_cfhashes, range(1, 11)):
            block_hash = self.nodes[0].getblockhash(height)
            assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
            assert_equal(cfilter.block_hash, int(block_hash, 16))
            computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
            assert_equal(computed_cfhash, cfhash)
        self.log.info("Check that peers can fetch cfilters for stale blocks.")
        request = msg_getcfilters(
            filter_type=FILTER_TYPE_BASIC,
            start_height=1000,
            stop_hash=int(stale_block_hash, 16)
        )
        node0.send_message(request)
        node0.sync_with_ping()
        response = node0.pop_cfilters()
        assert_equal(len(response), 1)
        cfilter = response[0]
        assert_equal(cfilter.filter_type, FILTER_TYPE_BASIC)
        assert_equal(cfilter.block_hash, int(stale_block_hash, 16))
        computed_cfhash = uint256_from_str(hash256(cfilter.filter_data))
        assert_equal(computed_cfhash, stale_cfhashes[999])
        self.log.info("Requests to node 1 without NODE_COMPACT_FILTERS results in disconnection.")
        requests = [
            msg_getcfcheckpt(
                filter_type=FILTER_TYPE_BASIC,
                stop_hash=int(main_block_hash, 16)
            ),
            msg_getcfheaders(
                filter_type=FILTER_TYPE_BASIC,
                start_height=1000,
                stop_hash=int(main_block_hash, 16)
            ),
            msg_getcfilters(
                filter_type=FILTER_TYPE_BASIC,
                start_height=1000,
                stop_hash=int(main_block_hash, 16)
            ),
        ]
        for request in requests:
            node1 = self.nodes[1].add_p2p_connection(P2PInterface())
            node1.send_message(request)
            node1.wait_for_disconnect()
        self.log.info("Check that invalid requests result in disconnection.")
        requests = [
            # Requesting too many filters results in disconnection.
            msg_getcfilters(
                filter_type=FILTER_TYPE_BASIC,
                start_height=0,
                stop_hash=int(main_block_hash, 16)
            ),
            # Requesting too many filter headers results in disconnection.
            msg_getcfheaders(
                filter_type=FILTER_TYPE_BASIC,
                start_height=0,
                stop_hash=int(tip_hash, 16)
            ),
            # Requesting unknown filter type results in disconnection.
            msg_getcfcheckpt(
                filter_type=255,
                stop_hash=int(main_block_hash, 16)
            ),
            # Requesting unknown hash results in disconnection.
            msg_getcfcheckpt(
                filter_type=FILTER_TYPE_BASIC,
                stop_hash=123456789,
            ),
        ]
        for request in requests:
            node0 = self.nodes[0].add_p2p_connection(P2PInterface())
            node0.send_message(request)
            node0.wait_for_disconnect()
def compute_last_header(prev_header, hashes):
    """Fold a sequence of filter hashes into the final filter header.

    Each step computes hash256(serialized filter_hash || previous header),
    mirroring the BIP 157 filter-header chain construction.
    """
    header = ser_uint256(prev_header)
    for filter_hash in hashes:
        header = hash256(ser_uint256(filter_hash) + header)
    return uint256_from_str(header)
if __name__ == '__main__':
    # Run the compact-filters functional test when invoked as a script.
    CompactFiltersTest().main()
| true | true |
f7fe4e86a1e1f5023e81366bb1e1239755c00b59 | 940 | py | Python | src/probabilistic_models/probabilistic_model.py | pfreifer/zxcvbn | 22674a65bc6ff56281bdd5415ebdb30bb19811ef | [
"MIT"
] | null | null | null | src/probabilistic_models/probabilistic_model.py | pfreifer/zxcvbn | 22674a65bc6ff56281bdd5415ebdb30bb19811ef | [
"MIT"
] | null | null | null | src/probabilistic_models/probabilistic_model.py | pfreifer/zxcvbn | 22674a65bc6ff56281bdd5415ebdb30bb19811ef | [
"MIT"
] | null | null | null | import pickle
from src.probabilistic_models.grammar_utils import score, update
from math import log
from decimal import Decimal
def probabilistic_model_guesses(password):
    """Estimate the guess rank of *password* under the probabilistic model.

    Loads the pickled model artifacts, scores the password, and accumulates
    a score-weighted rank over every reference score that beats it.

    NOTE(review): the three ``.p`` artifacts are read from the current
    working directory — confirm against the training pipeline.
    """
    # Use context managers so the file handles are closed deterministically;
    # the original `pickle.load(open(...))` leaked three open files per call.
    with open("scores.p", "rb") as fh:
        scores = pickle.load(fh)
    with open("cb_dictionary.p", "rb") as fh:
        cb_counter, Q = pickle.load(fh)
    with open("sb_dictionary.p", "rb") as fh:
        sb_counter, B = pickle.load(fh)
    score_password = score(password, cb_counter, sb_counter, Q, B)
    len_score = len(scores)
    rank_password = 0
    for s in scores:
        if s > score_password:
            # Each better-scoring entry contributes inversely to its score.
            rank_password += 1 / (s * len_score)
    return int(rank_password)
def probabilistic_model_result(password):
    """Build a zxcvbn-style result dict for *password*.

    Returns the estimated guess count (as ``Decimal``), its log10, and the
    bookkeeping fields expected by the scoring pipeline.
    """
    # Clamp to at least 1: the model can rank a password at 0 guesses, and
    # math.log(0) raises ValueError.
    guesses = max(probabilistic_model_guesses(password), 1)
    update(password)  # feed the observed password back into the model
    return {
        "guesses_log10": log(guesses, 10),
        "guesses": Decimal(guesses),
        "sequence": [],
        "password": password,
        "pattern": "probabilistic_model",
    }
| 32.413793 | 66 | 0.670213 | import pickle
from src.probabilistic_models.grammar_utils import score, update
from math import log
from decimal import Decimal
def probabilistic_model_guesses(password):
scores = pickle.load(open("scores.p", "rb"))
(cb_counter, Q) = pickle.load(open("cb_dictionary.p", "rb"))
(sb_counter, B) = pickle.load(open("sb_dictionary.p", "rb"))
score_password = score(password, cb_counter, sb_counter, Q, B)
len_score = len(scores)
rank_password = 0
for i in range(len_score) :
if scores[i] > score_password :
rank_password += 1/ (scores[i]*len_score)
return int(rank_password)
def probabilistic_model_result(password):
guesses = probabilistic_model_guesses(password)
update(password)
return {
"guesses_log10" : log(guesses, 10),
"guesses" : Decimal(guesses),
"sequence" : [],
"password" : password,
"pattern" : "probabilistic_model"
}
| true | true |
f7fe4fbbf04ddc7f858ba1868ed2188887e30a06 | 316 | py | Python | __main__.py | HsOjo/QiniuSyncer | 92e21179cd97ef1b5baf294d04dbfb7ce3db7aa9 | [
"MIT"
] | null | null | null | __main__.py | HsOjo/QiniuSyncer | 92e21179cd97ef1b5baf294d04dbfb7ce3db7aa9 | [
"MIT"
] | null | null | null | __main__.py | HsOjo/QiniuSyncer | 92e21179cd97ef1b5baf294d04dbfb7ce3db7aa9 | [
"MIT"
] | null | null | null | import sys
from app import Application
from app.util import pyinstaller
from app.util.log import Log
if getattr(sys, 'frozen', False):
    # Running from a frozen (PyInstaller) bundle: fix stdio encoding quirks.
    pyinstaller.fix_encoding_in_pyinstaller()

Log.init_app()
app = Application(sys.argv)
try:
    app.run()
except Exception:
    # The original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception so intentional exits propagate
    # while unexpected failures go through the app's own handler.
    app.callback_exception()
| 17.555556 | 45 | 0.731013 | import sys
from app import Application
from app.util import pyinstaller
from app.util.log import Log
if getattr(sys, 'frozen', False):
pyinstaller.fix_encoding_in_pyinstaller()
Log.init_app()
app = Application(sys.argv)
try:
app.run()
except:
app.callback_exception()
| true | true |
f7fe4fd5cf6a4fdc152ccc972689d709caa4b252 | 907 | py | Python | backend/migrations/versions/425803e3d9cd_.py | cclauss/lineage | 065cf182095cd7ff3fe5c9f38e1009f1f2a81c19 | [
"MIT"
] | 1 | 2021-09-06T15:26:46.000Z | 2021-09-06T15:26:46.000Z | backend/migrations/versions/425803e3d9cd_.py | cclauss/lineage | 065cf182095cd7ff3fe5c9f38e1009f1f2a81c19 | [
"MIT"
] | null | null | null | backend/migrations/versions/425803e3d9cd_.py | cclauss/lineage | 065cf182095cd7ff3fe5c9f38e1009f1f2a81c19 | [
"MIT"
] | 1 | 2020-11-12T05:23:09.000Z | 2020-11-12T05:23:09.000Z | """empty message
Revision ID: 425803e3d9cd
Revises: 0f846b00d0db
Create Date: 2020-10-21 17:12:57.595639
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '425803e3d9cd'
down_revision = '0f846b00d0db'
branch_labels = None
depends_on = None
def upgrade():
    """Widen ``timeline_node.attachment`` from VARCHAR(32) to VARCHAR(512)."""
    op.alter_column(
        "timeline_node",
        "attachment",
        existing_type=sa.VARCHAR(length=32),
        type_=sa.String(length=512),
        existing_nullable=True,
    )
def downgrade():
    """Revert ``timeline_node.attachment`` back to VARCHAR(32)."""
    op.alter_column(
        "timeline_node",
        "attachment",
        existing_type=sa.String(length=512),
        type_=sa.VARCHAR(length=32),
        existing_nullable=True,
    )
| 25.914286 | 65 | 0.652701 | from alembic import op
import sqlalchemy as sa
revision = '425803e3d9cd'
down_revision = '0f846b00d0db'
branch_labels = None
depends_on = None
def upgrade():
=True)
| true | true |
f7fe5221a2acc8a6d7580062576b6257b385ee02 | 1,333 | py | Python | polling_stations/apps/data_importers/management/commands/import_newcastle_under_lyme.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_importers/management/commands/import_newcastle_under_lyme.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
] | 364 | 2020-10-19T07:16:41.000Z | 2022-03-31T06:10:55.000Z | polling_stations/apps/data_importers/management/commands/import_newcastle_under_lyme.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
] | null | null | null | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Import 2018 polling data for Newcastle-under-Lyme (E07000195)."""

    council_id = "E07000195"
    addresses_name = "local.2018-05-03/Version 1/Democracy_Club__03May2018.tsv"
    stations_name = "local.2018-05-03/Version 1/Democracy_Club__03May2018.tsv"
    elections = ["local.2018-05-03"]
    csv_delimiter = "\t"

    # Known-bad postcodes in the source data, keyed by normalised UPRN.
    _POSTCODE_FIXES = {
        "200004611540": "TF9 4JG",
        "200004601964": "CW3 9LE",
    }

    def station_record_to_dict(self, record):
        # The grid reference supplied for Higherland Methodist Church is
        # incorrect; zero it out so the importer falls back to the UPRN.
        if record.polling_place_id == "488":
            record = record._replace(polling_place_easting="0")
            record = record._replace(polling_place_northing="0")
        return super().station_record_to_dict(record)

    def address_record_to_dict(self, record):
        # This postcode belongs to a different council area; drop the record.
        if record.addressline6 == "CW9 9PW":
            return None
        uprn = record.property_urn.strip().lstrip("0")
        rec = super().address_record_to_dict(record)
        corrected = self._POSTCODE_FIXES.get(uprn)
        if corrected is not None:
            rec["postcode"] = corrected
        return rec
| 34.179487 | 81 | 0.663166 | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000195"
addresses_name = "local.2018-05-03/Version 1/Democracy_Club__03May2018.tsv"
stations_name = "local.2018-05-03/Version 1/Democracy_Club__03May2018.tsv"
elections = ["local.2018-05-03"]
csv_delimiter = "\t"
def station_record_to_dict(self, record):
if record.polling_place_id == "488":
record = record._replace(polling_place_easting="0")
record = record._replace(polling_place_northing="0")
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if record.addressline6 == "CW9 9PW":
return None
if uprn == "200004611540":
rec = super().address_record_to_dict(record)
rec["postcode"] = "TF9 4JG"
return rec
if uprn == "200004601964":
rec = super().address_record_to_dict(record)
rec["postcode"] = "CW3 9LE"
return rec
return super().address_record_to_dict(record)
| true | true |
f7fe5286d21de170cf376ee8f55f7bc73a6479e5 | 2,045 | py | Python | esdlvalidator/validation/functions/utils.py | ESDLMapEditorESSIM/ESDLValidator | 4573deec9b8206179ff6e61f37b4ba1847b3dbfb | [
"MIT"
] | null | null | null | esdlvalidator/validation/functions/utils.py | ESDLMapEditorESSIM/ESDLValidator | 4573deec9b8206179ff6e61f37b4ba1847b3dbfb | [
"MIT"
] | null | null | null | esdlvalidator/validation/functions/utils.py | ESDLMapEditorESSIM/ESDLValidator | 4573deec9b8206179ff6e61f37b4ba1847b3dbfb | [
"MIT"
] | 1 | 2021-02-25T09:25:35.000Z | 2021-02-25T09:25:35.000Z | import builtins
from pyecore.ecore import EValue
def has_attribute(obj, name: str) -> bool:
    """Return True when *name* resolves on *obj*, even if its value is None.

    A unique sentinel object is used as the default so a genuinely stored
    value of ``None`` is still reported as present.
    """
    # The original compared against the literal "nothing_found" with ``is``,
    # which depends on CPython string interning and emits a SyntaxWarning;
    # an object() sentinel makes the identity check reliable.
    sentinel = object()
    return get_attribute(obj, name, sentinel) is not sentinel
def get_attribute(obj, name: str, default=None):
    """Resolve a (possibly dotted) attribute/key path on *obj*, ignoring case.

    Args:
        obj: List, class instance or dictionary to read from. For a list the
            lookup is applied element-wise and a list is returned.
        name (str): The property to get, e.g. ``"asset.id"``; casing ignored.
        default: Value returned when the path does not resolve to anything.
            Defaults to None.

    Returns:
        The resolved value (pyecore ``EValue`` wrappers are unwrapped to
        their ``_value``), or *default* when nothing matched.
    """
    head, *rest = name.split(".", 1)
    value = default
    if isinstance(obj, dict):
        # Case-insensitive key lookup.
        lowered = {k.lower(): v for k, v in obj.items()}
        value = lowered.get(head.lower(), default)
    elif isinstance(obj, list):
        # Apply the lookup to every element.
        value = [get_attribute(item, head, default) for item in obj]
    else:
        for attr in dir(obj):
            if attr.lower() == head.lower():
                value = builtins.getattr(obj, attr, default)
                break

    if isinstance(value, list):
        # Unwrap pyecore EValue wrappers inside list results.
        value = [v._value if isinstance(v, EValue) else v for v in value]
    elif value is not None and value != default and rest:
        # Descend into the remainder of the dotted path. NOTE: a resolved
        # value that happens to equal *default* stops the descent — this
        # quirk is preserved from the original implementation.
        value = get_attribute(value, rest[0])

    if isinstance(value, EValue):
        value = value._value
    return value
def create_offending_asset_msg(value):
    """Return ``{"offending_asset": value.id}``, or an empty dict when *value* has no id."""
    if has_attribute(value, "id"):
        return {"offending_asset": value.id}
    return {}
def is_iterable(obj) -> bool:
    """Return True when ``iter(obj)`` succeeds, i.e. *obj* is iterable."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
| 29.637681 | 107 | 0.599511 | import builtins
from pyecore.ecore import EValue
def has_attribute(obj, name: str) -> bool:
result = get_attribute(obj, name, "nothing_found")
return False if result is "nothing_found" else True
def get_attribute(obj, name: str, default=None) -> bool:
parts = name.split(".", 1)
value = default
if not isinstance(obj, dict):
if isinstance(obj, list):
value = []
for sub_obj in obj:
value.append(get_attribute(sub_obj, parts[0], default))
else:
for a in dir(obj):
if a.lower() == parts[0].lower():
value = builtins.getattr(obj, a, default)
break
else:
attributes = {k.lower(): v for k, v in obj.items()}
key = parts[0].lower()
value = attributes[key] if key in attributes.keys() else default
if isinstance(value, list):
vals = []
for v in value:
if isinstance(v, EValue):
vals.append(v._value)
else:
vals.append(v)
value = vals
elif value is not None and value != default and len(parts) > 1:
value = get_attribute(value, parts[1])
if isinstance(value, EValue):
value = value._value
return value
def create_offending_asset_msg(value):
return {"offending_asset": value.id} if has_attribute(value, "id") else {}
def is_iterable(obj) -> bool:
try:
iter(obj)
return True
except TypeError:
return False
| true | true |
f7fe52b6188fec90349cb6f1213359500cc6f26f | 16,149 | py | Python | tests/test_text.py | fhoehle/fletcher | 09f1c9bc03c1603fa0facefb7a485a84f136a578 | [
"MIT"
] | null | null | null | tests/test_text.py | fhoehle/fletcher | 09f1c9bc03c1603fa0facefb7a485a84f136a578 | [
"MIT"
] | 2 | 2020-07-17T13:41:00.000Z | 2020-10-13T19:20:49.000Z | tests/test_text.py | fhoehle/fletcher | 09f1c9bc03c1603fa0facefb7a485a84f136a578 | [
"MIT"
] | null | null | null | import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
# Feature-detect pandas' string-accessor mixin: when present (pandas 1.2+),
# run each test through both the native ``.str`` and fletcher's ``.fr_str``.
try:
    # Only available in pandas 1.2+
    # When this class is defined, we can also use `.str` on fletcher columns.
    from pandas.core.strings.object_array import ObjectStringArrayMixin  # noqa F401

    _str_accessors = ["str", "fr_str"]
except ImportError:
    _str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
    """Yield each available string-accessor attribute name ("str" / "fr_str")."""
    return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
    """Hypothesis strategy yielding ``(strings, pattern, offset)`` triples.

    Draws a character set, a search pattern over it, and a list of optional
    strings into which the pattern is spliced at random positions, plus a
    slice offset into that list (used to exercise offset handling).
    """
    ab_charset_st = st.sampled_from("ab")
    ascii_charset_st = st.sampled_from(string.ascii_letters)
    charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
    charset = draw(charset_st)
    fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
    generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
    pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
    pattern = draw(pattern_st)
    # When the pattern is empty, require non-empty strings (see pandas
    # inconsistency documented below).
    min_str_size = 0 if len(pattern) > 0 else 1
    raw_str_st = st.one_of(
        st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
    )
    raw_seq_st = st.lists(raw_str_st, max_size=max_len)
    raw_seq = draw(raw_seq_st)
    for s in raw_seq:
        if s is None:
            continue
        # There seems to be a bug in pandas for this edge case:
        #   >>> pd.Series(['']).str.replace('', 'abc', n=1)
        #   0
        #   dtype: object
        # but
        #   >>> pd.Series(['']).str.replace('', 'abc')
        #   0    abc
        #   dtype: object
        # The second result looks correct and is what the fletcher
        # implementation returns.  (This note used to be a no-op
        # triple-quoted string evaluated on every loop iteration.)
        max_ind = len(s) - len(pattern)
        if max_ind < 0:
            continue
        repl_ind_st = st.integers(min_value=0, max_value=max_ind)
        repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
        repl_ind_list = draw(repl_ind_list_st)
        for j in repl_ind_list:
            s[j : j + len(pattern)] = pattern
    seq = ["".join(s) if s is not None else None for s in raw_seq]
    offset = draw(st.integers(min_value=0, max_value=len(seq)))
    return (seq, pattern, offset)
# Shared (data, pattern) cases for the literal (non-regex) string tests,
# covering empty patterns, None entries and case-mismatching patterns.
string_patterns = pytest.mark.parametrize(
    "data, pat",
    [
        ([], ""),
        (["a", "b"], ""),
        (["aa", "ab", "ba"], "a"),
        (["aa", "ab", "ba", "bb", None], "a"),
        (["aa", "ab", "ba", "bb", None], "A"),
        (["aa", "ab", "bA", "bB", None], "a"),
        (["aa", "AB", "ba", "BB", None], "A"),
    ],
)
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string()):
    """Wrap *data* in a pandas Series backed by the requested fletcher array."""
    array_cls = (
        fr.FletcherChunkedArray
        if fletcher_variant == "chunked"
        else fr.FletcherContinuousArray
    )
    return pd.Series(array_cls(pa.array(data, type=dtype)))
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
    """``.cat`` on fletcher-backed series must match pandas' object-dtype result."""
    if any("\x00" in x for x in data if x):
        # pytest.skip("pandas cannot handle \\x00 characters in tests")
        # Skip is not working properly with hypothesis
        return
    ser_pd = pd.Series(data, dtype=str)
    ser_fr = _fr_series_from_data(data, fletcher_variant)
    ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)
    result_pd = ser_pd.str.cat(ser_pd)
    result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)
    result_fr = result_fr.astype(object)
    # Pandas returns np.nan for NA values in cat, keep this in line
    result_fr[result_fr.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
def _check_series_equal(result_fr, result_pd):
    """Assert a fletcher result equals the pandas result after dtype alignment."""
    result_fr = result_fr.astype(result_pd.dtype)
    tm.assert_series_equal(result_fr, result_pd)
def _check_str_to_t(
    t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs
):
    """Check a .str. function that returns a series with type t.

    Runs ``func`` through both pandas' ``.str`` accessor and the fletcher
    accessor named by *str_accessor* and asserts equal results; *test_offset*
    drops the first entries to exercise offset handling.
    """
    tail_len = len(data) - test_offset
    ser_pd = pd.Series(data, dtype=str).tail(tail_len)
    result_pd = getattr(ser_pd.str, func)(*args, **kwargs)
    ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
    result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
    _check_series_equal(result_fr, result_pd)
def _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):
    """Check a ``.str.`` function that returns a string series."""
    _check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):
    """Check a ``.str.`` function that returns a boolean series."""
    _check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)
@string_patterns
def test_text_endswith(data, pat, str_accessor, fletcher_variant):
    """``endswith`` must agree with pandas for every pattern/data pair."""
    _check_str_to_bool("endswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_text_startswith(data, pat, str_accessor, fletcher_variant):
    """``startswith`` must agree with pandas for every pattern/data pair."""
    _check_str_to_bool("startswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_contains_no_regex(data, pat, str_accessor, fletcher_variant):
    """Literal (non-regex) ``contains`` must agree with pandas."""
    _check_str_to_bool(
        "contains", data, str_accessor, fletcher_variant, pat=pat, regex=False
    )
@pytest.mark.parametrize(
    "data, pat, expected",
    [
        ([], "", []),
        (["a", "b"], "", [True, True]),
        (["aa", "Ab", "ba", "bb", None], "a", [True, False, True, False, None]),
    ],
)
def test_contains_no_regex_ascii(data, pat, expected, str_accessor, fletcher_variant):
    """Literal ``contains`` on ASCII data, including null-preserving results."""
    if str_accessor == "str":
        pytest.skip(
            "return types not stable yet, might sometimes return null instead of bool"
        )
        return
    fr_series = _fr_series_from_data(data, fletcher_variant)
    fr_expected = _fr_series_from_data(expected, fletcher_variant, pa.bool_())

    # Run over slices to check offset handling code
    for i in range(len(data)):
        ser = fr_series.tail(len(data) - i)
        expected = fr_expected.tail(len(data) - i)
        result = getattr(ser, str_accessor).contains(pat, regex=False)
        tm.assert_series_equal(result, expected)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
def test_contains_no_regex_case_sensitive(data_tuple, str_accessor, fletcher_variant):
    """Case-sensitive literal ``contains`` over generated pattern splices."""
    data, pat, test_offset = data_tuple
    _check_str_to_bool(
        "contains",
        data,
        str_accessor,
        fletcher_variant,
        test_offset=test_offset,
        pat=pat,
        case=True,
        regex=False,
    )
@string_patterns
def test_contains_no_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
    """Case-insensitive literal ``contains`` must agree with pandas."""
    _check_str_to_bool(
        "contains",
        data,
        str_accessor,
        fletcher_variant,
        pat=pat,
        regex=False,
        case=False,
    )
# Shared (data, pattern) cases for the regex-based ``contains`` tests,
# covering anchors (^, $) and case differences.
regex_patterns = pytest.mark.parametrize(
    "data, pat",
    [
        ([], ""),
        (["a", "b"], ""),
        (["aa", "ab", "ba"], "a"),
        (["aa", "ab", "ba", None], "a"),
        (["aa", "ab", "ba", None], "a$"),
        (["aa", "ab", "ba", None], "^a"),
        (["Aa", "ab", "ba", None], "A"),
        (["aa", "AB", "ba", None], "A$"),
        (["aa", "AB", "ba", None], "^A"),
    ],
)
@regex_patterns
def test_contains_regex(data, pat, str_accessor, fletcher_variant):
    """Regex ``contains`` must agree with pandas, including anchored patterns."""
    _check_str_to_bool(
        "contains", data, str_accessor, fletcher_variant, pat=pat, regex=True
    )
@regex_patterns
def test_contains_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
    """Case-insensitive regex ``contains`` must agree with pandas."""
    _check_str_to_bool(
        "contains",
        data,
        str_accessor,
        fletcher_variant,
        pat=pat,
        regex=True,
        case=False,
    )
@settings(deadline=None)
@given(
    data_tuple=string_patterns_st(),
    n=st.integers(min_value=0, max_value=10),
    repl=st.sampled_from(["len4", "", "z"]),
)
@example(
    data_tuple=(["aababaa"], "aabaa", 0),
    repl="len4",
    n=1,
    fletcher_variant="continuous",
)
@example(data_tuple=(["aaa"], "a", 0), repl="len4", n=1, fletcher_variant="continuous")
def test_replace_no_regex_case_sensitive(
    data_tuple, repl, n, str_accessor, fletcher_variant
):
    """Case-sensitive literal ``replace`` (bounded by *n*) must agree with pandas."""
    data, pat, test_offset = data_tuple
    _check_str_to_str(
        "replace",
        data,
        str_accessor,
        fletcher_variant,
        test_offset=test_offset,
        pat=pat,
        repl=repl,
        n=n,
        case=True,
        regex=False,
    )
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
@example(data_tuple=(["a"], "", 0), fletcher_variant="chunked")
def test_count_no_regex(data_tuple, str_accessor, fletcher_variant):
    """Literal ``count`` must agree with pandas' (regex-based) ``str.count``."""
    data, pat, test_offset = data_tuple

    tail_len = len(data) - test_offset
    ser_pd = pd.Series(data, dtype=str).tail(tail_len)
    result_pd = getattr(ser_pd.str, "count")(pat=pat)

    ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
    kwargs = {}
    if str_accessor.startswith("fr_"):
        # Only the fletcher accessor supports literal (non-regex) counting.
        kwargs["regex"] = False
    result_fr = getattr(ser_fr, str_accessor).count(pat=pat, **kwargs)

    _check_series_equal(result_fr, result_pd)
def _optional_len(x: Optional[str]) -> int:
if x is not None:
return len(x)
else:
return 0
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_zfill(data, str_accessor, fletcher_variant):
    """``zfill`` past the longest string must agree with pandas."""
    if any("\x00" in x for x in data if x):
        # pytest.skip("pandas cannot handle \\x00 characters in tests")
        # Skip is not working properly with hypothesis
        return
    ser_pd = pd.Series(data, dtype=str)
    max_str_len = ser_pd.map(_optional_len).max()
    if pd.isna(max_str_len):
        max_str_len = 0
    arrow_data = pa.array(data, type=pa.string())
    if fletcher_variant == "chunked":
        fr_array = fr.FletcherChunkedArray(arrow_data)
    else:
        fr_array = fr.FletcherContinuousArray(arrow_data)
    ser_fr = pd.Series(fr_array)

    result_pd = ser_pd.str.zfill(max_str_len + 1)
    result_fr = getattr(ser_fr, str_accessor).zfill(max_str_len + 1)
    result_fr = result_fr.astype(object)
    # Pandas returns np.nan for NA values in cat, keep this in line
    result_fr[result_fr.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None, max_examples=3)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
example_list=[
[
" 000000000000000000000000000000000000000000İࠀࠀࠀࠀ𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐤱000000000000𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀"
],
["\x80 "],
[],
],
example_kword="data",
)
def test_text_strip_offset(str_accessor, fletcher_variant, fletcher_slice_offset, data):
_do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
    example_list=[
        [],
        [""],
        [None],
        [" "],
        ["\u2000"],
        [" a"],
        ["a "],
        [" a "],
        # https://github.com/xhochy/fletcher/issues/174
        ["\xa0"],
        ["\u2000a\u2000"],
        ["\u2000\u200C\u2000"],
        ["\n\u200C\r"],
        ["\u2000\x80\u2000"],
        ["\t\x80\x0b"],
        ["\u2000\u10FFFF\u2000"],
        [" \u10FFFF "],
    ]
    + [
        [c]
        for c in " \t\r\n\x1f\x1e\x1d\x1c\x0c\x0b"
        "\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2000\u2009\u200A\u200B\u2028\u2029\u202F\u205F"
    ]
    + [[chr(c)] for c in range(0x32)]
    + [[chr(c)] for c in range(0x80, 0x85)]
    + [[chr(c)] for c in range(0x200C, 0x2030)]
    + [[chr(c)] for c in range(0x2060, 0x2070)]
    + [[chr(c)] for c in range(0x10FFFE, 0x110000)],
    example_kword="data",
)
def test_text_strip(str_accessor, fletcher_variant, data):
    """``strip`` over a wide range of Unicode whitespace/control characters."""
    _do_test_text_strip(str_accessor, fletcher_variant, 1, data)
def _do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data):
    """Compare ``strip`` between pandas and fletcher.

    *fletcher_slice_offset* prepends nulls and slices them away again so the
    fletcher array under test carries a non-zero offset.
    """
    if any("\x00" in x for x in data if x):
        # pytest.skip("pandas cannot handle \\x00 characters in tests")
        # Skip is not working properly with hypothesis
        return
    ser_pd = pd.Series(data, dtype=str)
    arrow_data = pa.array(
        [None for _ in range(fletcher_slice_offset)] + data, type=pa.string()
    )
    if fletcher_variant == "chunked":
        fr_array = fr.FletcherChunkedArray(arrow_data)
    else:
        fr_array = fr.FletcherContinuousArray(arrow_data)
    ser_fr = pd.Series(fr_array[fletcher_slice_offset:])

    result_pd = ser_pd.str.strip()
    result_fr = getattr(ser_fr, str_accessor).strip()
    result_fr = result_fr.astype(object)
    # Pandas returns np.nan for NA values in cat, keep this in line
    result_fr[result_fr.isna()] = np.nan
    result_pd[result_pd.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
def test_fr_str_accessor(fletcher_array):
    """``.fr_str`` works on plain object series and on fletcher-backed series."""
    data = ["a", "b"]
    ser_pd = pd.Series(data)

    # object series is returned
    s = ser_pd.fr_str.encode("utf8")
    assert s.dtype == np.dtype("O")

    # test fletcher functionality and fallback to pandas
    arrow_data = pa.array(data, type=pa.string())
    fr_array = fletcher_array(arrow_data)
    ser_fr = pd.Series(fr_array)
    # pandas strings only method
    s = ser_fr.fr_str.encode("utf8")
    assert isinstance(s.values, fr.FletcherBaseArray)
def test_fr_str_accessor_fail(fletcher_variant):
    """``.fr_str`` on a non-string series must raise."""
    data = [1, 2]
    ser_pd = pd.Series(data)

    with pytest.raises(Exception):
        ser_pd.fr_str.startswith("a")
@pytest.mark.parametrize("regex", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
    "data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_text_extractall(str_accessor, fletcher_variant, data, regex):
    """``extractall`` returns fletcher-dtype columns matching pandas' frame."""
    if str_accessor == "str":
        pytest.skip("extractall is not yet dispatched to the ExtensionArray")
        return
    ser_fr = _fr_series_from_data(data, fletcher_variant)
    result_fr = getattr(ser_fr, str_accessor).extractall(regex)
    assert isinstance(result_fr[0].dtype, fr.FletcherBaseDtype)

    ser_pd = pd.Series(data)
    result_pd = ser_pd.str.extractall(regex)
    tm.assert_frame_equal(result_pd, result_fr.astype(object))
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_text_split(str_accessor, fletcher_variant, data, expand):
    """``split`` matches pandas both as a list series and expanded to a frame."""
    ser_fr = _fr_series_from_data(data, fletcher_variant)
    result_fr = getattr(ser_fr, str_accessor).split("+", expand=expand)

    ser_pd = pd.Series(data)
    result_pd = ser_pd.str.split("+", expand=expand)

    if expand:
        tm.assert_frame_equal(result_pd, result_fr.astype(object))
    else:
        tm.assert_series_equal(result_pd, result_fr.astype(object))
@settings(deadline=None)
@given(
    data=st.lists(st.one_of(st.text(), st.none())),
    slice_=st.tuples(st.integers(-20, 20), st.integers(-20, 20), st.integers(-20, 20)),
)
def test_slice(data, slice_, str_accessor, fletcher_variant):
    """``slice`` with arbitrary (start, stop, step) must agree with pandas."""
    if slice_[2] == 0:
        # NOTE(review): ``pytest.raises(ValueError)`` without ``with`` is a
        # no-op — the step-zero ValueError path is never actually asserted;
        # confirm the intended behavior.
        pytest.raises(ValueError)
        return
    if data == [None] or data == [""]:
        return

    ser_fr = _fr_series_from_data(data, fletcher_variant)
    result_fr = getattr(ser_fr, str_accessor).slice(*slice_)
    result_fr = result_fr.astype(object)
    # Pandas returns np.nan for NA values in cat, keep this in line
    result_fr[result_fr.isna()] = np.nan

    ser_pd = pd.Series(data, dtype=object)
    result_pd = ser_pd.str.slice(*slice_)

    tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None)
@given(char=st.characters(blacklist_categories=("Cs",)))
def test_utf8_size(char):
    """The UTF-8 sequence length derived from the lead byte matches reality."""
    char_bytes = char.encode("utf-8")
    expected = len(char_bytes)
    computed = fr.algorithms.string.get_utf8_size(char_bytes[0])

    assert computed == expected
| 31.055769 | 121 | 0.65391 | import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
try:
from pandas.core.strings.object_array import ObjectStringArrayMixin
_str_accessors = ["str", "fr_str"]
except ImportError:
_str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
ab_charset_st = st.sampled_from("ab")
ascii_charset_st = st.sampled_from(string.ascii_letters)
charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
charset = draw(charset_st)
fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
pattern = draw(pattern_st)
min_str_size = 0 if len(pattern) > 0 else 1
raw_str_st = st.one_of(
st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
)
raw_seq_st = st.lists(raw_str_st, max_size=max_len)
raw_seq = draw(raw_seq_st)
for s in raw_seq:
if s is None:
continue
max_ind = len(s) - len(pattern)
if max_ind < 0:
continue
repl_ind_st = st.integers(min_value=0, max_value=max_ind)
repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
repl_ind_list = draw(repl_ind_list_st)
for j in repl_ind_list:
s[j : j + len(pattern)] = pattern
seq = ["".join(s) if s is not None else None for s in raw_seq]
offset = draw(st.integers(min_value=0, max_value=len(seq)))
return (seq, pattern, offset)
string_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", "bb", None], "a"),
(["aa", "ab", "ba", "bb", None], "A"),
(["aa", "ab", "bA", "bB", None], "a"),
(["aa", "AB", "ba", "BB", None], "A"),
],
)
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string()):
arrow_data = pa.array(data, type=dtype)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
return pd.Series(fr_array)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
if any("\x00" in x for x in data if x):
return
ser_pd = pd.Series(data, dtype=str)
ser_fr = _fr_series_from_data(data, fletcher_variant)
ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)
result_pd = ser_pd.str.cat(ser_pd)
result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)
result_fr = result_fr.astype(object)
result_fr[result_fr.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
def _check_series_equal(result_fr, result_pd):
result_fr = result_fr.astype(result_pd.dtype)
tm.assert_series_equal(result_fr, result_pd)
def _check_str_to_t(
t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs
):
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, func)(*args, **kwargs)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
_check_series_equal(result_fr, result_pd)
def _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)
@string_patterns
def test_text_endswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("endswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_text_startswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("startswith", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_contains_no_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=False
)
@pytest.mark.parametrize(
"data, pat, expected",
[
([], "", []),
(["a", "b"], "", [True, True]),
(["aa", "Ab", "ba", "bb", None], "a", [True, False, True, False, None]),
],
)
def test_contains_no_regex_ascii(data, pat, expected, str_accessor, fletcher_variant):
if str_accessor == "str":
pytest.skip(
"return types not stable yet, might sometimes return null instead of bool"
)
return
fr_series = _fr_series_from_data(data, fletcher_variant)
fr_expected = _fr_series_from_data(expected, fletcher_variant, pa.bool_())
for i in range(len(data)):
ser = fr_series.tail(len(data) - i)
expected = fr_expected.tail(len(data) - i)
result = getattr(ser, str_accessor).contains(pat, regex=False)
tm.assert_series_equal(result, expected)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
def test_contains_no_regex_case_sensitive(data_tuple, str_accessor, fletcher_variant):
data, pat, test_offset = data_tuple
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
case=True,
regex=False,
)
@string_patterns
def test_contains_no_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=False,
case=False,
)
regex_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", None], "a"),
(["aa", "ab", "ba", None], "a$"),
(["aa", "ab", "ba", None], "^a"),
(["Aa", "ab", "ba", None], "A"),
(["aa", "AB", "ba", None], "A$"),
(["aa", "AB", "ba", None], "^A"),
],
)
@regex_patterns
def test_contains_regex(data, pat, str_accessor, fletcher_variant):
    """Regex containment (case-sensitive) must agree with pandas."""
    _check_str_to_bool(
        "contains", data, str_accessor, fletcher_variant, pat=pat, regex=True
    )
@regex_patterns
def test_contains_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
    """Regex containment with ``case=False`` must agree with pandas."""
    _check_str_to_bool(
        "contains",
        data,
        str_accessor,
        fletcher_variant,
        pat=pat,
        regex=True,
        case=False,
    )
@settings(deadline=None)
@given(
    data_tuple=string_patterns_st(),
    n=st.integers(min_value=0, max_value=10),
    repl=st.sampled_from(["len4", "", "z"]),
)
@example(
    data_tuple=(["aababaa"], "aabaa", 0),
    repl="len4",
    n=1,
    fletcher_variant="continuous",
)
@example(data_tuple=(["aaa"], "a", 0), repl="len4", n=1, fletcher_variant="continuous")
def test_replace_no_regex_case_sensitive(
    data_tuple, repl, n, str_accessor, fletcher_variant
):
    """Literal ``replace`` (bounded by ``n``) must agree with pandas.

    The explicit ``@example`` cases cover overlapping matches, which are the
    classic failure mode of hand-rolled replace implementations.
    """
    data, pat, test_offset = data_tuple
    _check_str_to_str(
        "replace",
        data,
        str_accessor,
        fletcher_variant,
        test_offset=test_offset,
        pat=pat,
        repl=repl,
        n=n,
        case=True,
        regex=False,
    )
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
@example(data_tuple=(["a"], "", 0), fletcher_variant="chunked")
def test_count_no_regex(data_tuple, str_accessor, fletcher_variant):
    """Literal ``count`` must agree with pandas, including on tail slices."""
    data, pat, test_offset = data_tuple
    tail_len = len(data) - test_offset
    ser_pd = pd.Series(data, dtype=str).tail(tail_len)
    result_pd = getattr(ser_pd.str, "count")(pat=pat)
    ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
    kwargs = {}
    # Only the fletcher accessors accept ``regex``; plain pandas counts
    # regex matches by default.
    if str_accessor.startswith("fr_"):
        kwargs["regex"] = False
    result_fr = getattr(ser_fr, str_accessor).count(pat=pat, **kwargs)
    _check_series_equal(result_fr, result_pd)
def _optional_len(x: Optional[str]) -> int:
if x is not None:
return len(x)
else:
return 0
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_zfill(data, str_accessor, fletcher_variant):
    """``zfill`` to (longest length + 1) must agree with pandas."""
    if any("\x00" in x for x in data if x):
        # NUL bytes are not supported by the backend; skip such inputs.
        return
    ser_pd = pd.Series(data, dtype=str)
    max_str_len = ser_pd.map(_optional_len).max()
    if pd.isna(max_str_len):
        # All-missing (or empty) input: fall back to width 1 below.
        max_str_len = 0
    arrow_data = pa.array(data, type=pa.string())
    if fletcher_variant == "chunked":
        fr_array = fr.FletcherChunkedArray(arrow_data)
    else:
        fr_array = fr.FletcherContinuousArray(arrow_data)
    ser_fr = pd.Series(fr_array)
    result_pd = ser_pd.str.zfill(max_str_len + 1)
    result_fr = getattr(ser_fr, str_accessor).zfill(max_str_len + 1)
    result_fr = result_fr.astype(object)
    # Normalize missing values to np.nan so the comparison matches pandas.
    result_fr[result_fr.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None, max_examples=3)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
    example_list=[
        [
            "  000000000000000000000000000000000000000000İࠀࠀࠀࠀ𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐤱000000000000𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀𐀀"
        ],
        ["\x80 "],
        [],
    ],
    example_kword="data",
)
def test_text_strip_offset(str_accessor, fletcher_variant, fletcher_slice_offset, data):
    """``strip`` must also be correct when the array starts at a non-zero offset."""
    _do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@examples(
    example_list=[
        [],
        [""],
        [None],
        [" "],
        ["\u2000"],
        [" a"],
        ["a "],
        [" a "],
        ["\xa0"],
        ["\u2000a\u2000"],
        ["\u2000\u200C\u2000"],
        ["\n\u200C\r"],
        ["\u2000\x80\u2000"],
        ["\t\x80\x0b"],
        ["\u2000\u10FFFF\u2000"],
        [" \u10FFFF "],
    ]
    + [
        [c]
        for c in " \t\r\n\x1f\x1e\x1d\x1c\x0c\x0b"
        "\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2000\u2009\u200A\u200B\u2028\u2029\u202F\u205F"
    ]
    + [[chr(c)] for c in range(0x32)]
    + [[chr(c)] for c in range(0x80, 0x85)]
    + [[chr(c)] for c in range(0x200C, 0x2030)]
    + [[chr(c)] for c in range(0x2060, 0x2070)]
    + [[chr(c)] for c in range(0x10FFFE, 0x110000)],
    example_kword="data",
)
def test_text_strip(str_accessor, fletcher_variant, data):
    """``strip`` must match pandas across many whitespace-like code points.

    The explicit example lists sweep the ASCII control range, Latin-1
    controls, the Unicode space/format ranges and the last code points of
    the Unicode range.
    """
    _do_test_text_strip(str_accessor, fletcher_variant, 1, data)
def _do_test_text_strip(str_accessor, fletcher_variant, fletcher_slice_offset, data):
    """Shared strip check: prepend nulls to force an offset, then compare to pandas."""
    if any("\x00" in x for x in data if x):
        # NUL bytes are not supported by the backend; skip such inputs.
        return
    ser_pd = pd.Series(data, dtype=str)
    # Prepend ``fletcher_slice_offset`` nulls so that slicing them away
    # below yields an array with a non-zero internal offset.
    arrow_data = pa.array(
        [None for _ in range(fletcher_slice_offset)] + data, type=pa.string()
    )
    if fletcher_variant == "chunked":
        fr_array = fr.FletcherChunkedArray(arrow_data)
    else:
        fr_array = fr.FletcherContinuousArray(arrow_data)
    ser_fr = pd.Series(fr_array[fletcher_slice_offset:])
    result_pd = ser_pd.str.strip()
    result_fr = getattr(ser_fr, str_accessor).strip()
    result_fr = result_fr.astype(object)
    # Normalize missing values on both sides before comparing.
    result_fr[result_fr.isna()] = np.nan
    result_pd[result_pd.isna()] = np.nan
    tm.assert_series_equal(result_fr, result_pd)
def test_fr_str_accessor(fletcher_array):
    """The ``fr_str`` accessor works on object series and fletcher series.

    On a plain object-backed series it falls back to object dtype; on a
    fletcher-backed series it returns a fletcher array.
    """
    data = ["a", "b"]
    ser_pd = pd.Series(data)
    s = ser_pd.fr_str.encode("utf8")
    assert s.dtype == np.dtype("O")
    arrow_data = pa.array(data, type=pa.string())
    fr_array = fletcher_array(arrow_data)
    ser_fr = pd.Series(fr_array)
    s = ser_fr.fr_str.encode("utf8")
    assert isinstance(s.values, fr.FletcherBaseArray)
def test_fr_str_accessor_fail(fletcher_variant):
    """``fr_str`` must raise on non-string data."""
    data = [1, 2]
    ser_pd = pd.Series(data)
    with pytest.raises(Exception):
        ser_pd.fr_str.startswith("a")
@pytest.mark.parametrize("regex", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
    "data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_text_extractall(str_accessor, fletcher_variant, data, regex):
    """``extractall`` must match pandas and keep a fletcher dtype per column."""
    if str_accessor == "str":
        # pytest.skip raises immediately; the old trailing ``return`` after
        # it was dead code and has been removed.
        pytest.skip("extractall is not yet dispatched to the ExtensionArray")
    ser_fr = _fr_series_from_data(data, fletcher_variant)
    result_fr = getattr(ser_fr, str_accessor).extractall(regex)
    assert isinstance(result_fr[0].dtype, fr.FletcherBaseDtype)
    ser_pd = pd.Series(data)
    result_pd = ser_pd.str.extractall(regex)
    tm.assert_frame_equal(result_pd, result_fr.astype(object))
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_text_split(str_accessor, fletcher_variant, data, expand):
    """``split`` must match pandas in both list (Series) and expand (DataFrame) modes."""
    ser_fr = _fr_series_from_data(data, fletcher_variant)
    result_fr = getattr(ser_fr, str_accessor).split("+", expand=expand)
    ser_pd = pd.Series(data)
    result_pd = ser_pd.str.split("+", expand=expand)
    if expand:
        tm.assert_frame_equal(result_pd, result_fr.astype(object))
    else:
        tm.assert_series_equal(result_pd, result_fr.astype(object))
@settings(deadline=None)
@given(
    data=st.lists(st.one_of(st.text(), st.none())),
    slice_=st.tuples(st.integers(-20, 20), st.integers(-20, 20), st.integers(-20, 20)),
)
def test_slice(data, slice_, str_accessor, fletcher_variant):
    """``slice(start, stop, step)`` must match pandas for random triples."""
    if slice_[2] == 0:
        # A step of 0 is invalid for slicing, so these inputs are skipped.
        # The previous code called ``pytest.raises(ValueError)`` without a
        # ``with`` block, which only builds an unused context manager and
        # asserts nothing.  TODO(review): strengthen this branch to
        # ``with pytest.raises(ValueError): ...slice(*slice_)`` once the
        # accessors' behavior for step 0 (incl. empty input) is pinned down.
        return
    if data == [None] or data == [""]:
        return
    ser_fr = _fr_series_from_data(data, fletcher_variant)
    result_fr = getattr(ser_fr, str_accessor).slice(*slice_)
    result_fr = result_fr.astype(object)
    # Normalize missing values to np.nan so the comparison matches pandas.
    result_fr[result_fr.isna()] = np.nan
    ser_pd = pd.Series(data, dtype=object)
    result_pd = ser_pd.str.slice(*slice_)
    tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None)
@given(char=st.characters(blacklist_categories=("Cs",)))
def test_utf8_size(char):
    """``get_utf8_size`` must report the UTF-8 byte length from the lead byte.

    Surrogates ("Cs") are excluded since they cannot be UTF-8 encoded.
    """
    char_bytes = char.encode("utf-8")
    expected = len(char_bytes)
    # char_bytes[0] is the lead byte (an int in Python 3).
    computed = fr.algorithms.string.get_utf8_size(char_bytes[0])
    assert computed == expected
| true | true |
f7fe53a14ebd06e839cdac7c0ba4f6b270edc0a5 | 4,913 | py | Python | code_snippets/chap2.py | sgheb/ml-for-asset-managers | 53f9ee5a59a00004ac67920ad11e244ffc02a503 | [
"MIT"
] | null | null | null | code_snippets/chap2.py | sgheb/ml-for-asset-managers | 53f9ee5a59a00004ac67920ad11e244ffc02a503 | [
"MIT"
] | 2 | 2020-08-09T22:32:31.000Z | 2020-08-30T02:21:07.000Z | code_snippets/chap2.py | sgheb/ml-for-asset-managers | 53f9ee5a59a00004ac67920ad11e244ffc02a503 | [
"MIT"
] | 1 | 2020-08-15T06:08:51.000Z | 2020-08-15T06:08:51.000Z | import numpy as np
import pandas as pd
from sklearn.neighbors import KernelDensity
from scipy.optimize import minimize
from scipy.linalg import block_diag
from sklearn.covariance import LedoitWolf
def fix_shape(x):
    """Return *x* as a 2-D array, reshaping 1-D input into a (n, 1) column."""
    return x.reshape(-1, 1) if x.ndim == 1 else x
# Snippet 2.1
def mpPDF(var, q, pts):
    """Marcenko--Pastur probability density.

    Evaluates the MP density for variance ``var`` and ratio ``q = T/N`` on
    ``pts`` evenly spaced eigenvalues between the distribution's support
    bounds.  Returns a pd.Series indexed by eigenvalue.
    """
    sqrt_ratio = (1.0 / q) ** 0.5
    lambda_min = var * (1 - sqrt_ratio) ** 2
    lambda_max = var * (1 + sqrt_ratio) ** 2
    grid = np.linspace(lambda_min, lambda_max, pts)
    density = q / (2 * np.pi * var * grid) * np.sqrt((lambda_max - grid) * (grid - lambda_min))
    return pd.Series(density, index=grid)
# Snippet 2.2
def getPCA(matrix):
    """Eigendecompose a Hermitian ``matrix``.

    Returns ``(eVal, eVec)`` where ``eVal`` is a diagonal matrix of
    eigenvalues sorted in descending order and ``eVec`` holds the
    corresponding eigenvectors as columns.
    """
    w, v = np.linalg.eigh(matrix)
    order = np.argsort(w)[::-1]  # descending eigenvalue order
    return np.diagflat(w[order]), v[:, order]
def fitKDE(obs, bWidth=0.25, kernel="gaussian", x=None):
    """
    Fit kernel to a series of observations `obs` and derive the probability.
    `x` is the array of values on which the fit KDE will be evaluated
    """
    # KernelDensity expects a 2-D (n_samples, n_features) array.
    obs = fix_shape(obs)
    kde = KernelDensity(kernel=kernel, bandwidth=bWidth).fit(obs)
    if x is None:
        # Default: evaluate the density at the unique observed values.
        x = np.unique(obs).reshape(-1, 1)
    x = fix_shape(x)
    logProb = kde.score_samples(x)  # log(density)
    pdf = pd.Series(np.exp(logProb), index=x.flatten())
    return pdf
# Snippet 2.3
def getRndCov(nCols, nFacts):
    """Random covariance matrix with ``nFacts`` common factors plus noise.

    Draws an ``(nCols, nFacts)`` loading matrix, forms its Gram matrix and
    adds random variances on the diagonal to make the result full rank.
    """
    loadings = np.random.normal(size=(nCols, nFacts))
    cov = loadings @ loadings.T
    cov += np.diag(np.random.uniform(size=nCols))
    return cov
def cov2corr(cov):
    """Derive the correlation matrix from a covariance matrix.

    Values are clipped to [-1, 1] to guard against tiny numerical
    excursions outside the valid correlation range.
    """
    std = np.sqrt(np.diag(cov))
    return np.clip(cov / np.outer(std, std), -1, 1)
# Snippet 2.4
def errPDFs(var, eVal, q, bWidth, pts=1000):
    """Sum of squared errors between the theoretical Marcenko--Pastur pdf
    (for candidate variance ``var``) and the KDE-smoothed empirical
    eigenvalue pdf.  Used as the objective in ``findMaxEval``.
    """
    pdf0 = mpPDF(var, q, pts)  # theoretical pdf
    pdf1 = fitKDE(
        eVal, bWidth, x=pdf0.index.values
    )  # empirical pdf with same x values as theoretical
    sse = np.sum((pdf1 - pdf0) ** 2)  # sum of square error
    return sse
def findMaxEval(eVal, q, bWidth):
    """Find the max random eigenvalue by fitting the Marcenko--Pastur dist.

    Fits the implied variance of the random component by minimizing
    ``errPDFs``; falls back to variance 1 when the optimizer fails.
    Returns ``(eMax, var)``: the noise eigenvalue threshold and the fitted
    variance.
    """
    opt = minimize(
        lambda x, *args: errPDFs(x[0], *args),
        0.5,
        args=(eVal, q, bWidth),
        bounds=((1e-5, 1 - 1e-5),),
    )
    var = opt["x"][0] if opt["success"] else 1
    eMax = var * (1 + (1.0 / q) ** 0.5) ** 2
    return eMax, var
# Snippet 2.5
def denoisedCorr(eVal, eVec, nFacts):
    """Remove noise from a correlation matrix by eigenvalue clipping.

    The eigenvalues past the first ``nFacts`` (assumed noise) are replaced
    by their average, the matrix is reconstructed and rescaled back to a
    proper correlation matrix.
    """
    vals = np.diag(eVal).copy()
    vals[nFacts:] = vals[nFacts:].sum() / float(vals.shape[0] - nFacts)
    rebuilt = eVec @ np.diag(vals) @ eVec.T
    return cov2corr(rebuilt)
# Snippet 2.6
def denoisedCorr2(eVal, eVec, nFacts, alpha=0):
    """Remove noise from a correlation matrix via targeted shrinkage.

    The signal part (first ``nFacts`` eigencomponents) is kept intact; the
    noise part is shrunk towards its diagonal, with ``alpha`` in [0, 1]
    controlling how much off-diagonal noise structure is retained.
    """
    sig_vals, sig_vecs = eVal[:nFacts, :nFacts], eVec[:, :nFacts]
    noise_vals, noise_vecs = eVal[nFacts:, nFacts:], eVec[:, nFacts:]
    signal = sig_vecs @ sig_vals @ sig_vecs.T
    noise = noise_vecs @ noise_vals @ noise_vecs.T
    return signal + alpha * noise + (1 - alpha) * np.diag(np.diag(noise))
# Snippet 2.7
def formBlockMatrix(nBlocks, bSize, bCorr):
    """Block-diagonal correlation matrix.

    Builds ``nBlocks`` identical blocks of size ``bSize`` with off-diagonal
    correlation ``bCorr`` and unit diagonal.
    """
    block = np.full((bSize, bSize), bCorr, dtype=float)
    np.fill_diagonal(block, 1)
    return block_diag(*(nBlocks * [block]))
def formTrueMatrix(nBlocks, bSize, bCorr):
    """Ground-truth mean vector and covariance matrix for simulations.

    Builds a block-diagonal correlation matrix, shuffles its rows/columns
    (so block membership is hidden), draws random volatilities and means,
    and returns ``(mu0, cov0)`` with ``mu0`` as a column vector.
    """
    corr_df = pd.DataFrame(formBlockMatrix(nBlocks, bSize, bCorr))
    shuffled_cols = corr_df.columns.tolist()
    np.random.shuffle(shuffled_cols)
    corr_df = corr_df[shuffled_cols].loc[shuffled_cols].copy(deep=True)
    vols = np.random.uniform(0.05, 0.2, corr_df.shape[0])
    cov0 = corr2cov(corr_df, vols)
    mu0 = np.random.normal(vols, vols, cov0.shape[0]).reshape(-1, 1)
    return mu0, cov0
# Snippet 2.8
def simCovMu(mu0, cov0, nObs, shrink=False):
    """Empirical mean and covariance from ``nObs`` draws of N(mu0, cov0).

    With ``shrink=True`` the covariance is estimated via Ledoit--Wolf
    shrinkage instead of the sample covariance.
    """
    sample = np.random.multivariate_normal(mu0.flatten(), cov0, size=nObs)
    mu1 = sample.mean(axis=0).reshape(-1, 1)
    cov1 = LedoitWolf().fit(sample).covariance_ if shrink else np.cov(sample, rowvar=False)
    return mu1, cov1
# Snippet 2.9
def corr2cov(corr, std):
    """Covariance matrix from a correlation matrix and std-deviation vector."""
    return corr * np.outer(std, std)
def deNoiseCov(cov0, q, bWidth):
    """Denoise a covariance matrix via Marcenko--Pastur eigenvalue clipping.

    ``q`` is T/N (observations per variable); ``bWidth`` is the KDE
    bandwidth used when fitting the Marcenko--Pastur distribution.
    """
    corr0 = cov2corr(cov0)
    eVal0, eVec0 = getPCA(corr0)
    eMax0, var0 = findMaxEval(np.diag(eVal0), q, bWidth)
    # Eigenvalues are sorted descending; searchsorted on the reversed
    # diagonal counts how many fall below the noise threshold eMax0.
    nFacts0 = eVal0.shape[0] - np.diag(eVal0)[::-1].searchsorted(eMax0)
    corr1 = denoisedCorr(eVal0, eVec0, nFacts0)
    cov1 = corr2cov(corr1, np.diag(cov0) ** 0.5)
    return cov1
# Snippet 2.10
def optPort(cov, mu=None):
    """Markowitz-optimal portfolio weights, normalized to sum to one.

    With ``mu=None`` the vector of ones is used, yielding the
    minimum-variance portfolio.  Raises numpy.linalg.LinAlgError when
    ``cov`` is singular (same as the original inverse-based version).
    """
    ones = np.ones(shape=(cov.shape[0], 1))
    if mu is None:
        mu = ones
    # Solve cov @ w = mu directly instead of forming the explicit inverse:
    # cheaper and numerically better conditioned than np.linalg.inv.
    w = np.linalg.solve(cov, mu)
    w /= np.dot(ones.T, w)
    return w
| 28.235632 | 85 | 0.615103 | import numpy as np
import pandas as pd
from sklearn.neighbors import KernelDensity
from scipy.optimize import minimize
from scipy.linalg import block_diag
from sklearn.covariance import LedoitWolf
def fix_shape(x):
if len(x.shape) == 1:
x = x.reshape(-1, 1)
return x
def mpPDF(var, q, pts):
eMin, eMax = var * (1 - (1.0 / q) ** 0.5) ** 2, var * (1 + (1.0 / q) ** 0.5) ** 2
eVal = np.linspace(eMin, eMax, pts)
pdf = q / (2 * np.pi * var * eVal) * ((eMax - eVal) * (eVal - eMin)) ** 0.5
pdf = pd.Series(pdf, index=eVal)
return pdf
def getPCA(matrix):
eVal, eVec = np.linalg.eigh(matrix)
indices = eVal.argsort()[::-1]
eVal, eVec = eVal[indices], eVec[:, indices]
eVal = np.diagflat(eVal)
return eVal, eVec
def fitKDE(obs, bWidth=0.25, kernel="gaussian", x=None):
obs = fix_shape(obs)
kde = KernelDensity(kernel=kernel, bandwidth=bWidth).fit(obs)
if x is None:
x = np.unique(obs).reshape(-1, 1)
x = fix_shape(x)
logProb = kde.score_samples(x)
pdf = pd.Series(np.exp(logProb), index=x.flatten())
return pdf
def getRndCov(nCols, nFacts):
w = np.random.normal(size=(nCols, nFacts))
cov = np.dot(w, w.T)
cov += np.diag(np.random.uniform(size=nCols))
return cov
def cov2corr(cov):
std = np.sqrt(np.diag(cov))
corr = cov / np.outer(std, std)
corr[corr < -1], corr[corr > 1] = -1, 1
return corr
def errPDFs(var, eVal, q, bWidth, pts=1000):
pdf0 = mpPDF(var, q, pts)
pdf1 = fitKDE(
eVal, bWidth, x=pdf0.index.values
)
sse = np.sum((pdf1 - pdf0) ** 2)
return sse
def findMaxEval(eVal, q, bWidth):
out = minimize(
lambda x, *args: errPDFs(x[0], *args),
0.5,
args=(eVal, q, bWidth),
bounds=((1e-5, 1 - 1e-5),),
)
if out["success"]:
var = out["x"][0]
else:
var = 1
eMax = var * (1 + (1.0 / q) ** 0.5) ** 2
return eMax, var
def denoisedCorr(eVal, eVec, nFacts):
eVal_ = np.diag(eVal).copy()
eVal_[nFacts:] = eVal_[nFacts:].sum() / float(eVal_.shape[0] - nFacts)
eVal_ = np.diag(eVal_)
corr1 = np.dot(eVec, eVal_).dot(eVec.T)
corr1 = cov2corr(corr1)
return corr1
def denoisedCorr2(eVal, eVec, nFacts, alpha=0):
eValL, eVecL = eVal[:nFacts, :nFacts], eVec[:, :nFacts]
eValR, eVecR = eVal[nFacts:, nFacts:], eVec[:, nFacts:]
corr0 = np.dot(eVecL, eValL).dot(eVecL.T)
corr1 = np.dot(eVecR, eValR).dot(eVecR.T)
corr2 = corr0 + alpha * corr1 + (1 - alpha) * np.diag(np.diag(corr1))
return corr2
def formBlockMatrix(nBlocks, bSize, bCorr):
block = np.ones((bSize, bSize)) * bCorr
block[range(bSize), range(bSize)] = 1
corr = block_diag(*([block] * nBlocks))
return corr
def formTrueMatrix(nBlocks, bSize, bCorr):
corr0 = formBlockMatrix(nBlocks, bSize, bCorr)
corr0 = pd.DataFrame(corr0)
cols = corr0.columns.tolist()
np.random.shuffle(cols)
corr0 = corr0[cols].loc[cols].copy(deep=True)
std0 = np.random.uniform(0.05, 0.2, corr0.shape[0])
cov0 = corr2cov(corr0, std0)
mu0 = np.random.normal(std0, std0, cov0.shape[0]).reshape(-1, 1)
return mu0, cov0
def simCovMu(mu0, cov0, nObs, shrink=False):
x = np.random.multivariate_normal(mu0.flatten(), cov0, size=nObs)
mu1 = x.mean(axis=0).reshape(-1, 1)
if shrink:
cov1 = LedoitWolf().fit(x).covariance_
else:
cov1 = np.cov(x, rowvar=0)
return mu1, cov1
def corr2cov(corr, std):
cov = corr * np.outer(std, std)
return cov
def deNoiseCov(cov0, q, bWidth):
corr0 = cov2corr(cov0)
eVal0, eVec0 = getPCA(corr0)
eMax0, var0 = findMaxEval(np.diag(eVal0), q, bWidth)
nFacts0 = eVal0.shape[0] - np.diag(eVal0)[::-1].searchsorted(eMax0)
corr1 = denoisedCorr(eVal0, eVec0, nFacts0)
cov1 = corr2cov(corr1, np.diag(cov0) ** 0.5)
return cov1
def optPort(cov, mu=None):
inv = np.linalg.inv(cov)
ones = np.ones(shape=(inv.shape[0], 1))
if mu is None:
mu = ones
w = np.dot(inv, mu)
w /= np.dot(ones.T, w)
return w
| true | true |
f7fe548bf7e0c7777c9472172083def0a4094adf | 2,538 | py | Python | MPC_expert.py | confiwent/Comyco_linear_QoE | 087834ce4abfb203041de39d92f72e9adb9b976c | [
"MIT"
] | 1 | 2022-03-16T06:56:07.000Z | 2022-03-16T06:56:07.000Z | MPC_expert.py | confiwent/Comyco_linear_QoE | 087834ce4abfb203041de39d92f72e9adb9b976c | [
"MIT"
] | null | null | null | MPC_expert.py | confiwent/Comyco_linear_QoE | 087834ce4abfb203041de39d92f72e9adb9b976c | [
"MIT"
] | null | null | null | """
In this version, the MPC is adopted to control the rate adaptation, with the future bandwidth having been known in advance. So we call this version MPC-Oracal
"""
import numpy as np
from pruning_v2 import solving_opt
MPC_FUTURE_CHUNK_COUNT = 7
M_IN_K = 1000.0
DEFAULT_QUALITY = 1  # default video quality without agent
RANDOM_SEED = 42
RAND_RANGE = 1000000
class ABRExpert:
    """MPC-based expert policy for adaptive bitrate streaming.

    Plans bitrate decisions over the next ``mpc_horizon`` chunks assuming
    the throughput trace is known in advance (an oracle setting).
    """
    def __init__(self, abr_env, rebuf_p, smooth_p, mpc_horizon=MPC_FUTURE_CHUNK_COUNT, total_chunk_num=48):
        self.env = abr_env
        self.rebuf_p = rebuf_p
        self.smooth_p = smooth_p
        self.mpc_horizon = mpc_horizon
        self.total_chunk_num = total_chunk_num
        self.video_chunk_remain = total_chunk_num
        self.time_stamp = 0
        self.start_buffer = 0
        self.last_bit_rate = DEFAULT_QUALITY
        self.bit_rate = DEFAULT_QUALITY
    def optimal_action(self):
        """Return the planner's optimal bitrate choice for the next chunk."""
        last_index = int(self.total_chunk_num - self.video_chunk_remain - 1)
        # Shrink the look-ahead window near the end of the video.
        horizon = min(self.mpc_horizon, self.total_chunk_num - last_index)
        return solving_opt(self.env, self.start_buffer, self.last_bit_rate,
                           horizon, self.rebuf_p, self.smooth_p)
    def step(self, action):
        """Download the next chunk at bitrate ``action`` and update state.

        ``action`` is the decision produced for the previous planning step,
        mirroring how a real client applies decisions with one-step delay.
        Returns the raw tuple from the environment unchanged.
        """
        delay, sleep_time, buffer_size, rebuf, \
            video_chunk_size, next_chunk_sizes, next_chunk_psnrs, \
            end_of_video, video_chunk_remain, curr_chunk_sizes, curr_chunk_psnrs \
            = self.env.get_video_chunk(action)
        self.time_stamp += delay + sleep_time  # both in ms
        self.last_bit_rate = self.bit_rate
        self.bit_rate = action
        self.start_buffer = buffer_size
        self.video_chunk_remain = video_chunk_remain
        if end_of_video:
            # Reset per-video state for the next trace.
            self.time_stamp = 0
            self.last_bit_rate = DEFAULT_QUALITY
        return delay, sleep_time, buffer_size, rebuf, video_chunk_size, next_chunk_sizes, next_chunk_psnrs, end_of_video, video_chunk_remain, curr_chunk_sizes, curr_chunk_psnrs
| 40.935484 | 176 | 0.702522 | import numpy as np
from pruning_v2 import solving_opt
MPC_FUTURE_CHUNK_COUNT = 7
M_IN_K = 1000.0
DEFAULT_QUALITY = 1
RANDOM_SEED = 42
RAND_RANGE = 1000000
class ABRExpert:
def __init__(self, abr_env, rebuf_p, smooth_p, mpc_horizon = MPC_FUTURE_CHUNK_COUNT, total_chunk_num = 48):
self.env = abr_env
self.rebuf_p = rebuf_p
self.smooth_p = smooth_p
self.mpc_horizon = mpc_horizon
self.total_chunk_num = total_chunk_num
self.video_chunk_remain = total_chunk_num
self.time_stamp = 0
self.start_buffer = 0
self.last_bit_rate = DEFAULT_QUALITY
self.bit_rate = DEFAULT_QUALITY
def optimal_action(self):
last_index = int(self.total_chunk_num - self.video_chunk_remain -1)
future_chunk_length = self.mpc_horizon
if (self.total_chunk_num - last_index < self.mpc_horizon ):
future_chunk_length = self.total_chunk_num - last_index
opt_a = solving_opt(self.env, self.start_buffer, self.last_bit_rate, future_chunk_length, self.rebuf_p, self.smooth_p)
return opt_a
def step(self, action):
delay, sleep_time, buffer_size, rebuf, \
video_chunk_size, next_chunk_sizes, next_chunk_psnrs, \
end_of_video, video_chunk_remain, curr_chunk_sizes, curr_chunk_psnrs \
= self.env.get_video_chunk(action)
self.time_stamp += delay
self.time_stamp += sleep_time
self.last_bit_rate = self.bit_rate
self.bit_rate = action
self.start_buffer = buffer_size
self.video_chunk_remain = video_chunk_remain
if end_of_video:
self.time_stamp = 0
self.last_bit_rate = DEFAULT_QUALITY
return delay, sleep_time, buffer_size, rebuf, video_chunk_size, next_chunk_sizes, next_chunk_psnrs, end_of_video, video_chunk_remain, curr_chunk_sizes, curr_chunk_psnrs
| true | true |
f7fe548dd2c18145b4b7be05422191d3bf2b77c6 | 2,054 | py | Python | src/fhs_pia_wireguard_netns/pia_class/server.py | foxhunt72/fhs-pia-wireguard-netns | 237b258c30c8db2da0ca3fddb8e33027bce65a81 | [
"MIT"
] | null | null | null | src/fhs_pia_wireguard_netns/pia_class/server.py | foxhunt72/fhs-pia-wireguard-netns | 237b258c30c8db2da0ca3fddb8e33027bce65a81 | [
"MIT"
] | null | null | null | src/fhs_pia_wireguard_netns/pia_class/server.py | foxhunt72/fhs-pia-wireguard-netns | 237b258c30c8db2da0ca3fddb8e33027bce65a81 | [
"MIT"
] | null | null | null | """All pia server functions."""
import os
import sys
import pkg_resources
import subprocess
import json
class server():
    """PIA (Private Internet Access) server API helper.

    NOTE(review): methods reference ``self.debug``, ``self.token`` and
    ``self.token_get()`` which are not defined here — presumably this class
    is combined with sibling mixins that provide them; verify before using
    standalone.
    """
    def __init__(self):
        super(server, self).__init__()
        # Rely on curl being found via PATH rather than a hard-coded location.
        self.curl_command='curl'
    def __server_run_curl(self, arguments: list,stdin=None):
        """Run curl with ``arguments``; return the CompletedProcess or None.

        Returns None when curl is missing or exits non-zero.  ``stdin`` is
        passed through as the process input.
        """
        run_is = [self.curl_command] + arguments
        if self.debug is True:
            print(run_is)
        try:
            result = subprocess.run(run_is, capture_output=True, input=stdin)
        except FileNotFoundError:
            print("Error: curl not found.")
            return None
        if self.debug is True:
            print(f"result: {result.stdout}")
            print(f"error : {result.stderr}")
            print(f"code  : {result.returncode}")
        if result.returncode != 0:
            print(f"Error: {result.returncode} {result.stderr}")
            return None
        return result
    def server_addkey(self, *, url, ip, pubkey):
        """Register a WireGuard public key with a PIA server.

        Issues ``https://<url>:1337/addKey`` via curl, pinning DNS to ``ip``
        with ``--connect-to`` and validating against the bundled PIA CA
        certificate.  Fetches an auth token first when none is cached.
        Returns the decoded JSON dict on success, else None.

        curl -s -G --connect-to "WG_HOSTNAME::10.1.2.3:" --cacert "ca.rsa.4096.crt" --data-urlencode "pt=token" --data-urlencode "pubkey=my_key" "https://WG_HOSTNAME:1337/addKey"
        """
        ca_file = pkg_resources.resource_filename(__name__, 'data/ca.rsa.4096.crt')
        if self.token is None:
            self.token_get()
        arguments = ['--max-time', '20', '-s', '-G', '--connect-to', f'{url}::{ip}:', '--cacert', ca_file, '--data-urlencode', f'pt={self.token}', '--data-urlencode', f'pubkey={pubkey}', f'https://{url}:1337/addKey']
        result = self.__server_run_curl(arguments)
        try:
            result_list=json.loads(result.stdout)
        except (json.decoder.JSONDecodeError, AttributeError):
            # AttributeError covers the curl-failed case (result is None).
            return None
        if result_list.get('status', 'unknown') != 'OK':
            print(f"status failed: {result_list}")
            return None
        return result_list
| 34.233333 | 216 | 0.590555 |
import os
import sys
import pkg_resources
import subprocess
import json
class server():
def __init__(self):
super(server, self).__init__()
self.curl_command='curl'
def __server_run_curl(self, arguments: list,stdin=None):
run_is = [self.curl_command] + arguments
if self.debug is True:
print(run_is)
try:
result = subprocess.run(run_is, capture_output=True, input=stdin)
except FileNotFoundError:
print("Error: curl not found.")
return None
if self.debug is True:
print(f"result: {result.stdout}")
print(f"error : {result.stderr}")
print(f"code : {result.returncode}")
if result.returncode != 0:
print(f"Error: {result.returncode} {result.stderr}")
return None
return result
def server_addkey(self, *, url, ip, pubkey):
ca_file = pkg_resources.resource_filename(__name__, 'data/ca.rsa.4096.crt')
if self.token is None:
self.token_get()
arguments = ['--max-time', '20', '-s', '-G', '--connect-to', f'{url}::{ip}:', '--cacert', ca_file, '--data-urlencode', f'pt={self.token}', '--data-urlencode', f'pubkey={pubkey}', f'https://{url}:1337/addKey']
result = self.__server_run_curl(arguments)
try:
result_list=json.loads(result.stdout)
except (json.decoder.JSONDecodeError, AttributeError):
return None
if result_list.get('status', 'unknown') != 'OK':
print(f"status failed: {result_list}")
return None
return result_list
| true | true |
f7fe549aa971337a01662235fde201c76ea2bc61 | 3,530 | py | Python | rqalpha/cmds/mod.py | ForrestLin0805/rqalpha | b10d03f9a23180a93e39c40fcb048ba6db37b573 | [
"Apache-2.0"
] | 1 | 2021-02-26T11:11:31.000Z | 2021-02-26T11:11:31.000Z | rqalpha/cmds/mod.py | ForrestLin0805/rqalpha | b10d03f9a23180a93e39c40fcb048ba6db37b573 | [
"Apache-2.0"
] | null | null | null | rqalpha/cmds/mod.py | ForrestLin0805/rqalpha | b10d03f9a23180a93e39c40fcb048ba6db37b573 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# 版权所有 2020 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:
# http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 public@ricequant.com 获取。
import os
from importlib import import_module
import six
import click
from rqalpha.utils.config import dump_config
from .entry import cli
@cli.command(context_settings=dict(
    ignore_unknown_options=True,
))
@click.help_option('-h', '--help')
@click.argument('cmd', nargs=1, type=click.Choice(['list', 'enable', 'disable']))
@click.argument('params', nargs=-1)
def mod(cmd, params):
    """
    Mod management command
    rqalpha mod list \n
    rqalpha mod enable xxx \n
    rqalpha mod disable xxx \n
    """
    def strip_prefix(mod_name):
        """Normalize a mod name by dropping the ``rqalpha_mod_`` prefix."""
        if "rqalpha_mod_" in mod_name:
            mod_name = mod_name.replace("rqalpha_mod_", "")
        return mod_name
    def set_enabled(mod_name, enabled):
        """Persist the enabled flag for ``mod_name`` in the user mod config."""
        from rqalpha.utils.config import user_mod_conf_path, load_yaml
        user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
        try:
            user_conf['mod'][mod_name]['enabled'] = enabled
        except KeyError:
            user_conf['mod'][mod_name] = {'enabled': enabled}
        dump_config(user_mod_conf_path(), user_conf)
    def list_mods(params):
        """List all mod configuration."""
        from tabulate import tabulate
        from rqalpha.utils.config import get_mod_conf
        mod_config = get_mod_conf()
        table = [
            [mod_name, "enabled" if mod_conf['enabled'] else "disabled"]
            for mod_name, mod_conf in six.iteritems(mod_config['mod'])
        ]
        headers = [
            "name",
            "status"
        ]
        six.print_(tabulate(table, headers=headers, tablefmt="psql"))
        six.print_("You can use `rqalpha mod list/install/uninstall/enable/disable` to manage your mods")
    def enable_mod(params):
        """Enable a mod, importing it first to verify it is installed."""
        mod_name = strip_prefix(params[0])
        # Importing the module verifies the mod package is actually installed;
        # built-in sys mods live under the rqalpha.mod package.
        module_name = "rqalpha_mod_" + mod_name
        if module_name.startswith("rqalpha_mod_sys_"):
            module_name = "rqalpha.mod." + module_name
        import_module(module_name)
        set_enabled(mod_name, True)
    def disable_mod(params):
        """Disable a mod."""
        set_enabled(strip_prefix(params[0]), False)
    # Explicit dispatch table instead of the fragile ``locals()[cmd]`` lookup,
    # which also forced an inner function to shadow the ``list`` builtin.
    handlers = {'list': list_mods, 'enable': enable_mod, 'disable': disable_mod}
    handlers[cmd](params)
def _detect_package_name_from_dir(params):
setup_path = os.path.join(os.path.abspath(params[-1]), 'setup.py')
if not os.path.exists(setup_path):
return None
return os.path.split(os.path.dirname(setup_path))[1]
| 30.17094 | 144 | 0.631161 |
import os
from importlib import import_module
import six
import click
from rqalpha.utils.config import dump_config
from .entry import cli
@cli.command(context_settings=dict(
ignore_unknown_options=True,
))
@click.help_option('-h', '--help')
@click.argument('cmd', nargs=1, type=click.Choice(['list', 'enable', 'disable']))
@click.argument('params', nargs=-1)
def mod(cmd, params):
def list(params):
from tabulate import tabulate
from rqalpha.utils.config import get_mod_conf
mod_config = get_mod_conf()
table = []
for mod_name, mod in six.iteritems(mod_config['mod']):
table.append([
mod_name,
("enabled" if mod['enabled'] else "disabled")
])
headers = [
"name",
"status"
]
six.print_(tabulate(table, headers=headers, tablefmt="psql"))
six.print_("You can use `rqalpha mod list/install/uninstall/enable/disable` to manage your mods")
def enable(params):
mod_name = params[0]
if "rqalpha_mod_" in mod_name:
mod_name = mod_name.replace("rqalpha_mod_", "")
module_name = "rqalpha_mod_" + mod_name
if module_name.startswith("rqalpha_mod_sys_"):
module_name = "rqalpha.mod." + module_name
import_module(module_name)
from rqalpha.utils.config import user_mod_conf_path, load_yaml
user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
try:
user_conf['mod'][mod_name]['enabled'] = True
except KeyError:
user_conf['mod'][mod_name] = {'enabled': True}
dump_config(user_mod_conf_path(), user_conf)
def disable(params):
mod_name = params[0]
if "rqalpha_mod_" in mod_name:
mod_name = mod_name.replace("rqalpha_mod_", "")
from rqalpha.utils.config import user_mod_conf_path, load_yaml
user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
try:
user_conf['mod'][mod_name]['enabled'] = False
except KeyError:
user_conf['mod'][mod_name] = {'enabled': False}
dump_config(user_mod_conf_path(), user_conf)
locals()[cmd](params)
def _detect_package_name_from_dir(params):
setup_path = os.path.join(os.path.abspath(params[-1]), 'setup.py')
if not os.path.exists(setup_path):
return None
return os.path.split(os.path.dirname(setup_path))[1]
| true | true |
f7fe54ae334da138bfff6c36b7cecae55fa11b44 | 334 | py | Python | server/ui/admin/dashboard/urls.py | elise-baumgartner/onramp | beb3c807264fcb70d8069ff2e3990b0ce3f59912 | [
"BSD-3-Clause"
] | 2 | 2016-09-09T04:19:01.000Z | 2019-02-15T20:28:13.000Z | server/ui/admin/dashboard/urls.py | elise-baumgartner/onramp | beb3c807264fcb70d8069ff2e3990b0ce3f59912 | [
"BSD-3-Clause"
] | 67 | 2016-06-02T19:37:56.000Z | 2018-02-22T05:23:45.000Z | server/ui/admin/dashboard/urls.py | elise-baumgartner/onramp | beb3c807264fcb70d8069ff2e3990b0ce3f59912 | [
"BSD-3-Clause"
] | 9 | 2015-06-22T22:10:22.000Z | 2016-04-26T15:35:45.000Z | from django.conf.urls import url
import views
urlpatterns = [
url(r'^GetUsers/$', views.get_all_users),
url(r'^GetJobs/$', views.get_all_jobs),
url(r'^GetWorkspaces/$', views.get_all_workspaces),
url(r'^GetPces/$', views.get_all_pces),
url(r'^GetModules/$', views.get_all_modules),
url(r'^$', views.main),
]
| 25.692308 | 55 | 0.664671 | from django.conf.urls import url
import views
urlpatterns = [
url(r'^GetUsers/$', views.get_all_users),
url(r'^GetJobs/$', views.get_all_jobs),
url(r'^GetWorkspaces/$', views.get_all_workspaces),
url(r'^GetPces/$', views.get_all_pces),
url(r'^GetModules/$', views.get_all_modules),
url(r'^$', views.main),
]
| true | true |
f7fe555b19c467a600d4435591599878183546f4 | 1,052 | py | Python | Proper/proper/prop_fits_read.py | RupertDodkins/medis | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | [
"MIT"
] | 1 | 2021-06-25T17:35:56.000Z | 2021-06-25T17:35:56.000Z | Proper/proper/prop_fits_read.py | RupertDodkins/medis | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | [
"MIT"
] | null | null | null | Proper/proper/prop_fits_read.py | RupertDodkins/medis | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | [
"MIT"
] | 2 | 2018-12-08T15:05:13.000Z | 2019-08-08T17:28:24.000Z | # Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
try:
    import astropy.io.fits as pyfits
except ImportError:
    # Fall back to the legacy standalone pyfits package when astropy is not
    # installed.  A failed import raises ImportError, not IOError — the
    # previous ``except IOError`` could never catch it, so the fallback was
    # dead code.
    import pyfits
def prop_fits_read(fname, header=False):
    """Read an input FITS image.

    Parameters
    ----------
    fname : str
        FITS image name
    header : bool
        When True, also return the FITS header.  Default is False.

    Returns
    -------
    numpy ndarray, or (ndarray, header) tuple when ``header`` is True.

    Raises
    ------
    IOError
        If the file cannot be read.
    """
    try:
        imgarr, imgheader = pyfits.getdata(fname, header=True, ignore_missing_end=True)
    except IOError:
        raise IOError("Unable to read FITS image %s. Stopping" % fname)
    return (imgarr, imgheader) if header else imgarr
| 26.3 | 91 | 0.66635 |
try:
import astropy.io.fits as pyfits
except IOError:
import pyfits
def prop_fits_read(fname, header = False):
try:
imgarr, imgheader = pyfits.getdata(fname, header = True, ignore_missing_end = True)
except IOError:
raise IOError("Unable to read FITS image %s. Stopping" %(fname))
if header:
return (imgarr, imgheader)
else:
return imgarr
| true | true |
f7fe55e11a248045d5350a78d287edd5c35d0f8b | 508 | py | Python | rowingdata/painsledplot.py | sanderroosendaal/rowingdata | efd8aa1566a926f11fb3f6b5b340665bc26028c4 | [
"MIT"
] | 4 | 2017-04-24T15:20:46.000Z | 2021-02-12T23:03:29.000Z | build/lib/rowingdata/painsledplot.py | sanderroosendaal/rowingdata | efd8aa1566a926f11fb3f6b5b340665bc26028c4 | [
"MIT"
] | 38 | 2016-11-02T07:57:50.000Z | 2022-01-22T13:25:14.000Z | build/lib/rowingdata/painsledplot.py | sanderroosendaal/rowingdata | efd8aa1566a926f11fb3f6b5b340665bc26028c4 | [
"MIT"
] | 6 | 2017-01-19T21:39:46.000Z | 2021-11-16T14:48:58.000Z | #! /usr/bin/python
from __future__ import absolute_import
from __future__ import print_function
from . import rowingdata
from sys import argv
def main():
    """CLI entry point: print stats and plot an indoor-rower session.

    argv[1] is the painsled workout file; optional argv[2] is a rower
    profile file, defaulting to "defaultrower.txt" when absent.
    """
    workout_path = argv[1]
    rower_path = argv[2] if len(argv) > 2 else "defaultrower.txt"
    profile = rowingdata.getrower(rower_path)
    session = rowingdata.rowingdata(workout_path, rowtype="Indoor Rower",
                                    rower=profile)
    print((session.allstats()))
    session.plotmeters_erg()
    print(("done " + workout_path))
| 18.142857 | 62 | 0.661417 |
from __future__ import absolute_import
from __future__ import print_function
from . import rowingdata
from sys import argv
def main():
readFile=argv[1]
try:
rowerFile=argv[2]
except IndexError:
rowerFile="defaultrower.txt"
rower=rowingdata.getrower(rowerFile)
row=rowingdata.rowingdata(readFile,rowtype="Indoor Rower",
rower=rower)
print((row.allstats()))
row.plotmeters_erg()
print(("done "+readFile))
| true | true |
f7fe56956ff351d8cb37de5c48d6b401354b13e6 | 212 | py | Python | app/forms.py | ErikBoesen/afterthebeep | c4bafc4c6b90c8e43232e7b917f9184bfb7e6a06 | [
"MIT"
] | null | null | null | app/forms.py | ErikBoesen/afterthebeep | c4bafc4c6b90c8e43232e7b917f9184bfb7e6a06 | [
"MIT"
] | null | null | null | app/forms.py | ErikBoesen/afterthebeep | c4bafc4c6b90c8e43232e7b917f9184bfb7e6a06 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import TextAreaField
from wtforms.validators import DataRequired
class InputForm(FlaskForm):
    """Single-field form for submitting free-form text content."""
    # Required textarea; DataRequired rejects empty submissions.
    content = TextAreaField('Write here.', validators=[DataRequired()])
| 26.5 | 71 | 0.806604 | from flask_wtf import FlaskForm
from wtforms import TextAreaField
from wtforms.validators import DataRequired
class InputForm(FlaskForm):
content = TextAreaField('Write here.', validators=[DataRequired()])
| true | true |
f7fe5705be91c29dfc3d7ae69d08e07664585877 | 9,558 | py | Python | applications/BasketballAction/predict/eval.py | txyugood/PaddleTableTennis | be4d33b5990da9c75fcd11f341ae09a73bfdbaba | [
"Apache-2.0"
] | 5 | 2022-01-30T07:35:58.000Z | 2022-02-08T05:45:20.000Z | applications/BasketballAction/predict/eval.py | txyugood/PaddleTableTennis | be4d33b5990da9c75fcd11f341ae09a73bfdbaba | [
"Apache-2.0"
] | 1 | 2022-01-14T02:33:28.000Z | 2022-01-14T02:33:28.000Z | applications/BasketballAction/predict/eval.py | txyugood/PaddleTableTennis | be4d33b5990da9c75fcd11f341ae09a73bfdbaba | [
"Apache-2.0"
] | 1 | 2022-03-07T10:51:21.000Z | 2022-03-07T10:51:21.000Z | """
get instance for lstm
根据gts计算每个proposal_bmn的iou、ioa、label等信息
"""
import os
import sys
import json
import random
import pickle
import numpy as np
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding = 'utf-8')
dataset = "datasets/"
label_index_file = './configs_basketball/index_label_basketball_6.json'
eval_datasets = ['EuroCup2016']
label_files = {'train': 'label_cls6_train.json',
'validation': 'label_cls6_val.json'}
global fps, mode
label_index = json.load(open(label_index_file, 'rb'))
def load_gts():
    """Load ground-truth annotations for every evaluation dataset/split.

    Side effect: updates the module-level ``fps`` with the value read
    from each label file (the last one read wins). Returns a dict
    mapping video basename -> annotation record (with a 'mode' key
    recording the split it came from).
    """
    global fps
    merged = {}
    for ds_name in eval_datasets:
        for split, fname in label_files.items():
            path = '{}/{}/{}'.format(dataset, ds_name, fname)
            with open(path, 'rb') as fh:
                payload = json.load(fh)
            fps = payload['fps']
            for ann in payload['gts']:
                ann['mode'] = split
                key = '{}/{}/mp4/{}'.format(dataset, ds_name, os.path.basename(ann['url']))
                merged[key] = ann
    return merged
def computeIoU(e1, e2):
    """Temporal overlap score between two segments.

    Returns IoU when the module-level ``mode`` is 'proposal', otherwise
    IoA (intersection over the area of *e2*). Segments with different
    labels or from different videos always score 0.
    """
    if e1['label'] != e2['label'] or e1['basename'] != e2['basename']:
        return 0.
    len1 = e1["end"] - e1["start"]
    len2 = e2["end"] - e2["start"]
    lo = np.maximum(e1["start"], e2["start"])
    hi = np.minimum(e1["end"], e2["end"])
    overlap = np.maximum(0.0, hi - lo)
    union = len1 + len2 - overlap
    score = 0.0 if union == 0 else overlap * 1.0 / union
    if mode != 'proposal':
        score = 0.0 if len2 == 0 else overlap * 1.0 / len2
    return score
def convert_proposal(boxes, basename, score_threshold=0.01):
    """Convert BMN proposal boxes into evaluation records.

    Boxes scoring below *score_threshold* are dropped; frame indices
    are converted to seconds using the module-level ``fps``. Label is
    fixed to 0 (class-agnostic proposals).
    """
    ranked = sorted(boxes, key=lambda b: float(b['score']), reverse=True)
    records = []
    for b in ranked:
        if float(b['score']) < score_threshold:
            continue
        records.append({
            'basename': basename,
            'start': int(float(b['start']) / fps),
            'end': int(float(b['end']) / fps),
            'label': 0,
        })
    return records
def convert_classify(boxes, basename, iou_threshold, score_threshold):
    """Convert classified action boxes into evaluation records.

    Boxes are ranked by (classify_score, iou_score) descending; boxes
    failing either threshold are dropped.

    Parameters
    ----------
    boxes : list of dict
        Each with 'classify_score', 'iou_score', 'start_time',
        'end_time' and 'label_id' keys.
    basename : str
        Video identifier attached to every record.
    iou_threshold, score_threshold : float
        Minimum iou_score / classify_score required to keep a box.
    """
    boxes = sorted(boxes, key=lambda x: (float(x['classify_score']), float(x['iou_score'])), reverse=True)

    def convert_time_to_frame(time_type):
        # Accepts a plain integer string ("123") or an "H:MM:SS"
        # timestamp converted to seconds.
        # BUG FIX: the original returned int(time_type) unconditionally,
        # leaving the H:MM:SS parsing branch unreachable (and crashing
        # on timestamp input).
        try:
            return int(time_type)
        except ValueError:
            h, m, s = time_type.split(':')
            return int(h) * 3600 + int(m) * 60 + int(s)

    res = []
    for box in boxes:
        if not (box['iou_score'] >= iou_threshold and
                box['classify_score'] >= score_threshold):
            continue
        res.append({'basename': basename,
                    'start': convert_time_to_frame(box['start_time']),
                    'end': convert_time_to_frame(box['end_time']),
                    'label': box['label_id']})
    return res
def convert_groundtruth(boxes, basename, phase=None):
    """Flatten ground-truth action boxes into one record per label id.

    When *phase* is 'proposal', every record gets the class-agnostic
    label 0; otherwise the annotated label ids are kept.
    """
    records = []
    for box in boxes:
        for label_id in box['label_ids']:
            records.append({
                'basename': basename,
                'start': box['start_id'],
                'end': box['end_id'],
                'label': 0 if phase == 'proposal' else label_id,
            })
    return records
def print_head(iou):
    """Print the results-table header for the given IoA/IoU threshold."""
    print("\nioa = {:.1f}".format(iou))
    header = '{:<12s}'.format('label_name')
    header += ''.join('{:<10s}'.format(col) for col in
                      ['label_id', 'precision', 'recall', 'hit_prop',
                       'num_prop', 'hit_gts', 'num_gts'])
    print(header)
def print_result(res_dict, label='avg'):
    """Print one formatted result row.

    *label* is either 'avg' or a numeric class id; class ids are
    rendered with their name from the module-level ``label_index``
    table, padded with full-width spaces (U+3000) for CJK alignment.
    """
    if label == 'avg':
        row = '{:<22s}'.format(str(label))
    else:
        row = '{0:{2}<6s}{1:<10s}'.format(label_index[str(label)], str(label), chr(12288))
    row += ''.join('{:<10.4f}'.format(res_dict[k]) for k in ('prec', 'recall'))
    row += ''.join('{:<10d}'.format(res_dict[k]) for k in ('hit_prop', 'num_prop', 'hit_gts', 'num_gts'))
    print(row)
def evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = False):
    """Score predicted boxes against ground truth at several thresholds.

    For every threshold in *iou_range*, computes per-label
    precision/recall over *label_range* plus an aggregate row; returns
    the aggregate dict of the LAST threshold with an added 'F1' key.
    When *show_sub* is True, the rows are printed as tables.
    """
    # pairwise overlap of every prediction against every ground-truth box
    iou_map = [computeIoU(resId, gtsId) for resId in res_boxes \
                for gtsId in gts_boxes]
    iou_map = np.array(iou_map).reshape((len(res_boxes), len(gts_boxes)))
    # best-matching ground-truth overlap and index for each prediction
    hit_map_prop_total = np.max(iou_map, axis=1)
    hit_map_index_total = np.argmax(iou_map, axis=1)
    res_dict = ['hit_prop', 'num_prop', 'hit_gts', 'num_gts']
    for iou_threshold in iou_range:
        if show_sub:
            print_head(iou_threshold)
        # predictions whose best overlap clears this threshold
        iou_prop = np.array([k >= iou_threshold for k in hit_map_prop_total])
        average_results = {}
        for label_id in label_range:
            sub_results = {}
            # boolean masks restricting predictions / gts to this label
            label_prop = np.array([k['label'] == label_id for k in res_boxes])
            label_gts = np.array([k['label'] == label_id for k in gts_boxes])
            sub_results['num_prop'] = sum(label_prop)
            sub_results['num_gts'] = sum(label_gts)
            if sub_results['num_prop'] == 0:
                hit_prop_index = []
            else:
                hit_prop_index = label_prop & iou_prop
            sub_results['hit_prop'] = sum(hit_prop_index)
            # distinct ground-truth boxes matched by at least one hit
            sub_results['hit_gts'] = len(set(hit_map_index_total[hit_prop_index]))
            sub_results['prec'] = 0.0 if sub_results['num_prop'] == 0 \
                    else sub_results['hit_prop'] * 1.0 / sub_results['num_prop']
            sub_results['recall'] = 0.0 if sub_results['num_gts'] == 0 \
                    else sub_results['hit_gts'] * 1.0 / sub_results['num_gts']
            if show_sub:
                print_result(sub_results, label=label_id)
            # accumulate raw counts into the aggregate row
            for item in res_dict:
                if not item in average_results:
                    average_results[item] = 0
                average_results[item] += sub_results[item]
        if len(label_range) == 1:  # proposal phase: no per-class average to report
            continue
        average_results['prec'] = 0.0 if average_results['num_prop'] == 0 \
                else average_results['hit_prop'] * 1.0 / average_results['num_prop']
        average_results['recall'] = 0.0 if average_results['num_gts'] == 0 \
                else average_results['hit_gts'] * 1.0 / average_results['num_gts']
        if show_sub:
            print_result(average_results)
    # F1 is computed on the aggregate row of the last threshold only
    average_results['F1'] = 0.0 if (average_results['prec'] + average_results['recall'] == 0) \
            else 2 * average_results['prec'] * average_results['recall'] / \
                (average_results['prec'] + average_results['recall'])
    return average_results
def get_eval_results(predicts, gts_data, phase, iou_threshold = 0.3, score_threshold = 0.3, show_sub = False):
    """Collect prediction/ground-truth boxes for one phase and evaluate.

    *phase* is 'proposal' (class-agnostic BMN proposals, IoU sweep
    0.1-0.9) or anything else for classified actions (IoA 0.5 only).
    Side effect: sets the module-level ``mode`` consumed by computeIoU.
    Videos outside the configured eval_datasets are skipped.
    """
    global mode
    mode = phase
    res_boxes = []
    gts_boxes = []
    for ped_data in predicts:
        basename = ped_data['video_name']
        # keep only videos belonging to one of the evaluation datasets
        such_eval = False
        for eval_name in eval_datasets:
            if eval_name in basename:
                such_eval = True
                break
        if not such_eval:
            continue
        gts = gts_data[basename]['actions']
        if phase == 'proposal':
            res_boxes.extend(convert_proposal(ped_data['bmn_results'], basename, score_threshold))
            gts_boxes.extend(convert_groundtruth(gts, basename, phase='proposal'))
            label_range = [0]
            iou_range = np.arange(0.1, 1, 0.1)
        else:
            res_boxes.extend(convert_classify(ped_data['action_results'], basename, iou_threshold, score_threshold))
            gts_boxes.extend(convert_groundtruth(gts, basename))
            label_range = range(1, len(label_index))
            iou_range = np.arange(0.5, 0.6, 0.1)
    eval_results = evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = show_sub)
    return eval_results
if __name__ == "__main__":
    # Usage: python eval.py <result_file.json>
    result_file = sys.argv[1]
    predicts = json.load(open(result_file, 'r', encoding='utf-8'))
    gts_data = load_gts()
    # report class-agnostic proposal quality at a fixed low score threshold
    get_eval_results(predicts, gts_data, 'proposal',
                     score_threshold = 0.03,
                     show_sub = True)
    # grid-search iou/score thresholds for the action phase, keeping the
    # combination with the best aggregate F1
    best_F1 = -0.1
    best_res = {}
    best_iou_threshold = 0.
    best_score_threshold = 0.
    for iou_threshold in np.arange(0.1, 0.9, 0.1):
        for score_threshold in np.arange(0.1, 1, 0.1):
            avg_res = get_eval_results(predicts, gts_data, 'actions',
                                       iou_threshold = iou_threshold,
                                       score_threshold = score_threshold,
                                       show_sub = False)
            if best_F1 < avg_res['F1']:
                best_F1 = avg_res['F1']
                best_res = avg_res
                best_iou_threshold = iou_threshold
                best_score_threshold = score_threshold
    print("best iou threshold = {:.1f}".format(best_iou_threshold))
    print("best score threshold = {:.1f}".format(best_score_threshold))
    print('best F1 score = {:.4f}'.format(best_F1))
    print_head(0.5)
    print_result(best_res)
    # re-run once at the best thresholds with full per-label output
    get_eval_results(predicts, gts_data, 'actions', iou_threshold = best_iou_threshold,
                     score_threshold = best_score_threshold,
                     show_sub = True)
| 39.991632 | 116 | 0.565076 | import os
import sys
import json
import random
import pickle
import numpy as np
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding = 'utf-8')
dataset = "datasets/"
label_index_file = './configs_basketball/index_label_basketball_6.json'
eval_datasets = ['EuroCup2016']
label_files = {'train': 'label_cls6_train.json',
'validation': 'label_cls6_val.json'}
global fps, mode
label_index = json.load(open(label_index_file, 'rb'))
def load_gts():
global fps
gts_data = {'fps': 0, 'gts': {}}
for eval_data in eval_datasets:
for item, value in label_files.items():
label_file = '{}/{}/{}'.format(dataset, eval_data, value)
gts = json.load(open(label_file, 'rb'))
gts_data['fps'] = gts['fps']
fps = gts['fps']
for gt in gts['gts']:
gt['mode'] = item
basename = '{}/{}/mp4/{}'.format(dataset, eval_data, os.path.basename(gt['url']))
gts_data['gts'][basename] = gt
return gts_data['gts']
def computeIoU(e1, e2):
if not (e1['label'] == e2['label'] and e1['basename'] == e2['basename']):
return 0.
area1 = e1["end"] - e1["start"]
area2 = e2["end"] - e2["start"]
x1 = np.maximum(e1["start"], e2["start"])
x2 = np.minimum(e1["end"], e2["end"])
inter = np.maximum(0.0, x2 - x1)
iou = 0.0 if (area1 + area2 - inter) == 0 else inter * 1.0 / (area1 + area2 - inter)
if not mode == 'proposal':
iou = 0.0 if area2 == 0 else inter * 1.0 / area2
return iou
def convert_proposal(boxes, basename, score_threshold=0.01):
boxes = sorted(boxes, key=lambda x:float(x['score']), reverse=True)
res = []
for box in boxes:
if not float(box['score']) >= score_threshold:
continue
res.append({'basename': basename,
'start': int(float(box['start']) / fps),
'end': int(float(box['end']) / fps),
'label': 0})
return res
def convert_classify(boxes, basename, iou_threshold, score_threshold):
boxes = sorted(boxes, key=lambda x:(float(x['classify_score']), float(x['iou_score'])), reverse=True)
def convert_time_to_frame(time_type):
return int(time_type)
h, m, s = time_type.split(':')
return int(h) * 3600 + int(m) * 60 + int(s)
res = []
for box in boxes:
if not (box['iou_score'] >= iou_threshold and
box['classify_score'] >= score_threshold):
continue
res.append({'basename': basename,
'start': convert_time_to_frame(box['start_time']),
'end': convert_time_to_frame(box['end_time']),
'label': box['label_id']})
return res
def convert_groundtruth(boxes, basename, phase=None):
res = []
for box in boxes:
for item in box['label_ids']:
label = 0 if phase == 'proposal' else item
res.append({'basename': basename,
'start': box['start_id'],
'end': box['end_id'],
'label': label})
return res
def print_head(iou):
print("\nioa = {:.1f}".format(iou))
res_str = ''
for item in ['label_name']:
res_str += '{:<12s}'.format(item)
for item in ['label_id', 'precision', 'recall', 'hit_prop', 'num_prop', 'hit_gts', 'num_gts']:
res_str += '{:<10s}'.format(item)
print(res_str)
def print_result(res_dict, label='avg'):
if label == 'avg':
res_str = '{:<22s}'.format(str(label))
else:
res_str = '{0:{2}<6s}{1:<10s}'.format(label_index[str(label)], str(label), chr(12288))
for item in ['prec', 'recall']:
res_str += '{:<10.4f}'.format(res_dict[item])
for item in ['hit_prop', 'num_prop', 'hit_gts', 'num_gts']:
res_str += '{:<10d}'.format(res_dict[item])
print(res_str)
def evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = False):
iou_map = [computeIoU(resId, gtsId) for resId in res_boxes \
for gtsId in gts_boxes]
iou_map = np.array(iou_map).reshape((len(res_boxes), len(gts_boxes)))
hit_map_prop_total = np.max(iou_map, axis=1)
hit_map_index_total = np.argmax(iou_map, axis=1)
res_dict = ['hit_prop', 'num_prop', 'hit_gts', 'num_gts']
for iou_threshold in iou_range:
if show_sub:
print_head(iou_threshold)
iou_prop = np.array([k >= iou_threshold for k in hit_map_prop_total])
average_results = {}
for label_id in label_range:
sub_results = {}
label_prop = np.array([k['label'] == label_id for k in res_boxes])
label_gts = np.array([k['label'] == label_id for k in gts_boxes])
sub_results['num_prop'] = sum(label_prop)
sub_results['num_gts'] = sum(label_gts)
if sub_results['num_prop'] == 0:
hit_prop_index = []
else:
hit_prop_index = label_prop & iou_prop
sub_results['hit_prop'] = sum(hit_prop_index)
sub_results['hit_gts'] = len(set(hit_map_index_total[hit_prop_index]))
sub_results['prec'] = 0.0 if sub_results['num_prop'] == 0 \
else sub_results['hit_prop'] * 1.0 / sub_results['num_prop']
sub_results['recall'] = 0.0 if sub_results['num_gts'] == 0 \
else sub_results['hit_gts'] * 1.0 / sub_results['num_gts']
if show_sub:
print_result(sub_results, label=label_id)
for item in res_dict:
if not item in average_results:
average_results[item] = 0
average_results[item] += sub_results[item]
if len(label_range) == 1:
continue
average_results['prec'] = 0.0 if average_results['num_prop'] == 0 \
else average_results['hit_prop'] * 1.0 / average_results['num_prop']
average_results['recall'] = 0.0 if average_results['num_gts'] == 0 \
else average_results['hit_gts'] * 1.0 / average_results['num_gts']
if show_sub:
print_result(average_results)
average_results['F1'] = 0.0 if (average_results['prec'] + average_results['recall'] == 0) \
else 2 * average_results['prec'] * average_results['recall'] / \
(average_results['prec'] + average_results['recall'])
return average_results
def get_eval_results(predicts, gts_data, phase, iou_threshold = 0.3, score_threshold = 0.3, show_sub = False):
global mode
mode = phase
res_boxes = []
gts_boxes = []
for ped_data in predicts:
basename = ped_data['video_name']
such_eval = False
for eval_name in eval_datasets:
if eval_name in basename:
such_eval = True
break
if not such_eval:
continue
gts = gts_data[basename]['actions']
if phase == 'proposal':
res_boxes.extend(convert_proposal(ped_data['bmn_results'], basename, score_threshold))
gts_boxes.extend(convert_groundtruth(gts, basename, phase='proposal'))
label_range = [0]
iou_range = np.arange(0.1, 1, 0.1)
else:
res_boxes.extend(convert_classify(ped_data['action_results'], basename, iou_threshold, score_threshold))
gts_boxes.extend(convert_groundtruth(gts, basename))
label_range = range(1, len(label_index))
iou_range = np.arange(0.5, 0.6, 0.1)
eval_results = evaluation(res_boxes, gts_boxes, label_range, iou_range, show_sub = show_sub)
return eval_results
if __name__ == "__main__":
result_file = sys.argv[1]
predicts = json.load(open(result_file, 'r', encoding='utf-8'))
gts_data = load_gts()
get_eval_results(predicts, gts_data, 'proposal',
score_threshold = 0.03,
show_sub = True)
best_F1 = -0.1
best_res = {}
best_iou_threshold = 0.
best_score_threshold = 0.
for iou_threshold in np.arange(0.1, 0.9, 0.1):
for score_threshold in np.arange(0.1, 1, 0.1):
avg_res = get_eval_results(predicts, gts_data, 'actions',
iou_threshold = iou_threshold,
score_threshold = score_threshold,
show_sub = False)
if best_F1 < avg_res['F1']:
best_F1 = avg_res['F1']
best_res = avg_res
best_iou_threshold = iou_threshold
best_score_threshold = score_threshold
print("best iou threshold = {:.1f}".format(best_iou_threshold))
print("best score threshold = {:.1f}".format(best_score_threshold))
print('best F1 score = {:.4f}'.format(best_F1))
print_head(0.5)
print_result(best_res)
get_eval_results(predicts, gts_data, 'actions', iou_threshold = best_iou_threshold,
score_threshold = best_score_threshold,
show_sub = True)
| true | true |
f7fe575f84224bf9dc7c3e775e916c4000f4985f | 41,888 | py | Python | query_by_free.py | liuKai0312/tyc_query | 971aca447a801de53a52df4d3fb0af52bdf0a366 | [
"MIT"
] | 1 | 2022-03-25T22:25:46.000Z | 2022-03-25T22:25:46.000Z | query_by_free.py | liuKai0312/tyc_query | 971aca447a801de53a52df4d3fb0af52bdf0a366 | [
"MIT"
] | 1 | 2022-03-26T08:31:04.000Z | 2022-03-26T08:31:04.000Z | query_by_free.py | liuKai0312/tyc_query | 971aca447a801de53a52df4d3fb0af52bdf0a366 | [
"MIT"
] | 1 | 2021-10-08T09:32:26.000Z | 2021-10-08T09:32:26.000Z | import sys
import time
import importlib
import urllib
import urllib.request
import requests
from bs4 import BeautifulSoup
import pymysql
import numpy as np
import threading
import redis
from fake_useragent import UserAgent
import math
db_ = redis.Redis(host="localhost",password=123, port=6379,decode_responses=True)
thread_proxy_list = []
ua = UserAgent()
OPEN_PROXY_IP_THREAD = False
GET_PROXY_IP_BY_FREE = True
GET_RESPONSE_BY_PROXY = False
current_cookies = None
def main():
    """Crawl IP data (trademarks, copyrights, patents, works) from
    tianyancha for every locally stored company, then stop any
    background proxy threads."""
    # (ad-hoc test calls left commented out by the author)
    # updateComponyInfoByCompanyName([1,"腾讯"])
    # getSbByComponyName("深圳中基集团有限公司",1)
    # getZlByCompanyName("")
    # getZpzzqByCompanyName("北京学",1)
    # save_proxy_ip_by_agent(200)
    if OPEN_PROXY_IP_THREAD:
        # start background threads that keep the proxy-IP pool fresh
        run_get_proxy_fun_by_thread()
    companyList = get_all_companys("zb_tbCompanyInfo",[2,3])
    for companyDatas in companyList:
        try:
            company = companyDatas["data"]
            # refresh all remote info for this company
            get_compony_info_by_companyname(company)
        except Exception as e:
            # log "request error / blocked" and continue with the next company
            formartPrint("请求错误", "请求被拦截")
            print(e)
    # shut down proxy-refresh threads before exiting
    for thread_proxy in thread_proxy_list:
        thread_proxy.stop()
# Look up a company on tianyancha by name.
def get_compony_info_by_companyname(company):
    """Search tianyancha for *company* and, when a hit is found, crawl
    its detail pages.

    Parameters
    ----------
    company : sequence
        ``company[0]`` is the local DB id, ``company[1]`` the name.
    """
    company_name = company[1]
    company_local_id = company[0]
    # CSS selector for the first hit on the search-results page
    selectors = [
        'div.search-item:nth-child(1)>.search-result-single',
    ]
    url_temp = {'url': 'http://www.tianyancha.com/search/p1', 'selectors': selectors}
    response = getHttpResponse(url_temp["url"] + '?key=%s' % urllib.request.quote(company_name))
    if response is None:
        formartPrint("获取公司出错", "响应为空")  # request failed / empty response
    elif response.status_code != 200:  # BUG FIX: was `is not 200` (identity test on an int)
        formartPrint("获取公司出错", "被拦截")  # blocked by the site
    else:
        # status 200: parse the search results (the original's trailing
        # "unknown error" else-branch was unreachable and has been removed)
        soup = BeautifulSoup(response.text, 'lxml')
        result = soup.select(url_temp["selectors"][0])
        if len(result) > 0:
            # the remote company id is carried in the data-id attribute
            company_remote_id = result[0]["data-id"]
            if not company_remote_id:
                formartPrint("未找到公司id", company_local_id)  # no remote id found
            else:
                get_compony_detaile_by_id(company_name, company_remote_id, company_local_id)
        else:
            formartPrint("没有该企业", company_name)  # no such company
# Fetch a company's detail page and refresh every derived local record.
def get_compony_detaile_by_id(company_name,company_remote_id,company_local_id):
    """Fetch the tianyancha detail page for one company and refresh
    all locally stored records derived from it."""
    url_temp = {'url': 'http://www.tianyancha.com/company/'+company_remote_id}
    response = getHttpResponse(url_temp["url"])
    # refresh basic company info (high-tech classification)
    update_company_info(response,company_name,company_remote_id,company_local_id)
    # refresh trademarks
    get_sb_by_componyname(response,company_name, company_remote_id,company_local_id)
    # refresh software copyrights
    get_rz_by_componyname(response,company_name, company_remote_id,company_local_id)
    # refresh patents
    get_zl_by_componyname(response,company_name, company_remote_id,company_local_id)
    # refresh work copyrights
    get_zp_by_componyname(response,company_name, company_remote_id,company_local_id)
# Update the company's high-tech ("高新") classification.
def update_company_info(company_detail_response,company_name,company_remote_id,company_local_id):
    """Update the high-tech classification of one company.

    Checks the tag list on the already-fetched detail page; when the
    high-tech tag is present, pages through the certificate list
    looking for a "高新技术企业" certificate and stores its date.
    """
    soup = BeautifulSoup(company_detail_response.text, 'lxml')
    result = soup.select('div.content>.tag-list-content>.tag-list>.tag-common.-hint')
    if len(result) == 0:
        formartPrint("更新高新类型" + company_name, "非高新")
        # no high-tech tag: mark the company as non-high-tech
        mysqlDaoeExecutor_update(
            "zb_tbCompanyInfo",
            [
                {"name": "gxlx", "data": "非高新"}
            ],
            [
                {"name": "compy_id", "data": company_local_id}
            ]
        )
    else:
        formartPrint("更新高新类型" + company_name, "高新企业")
        zs_count_res = beauifulHtmlEleAndGetValue(company_detail_response, ["#nav-main-certificateCount>span.data-count"])
        if len(zs_count_res) > 0 and len(zs_count_res[0]) > 0:
            zs_count = zs_count_res[0][0]
            # selectors for the certificate name and date columns
            selectors = [
                'table>tbody>tr>td:nth-child(2)>span',
                'table>tbody>tr>td:nth-child(4)>span'
            ]
            page_count = math.ceil(int(zs_count) / 5)  # 5 certificates per page
            formartPrint(company_name + "的证书列表页数", page_count)
            url_temp = {'url': 'https://www.tianyancha.com/pagination/certificate.xhtml?ps=5&pn=$num&id=' + str(company_remote_id) + '&_=1555935634142', 'selectors': selectors}
            formartPrint("发送请求", "正在获取【" + company_name + "】的证书列表")
            for num in range(1, page_count + 1):
                url = url_temp["url"].replace("$num", str(num))
                response = getHttpResponse(url)
                if response is None:
                    formartPrint("获取" + company_name + "证书列表错误", "请求超时")
                elif response.status_code == 200:
                    zs_lists = beauifulHtmlEleAndGetValue(response, url_temp["selectors"])
                    exists = False
                    for zs in zs_lists:
                        if zs[0] == "高新技术企业":
                            # found the national high-tech certificate; record its date
                            exists = True
                            mysqlDaoeExecutor_update(
                                "zb_tbCompanyInfo",
                                [
                                    {"name": "gxlx", "data": "国家高新"},
                                    {"name": "gggx_time", "data": zs[1]}
                                ],
                                [
                                    {"name": "compy_id", "data": company_local_id}
                                ]
                            )
                            break
                    if exists is True:
                        break
                else:
                    # BUG FIX: was `elif response.status_code is not 200`
                    # (identity test on an int) plus an unreachable final else.
                    formartPrint("获取" + company_name + "证书列表错误", "请求被拦截")
        else:
            formartPrint(company_name + "的证书列表为空", [])
# Fetch and refresh a company's trademark (商标) records.
def get_sb_by_componyname(company_detail_response, company_name, company_remote_id, company_local_id):
    """Refresh the trademark rows and trademark count for one company."""
    # read the trademark count from the already-fetched detail page
    sb_count_res = beauifulHtmlEleAndGetValue(company_detail_response,["#nav-main-tmCount>span.data-count"])
    sb_count = 0
    if len(sb_count_res) > 0 and len(sb_count_res[0]) > 0:
        sb_count = sb_count_res[0][0]
        # column selectors: name, application date, type, serial no., state
        selectors = [
            'div.data-content>table>tbody>tr>td:nth-child(4)>span',
            'div.data-content>table>tbody>tr>td:nth-child(2)>span',
            'div.data-content>table>tbody>tr>td:nth-child(6)>span',
            'div.data-content>table>tbody>tr>td:nth-child(5)>span',
            'div.data-content>table>tbody>tr>td:nth-child(7)>span'
        ]
        page_count =math.ceil(int(sb_count)/10)
        formartPrint(company_name + "的商标列表页数", page_count)
        url_temp = {'url':'https://www.tianyancha.com/pagination/tmInfo.xhtml?ps=10&pn=$num&id='+str(company_remote_id)+'&_=1555775408946','selectors': selectors}
        formartPrint("发送请求", "正在获取【" + company_name + "】的商标列表")
        results = do_search_with_url_no_prarms(url_temp,1,page_count)
        formartPrint(company_name + "的商标列表", results)
        if len(results)> 0 :
            # wipe previously stored rows for this company first
            mysqlDaoeExecutor_delete("zb_tbSbinfo", [
                {"name": "compy_id", "data": str(company_local_id)}
            ])
            for result in results:
                mysqlDaoeExecutor_insert(
                    "zb_tbSbinfo",
                    [
                        {"name": "park_id", "data": 'SZ00000001'},
                        {"name": "compy_id", "data": str(company_local_id)},
                        {"name": "sbname", "data": result[0]},
                        {"name": "sqrq", "data": result[1]},
                        {"name": "sblx", "data": result[2]},
                        {"name": "sbsn", "data": result[3]},
                        {"name": "sbstate", "data": result[4]}
                    ]
                )
        else:
            formartPrint(company_name + "的商标列表为空", [])
    # store the trademark count on the company row
    mysqlDaoeExecutor_update(
        "zb_tbCompanyInfo",
        [
            {"name": "sbnums", "data": sb_count},
        ],
        [
            {"name": "compy_id", "data": company_local_id}
        ]
    )
# Fetch and refresh a company's software-copyright (软件著作权) records.
def get_rz_by_componyname(company_detail_response, company_name, company_remote_id, company_local_id):
    """Refresh the software-copyright rows and count for one company."""
    # read the software-copyright count from the detail page
    rz_count_res = beauifulHtmlEleAndGetValue(company_detail_response,["#nav-main-cpoyRCount>span.data-count"])
    rz_count = 0
    if len(rz_count_res) > 0 and len(rz_count_res[0]) > 0:
        rz_count = rz_count_res[0][0]
        # column selectors: full name, short name, serial no., version, date
        selectors = [
            'table>tbody>tr>td:nth-child(3)>span',
            'table>tbody>tr>td:nth-child(4)>span',
            'table>tbody>tr>td:nth-child(5)>span',
            'table>tbody>tr>td:nth-child(7)>span',
            'table>tbody>tr>td:nth-child(2)>span'
        ]
        page_count =math.ceil(int(rz_count)/5)
        formartPrint(company_name + "的软件著作权列表页数", page_count)
        url_temp = {'url':'https://www.tianyancha.com/pagination/copyright.xhtml?ps=5&pn=$num&id='+str(company_remote_id)+'&_=1555834707614','selectors': selectors}
        formartPrint("发送请求", "正在获取【" + company_name + "】的软件著作权列表")
        results = do_search_with_url_no_prarms(url_temp,1,page_count)
        formartPrint(company_name + "的软件著作权列表", results)
        if len(results)> 0 :
            # wipe previously stored rows for this company first
            mysqlDaoeExecutor_delete("zb_tbRzinfo", [
                {"name": "compy_id", "data": str(company_local_id)}
            ])
            for result in results:
                mysqlDaoeExecutor_insert(
                    "zb_tbRzinfo",
                    [
                        {"name": "park_id", "data": 'SZ00000001'},
                        {"name": "compy_id", "data": str(company_local_id)},
                        {"name": "fullname", "data": result[0]},
                        {"name": "shortname", "data": result[1]},
                        {"name": "rzsn", "data": result[2]},
                        {"name": "rzver", "data": result[3]},
                        {"name": "sqrq", "data": result[4]}
                    ]
                )
        else:
            formartPrint(company_name + "的软件著作权列表为空", [])
    # store the software-copyright count on the company row
    mysqlDaoeExecutor_update(
        "zb_tbCompanyInfo",
        [
            {"name": "rznums", "data": rz_count},
        ],
        [
            {"name": "compy_id", "data": company_local_id}
        ]
    )
# Fetch and refresh a company's patent (专利) records.
def get_zl_by_componyname(company_detail_response, company_name, company_remote_id, company_local_id):
    """Refresh the patent records of one company.

    Pages through the patent list collecting per-patent detail links and
    (serial number, patent type) pairs, tallies counts per type, then
    rewrites the local zb_tbZlinfo rows and the per-type counters on
    zb_tbCompanyInfo.
    """
    # patent count shown on the already-fetched detail page
    zl_count_res = beauifulHtmlEleAndGetValue(company_detail_response,["#nav-main-patentCount>span.data-count"])
    # drop previously stored patents before re-inserting
    mysqlDaoeExecutor_delete(
        "zb_tbZlinfo",
        [
            {"name": "compy_id", "data": str(company_local_id)}
        ]
    )
    if len(zl_count_res) > 0 and len(zl_count_res[0]) > 0:
        zl_count = zl_count_res[0][0]
        # column selectors: patent serial number and patent type
        selectors = [
            'table>tbody>tr>td:nth-child(4)>span',
            'table>tbody>tr>td:nth-child(6)>span'
        ]
        page_count = math.ceil(int(zl_count) / 5)  # 5 patents per page
        formartPrint(company_name + "的专利列表页数", page_count)
        url_temp = {'url': 'https://www.tianyancha.com/pagination/patent.xhtml?ps=5&pn=$num&id=' + str(company_remote_id) + '&_=1555834707617', 'selectors': selectors}
        formartPrint("发送请求", "正在获取【" + company_name + "】的专利链接列表")
        zl_links = []
        sn_lx_lists = []
        zlnums = 0    # invention patents (发明专利)
        xyzlnums = 0  # utility-model patents (实用新型)
        wgzlnums = 0  # design patents (外观专利)
        for num in range(1, page_count + 1):
            formartPrint(company_name + "当前正在处理的专利列表页数", str(num) + "/" + str(page_count))
            try:
                response = getHttpResponse(url_temp["url"].replace("$num", str(num)))
                if response is None:
                    formartPrint(company_name + "的专利列表获取超时", "休息一下")
                    continue
                elif response.status_code == 200:
                    sn_lx_datas = beauifulHtmlEleAndGetValue(response, url_temp["selectors"])
                    # collect (serial, type) pairs and tally the type counters
                    for data in sn_lx_datas:
                        sn_lx_lists.append(data)
                        if data[1] == "发明专利":
                            zlnums = zlnums + 1
                        elif data[1] == "外观专利":
                            wgzlnums = wgzlnums + 1
                        elif data[1] == "实用新型":
                            xyzlnums = xyzlnums + 1
                        else:
                            formartPrint(company_name + "的专利类型非采集类型", data[1])
                    soup = BeautifulSoup(response.text, 'lxml')
                    # collect the per-patent detail links
                    a_lists = soup.select('table>tbody>tr>td:nth-child(7)>a')
                    for link in a_lists:
                        zl_links.append(link["href"])
                else:
                    formartPrint(company_name + "获取专利链接时被拦截", response.status_code)
                    break
            except Exception as e:
                # BUG FIX: the log line was placed AFTER `continue` and
                # therefore never executed.
                formartPrint("获取" + company_name + "时出现异常", e)
                continue
        # store the per-type patent counters on the company row
        mysqlDaoeExecutor_update(
            "zb_tbCompanyInfo",
            [
                {"name": "zlnums", "data": zlnums},
                {"name": "xyzlnums", "data": xyzlnums},
                {"name": "wgzlnums", "data": wgzlnums},
            ],
            [
                {"name": "compy_id", "data": company_local_id}
            ]
        )
        # fetch each patent's detail page and insert the rows
        update_zl_by_zl_links(company_name, company_local_id, zl_links, sn_lx_lists)
    else:
        formartPrint(company_name + "的专利列表为空", [])
# Fetch each patent's detail page and write the rows locally.
def update_zl_by_zl_links(company_name,company_local_id,zl_links,sn_lx_lists):
    """Fetch every patent detail page in *zl_links*, insert the scraped
    rows into zb_tbZlinfo, then back-fill the patent type per serial
    number from *sn_lx_lists*."""
    formartPrint(company_name + "的专利的链接数", len(zl_links))
    zl_results_db_datas = []
    for num in range(len(zl_links)):
        formartPrint(company_name + "当前正在处理的专利链接索引", str(num+1)+"/"+str(len(zl_links)))
        link = zl_links[num]
        response = getHttpResponse(link)
        if response is None:
            # timed out; skip this patent
            formartPrint(company_name + "的专利获取超时", "休息一下")
            continue
        elif response.status_code == 200:
            # selectors: serial no., grant date, patent name, application date
            zl_selectors = [
                "#patentTitle>table>tr:nth-child(1)>td:nth-child(4)",
                "#patentTitle>table>tr:nth-child(6)>td:nth-child(4)",
                "#patentTitle>table>tr:nth-child(4)>td:nth-child(2)",
                "#patentTitle>table>tr:nth-child(6)>td:nth-child(2)"
            ]
            zl_results = beauifulHtmlEleAndGetValue(response, zl_selectors)
            if len(zl_results_db_datas) == 0:
                zl_results_db_datas = zl_results
            else:
                zl_results_db_datas = np.concatenate((zl_results_db_datas, zl_results))
        else:
            # blocked by the site; abort the remaining links
            formartPrint(company_name + "的专利获取被拦截", response.status_code)
            break
    formartPrint(company_name + "的专利列表", zl_results_db_datas)
    # insert the scraped patent rows (type filled in below)
    for zl in zl_results_db_datas:
        mysqlDaoeExecutor_insert(
            "zb_tbZlinfo",
            [
                {"name": "park_id", "data": 'SZ00000001'},
                {"name": "compy_id", "data": str(company_local_id)},
                {"name": "zlsn", "data": zl[0]},
                {"name": "lzsj", "data": zl[1]},
                {"name": "zlname", "data": zl[2]},
                {"name": "sqrq", "data": zl[3]},
                {"name": "zllx", "data": '-'},
            ]
        )
    # back-fill the patent type, keyed by serial number
    for sn_lx in sn_lx_lists:
        mysqlDaoeExecutor_update(
            "zb_tbZlinfo",
            [
                {"name": "zllx", "data": sn_lx[1]},
            ],
            [
                {"name": "zlsn", "data": sn_lx[0]}
            ]
        )
# Fetch and refresh a company's work-copyright (作品著作权) records.
def get_zp_by_componyname(company_detail_response, company_name, company_remote_id, company_local_id):
    """Refresh the work-copyright rows and count for one company."""
    # read the work-copyright count from the detail page
    zp_count_res = beauifulHtmlEleAndGetValue(company_detail_response,["#nav-main-copyrightWorks>span.data-count"])
    zp_count = 0
    if len(zp_count_res) > 0 and len(zp_count_res[0]) > 0:
        zp_count = zp_count_res[0][0]
        # column selectors: name, type, serial no., reg. date, first-publish date, end date
        selectors = [
            'table>tbody>tr>td:nth-child(2)>span',
            'table>tbody>tr>td:nth-child(4)>span',
            'table>tbody>tr>td:nth-child(3)>span',
            'table>tbody>tr>td:nth-child(6)>span',
            'table>tbody>tr>td:nth-child(5)>span',
            'table>tbody>tr>td:nth-child(7)>span'
        ]
        page_count =math.ceil(int(zp_count)/5)
        formartPrint(company_name + "的作品著作权列表页数", page_count)
        url_temp = {'url':'https://www.tianyancha.com/pagination/copyrightWorks.xhtml?ps=5&pn=$num&id='+str(company_remote_id)+'&_=1555844934449','selectors': selectors}
        formartPrint("发送请求", "正在获取【" + company_name + "】的作品著作权列表")
        results = do_search_with_url_no_prarms(url_temp,1,page_count)
        formartPrint(company_name + "的作品著作权列表", results)
        if len(results)> 0 :
            # wipe previously stored rows for this company first
            mysqlDaoeExecutor_delete("zb_tbZpinfo", [
                {"name": "compy_id", "data": str(company_local_id)}
            ])
            for result in results:
                mysqlDaoeExecutor_insert(
                    "zb_tbZpinfo",
                    [
                        {"name": "park_id", "data": 'SZ00000001'},
                        {"name": "compy_id", "data": company_local_id},
                        {"name": "zpname", "data": result[0]},
                        {"name": "zplx", "data": result[1]},
                        {"name": "zpsn", "data": result[2]},
                        {"name": "sqrq", "data": result[3]},
                        {"name": "firstpub_rq", "data": result[4]},
                        {"name": "end_rq", "data": result[5]},
                    ]
                )
        else:
            formartPrint(company_name + "的作品著作权列表为空", [])
    # store the work-copyright count on the company row
    mysqlDaoeExecutor_update(
        "zb_tbCompanyInfo",
        [
            {"name": "zpnums", "data": zp_count},
        ],
        [
            {"name": "compy_id", "data": company_local_id}
        ]
    )
# Crawl paginated, keyword-keyed search pages.
def doSearch(url_temp, companyName, pageStart, pageEnd):
    """Scrape pages [pageStart, pageEnd) of a keyword search and
    concatenate the rows selected by url_temp['selectors'].

    Falsy pageStart/pageEnd default to 1 and 2 respectively. Stops on
    an empty page, a failed request, or any scraping exception.
    """
    if not pageStart:
        pageStart = 1
    if not pageEnd:
        pageEnd = 2
    resultList = []
    for num in range(pageStart, pageEnd):
        # page URL pattern: <base><page>?key=<quoted company name>
        url = url_temp["url"] + str(num) + '?key=%s' % urllib.request.quote(companyName)
        try:
            response = getHttpResponse(url)
            if response is None:
                # BUG FIX: getHttpResponse may return None on failure;
                # previously this crashed into the except clause below.
                formartPrint("爬取网页出错", url)
                break
            if response.status_code != 200:  # BUG FIX: was `is not 200`
                formartPrint("请求被阻止,状态码", response.status_code)
            result = beauifulHtmlEleAndGetValue(response, url_temp["selectors"])
            if len(result) == 0:
                break
            if len(resultList) == 0:
                resultList = result
            else:
                resultList = np.concatenate((resultList, result))
        except Exception as e:
            formartPrint("爬取网页出错", url)
            print(e)
            break
    return resultList
# Paginated fetch over a templated URL ($num placeholder); pages are inclusive.
def do_search_with_url_no_prarms(url_temp,pageStart,pageEnd):
    """Walk pages pageStart..pageEnd (inclusive) of a templated pagination URL.

    url_temp: {"url": URL containing a "$num" page placeholder,
               "selectors": CSS selectors for beauifulHtmlEleAndGetValue}.
    Falsy page bounds default to 1 and 2.

    Returns a list (or a numpy array once two pages have been merged) of rows.
    A None response skips the page; a blocked (non-200) response or an empty
    page stops the walk.
    """
    if not pageStart:
        pageStart = 1
    if not pageEnd:
        pageEnd = 2
    resultList = []
    for num in range(pageStart,pageEnd+1):
        url = url_temp["url"].replace("$num",str(num))
        try:
            response = getHttpResponse(url)
            if response is None:
                continue
            # BUG FIX: `status_code is not 200` relied on CPython small-int
            # identity; compare by value instead.
            elif response.status_code != 200:
                formartPrint("请求被阻止,状态码",response.status_code)
                break
            result = beauifulHtmlEleAndGetValue(response, url_temp["selectors"])
            if len(result) == 0:
                break
            if len(resultList) == 0:
                resultList = result
            else:
                resultList = np.concatenate((resultList, result))
        except Exception as e:
            formartPrint("爬取网页出错",url)
            print(e)
            continue
    return resultList
def getHttpResponse(url):
    """Dispatch a GET either through the proxy pool or directly, depending on
    the module-level GET_RESPONSE_BY_PROXY feature flag."""
    fetch = getHttpResponseWithProxy if GET_RESPONSE_BY_PROXY else getHttpResponseWithoutProxy
    return fetch(url)
def getHttpResponseWithProxy(url):
    """Fetch *url* via a pooled proxy IP and force utf-8 decoding.

    Returns the response, or None (implicitly) when the fetch — or setting the
    encoding on a None result — raises; the failure is logged either way.
    """
    formartPrint("请求地址", url)
    try:
        resp = get_http_response_by_proxy_ip(url)
        resp.encoding = "utf-8"
        return resp
    except Exception as exc:
        print(exc)
        formartPrint("发送请求失败", "网页请求被拦截")
def getHttpResponseWithoutProxy(url):
    """Fetch *url* directly (cookie-pool path); returns None on any error."""
    formartPrint("请求地址", url)
    try:
        return do_get_response(url)
    except Exception as exc:
        print(exc)
        formartPrint("发送请求失败", "网页请求被拦截")
        return None
def get_proxy():
    """Block until the redis proxy pool is non-empty, then pop one address.

    Returns a requests-style proxies mapping {"http": "http://host:port"}.
    BUG FIX: removed the unreachable `break` that followed the `return`.
    """
    while True:
        try:
            if db_.llen("proxies") == 0:
                # Pool empty: wait for the harvester threads to refill it.
                time.sleep(5)
            else:
                # get_proxy_ip() can still return None if the pool is drained
                # between the length check and the pop; the resulting TypeError
                # is caught below and the loop simply retries.
                return {"http": "http://" + get_proxy_ip()}
        except Exception:
            formartPrint("代理ip为空","None")
def get_http_response_by_proxy_ip(url):
    """Fetch *url* through rotating proxies.

    Returns the first 200 response that has a body; gives up (returns None
    implicitly) after 10 non-200/non-503 attempts.  BUG FIX: removed the
    unreachable `break` that followed the `return`.  NOTE(review): the
    count >= 10 cap is only checked on the plain-failure branch — 503s and
    exceptions can retry indefinitely; preserved as-is.
    """
    count = 0
    proxy = get_proxy()
    while True:
        count = count + 1
        formartPrint("循环监听代理次数",count)
        try:
            formartPrint("使用了代理",proxy)
            response = get_http_response_by_proxy_ip_wapper(url,proxy)
            formartPrint("响应状态码",response.status_code)
            if response.status_code == 200 and response.text:
                return response
            elif response.status_code == 503:
                # Rate-limited: back off briefly, then rotate the proxy.
                time.sleep(3)
                proxy = get_proxy()
            elif count >= 10:
                print('抓取网页失败')
                break
            else:
                proxy = get_proxy()
        except Exception as e:
            formartPrint("获取请求连接报错",e)
            proxy = get_proxy()
def get_http_response_by_proxy_ip_wapper(url,proxy):
    """One GET through the given proxies mapping, with the shared headers and
    hard-coded cookie jar and a 10-second timeout."""
    headers = getHttpHeaders()
    cookies = getHttpCookies()
    return requests.get(url, headers=headers, cookies=cookies, proxies=proxy, timeout=10)
def run_get_proxy_fun_by_thread():
    """Start the background proxy-harvesting threads.

    GET_PROXY_IP_BY_FREE selects the free scrapers (kuaidaili + xici) over the
    paid customer API.  Threads are appended to the module-level
    thread_proxy_list so main() can reference them later.
    """
    if GET_PROXY_IP_BY_FREE:
        targets = [get_ip_proxys_kuaidaili, get_ip_proxys_xici]
    else:
        targets = [get_proxy_ip_customer]
    for target in targets:
        thread_proxy_list.append(threading.Thread(target=target))
    for worker in thread_proxy_list:
        worker.start()
# Fetch *url* through one explicit "host:port" proxy (http scheme).
def do_proxy_get_response(url, proxy):
    """One GET via the given proxy address, with the shared headers/cookies."""
    proxy_map = {"http":proxy}
    return requests.get(url, headers=getHttpHeaders(), cookies=getHttpCookies(), proxies=proxy_map, timeout=5)
# Direct (non-proxy) fetch path.
def do_get_response(url):
    # Thin indirection so the direct-fetch strategy can be swapped without
    # touching getHttpResponseWithoutProxy.
    return get_users_cookies_pool_response(url)
def get_users_cookies_pool_response(url):
    """Fetch *url* reusing the cached cookie session when possible.

    Fast path: retry with the module-level current_cookies.  On any failure
    (no cache, None response, non-200 status, or an exception) the cached
    cookie row is expired where applicable and the request falls back to
    do_get_users_cookies_pool_response, which rotates through the pool.

    BUG FIX: status codes were compared with `is` / `is not` (identity on
    ints — a CPython small-int accident, SyntaxWarning on 3.8+); value
    comparisons are used now, and the unreachable trailing else was dropped.
    """
    try:
        if current_cookies is None:
            return do_get_users_cookies_pool_response(url)
        response = requests.get(url, headers=getHttpHeaders(), cookies=current_cookies["cookies"], timeout=10)
        if response is None:
            update_cookies_expire_error_code(current_cookies["user_cookies"][0])
            return do_get_users_cookies_pool_response(url)
        elif response.status_code == 200:
            update_cookies_request_count_url(current_cookies["user_cookies"],url)
            return response
        else:
            update_cookies_expire_error_code(current_cookies["user_cookies"][0],response.status_code)
            return do_get_users_cookies_pool_response(url)
    except Exception as e:
        formartPrint("使用临时cookies异常",e)
        return do_get_users_cookies_pool_response(url)
def do_get_users_cookies_pool_response(url):
    """Rotate through the DB cookie pool until some user's cookies yield a 200.

    Each failing row (unparseable cookie string, None response, or non-200
    status) is deactivated via update_cookies_expire_error_code and the next
    active row is tried.  On success the session is cached in the module-level
    current_cookies and the utf-8-decoded response is returned; returns None
    when the pool is exhausted.  Exceptions from requests.get propagate to the
    caller (unchanged from the original).

    BUG FIX: replaced `is 200` / `is not 200` identity comparisons with value
    comparisons; removed the unreachable `break`s after `return`/`continue`
    and the unreachable final else branch.
    """
    global current_cookies
    while True:
        formartPrint("获取用户cookies", "正在切换cookies")
        user_cookies = get_user_cookies()
        if user_cookies is None:
            # Pool exhausted: nothing left to rotate to.
            formartPrint("获取用户cookies", "cookies为空")
            return None
        cookies = format_user_cookies(user_cookies[3])
        if cookies is None:
            # Unparseable cookie string: expire the row and try the next user.
            update_cookies_expire_error_code(user_cookies[0])
            continue
        formartPrint("正在使用cookies获取response", "当前用户【"+user_cookies[1]+"】")
        response = requests.get(url, headers=getHttpHeaders(), cookies=cookies, timeout=10)
        if response is None:
            formartPrint("获取用户cookies异常", "response为None")
            update_cookies_expire_error_code(user_cookies[0])
            continue
        if response.status_code == 200:
            current_cookies = {'user_cookies': user_cookies, 'cookies': cookies}
            update_cookies_request_count_url(user_cookies, url)
            formartPrint("当前用户", user_cookies[1])
            response.encoding = "utf-8"
            return response
        # Blocked (non-200): record the status and rotate to the next user.
        formartPrint("使用cookies请求异常", "当前用户被拦截【" + user_cookies[1] + "】")
        update_cookies_expire_error_code(user_cookies[0],response.status_code)
def update_cookies_expire_error_code(cookie_id,error_code = 414):
    """Deactivate one cookie-pool row and record when and why it expired.

    error_code defaults to 414 (the site's anti-crawler block status).
    """
    expired_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    assignments = [
        {'name': 'active', 'data': '0'},
        {'name': 'last_expire_time', 'data': expired_at},
        {'name': 'error_code', 'data': error_code}
    ]
    mysqlDaoeExecutor_update(
        'tb_users_cookies_pool',
        assignments,
        [{'name': 'id', 'data': cookie_id}]
    )
def update_cookies_request_count_url(user_cookies,url):
    """Bump the request counter on a cookie-pool row and record the last URL.

    user_cookies: a row tuple from tb_users_cookies_pool (index 0 = id,
        index 7 = request_count, which may be NULL/'' on fresh rows).
    BUG FIX: removed the leftover debug print ("================", count).
    """
    # NULL/empty counters start from 0.
    count = 0 if user_cookies[7] is None or user_cookies[7] == '' else int(user_cookies[7])
    mysqlDaoeExecutor_update(
        'tb_users_cookies_pool',
        [
            {'name': 'request_count', 'data': count+1},
            {'name': 'last_request_url', 'data': url}
        ],
        [
            {'name': 'id', 'data': user_cookies[0]}
        ]
    )
def get_user_cookies():
    """Fetch one active row from tb_users_cookies_pool.

    Returns the row tuple, or None when no active cookie rows remain.
    Retries forever on DB errors.  BUG FIX: removed the unreachable `break`
    that followed the `return`.
    """
    while True:
        try:
            formartPrint("获取用户cookies", "正在获取")
            user_cookies = mysqlDaoeExecutor_select('tb_users_cookies_pool', [
                {'name': "active", 'data': '1'}
            ], 0, 1)
            if len(user_cookies) == 0:
                formartPrint("用户cookie的数量为空", "没有可用的用户cookie,请及时激活或补充")
                return None
            formartPrint("获取用户cookies", "获取成功")
            return user_cookies[0]
        except Exception as e:
            formartPrint("获取用户cookies的时候发生了异常", e)
            continue
# Harvest free proxies from kuaidaili.com into the redis pool (thread target).
def get_ip_proxys_kuaidaili():
    # Only harvest when the pool is empty, to avoid hammering the free source.
    if db_.llen("proxies") == 0:
        try:
            header = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
            for num in range(1, 2000):
                response = requests.get("https://www.kuaidaili.com/free/inha/" + str(num), headers=header, timeout=1)
                response.encoding = "utf-8"
                # Columns: IP, port, protocol (HTTP/HTTPS).
                proxies = beauifulHtmlEleAndGetValue(response, [
                    "#list>table>tbody>tr>td:nth-child(1)",
                    "#list>table>tbody>tr>td:nth-child(2)",
                    "#list>table>tbody>tr>td:nth-child(4)",
                ])
                if len(proxies) == 0:
                    # NOTE(review): retries by recursing with no backoff — an
                    # empty/blocked listing can recurse until RecursionError.
                    # TODO: replace with a bounded retry loop.
                    get_ip_proxys_kuaidaili()
                else:
                    for proxy in proxies:
                        valid_proxy_ip(proxy[0] + ":" + proxy[1], proxy[2], "快代理")
        except Exception as e:
            print(e)
            formartPrint("获取代理池失败", "快代理")
# Harvest free proxies from xicidaili.com into the redis pool (thread target).
def get_ip_proxys_xici():
    # Only harvest when the pool is empty.
    if db_.llen("proxies") == 0:
        try:
            header = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
            for num in range(1, 2000):
                response = requests.get("https://www.xicidaili.com/wt/" + str(num), headers=header, timeout=1)
                response.encoding = "utf-8"
                # Columns: IP, port, protocol.  Only the .odd rows are selected,
                # so roughly half of each listing page is harvested.
                proxies = beauifulHtmlEleAndGetValue(response, [
                    "#ip_list>tr.odd>td:nth-child(2)",
                    "#ip_list>tr.odd>td:nth-child(3)",
                    "#ip_list>tr.odd>td:nth-child(6)",
                ])
                if len(proxies) == 0:
                    # NOTE(review): unbounded recursive retry — same hazard as
                    # get_ip_proxys_kuaidaili; TODO: bounded retry loop.
                    get_ip_proxys_xici()
                else:
                    for proxy in proxies:
                        valid_proxy_ip(proxy[0] + ":" + proxy[1], proxy[2],"西刺")
        except Exception as e:
            print(e)
            formartPrint("获取代理池失败","西刺")
# Harvest free proxies from 66ip.cn into the redis pool (currently unused).
def get_ip_proxys_daili66():
    """Scrape 66ip.cn listing pages and push working proxies into redis.

    BUG FIX: valid_proxy_ip takes (proxy, agreement, agent) but was called
    with only two arguments — every call raised TypeError.  The listing has
    no protocol column, so "HTTP" is assumed.  Leftover debug prints of the
    raw page and parsed rows were removed.
    """
    try:
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
        for num in range(1, 2000):
            response = requests.get("http://www.66ip.cn/"+str(num)+".html", headers=header, timeout=1)
            response.encoding = "utf-8"
            # Columns: IP, port.
            proxies = beauifulHtmlEleAndGetValue(response, [
                "table>tr>td:nth-child(1)",
                "table>tr>td:nth-child(2)",
            ])
            if len(proxies) == 0:
                # NOTE(review): unbounded recursive retry, same hazard as the
                # other free harvesters; TODO: bounded retry loop.
                get_ip_proxys_daili66()
            else:
                for proxy in proxies:
                    valid_proxy_ip(proxy[0] + ":" + proxy[1], "HTTP", "代理66")
    except Exception as e:
        print(e)
def valid_proxy_ip(proxy,agreement,agent):
    """Probe an HTTP proxy against baidu and push it into redis if it works.

    proxy: "host:port" string.  agreement: protocol label from the listing —
    only "HTTP" entries are probed.  agent: provider name, used for logging.
    Probe failures (exceptions, non-OK statuses) are silently ignored.
    """
    if agreement != "HTTP":
        return
    probe_url = "http://www.baidu.com/"
    probe_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
    try:
        response = requests.get(probe_url, headers=probe_headers, proxies={"http": proxy}, timeout=1)
        # Response truthiness is "status is OK"; require an exact 200 as well.
        if response and response.status_code == 200:
            push_proxy_ip(proxy)
            formartPrint(agent + "ip存入成功,"+ agreement, proxy)
    except Exception:
        # Dead proxies are expected; drop them without noise.
        pass
# Store a validated "host:port" proxy address in the shared redis queue.
def push_proxy_ip(proxy_ip):
    db_.lpush("proxies", proxy_ip)
def get_proxy_ip():
    """Pop one proxy address from the redis queue; None when the pool is empty."""
    return None if db_.llen("proxies") == 0 else db_.rpop("proxies")
def get_proxy_ip_customer():
    """Keep the redis proxy pool topped up from the paid mogumiao API
    (thread target; loops forever).

    BUG FIX: when the pool was non-empty the loop spun with no sleep,
    pegging a CPU core; it now naps between checks.
    """
    while True:
        if db_.llen("proxies") == 0:
            formartPrint("获取蘑菇代理ip", "10条")
            proxy_ip_datas = requests.get(
                "http://piping.mogumiao.com/proxy/api/get_ip_bs?appKey=ecfc21c9cbd84190893bb255298093d3&count=10&expiryDate=0&format=2&newLine=1").text.strip()
            # NOTE(review): splitting on a single space — presumably matching
            # the API's newLine=1 separator; TODO confirm it isn't "\r\n".
            for proxy_ip in proxy_ip_datas.split(" "):
                valid_proxy_ip(proxy_ip, "HTTP", "蘑菇代理")
        else:
            time.sleep(5)
# Parse an HTML response with a list of CSS selectors and return row tuples.
def beauifulHtmlEleAndGetValue(response,selectors):
    """Run each selector over response.text; selectors that match nothing are
    skipped.  The per-selector column lists are then transposed into rows,
    sized by the first column.  Returns [] when nothing matched or on any
    parsing error (including column-length mismatches, via the except)."""
    try:
        soup = BeautifulSoup(response.text, 'lxml')
        columns = []
        for selector in selectors:
            texts = [node.get_text().strip() for node in soup.select(selector)]
            if texts:
                columns.append(texts)
        if not columns:
            return []
        rows = []
        for row_idx in range(len(columns[0])):
            rows.append([columns[col_idx][row_idx] for col_idx in range(len(columns))])
        return rows
    except Exception as e:
        print(e)
        formartPrint("解析html","解析html异常")
        return []
def excuteAndGetcursorByMysql(type,sql):
    """Open a connection, execute *sql*, commit, and return the cursor.

    type: label for logging only ("select"/"insert"/...).
    Returns None when the statement fails (callers that call .fetchall() on
    the result must be prepared for that).

    BUG FIX: the connection was never closed, leaking one MySQL connection
    per statement.  pymysql's default Cursor buffers all rows during
    execute(), so fetchall() on the returned cursor still works after the
    connection is closed.
    """
    formartPrint("数据库执行"+type,sql)
    conn = None
    try:
        conn = pymysql.connect(host='localhost', user='root', passwd='123', db='tyc', port=3306, charset='utf8')
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
        return cur
    except Exception as e:
        formartPrint("数据库操作失败",sql)
        print(e)
    finally:
        if conn is not None:
            conn.close()
def get_all_companys(tableName,rowIndexs):
    """Load every row of *tableName*, projecting the columns in rowIndexs.

    rowIndexs: list of column indexes; when falsy, each whole row tuple is
    kept as a single element.  Returns a list of {"data": [...]} dicts, or
    None (implicitly) on a database error.
    """
    try:
        rows = mysqlDaoeExecutor_select(tableName)
        results = []
        if len(rows) == 0:
            formartPrint("无数据","查询")
        else:
            for row in rows:
                if not rowIndexs:
                    projected = [row]
                else:
                    projected = [row[idx] for idx in rowIndexs]
                if len(projected) == 0:
                    formartPrint("无数据","查询")
                else:
                    results.append({"data": projected})
        return results
    except Exception as e:
        formartPrint("数据库异常","数据库【select】异常")
        print(e)
def mysqlDaoeExecutor_select(tableName,searchDatas=None,limit_start=None,limit_end=None):
    """Build and run SELECT * FROM tableName [WHERE ...] [LIMIT a,b].

    searchDatas: list of {"name": column, "data": value} equality conditions.
    Returns cursor.fetchall() rows, or None (implicitly) on failure.

    BUG FIX: multiple WHERE conditions were joined with "," (invalid SQL);
    they are now joined with AND.  The mutable [] default was also replaced
    with None (behaviorally identical — both are falsy and never mutated).
    NOTE(review): values are interpolated directly into the SQL string
    (injection-prone); parameterize via cursor.execute(sql, params) when the
    execution path allows it.
    """
    try:
        sql = "select * from " + tableName
        if searchDatas:
            conditions = " and ".join(
                column["name"] + "='" + str(column["data"]) + "'" for column in searchDatas
            )
            sql = sql + " where " + conditions
        if limit_start is not None and limit_end is not None:
            sql = sql + ' limit ' + str(limit_start) + ',' + str(limit_end)
        sql = sql + ';'
        return excuteAndGetcursorByMysql("select", sql).fetchall()
    except Exception as e:
        print(e)
        formartPrint("数据库异常","数据库【select】异常")
def mysqlDaoeExecutor_update(tableName,columns,searchDatas):
    """Build and run UPDATE tableName SET ... [WHERE ...].

    columns / searchDatas: lists of {"name": column, "data": value}; SET
    entries are comma-joined (correct SQL), WHERE entries are AND-joined.

    BUG FIX: multiple WHERE conditions were joined with "," (invalid SQL) —
    now joined with AND.  The bare `except:` was narrowed to Exception and
    now logs the error like the sibling executors.
    NOTE(review): values are interpolated directly into the SQL string
    (injection-prone) — parameterize when possible.
    """
    try:
        assignments = ",".join(
            column["name"] + "='" + str(column["data"]) + "'" for column in columns
        )
        sql = "update " + tableName + " set " + assignments
        if searchDatas:
            conditions = " and ".join(
                column["name"] + "='" + str(column["data"]) + "'" for column in searchDatas
            )
            sql = sql + " where " + conditions
        sql = sql + ";"
        excuteAndGetcursorByMysql("update", sql)
    except Exception as e:
        print(e)
        formartPrint("数据库异常","数据库【update】异常")
def mysqlDaoeExecutor_insert(tableName,columns):
    """Build and run INSERT INTO tableName (cols...) VALUES (vals...).

    columns: list of {"name": column, "data": value}.

    BUG FIX: values were concatenated without str(), so any non-string datum
    (several callers pass ints, e.g. company ids) raised TypeError and the
    row was silently dropped by the except.
    NOTE(review): values are interpolated directly into the SQL string
    (injection-prone, and quotes in scraped text will break the statement) —
    parameterize when possible.
    """
    try:
        names = ",".join(column["name"] for column in columns)
        values = ",".join("'" + str(column["data"]) + "'" for column in columns)
        sql = "insert into " + tableName + " (" + names + ") values (" + values + ");"
        excuteAndGetcursorByMysql("insert", sql)
    except Exception as e:
        print(e)
        formartPrint("数据库异常","数据库【插入】异常")
def mysqlDaoeExecutor_delete(tableName,searchDatas=None):
    """Build and run DELETE FROM tableName [WHERE ...].

    searchDatas: list of {"name": column, "data": value} equality conditions;
    empty/None deletes every row.

    BUG FIX: multiple WHERE conditions were joined with "," (invalid SQL) —
    now joined with AND.  The mutable [] default was replaced with None
    (behaviorally identical).  NOTE(review): values are interpolated directly
    into the SQL string (injection-prone) — parameterize when possible.
    """
    try:
        sql = "delete from " + tableName
        if searchDatas:
            conditions = " and ".join(
                column["name"] + "='" + str(column["data"]) + "'" for column in searchDatas
            )
            sql = sql + " where " + conditions
        sql = sql + ";"
        excuteAndGetcursorByMysql("delete", sql)
    except Exception as e:
        print(e)
        formartPrint("数据库异常","数据库【delete】异常")
def getHttpHeaders():
    """Request headers for tianyancha, with a randomized User-Agent per call.

    BUG FIX: the dict literal contained 'Connection' twice ('close' then
    'keep-alive'); the later value silently won, so the dead 'close' entry
    was removed.  The key stays in its original (first-insertion) position so
    the header order sent by requests is unchanged.
    """
    return {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Connection': 'keep-alive',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,ja;q=0.4,zh-TW;q=0.2',
            'DNT': '1',
            'Host': 'www.tianyancha.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': ua.random
            }
def getHttpCookies():
    """Hard-coded tianyancha session cookie jar (captured from one logged-in
    browser session; the embedded tokens will expire and need refreshing)."""
    cookie_jar = {'aliyungf_tc': 'AQAAAFiQ724iBwUAvYrbchiXbweaUmpi',
                  'csrfToken': '4ObJTtL8wGkpkuSVmp5wvcmo',
                  'TYCID': 'b800b0e0194d11e99789e7e0149a2303',
                  'undefined': 'b800b0e0194d11e99789e7e0149a2303',
                  'ssuid': '1270484662',
                  '__insp_wid': '677961980',
                  '__insp_nv': 'true',
                  '__insp_targlpu': 'aHR0cHM6Ly93d3cudGlhbnlhbmNoYS5jb20v',
                  '__insp_targlpt': '5aSp55y85p_lLeWVhuS4muWuieWFqOW3peWFt1%2FkvIHkuJrkv6Hmga%2Fmn6Xor6Jf5YWs5Y_45p_l6K_iX_W3peWVhuafpeivol%2FkvIHkuJrkv6HnlKjkv6Hmga%2Fns7vnu58%3D',
                  'Hm_lvt_e92c8d65d92d534b0fc290df538b4758': '1547553402',
                  '_ga': 'GA1.2.1490777280.1547553402',
                  '_gid': 'GA1.2.229367458.1547553402',
                  '__insp_norec_sess': 'true',
                  'token': 'af3881f9e61542bbab43af2b0033409f',
                  '_utm': '3e1d7328237649498dd90fef5e80d15a',
                  'tyc-user-info': '%257B%2522claimEditPoint%2522%253A%25220%2522%252C%2522myQuestionCount%2522%253A%25220%2522%252C%2522explainPoint%2522%253A%25220%2522%252C%2522nickname%2522%253A%2522%25E7%25BB%25B4%25E6%258B%2589%25C2%25B7%25E6%25B3%2595%25E7%25B1%25B3%25E5%258A%25A0%2522%252C%2522integrity%2522%253A%25220%2525%2522%252C%2522state%2522%253A%25220%2522%252C%2522announcementPoint%2522%253A%25220%2522%252C%2522vipManager%2522%253A%25220%2522%252C%2522discussCommendCount%2522%253A%25221%2522%252C%2522monitorUnreadCount%2522%253A%25226%2522%252C%2522onum%2522%253A%25220%2522%252C%2522claimPoint%2522%253A%25220%2522%252C%2522token%2522%253A%2522eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNzM1NDM0ODc5NiIsImlhdCI6MTU0NzYxMzIwNywiZXhwIjoxNTYzMTY1MjA3fQ.HLbft7wEp1Pf4QQur6EpNgjWFuTPwa3nNV_wVknQyCY8MWRai6pVxWsPmpVDxPGro7utyDDJutZ7kgflrSGO2Q%2522%252C%2522redPoint%2522%253A%25220%2522%252C%2522pleaseAnswerCount%2522%253A%25221%2522%252C%2522bizCardUnread%2522%253A%25220%2522%252C%2522vnum%2522%253A%25220%2522%252C%2522mobile%2522%253A%252217354348796%2522%257D',
                  'auth_token': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNzM1NDM0ODc5NiIsImlhdCI6MTU1NTczNDE3MSwiZXhwIjoxNTg3MjcwMTcxfQ.w0W_HzjiYs1W2BGHmYwc030uQ7d9jPUJEfn0gq42tn-2KDol8kvxiR1kZJQvUni6VIUoBmGEP-n3INmlE0plcQ',
                  '__insp_slim': '1547553988518',
                  'Hm_lpvt_e92c8d65d92d534b0fc290df538b4758': '1547553989'
                  }
    return cookie_jar
def format_user_cookies(cookie_str):
    """Parse a raw "k=v; k2=v2" cookie header string into a dict.

    Returns the dict, or None when parsing fails entirely.

    BUG FIX: values were extracted with split("=")[1], which truncated any
    value containing '=' (common in base64/JWT cookie values); partition()
    keeps everything after the first '='.  A fragment without '=' now yields
    an empty value instead of aborting the whole parse.
    """
    try:
        user_cookie = {}
        for cookie in cookie_str.split("; "):
            cookie_name, _, cookie_value = cookie.partition("=")
            user_cookie[cookie_name] = cookie_value
        formartPrint("当前使用的cookies",user_cookie)
        return user_cookie
    except Exception as e:
        formartPrint("获取cookies异常", e)
        return None
def getHttpCookies1():
    """Minimal cookie jar variant carrying only the auth_token
    (same hard-coded-session caveat as getHttpCookies)."""
    minimal_jar = {'auth_token': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNzM1NDM0ODc5NiIsImlhdCI6MTU1NDk5MDUwMSwiZXhwIjoxNTcwNTQyNTAxfQ.HUWVoqspH3lecZgyMgJ4CNcSQoiRA3x78nweastHpGf6Zz_IR8eV-r4FOZjymq-XK55x7zRFFQcwUU1sim-JrQ'}
    return minimal_jar
def formartPrint(type,data):
    """Log a tagged message to stdout and append it to the daily log file
    (./log_YYYYMMDD.txt).

    BUG FIX: the log file handle was opened and never closed (one leaked
    handle per log call); it is now managed by a with-block and opened as
    UTF-8 so the non-ASCII tag characters survive on platforms whose default
    encoding cannot encode them.
    """
    print("【" + type + "】:",data)
    log_path = "./log_" + time.strftime("%Y%m%d", time.localtime()) + ".txt"
    with open(log_path, "a+", encoding="utf-8") as log:
        log.write("【" + type + "】:" + str(data) + "\n")
def test_zl():
    """Ad-hoc probe: page through one hard-coded company's patent list,
    inserting the left-column cell texts into t_test, until a page is blocked.

    BUG FIX: `status_code is not 200` compared ints by identity; a value
    comparison is used now.
    """
    count = 0
    for num in range(1, 600):
        url = 'https://www.tianyancha.com/pagination/patent.xhtml?ps=5&pn=' + str(num) + '&id=32377046&_=1555765944888'
        response = requests.get(url, headers=getHttpHeaders(), cookies=getHttpCookies(), timeout=5)
        response.encoding = "utf-8"
        if response.status_code != 200:
            # Blocked: report how many pages succeeded and stop.
            print(count)
            break
        count = count + 1
        soup = BeautifulSoup(response.text, 'lxml')
        for res in soup.select("table>tbody>tr>td.left-col>span"):
            mysqlDaoeExecutor_insert("t_test",[
                {"name":"name","data":res.get_text().strip()}
            ])
if __name__ == '__main__':
    # NOTE(review): reload(sys) is a Python 2 habit (it used to re-expose
    # sys.setdefaultencoding); on Python 3 it has no encoding effect —
    # TODO confirm it can be dropped.
    importlib.reload(sys)
    main()
    # res = mysqlDaoeExecutor_select('tb_users_cookies_pool')
    # print(res[0][7])
import time
import importlib
import urllib
import urllib.request
import requests
from bs4 import BeautifulSoup
import pymysql
import numpy as np
import threading
import redis
from fake_useragent import UserAgent
import math
db_ = redis.Redis(host="localhost",password=123, port=6379,decode_responses=True)
thread_proxy_list = []
ua = UserAgent()
OPEN_PROXY_IP_THREAD = False
GET_PROXY_IP_BY_FREE = True
GET_RESPONSE_BY_PROXY = False
current_cookies = None
def main():
if OPEN_PROXY_IP_THREAD:
run_get_proxy_fun_by_thread()
companyList = get_all_companys("zb_tbCompanyInfo",[2,3])
for companyDatas in companyList:
try:
company = companyDatas["data"]
get_compony_info_by_companyname(company)
except Exception as e:
formartPrint("请求错误", "请求被拦截")
print(e)
for thread_proxy in thread_proxy_list:
thread_proxy.stop()
def get_compony_info_by_companyname(company):
company_name = company[1]
company_local_id = company[0]
selectors = [
'div.search-item:nth-child(1)>.search-result-single',
]
url_temp = {'url': 'http://www.tianyancha.com/search/p1', 'selectors': selectors}
response = getHttpResponse(url_temp["url"]+'?key=%s' % urllib.request.quote(company_name))
if response is None:
formartPrint("获取公司出错", "响应为空")
elif response.status_code is not 200:
formartPrint("获取公司出错", "被拦截")
elif response.status_code == 200:
soup = BeautifulSoup(response.text, 'lxml')
result = soup.select(url_temp["selectors"][0])
if len(result) > 0:
company_remote_id = result[0]["data-id"]
if not company_remote_id:
formartPrint("未找到公司id", company_local_id)
else:
get_compony_detaile_by_id(company_name, company_remote_id, company_local_id)
else:
formartPrint("没有该企业", company_name)
else:
formartPrint("获取公司出错","未知异常")
def get_compony_detaile_by_id(company_name,company_remote_id,company_local_id):
url_temp = {'url': 'http://www.tianyancha.com/company/'+company_remote_id}
response = getHttpResponse(url_temp["url"])
update_company_info(response,company_name,company_remote_id,company_local_id)
get_sb_by_componyname(response,company_name, company_remote_id,company_local_id)
get_rz_by_componyname(response,company_name, company_remote_id,company_local_id)
get_zl_by_componyname(response,company_name, company_remote_id,company_local_id)
get_zp_by_componyname(response,company_name, company_remote_id,company_local_id)
def update_company_info(company_detail_response,company_name,company_remote_id,company_local_id):
soup = BeautifulSoup(company_detail_response.text, 'lxml')
result = soup.select('div.content>.tag-list-content>.tag-list>.tag-common.-hint')
if len(result) == 0:
formartPrint("更新高新类型" + company_name, "非高新")
mysqlDaoeExecutor_update(
"zb_tbCompanyInfo",
[
{"name": "gxlx", "data": "非高新"}
],
[
{"name": "compy_id", "data": company_local_id}
]
)
else:
formartPrint("更新高新类型" + company_name, "高新企业")
zs_count_res = beauifulHtmlEleAndGetValue(company_detail_response, ["#nav-main-certificateCount>span.data-count"])
if len(zs_count_res) > 0 and len(zs_count_res[0]) > 0:
zs_count = zs_count_res[0][0]
selectors = [
'table>tbody>tr>td:nth-child(2)>span',
'table>tbody>tr>td:nth-child(4)>span'
]
page_count = math.ceil(int(zs_count) / 5)
formartPrint(company_name + "的证书列表页数", page_count)
url_temp = {'url': 'https://www.tianyancha.com/pagination/certificate.xhtml?ps=5&pn=$num&id=' + str(company_remote_id) + '&_=1555935634142', 'selectors': selectors}
formartPrint("发送请求", "正在获取【" + company_name + "】的证书列表")
for num in range(1,page_count+1):
url = url_temp["url"].replace("$num",str(num))
response = getHttpResponse(url)
if response is None:
formartPrint("获取"+company_name+"证书列表错误","请求超时")
elif response.status_code == 200:
zs_lists = beauifulHtmlEleAndGetValue(response,url_temp["selectors"])
exists = False
for zs in zs_lists:
if zs[0] == "高新技术企业":
exists = True
mysqlDaoeExecutor_update(
"zb_tbCompanyInfo",
[
{"name": "gxlx", "data": "国家高新"},
{"name": "gggx_time", "data": zs[1]}
],
[
{"name": "compy_id", "data": company_local_id}
]
)
break
if exists is True:
break
elif response.status_code is not 200:
formartPrint("获取" + company_name + "证书列表错误", "请求被拦截")
else:
formartPrint("获取" + company_name + "证书列表错误", "未知异常")
else:
formartPrint(company_name + "的证书列表为空", [])
def get_sb_by_componyname(company_detail_response, company_name, company_remote_id, company_local_id):
sb_count_res = beauifulHtmlEleAndGetValue(company_detail_response,["#nav-main-tmCount>span.data-count"])
sb_count = 0
if len(sb_count_res) > 0 and len(sb_count_res[0]) > 0:
sb_count = sb_count_res[0][0]
selectors = [
'div.data-content>table>tbody>tr>td:nth-child(4)>span',
'div.data-content>table>tbody>tr>td:nth-child(2)>span',
'div.data-content>table>tbody>tr>td:nth-child(6)>span',
'div.data-content>table>tbody>tr>td:nth-child(5)>span',
'div.data-content>table>tbody>tr>td:nth-child(7)>span'
]
page_count =math.ceil(int(sb_count)/10)
formartPrint(company_name + "的商标列表页数", page_count)
url_temp = {'url':'https://www.tianyancha.com/pagination/tmInfo.xhtml?ps=10&pn=$num&id='+str(company_remote_id)+'&_=1555775408946','selectors': selectors}
formartPrint("发送请求", "正在获取【" + company_name + "】的商标列表")
results = do_search_with_url_no_prarms(url_temp,1,page_count)
formartPrint(company_name + "的商标列表", results)
if len(results)> 0 :
mysqlDaoeExecutor_delete("zb_tbSbinfo", [
{"name": "compy_id", "data": str(company_local_id)}
])
for result in results:
mysqlDaoeExecutor_insert(
"zb_tbSbinfo",
[
{"name": "park_id", "data": 'SZ00000001'},
{"name": "compy_id", "data": str(company_local_id)},
{"name": "sbname", "data": result[0]},
{"name": "sqrq", "data": result[1]},
{"name": "sblx", "data": result[2]},
{"name": "sbsn", "data": result[3]},
{"name": "sbstate", "data": result[4]}
]
)
else:
formartPrint(company_name + "的商标列表为空", [])
mysqlDaoeExecutor_update(
"zb_tbCompanyInfo",
[
{"name": "sbnums", "data": sb_count},
],
[
{"name": "compy_id", "data": company_local_id}
]
)
def get_rz_by_componyname(company_detail_response, company_name, company_remote_id, company_local_id):
rz_count_res = beauifulHtmlEleAndGetValue(company_detail_response,["#nav-main-cpoyRCount>span.data-count"])
rz_count = 0
if len(rz_count_res) > 0 and len(rz_count_res[0]) > 0:
rz_count = rz_count_res[0][0]
selectors = [
'table>tbody>tr>td:nth-child(3)>span',
'table>tbody>tr>td:nth-child(4)>span',
'table>tbody>tr>td:nth-child(5)>span',
'table>tbody>tr>td:nth-child(7)>span',
'table>tbody>tr>td:nth-child(2)>span'
]
page_count =math.ceil(int(rz_count)/5)
formartPrint(company_name + "的软件著作权列表页数", page_count)
url_temp = {'url':'https://www.tianyancha.com/pagination/copyright.xhtml?ps=5&pn=$num&id='+str(company_remote_id)+'&_=1555834707614','selectors': selectors}
formartPrint("发送请求", "正在获取【" + company_name + "】的软件著作权列表")
results = do_search_with_url_no_prarms(url_temp,1,page_count)
formartPrint(company_name + "的软件著作权列表", results)
if len(results)> 0 :
mysqlDaoeExecutor_delete("zb_tbRzinfo", [
{"name": "compy_id", "data": str(company_local_id)}
])
for result in results:
mysqlDaoeExecutor_insert(
"zb_tbRzinfo",
[
{"name": "park_id", "data": 'SZ00000001'},
{"name": "compy_id", "data": str(company_local_id)},
{"name": "fullname", "data": result[0]},
{"name": "shortname", "data": result[1]},
{"name": "rzsn", "data": result[2]},
{"name": "rzver", "data": result[3]},
{"name": "sqrq", "data": result[4]}
]
)
else:
formartPrint(company_name + "的软件著作权列表为空", [])
mysqlDaoeExecutor_update(
"zb_tbCompanyInfo",
[
{"name": "rznums", "data": rz_count},
],
[
{"name": "compy_id", "data": company_local_id}
]
)
def get_zl_by_componyname(company_detail_response, company_name, company_remote_id, company_local_id):
zl_count_res = beauifulHtmlEleAndGetValue(company_detail_response,["#nav-main-patentCount>span.data-count"])
mysqlDaoeExecutor_delete(
"zb_tbZlinfo",
[
{"name": "compy_id", "data": str(company_local_id)}
]
)
if len(zl_count_res) > 0 and len(zl_count_res[0]) > 0:
zl_count = zl_count_res[0][0]
selectors = [
'table>tbody>tr>td:nth-child(4)>span',
'table>tbody>tr>td:nth-child(6)>span'
]
page_count =math.ceil(int(zl_count)/5)
formartPrint(company_name + "的专利列表页数", page_count)
url_temp = {'url':'https://www.tianyancha.com/pagination/patent.xhtml?ps=5&pn=$num&id='+str(company_remote_id)+'&_=1555834707617','selectors': selectors}
formartPrint("发送请求", "正在获取【" + company_name + "】的专利链接列表")
zl_links = []
sn_lx_lists = []
zlnums = 0
xyzlnums = 0
wgzlnums = 0
for num in range(1,page_count+1):
formartPrint(company_name + "当前正在处理的专利列表页数", str(num) + "/" + str(page_count))
try:
response = getHttpResponse(url_temp["url"].replace("$num", str(num)))
if response is None:
formartPrint(company_name + "的专利列表获取超时", "休息一下")
continue
elif response.status_code == 200:
sn_lx_datas = beauifulHtmlEleAndGetValue(response, url_temp["selectors"])
for data in sn_lx_datas:
sn_lx_lists.append(data)
if data[1] == "发明专利":
zlnums = zlnums + 1
elif data[1] == "外观专利":
wgzlnums = wgzlnums + 1
elif data[1] == "实用新型":
xyzlnums = xyzlnums + 1
else:
formartPrint(company_name+"的专利类型非采集类型",data[1])
soup = BeautifulSoup(response.text, 'lxml')
a_lists = soup.select('table>tbody>tr>td:nth-child(7)>a')
for link in a_lists:
zl_links.append(link["href"])
else:
formartPrint(company_name + "获取专利链接时被拦截", response.status_code)
break
except Exception as e:
continue
formartPrint("获取"+company_name+"时出现异常",e)
mysqlDaoeExecutor_update(
"zb_tbCompanyInfo",
[
{"name": "zlnums", "data": zlnums},
{"name": "xyzlnums", "data": xyzlnums},
{"name": "wgzlnums", "data": wgzlnums},
],
[
{"name": "compy_id", "data": company_local_id}
]
)
update_zl_by_zl_links(company_name,company_local_id,zl_links,sn_lx_lists)
else:
formartPrint(company_name + "的专利列表为空", [])
def update_zl_by_zl_links(company_name,company_local_id,zl_links,sn_lx_lists):
formartPrint(company_name + "的专利的链接数", len(zl_links))
zl_results_db_datas = []
for num in range(len(zl_links)):
formartPrint(company_name + "当前正在处理的专利链接索引", str(num+1)+"/"+str(len(zl_links)))
link = zl_links[num]
response = getHttpResponse(link)
if response is None:
formartPrint(company_name + "的专利获取超时", "休息一下")
continue
elif response.status_code == 200:
zl_selectors = [
"#patentTitle>table>tr:nth-child(1)>td:nth-child(4)",
"#patentTitle>table>tr:nth-child(6)>td:nth-child(4)",
"#patentTitle>table>tr:nth-child(4)>td:nth-child(2)",
"#patentTitle>table>tr:nth-child(6)>td:nth-child(2)"
]
zl_results = beauifulHtmlEleAndGetValue(response, zl_selectors)
if len(zl_results_db_datas) == 0:
zl_results_db_datas = zl_results
else:
zl_results_db_datas = np.concatenate((zl_results_db_datas, zl_results))
else:
formartPrint(company_name + "的专利获取被拦截", response.status_code)
break
formartPrint(company_name + "的专利列表", zl_results_db_datas)
for zl in zl_results_db_datas:
mysqlDaoeExecutor_insert(
"zb_tbZlinfo",
[
{"name": "park_id", "data": 'SZ00000001'},
{"name": "compy_id", "data": str(company_local_id)},
{"name": "zlsn", "data": zl[0]},
{"name": "lzsj", "data": zl[1]},
{"name": "zlname", "data": zl[2]},
{"name": "sqrq", "data": zl[3]},
{"name": "zllx", "data": '-'},
]
)
for sn_lx in sn_lx_lists:
mysqlDaoeExecutor_update(
"zb_tbZlinfo",
[
{"name": "zllx", "data": sn_lx[1]},
],
[
{"name": "zlsn", "data": sn_lx[0]}
]
)
def get_zp_by_componyname(company_detail_response, company_name, company_remote_id, company_local_id):
zp_count_res = beauifulHtmlEleAndGetValue(company_detail_response,["#nav-main-copyrightWorks>span.data-count"])
zp_count = 0
if len(zp_count_res) > 0 and len(zp_count_res[0]) > 0:
zp_count = zp_count_res[0][0]
selectors = [
'table>tbody>tr>td:nth-child(2)>span',
'table>tbody>tr>td:nth-child(4)>span',
'table>tbody>tr>td:nth-child(3)>span',
'table>tbody>tr>td:nth-child(6)>span',
'table>tbody>tr>td:nth-child(5)>span',
'table>tbody>tr>td:nth-child(7)>span'
]
page_count =math.ceil(int(zp_count)/5)
formartPrint(company_name + "的作品著作权列表页数", page_count)
url_temp = {'url':'https://www.tianyancha.com/pagination/copyrightWorks.xhtml?ps=5&pn=$num&id='+str(company_remote_id)+'&_=1555844934449','selectors': selectors}
formartPrint("发送请求", "正在获取【" + company_name + "】的作品著作权列表")
results = do_search_with_url_no_prarms(url_temp,1,page_count)
formartPrint(company_name + "的作品著作权列表", results)
if len(results)> 0 :
mysqlDaoeExecutor_delete("zb_tbZpinfo", [
{"name": "compy_id", "data": str(company_local_id)}
])
for result in results:
mysqlDaoeExecutor_insert(
"zb_tbZpinfo",
[
{"name": "park_id", "data": 'SZ00000001'},
{"name": "compy_id", "data": company_local_id},
{"name": "zpname", "data": result[0]},
{"name": "zplx", "data": result[1]},
{"name": "zpsn", "data": result[2]},
{"name": "sqrq", "data": result[3]},
{"name": "firstpub_rq", "data": result[4]},
{"name": "end_rq", "data": result[5]},
]
)
else:
formartPrint(company_name + "的作品著作权列表为空", [])
mysqlDaoeExecutor_update(
"zb_tbCompanyInfo",
[
{"name": "zpnums", "data": zp_count},
],
[
{"name": "compy_id", "data": company_local_id}
]
)
def doSearch(url_temp,companyName,pageStart,pageEnd):
if not pageStart:
pageStart = 1
if not pageEnd:
pageEnd = 2
resultList = []
for num in range(pageStart,pageEnd):
url = url_temp["url"] + str(num) + '?key=%s' % urllib.request.quote(companyName)
try:
response = getHttpResponse(url)
if response.status_code is not 200:
formartPrint("请求被阻止,状态码",response.status_code)
result = beauifulHtmlEleAndGetValue(response, url_temp["selectors"])
if len(result) == 0:
break
else:
if len(resultList) == 0:
resultList = result
else:
resultList = np.concatenate((resultList, result))
except Exception as e:
formartPrint("爬取网页出错",url)
print(e)
break
return resultList
def do_search_with_url_no_prarms(url_temp,pageStart,pageEnd):
if not pageStart:
pageStart = 1
if not pageEnd:
pageEnd = 2
resultList = []
for num in range(pageStart,pageEnd+1):
url = url_temp["url"].replace("$num",str(num))
try:
response = getHttpResponse(url)
if response is None:
continue
elif response.status_code is not 200:
formartPrint("请求被阻止,状态码",response.status_code)
break
result = beauifulHtmlEleAndGetValue(response, url_temp["selectors"])
if len(result) == 0:
break
else:
if len(resultList) == 0:
resultList = result
else:
resultList = np.concatenate((resultList, result))
except Exception as e:
formartPrint("爬取网页出错",url)
print(e)
continue
return resultList
def getHttpResponse(url):
    """Fetch *url* via the proxy pool or directly, per GET_RESPONSE_BY_PROXY."""
    fetch = getHttpResponseWithProxy if GET_RESPONSE_BY_PROXY else getHttpResponseWithoutProxy
    return fetch(url)
def getHttpResponseWithProxy(url):
    # Fetch *url* through a rotating proxy from the pool. Returns the
    # response with UTF-8 decoding forced, or implicitly None when the
    # proxied request raised (the failure is only logged).
    formartPrint("请求地址", url)
    try:
        response = get_http_response_by_proxy_ip(url)
        response.encoding = "utf-8"
        return response
    except Exception as e:
        print(e)
        formartPrint("发送请求失败", "网页请求被拦截")
def getHttpResponseWithoutProxy(url):
    # Fetch *url* directly (no proxy) via the user-cookie pool.
    # Returns the response, or None when the request raised.
    formartPrint("请求地址", url)
    try:
        response = do_get_response(url)
        return response
    except Exception as e:
        print(e)
        formartPrint("发送请求失败", "网页请求被拦截")
        return None
def get_proxy():
    """Block until a proxy IP is available, then return a requests-style
    proxies dict ({"http": "http://<ip:port>"}).

    Polls the redis pool every 5 seconds while it is empty; redis errors
    are logged and retried forever.
    """
    while True:
        try:
            if db_.llen("proxies") == 0:
                # Pool empty: wait for the harvester threads to refill it.
                time.sleep(5)
            else:
                # BUG FIX: removed an unreachable "break" that followed
                # this return statement.
                return {"http": "http://" + get_proxy_ip()}
        except Exception:
            formartPrint("代理ip为空", "None")
def get_http_response_by_proxy_ip(url):
    # Retry a GET through rotating proxies until a 200 response with a
    # non-empty body arrives. Gives up after 10 attempts (returns None
    # implicitly); a 503 backs off 3 seconds before rotating the proxy.
    count = 0
    proxy = get_proxy()
    while True:
        count = count + 1
        formartPrint("循环监听代理次数",count)
        try:
            formartPrint("使用了代理",proxy)
            response = get_http_response_by_proxy_ip_wapper(url,proxy)
            formartPrint("响应状态码",response.status_code)
            if response.status_code == 200 and response.text:
                return response
                break  # NOTE(review): unreachable after the return above
            elif response.status_code == 503:
                # Rate-limited: wait, then switch to a fresh proxy.
                time.sleep(3)
                proxy = get_proxy()
            elif count >= 10:
                print('抓取网页失败')
                break
            else:
                # Any other status: just rotate the proxy and retry.
                proxy = get_proxy()
        except Exception as e:
            formartPrint("获取请求连接报错",e)
            proxy = get_proxy()
def get_http_response_by_proxy_ip_wapper(url, proxy):
    """One proxied GET with the crawler's standard headers and cookies."""
    headers = getHttpHeaders()
    cookies = getHttpCookies()
    return requests.get(url, headers=headers, cookies=cookies, proxies=proxy, timeout=10)
def run_get_proxy_fun_by_thread():
    """Spawn the proxy-harvesting worker threads.

    Free-site scrapers (kuaidaili + xici) when GET_PROXY_IP_BY_FREE is
    set, otherwise the paid-API poller. Threads are registered in the
    module-level thread_proxy_list before being started.
    """
    if GET_PROXY_IP_BY_FREE:
        targets = [get_ip_proxys_kuaidaili, get_ip_proxys_xici]
    else:
        targets = [get_proxy_ip_customer]
    thread_proxy_list.extend(threading.Thread(target=target) for target in targets)
    for worker in thread_proxy_list:
        worker.start()
def do_proxy_get_response(url, proxy):
    """GET *url* through a single proxy URL string (short 5s timeout)."""
    headers = getHttpHeaders()
    cookies = getHttpCookies()
    return requests.get(url, headers=headers, cookies=cookies, proxies={"http": proxy}, timeout=5)
def do_get_response(url):
    # Direct (non-proxy) fetch path: delegate to the user-cookie pool.
    return get_users_cookies_pool_response(url)
def get_users_cookies_pool_response(url):
    """GET *url* with the cached user cookies, falling back to the pool.

    On any failure (no response, non-200 status, exception) the cached
    cookie row is marked expired and a fresh one is fetched via
    do_get_users_cookies_pool_response.
    """
    try:
        if current_cookies is None:
            return do_get_users_cookies_pool_response(url)
        response = requests.get(url, headers=getHttpHeaders(), cookies=current_cookies["cookies"], timeout=10)
        if response is None:
            update_cookies_expire_error_code(current_cookies["user_cookies"][0])
            return do_get_users_cookies_pool_response(url)
        # BUG FIX: was "status_code is 200" / "is not 200" — identity
        # checks on int literals; use value comparisons. The two former
        # branches covered all cases, so the dead final "else" is gone.
        if response.status_code == 200:
            update_cookies_request_count_url(current_cookies["user_cookies"], url)
            return response
        update_cookies_expire_error_code(current_cookies["user_cookies"][0], response.status_code)
        return do_get_users_cookies_pool_response(url)
    except Exception as e:
        formartPrint("使用临时cookies异常", e)
        return do_get_users_cookies_pool_response(url)
def do_get_users_cookies_pool_response(url):
    """Rotate through the user-cookie pool until one successfully GETs *url*.

    Returns the UTF-8-decoded response on success, or None when the pool
    has no active cookies left. Every cookie row that fails (unparseable,
    no response, non-200) is deactivated in the database and the loop
    moves to the next one. On success the winning cookies are cached in
    the module-level current_cookies.
    """
    global current_cookies
    while True:
        formartPrint("获取用户cookies", "正在切换cookies")
        user_cookies = get_user_cookies()
        if user_cookies is None:
            formartPrint("获取用户cookies", "cookies为空")
            return None
        cookies = format_user_cookies(user_cookies[3])
        if cookies is None:
            update_cookies_expire_error_code(user_cookies[0])
            continue
        formartPrint("正在使用cookies获取response", "当前用户【" + user_cookies[1] + "】")
        response = requests.get(url, headers=getHttpHeaders(), cookies=cookies, timeout=10)
        if response is None:
            formartPrint("获取用户cookies异常", "response为None")
            update_cookies_expire_error_code(user_cookies[0])
            continue
        # BUG FIX: was "status_code is 200" / "is not 200" identity checks
        # on int literals; use value comparisons. The former "== 200" and
        # "!= 200" branches covered everything, so the dead trailing
        # "else" (and the unreachable break after return) were removed.
        if response.status_code == 200:
            current_cookies = {'user_cookies': user_cookies, 'cookies': cookies}
            update_cookies_request_count_url(user_cookies, url)
            formartPrint("当前用户", user_cookies[1])
            response.encoding = "utf-8"
            return response
        formartPrint("使用cookies请求异常", "当前用户被拦截【" + user_cookies[1] + "】")
        update_cookies_expire_error_code(user_cookies[0], response.status_code)
def update_cookies_expire_error_code(cookie_id, error_code=414):
    """Deactivate one cookie-pool row and record when and why it expired."""
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    updates = [
        {'name': 'active', 'data': '0'},
        {'name': 'last_expire_time', 'data': now},
        {'name': 'error_code', 'data': error_code},
    ]
    conditions = [{'name': 'id', 'data': cookie_id}]
    mysqlDaoeExecutor_update('tb_users_cookies_pool', updates, conditions)
def update_cookies_request_count_url(user_cookies, url):
    """Increment the request counter and record the last URL for a cookie row.

    user_cookies: a tb_users_cookies_pool row tuple; column 0 is the id,
    column 7 the running request count (NULL/'' treated as zero).
    """
    count = 0 if user_cookies[7] is None or user_cookies[7] == '' else int(user_cookies[7])
    # BUG FIX: removed a leftover '================' debug print.
    mysqlDaoeExecutor_update(
        'tb_users_cookies_pool',
        [
            {'name': 'request_count', 'data': count + 1},
            {'name': 'last_request_url', 'data': url}
        ],
        [
            {'name': 'id', 'data': user_cookies[0]}
        ]
    )
def get_user_cookies():
    """Fetch one active row from the user-cookie pool.

    Returns the row tuple, or None when no active cookie remains.
    Database errors are logged and retried forever.
    """
    while True:
        try:
            formartPrint("获取用户cookies", "正在获取")
            user_cookies = mysqlDaoeExecutor_select('tb_users_cookies_pool', [
                {'name': "active", 'data': '1'}
            ], 0, 1)
            if len(user_cookies) == 0:
                formartPrint("用户cookie的数量为空", "没有可用的用户cookie,请及时激活或补充")
                return None
            # BUG FIX: removed the unreachable "break" statements that
            # followed the return statements.
            formartPrint("获取用户cookies", "获取成功")
            return user_cookies[0]
        except Exception as e:
            formartPrint("获取用户cookies的时候发生了异常", e)
            continue
def get_ip_proxys_kuaidaili():
    # Scrape free proxies from kuaidaili.com while the redis pool is
    # empty; each candidate is probed by valid_proxy_ip before storage.
    # NOTE(review): recurses when a page parses empty — potential deep
    # recursion if the site blocks us; confirm this is acceptable.
    if db_.llen("proxies") == 0:
        try:
            header = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
            for num in range(1, 2000):
                response = requests.get("https://www.kuaidaili.com/free/inha/" + str(num), headers=header, timeout=1)
                response.encoding = "utf-8"
                # Columns: IP, port, protocol (HTTP/HTTPS).
                proxies = beauifulHtmlEleAndGetValue(response, [
                    "#list>table>tbody>tr>td:nth-child(1)",
                    "#list>table>tbody>tr>td:nth-child(2)",
                    "#list>table>tbody>tr>td:nth-child(4)",
                ])
                if len(proxies) == 0:
                    get_ip_proxys_kuaidaili()
                else:
                    for proxy in proxies:
                        valid_proxy_ip(proxy[0] + ":" + proxy[1], proxy[2], "快代理")
        except Exception as e:
            print(e)
            formartPrint("获取代理池失败", "快代理")
def get_ip_proxys_xici():
    # Scrape free proxies from xicidaili.com while the redis pool is
    # empty; mirrors get_ip_proxys_kuaidaili including the recursive
    # retry on an empty page (NOTE(review): same recursion-depth risk).
    if db_.llen("proxies") == 0:
        try:
            header = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
            for num in range(1, 2000):
                response = requests.get("https://www.xicidaili.com/wt/" + str(num), headers=header, timeout=1)
                response.encoding = "utf-8"
                # Columns: IP, port, protocol.
                proxies = beauifulHtmlEleAndGetValue(response, [
                    "#ip_list>tr.odd>td:nth-child(2)",
                    "#ip_list>tr.odd>td:nth-child(3)",
                    "#ip_list>tr.odd>td:nth-child(6)",
                ])
                if len(proxies) == 0:
                    get_ip_proxys_xici()
                else:
                    for proxy in proxies:
                        valid_proxy_ip(proxy[0] + ":" + proxy[1], proxy[2],"西刺")
        except Exception as e:
            print(e)
            formartPrint("获取代理池失败","西刺")
def get_ip_proxys_daili66():
    """Harvest proxies from 66ip.cn, pushing working ones into the pool."""
    try:
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
        for num in range(1, 2000):
            response = requests.get("http://www.66ip.cn/" + str(num) + ".html", headers=header, timeout=1)
            response.encoding = "utf-8"
            # Columns: IP, port. (Removed two debug prints that dumped the
            # full page HTML and the parsed list to stdout.)
            proxies = beauifulHtmlEleAndGetValue(response, [
                "table>tr>td:nth-child(1)",
                "table>tr>td:nth-child(2)",
            ])
            if len(proxies) == 0:
                get_ip_proxys_daili66()
            else:
                for proxy in proxies:
                    # BUG FIX: valid_proxy_ip takes (proxy, agreement,
                    # agent) but was called with only two arguments
                    # (TypeError at runtime). 66ip lists HTTP proxies,
                    # so pass "HTTP" explicitly.
                    valid_proxy_ip(proxy[0] + ":" + proxy[1], "HTTP", "代理66")
    except Exception as e:
        print(e)
def valid_proxy_ip(proxy,agreement,agent):
    # Probe *proxy* ("ip:port") against baidu and push it into the redis
    # pool only when the probe comes back 200. Non-"HTTP" entries are
    # silently skipped; *agent* names the source site for logging.
    if agreement == "HTTP":
        try:
            url = "http://www.baidu.com/"
            header = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}
            response = requests.get(url, headers=header, proxies={"http": proxy}, timeout=1)
            if not response:
                return
            elif response.status_code == 200:
                push_proxy_ip(proxy)
                formartPrint(agent + "ip存入成功,"+ agreement, proxy)
        except Exception as e:
            # Best-effort: dead or slow proxies are simply dropped.
            pass
def push_proxy_ip(proxy_ip):
    # Add a validated "ip:port" entry to the redis-backed proxy pool.
    db_.lpush("proxies", proxy_ip)
def get_proxy_ip():
    """Pop one "ip:port" entry from the redis pool; None when empty."""
    if db_.llen("proxies"):
        return db_.rpop("proxies")
    return None
def get_proxy_ip_customer():
    """Worker loop: keep the redis pool topped up from the paid mogumiao API.

    Fetches 10 candidate IPs whenever the pool runs dry and probes each
    one via valid_proxy_ip before storing. Runs forever (daemon thread).
    """
    while True:
        if db_.llen("proxies") == 0:
            formartPrint("获取蘑菇代理ip", "10条")
            proxy_ip_datas = requests.get(
                "http://piping.mogumiao.com/proxy/api/get_ip_bs?appKey=ecfc21c9cbd84190893bb255298093d3&count=10&expiryDate=0&format=2&newLine=1").text.strip()
            for proxy_ip in proxy_ip_datas.split(" "):
                valid_proxy_ip(proxy_ip, "HTTP", "蘑菇代理")
        else:
            # BUG FIX: the loop used to spin at 100% CPU while the pool
            # was non-empty; back off briefly before polling again.
            time.sleep(1)
def beauifulHtmlEleAndGetValue(response, selectors):
    """Extract text for each CSS selector and transpose into rows.

    Each selector yields one column of stripped text values; the result
    is the row-wise transpose (a list of lists). Returns [] when nothing
    matched or when parsing/transposing fails (e.g. ragged columns).
    """
    try:
        soup = BeautifulSoup(response.text, 'lxml')
        columns = []
        for selector in selectors:
            texts = [node.get_text().strip() for node in soup.select(selector)]
            if texts:
                columns.append(texts)
        if not columns:
            return []
        rows = []
        # Index-based transpose: a column shorter than the first raises
        # IndexError, which the except below converts into [].
        for row_idx in range(len(columns[0])):
            rows.append([column[row_idx] for column in columns])
        return rows
    except Exception as e:
        print(e)
        formartPrint("解析html", "解析html异常")
        return []
def excuteAndGetcursorByMysql(type,sql):
    # Execute *sql* against the local MySQL 'tyc' database and return the
    # cursor; returns None on failure (callers that chain .fetchall()
    # will then raise AttributeError).
    # NOTE(review): a new connection is opened on every call and never
    # closed — confirm whether this leak matters for long crawls.
    # NOTE(review): credentials are hard-coded (root/123, localhost).
    formartPrint("数据库执行"+type,sql)
    try:
        conn = pymysql.connect(host='localhost', user='root', passwd='123', db='tyc', port=3306, charset='utf8')
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
        return cur
    except Exception as e:
        formartPrint("数据库操作失败",sql)
        print(e)
def get_all_companys(tableName, rowIndexs):
    """Load all rows from *tableName*, projecting the columns in *rowIndexs*.

    rowIndexs: iterable of column indices to keep per row; falsy keeps the
    whole row tuple as a single element.
    Returns a list of {"data": [...]} dicts (empty when the table has no
    rows), or None when the database query itself raised.
    """
    try:
        results = []
        results_temp = mysqlDaoeExecutor_select(tableName)
        if len(results_temp) == 0:
            formartPrint("无数据", "查询")
        else:
            for result in results_temp:
                rowData = {"data": []}
                if not rowIndexs:
                    rowData["data"].append(result)
                else:
                    for rowIndex in rowIndexs:
                        rowData["data"].append(result[rowIndex])
                if len(rowData["data"]) == 0:
                    formartPrint("无数据", "查询")
                else:
                    results.append(rowData)
        # BUG FIX: the empty-table branch used to fall off the end and
        # return None; always return the (possibly empty) list so callers
        # can iterate unconditionally.
        return results
    except Exception as e:
        formartPrint("数据库异常", "数据库【select】异常")
        print(e)
def mysqlDaoeExecutor_select(tableName, searchDatas=[], limit_start=None, limit_end=None):
    """SELECT * FROM *tableName* with optional equality filters and LIMIT.

    searchDatas: list of {"name": column, "data": value} equality filters.
    limit_start/limit_end: passed straight into "LIMIT start,end".
    Returns cursor.fetchall() (tuple of rows); None when the query raised.

    WARNING: values are interpolated into the SQL string (injection-prone);
    only trusted data may be passed. Kept for interface compatibility.
    """
    try:
        sql = "select * from " + tableName
        if searchDatas:
            # BUG FIX: multiple WHERE conditions used to be joined with
            # "," which is invalid SQL; join them with "and".
            conditions = [column["name"] + "='" + str(column["data"]) + "'" for column in searchDatas]
            sql = sql + " where " + " and ".join(conditions)
        if limit_start is not None and limit_end is not None:
            sql = sql + ' limit ' + str(limit_start) + ',' + str(limit_end)
        sql = sql + ";"
        return excuteAndGetcursorByMysql("select", sql).fetchall()
    except Exception as e:
        print(e)
        formartPrint("数据库异常", "数据库【select】异常")
def mysqlDaoeExecutor_update(tableName, columns, searchDatas):
    """UPDATE *tableName* SET col=value [WHERE equality filters].

    columns / searchDatas: lists of {"name": column, "data": value}.
    WARNING: values are interpolated into the SQL string (injection-prone);
    only trusted data may be passed.
    """
    try:
        assignments = ",".join(
            column["name"] + "='" + str(column["data"]) + "'" for column in columns)
        sql = "update " + tableName + " set " + assignments
        if searchDatas:
            # BUG FIX: multiple WHERE conditions used to be joined with
            # "," which is invalid SQL; join them with "and".
            conditions = " and ".join(
                column["name"] + "='" + str(column["data"]) + "'" for column in searchDatas)
            sql = sql + " where " + conditions
        sql = sql + ";"
        excuteAndGetcursorByMysql("update", sql)
    except Exception:
        # BUG FIX: was a bare "except:" which also swallowed SystemExit
        # and KeyboardInterrupt.
        formartPrint("数据库异常", "数据库【update】异常")
def mysqlDaoeExecutor_insert(tableName, columns):
    """INSERT one row into *tableName*.

    columns: list of {"name": column, "data": value}.
    WARNING: values are interpolated into the SQL string (injection-prone);
    only trusted data may be passed.
    """
    try:
        names = ",".join(column["name"] for column in columns)
        # BUG FIX: values were concatenated without str(), so any non-str
        # data (ints from counters, None) raised TypeError.
        values = ",".join("'" + str(column["data"]) + "'" for column in columns)
        sql = "insert into " + tableName + " (" + names + ") values (" + values + ");"
        excuteAndGetcursorByMysql("insert", sql)
    except Exception as e:
        print(e)
        formartPrint("数据库异常", "数据库【插入】异常")
def mysqlDaoeExecutor_delete(tableName, searchDatas=[]):
    """DELETE FROM *tableName* [WHERE equality filters].

    searchDatas: list of {"name": column, "data": value}; empty deletes
    every row. WARNING: values are interpolated into the SQL string
    (injection-prone); only trusted data may be passed.
    """
    try:
        sql = "delete from " + tableName
        if searchDatas:
            # BUG FIX: multiple WHERE conditions used to be joined with
            # "," which is invalid SQL; join them with "and".
            conditions = [column["name"] + "='" + str(column["data"]) + "'" for column in searchDatas]
            sql = sql + " where " + " and ".join(conditions)
        sql = sql + ";"
        excuteAndGetcursorByMysql("delete", sql)
    except Exception as e:
        print(e)
        formartPrint("数据库异常", "数据库【delete】异常")
def getHttpHeaders():
    """Standard tianyancha request headers; User-Agent is randomized
    per call via the module-level fake-useragent instance."""
    # BUG FIX: the dict literal contained the 'Connection' key twice
    # ('close' then 'keep-alive'); only the last duplicate survives in a
    # dict, so keep the effective 'keep-alive' entry explicitly.
    return {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,ja;q=0.4,zh-TW;q=0.2',
            'Connection': 'keep-alive',
            'DNT': '1',
            'Host': 'www.tianyancha.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': ua.random
            }
def getHttpCookies():
    """Hard-coded tianyancha session cookies for one logged-in account.

    NOTE(review): the auth/session tokens here are tied to a single
    account and carry expiry timestamps — requests will start failing
    once they expire; confirm how these are meant to be rotated.
    """
    return {'aliyungf_tc': 'AQAAAFiQ724iBwUAvYrbchiXbweaUmpi',
            'csrfToken': '4ObJTtL8wGkpkuSVmp5wvcmo',
            'TYCID': 'b800b0e0194d11e99789e7e0149a2303',
            'undefined': 'b800b0e0194d11e99789e7e0149a2303',
            'ssuid': '1270484662',
            '__insp_wid': '677961980',
            '__insp_nv': 'true',
            '__insp_targlpu': 'aHR0cHM6Ly93d3cudGlhbnlhbmNoYS5jb20v',
            '__insp_targlpt': '5aSp55y85p_lLeWVhuS4muWuieWFqOW3peWFt1_kvIHkuJrkv6Hmga_mn6Xor6Jf5YWs5Y_45p_l6K_iX_W3peWVhuafpeivol_kvIHkuJrkv6HnlKjkv6Hmga_ns7vnu58%3D',
            'Hm_lvt_e92c8d65d92d534b0fc290df538b4758': '1547553402',
            '_ga': 'GA1.2.1490777280.1547553402',
            '_gid': 'GA1.2.229367458.1547553402',
            '__insp_norec_sess': 'true',
            'token': 'af3881f9e61542bbab43af2b0033409f',
            '_utm': '3e1d7328237649498dd90fef5e80d15a',
            'tyc-user-info': '%257B%2522claimEditPoint%2522%253A%25220%2522%252C%2522myQuestionCount%2522%253A%25220%2522%252C%2522explainPoint%2522%253A%25220%2522%252C%2522nickname%2522%253A%2522%25E7%25BB%25B4%25E6%258B%2589%25C2%25B7%25E6%25B3%2595%25E7%25B1%25B3%25E5%258A%25A0%2522%252C%2522integrity%2522%253A%25220%2525%2522%252C%2522state%2522%253A%25220%2522%252C%2522announcementPoint%2522%253A%25220%2522%252C%2522vipManager%2522%253A%25220%2522%252C%2522discussCommendCount%2522%253A%25221%2522%252C%2522monitorUnreadCount%2522%253A%25226%2522%252C%2522onum%2522%253A%25220%2522%252C%2522claimPoint%2522%253A%25220%2522%252C%2522token%2522%253A%2522eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNzM1NDM0ODc5NiIsImlhdCI6MTU0NzYxMzIwNywiZXhwIjoxNTYzMTY1MjA3fQ.HLbft7wEp1Pf4QQur6EpNgjWFuTPwa3nNV_wVknQyCY8MWRai6pVxWsPmpVDxPGro7utyDDJutZ7kgflrSGO2Q%2522%252C%2522redPoint%2522%253A%25220%2522%252C%2522pleaseAnswerCount%2522%253A%25221%2522%252C%2522bizCardUnread%2522%253A%25220%2522%252C%2522vnum%2522%253A%25220%2522%252C%2522mobile%2522%253A%252217354348796%2522%257D',
            'auth_token': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNzM1NDM0ODc5NiIsImlhdCI6MTU1NTczNDE3MSwiZXhwIjoxNTg3MjcwMTcxfQ.w0W_HzjiYs1W2BGHmYwc030uQ7d9jPUJEfn0gq42tn-2KDol8kvxiR1kZJQvUni6VIUoBmGEP-n3INmlE0plcQ',
            '__insp_slim': '1547553988518',
            'Hm_lpvt_e92c8d65d92d534b0fc290df538b4758': '1547553989'
            }
def format_user_cookies(cookie_str):
    """Parse a browser "k=v; k2=v2" cookie header string into a dict.

    Returns the cookie dict, or None when parsing raised (logged).
    """
    try:
        cookie_list = cookie_str.split("; ")
        user_cookie = {}
        for cookie in cookie_list:
            # BUG FIX: split("=")[1] truncated any value containing '='
            # (e.g. base64 padding in auth tokens); split only on the
            # first '=' so the full value is preserved.
            cookie_name, _, cookie_value = cookie.partition("=")
            user_cookie[cookie_name] = cookie_value
        formartPrint("当前使用的cookies", user_cookie)
        return user_cookie
    except Exception as e:
        formartPrint("获取cookies异常", e)
        return None
def getHttpCookies1():
    """Minimal cookie jar: only the hard-coded auth_token."""
    cookies = {
        'auth_token': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNzM1NDM0ODc5NiIsImlhdCI6MTU1NDk5MDUwMSwiZXhwIjoxNTcwNTQyNTAxfQ.HUWVoqspH3lecZgyMgJ4CNcSQoiRA3x78nweastHpGf6Zz_IR8eV-r4FOZjymq-XK55x7zRFFQcwUU1sim-JrQ',
    }
    return cookies
def formartPrint(type, data):
    """Print a tagged message and append it to today's log file.

    type: short Chinese tag shown inside 【】; data: payload (str()-ed).
    """
    print("【" + type + "】:", data)
    # BUG FIX: the log file handle was opened on every call but never
    # closed; use a context manager so it is flushed and released.
    log_path = "./log_" + time.strftime("%Y%m%d", time.localtime()) + ".txt"
    with open(log_path, "a+") as log:
        log.write("【" + type + "】:" + str(data) + "\n")
def test_zl():
    """Ad-hoc smoke crawl: page through one patent listing, inserting the
    patent names into t_test until a non-200 response comes back, then
    print how many pages succeeded."""
    count = 0
    for num in range(1, 600):
        url = 'https://www.tianyancha.com/pagination/patent.xhtml?ps=5&pn=' + str(num) + '&id=32377046&_=1555765944888'
        response = requests.get(url, headers=getHttpHeaders(), cookies=getHttpCookies(), timeout=5)
        response.encoding = "utf-8"
        # BUG FIX: was "status_code is not 200" (identity check on an int
        # literal); use a value comparison.
        if response.status_code != 200:
            print(count)
            break
        count = count + 1
        soup = BeautifulSoup(response.text, 'lxml')
        results = soup.select("table>tbody>tr>td.left-col>span")
        for res in results:
            mysqlDaoeExecutor_insert("t_test", [
                {"name": "name", "data": res.get_text().strip()}
            ])
# Script entry point.
# NOTE(review): importlib.reload(sys) is a Python-2-era idiom (it used to
# re-expose sys.setdefaultencoding); on Python 3 the reload has no useful
# effect here — confirm nothing depends on it before removing.
if __name__ == '__main__':
    importlib.reload(sys)
    main()
| false | true |
f7fe58318b4bf438cd9c0ae887db715050a26b70 | 114 | py | Python | QuestoesBeecrowd-Iniciante/1006.py | AtosNeves/Beecrowd | f1192218eac3f6300290fe8234bbc720e9fb859e | [
"MIT"
] | null | null | null | QuestoesBeecrowd-Iniciante/1006.py | AtosNeves/Beecrowd | f1192218eac3f6300290fe8234bbc720e9fb859e | [
"MIT"
] | null | null | null | QuestoesBeecrowd-Iniciante/1006.py | AtosNeves/Beecrowd | f1192218eac3f6300290fe8234bbc720e9fb859e | [
"MIT"
] | null | null | null | a = float(input())
b = float(input())
c = float(input())
d = (a*2+b*3+c*5)/10
print("MEDIA = {:.1f}".format(d))
| 14.25 | 33 | 0.535088 | a = float(input())
b = float(input())
c = float(input())
d = (a*2+b*3+c*5)/10
print("MEDIA = {:.1f}".format(d))
| true | true |
f7fe5867564219630cc49ea38853f6e1d85ebff6 | 72 | py | Python | learn.py | python1803supera/helloteam | d21892abf0f9acc14efa758a502f10a63dddca8e | [
"Apache-2.0"
] | null | null | null | learn.py | python1803supera/helloteam | d21892abf0f9acc14efa758a502f10a63dddca8e | [
"Apache-2.0"
] | null | null | null | learn.py | python1803supera/helloteam | d21892abf0f9acc14efa758a502f10a63dddca8e | [
"Apache-2.0"
] | null | null | null | print("tiancai")
print("提升自己才有选择否则就是刀俎我为鱼肉“)
print("团队合作")
| 14.4 | 27 | 0.611111 | print("tiancai")
print("提升自己才有选择否则就是刀俎我为鱼肉“)
print("团队合作")
| false | true |
f7fe58a7a30039bc8ccff21f6d166ba1814e1590 | 10,475 | py | Python | test/test_download_file.py | lvarin/ega-download-client | bcbf0922af8bcea51ae62a3278b42c1491589923 | [
"Apache-2.0"
] | null | null | null | test/test_download_file.py | lvarin/ega-download-client | bcbf0922af8bcea51ae62a3278b42c1491589923 | [
"Apache-2.0"
] | null | null | null | test/test_download_file.py | lvarin/ega-download-client | bcbf0922af8bcea51ae62a3278b42c1491589923 | [
"Apache-2.0"
] | null | null | null | import hashlib
import math
import os
import tempfile
from collections import namedtuple
from unittest import mock
import pytest
from pyega3.libs.data_file import DataFile
OUTPUT_DIR = tempfile.gettempdir()
@pytest.fixture
def mock_writing_files():
files = {}
def open_wrapper(filename, mode):
filename = os.path.basename(filename)
if filename not in files:
if 'r' in mode:
raise Exception("Attempt to read mock file before it was created.")
files[filename] = bytearray()
content = bytes(files[filename])
content_len = len(content)
read_buf_sz = 65536
file_object = mock.mock_open(read_data=content).return_value
file_object.__iter__.return_value = [content[i:min(i + read_buf_sz, content_len)] for i in
range(0, content_len, read_buf_sz)]
file_object.write.side_effect = lambda write_buf: files[filename].extend(write_buf)
return file_object
def os_stat_mock(fn):
fn = os.path.basename(fn)
X = namedtuple('X', 'st_size f1 f2 f3 f4 f5 f6 f7 f8 f9')
result = X(*([None] * 10))
return result._replace(st_size=len(files[fn]))
def os_rename_mock(s, d):
files.__setitem__(os.path.basename(d), files.pop(os.path.basename(s)))
with mock.patch('builtins.open', new=open_wrapper):
with mock.patch('os.makedirs', return_value=None):
with mock.patch('os.path.exists', lambda path: os.path.basename(path) in files):
with mock.patch('os.stat', os_stat_mock):
with mock.patch('os.rename', os_rename_mock):
with mock.patch('shutil.rmtree'):
with mock.patch('os.listdir', return_value=[]):
yield files
def test_download_file(mock_data_server, random_binary_file, mock_writing_files, mock_server_config, mock_data_client):
file_id = "EGAF00000000001"
file_name = "resulting.file"
file_md5 = hashlib.md5(random_binary_file).hexdigest()
mock_data_server.file_content[file_id] = random_binary_file
file = DataFile(mock_data_client, file_id, display_file_name=file_name, file_name=file_name + ".cip",
size=len(random_binary_file) + 16, unencrypted_checksum=file_md5)
file.download_file_retry(1, output_dir=OUTPUT_DIR, genomic_range_args=None, max_retries=5, retry_wait=0)
assert random_binary_file == mock_writing_files[file_name]
def test_no_error_if_output_file_already_exists_with_correct_md5(mock_data_server, random_binary_file,
mock_writing_files, mock_server_config,
mock_data_client):
file_id = "EGAF00000000001"
file_name = "resulting.file"
file_md5 = hashlib.md5(random_binary_file).hexdigest()
mock_data_server.file_content[file_id] = random_binary_file
mock_writing_files[file_name] = random_binary_file
# add 16 bytes to file size ( IV adjustment )
file = DataFile(mock_data_client, file_id, display_file_name=file_name, file_name=file_name + ".cip",
size=len(random_binary_file) + 16, unencrypted_checksum=file_md5)
file.download_file_retry(1,
output_dir=OUTPUT_DIR,
genomic_range_args=None, max_retries=5, retry_wait=0)
def test_output_file_is_removed_if_md5_was_invalid(mock_data_server, random_binary_file, mock_writing_files,
mock_server_config,
mock_data_client):
file_id = "EGAF00000000001"
file_name = "resulting.file"
wrong_md5 = "wrong_md5_exactly_32_chars_longg"
mock_data_server.file_content[file_id] = random_binary_file
file = DataFile(mock_data_client, file_id, file_name, file_name + ".cip", len(random_binary_file) + 16, wrong_md5)
with mock.patch('os.remove') as mocked_remove:
with pytest.raises(Exception):
file.download_file_retry(1, OUTPUT_DIR, genomic_range_args=None, max_retries=5, retry_wait=0)
mocked_remove.assert_has_calls(
[mock.call(os.path.join(os.getcwd(), file_id, os.path.basename(f))) for f in
list(mock_writing_files.keys()) if file_name not in f],
any_order=True)
def test_genomic_range_calls_htsget(mock_data_server, random_binary_file, mock_writing_files, mock_server_config,
mock_data_client):
file_id = "EGAF00000000001"
file_name = "resulting.file"
file_md5 = hashlib.md5(random_binary_file).hexdigest()
mock_data_server.file_content[file_id] = random_binary_file
file = DataFile(mock_data_client, file_id, file_name, file_name + ".cip", len(random_binary_file) + 16, file_md5)
with mock.patch('htsget.get') as mocked_htsget:
file.download_file_retry(
1, output_dir=OUTPUT_DIR, genomic_range_args=("chr1", None, 1, 100, None),
max_retries=5,
retry_wait=0)
args, kwargs = mocked_htsget.call_args
assert args[0] == f'{mock_server_config.url_api_ticket}/files/EGAF00000000001'
assert kwargs.get('reference_name') == 'chr1'
assert kwargs.get('reference_md5') is None
assert kwargs.get('start') == 1
assert kwargs.get('end') == 100
assert kwargs.get('data_format') is None
def test_gpg_files_not_supported(mock_data_client):
file = DataFile(mock_data_client, "", "test.gz", "test.gz.gpg", 0, "")
file.download_file_retry(1, output_dir=OUTPUT_DIR, genomic_range_args=None, max_retries=5, retry_wait=5)
def test_temporary_chunk_files_stored_in_temp_folder_with_suffix_tmp(mock_data_server, random_binary_file,
mock_server_config,
mock_data_client):
# Given: a file that exist in EGA object store and the user has permissions to access to it
file_id = "EGAF00000000001"
file_name = "resulting.file"
file_md5 = hashlib.md5(random_binary_file).hexdigest()
mock_data_server.file_content[file_id] = random_binary_file
file = DataFile(mock_data_client, file_id, file_name, file_name + ".cip", len(random_binary_file) + 16, file_md5)
# When: the user starts to download a file
output_file = os.path.join(OUTPUT_DIR, file_id, file_name)
md5_file = output_file + ".md5"
if os.path.exists(output_file):
os.remove(output_file)
if os.path.exists(md5_file):
os.remove(md5_file)
with mock.patch('builtins.open', wraps=open) as wrapped_open:
file.download_file_retry(1, output_dir=OUTPUT_DIR, genomic_range_args=None, max_retries=5, retry_wait=0)
# Then: The temporary files for the chunks are in the temporary folder and has .tmp as a suffix
temporary_folder = os.path.join(OUTPUT_DIR, file_id, ".tmp_download")
slices_opened = set([call.args[0] for call in wrapped_open.mock_calls if len(call.args) == 2])
slices_opened.remove(output_file)
slices_opened.remove(md5_file)
for slice_file in slices_opened:
assert slice_file.startswith(temporary_folder)
assert slice_file.endswith(".tmp")
# Feature: The user can configure the slice sizes used when downloading a file.
def test_the_user_specifies_a_slice_size(mock_data_client):
# Given: a file that the user has permissions to download and a custom slice size
file = DataFile(mock_data_client, file_id="EGAF123456", size=12345, unencrypted_checksum="testChecksum")
slice_size = 1000
# When: when the user downloads the file
with mock.patch("pyega3.libs.data_file.DataFile.download_file_slice") as mock_download_slice:
with mock.patch("pyega3.libs.utils.md5", return_value=file.unencrypted_checksum):
with mock.patch("os.path.getsize", return_value=file.size):
file.download_file(output_file="output_file", num_connections=1, max_slice_size=slice_size)
# Then: the file is downloaded in multiple slices where each slice is at most the custom slice size
assert mock_download_slice.call_count == 13
def test_the_user_does_not_specifies_a_slice_size(mock_data_client):
# Given: a file that the user has permissions to download
file = DataFile(mock_data_client, file_id="EGAF123456", size=1234567890, unencrypted_checksum="testChecksum")
# When: when the user downloads the file
with mock.patch("pyega3.libs.data_file.DataFile.download_file_slice") as mock_download_slice:
with mock.patch("pyega3.libs.utils.md5", return_value=file.unencrypted_checksum):
with mock.patch("os.path.getsize", return_value=file.size):
file.download_file(output_file="output_file", num_connections=1)
# Then: The file is downloaded in multiple slices where each slice is at most the default slice size
assert mock_download_slice.call_count == math.ceil(file.size / DataFile.DEFAULT_SLICE_SIZE)
def test_the_user_specifies_a_custom_slice_size_different_to_before(mock_data_client, mock_data_server, random_binary_file, caplog):
# Given: a file that the user has permissions to download and a custom slice size and some slices that were already downloaded with different size.
mock_data_server.file_content["EGAF123456"] = random_binary_file
file = DataFile(mock_data_client, file_id="EGAF123456", size=12345, unencrypted_checksum="testChecksum")
slice_size = 1000
os.makedirs(".tmp_download", exist_ok=True)
extra_slice = file.download_file_slice(f'.tmp_download/{file.id}', 0, 1234)
assert os.path.exists(extra_slice)
# When: when the user downloads the file
with mock.patch("pyega3.libs.data_file.DataFile.download_file_slice") as mock_download_slice:
with mock.patch("pyega3.libs.utils.md5", return_value=file.unencrypted_checksum):
with mock.patch("os.path.getsize", return_value=file.size):
file.download_file(output_file="output_file", num_connections=1, max_slice_size=slice_size)
# Then: the file is downloaded in multiple slices where each slice is at most the custom slice size and delete the old slices with the warning.
assert mock_download_slice.call_count == 13
assert not os.path.exists(extra_slice)
assert "Deleting the leftover" in caplog.text
| 46.763393 | 151 | 0.696325 | import hashlib
import math
import os
import tempfile
from collections import namedtuple
from unittest import mock
import pytest
from pyega3.libs.data_file import DataFile
OUTPUT_DIR = tempfile.gettempdir()
@pytest.fixture
def mock_writing_files():
files = {}
def open_wrapper(filename, mode):
filename = os.path.basename(filename)
if filename not in files:
if 'r' in mode:
raise Exception("Attempt to read mock file before it was created.")
files[filename] = bytearray()
content = bytes(files[filename])
content_len = len(content)
read_buf_sz = 65536
file_object = mock.mock_open(read_data=content).return_value
file_object.__iter__.return_value = [content[i:min(i + read_buf_sz, content_len)] for i in
range(0, content_len, read_buf_sz)]
file_object.write.side_effect = lambda write_buf: files[filename].extend(write_buf)
return file_object
def os_stat_mock(fn):
fn = os.path.basename(fn)
X = namedtuple('X', 'st_size f1 f2 f3 f4 f5 f6 f7 f8 f9')
result = X(*([None] * 10))
return result._replace(st_size=len(files[fn]))
def os_rename_mock(s, d):
files.__setitem__(os.path.basename(d), files.pop(os.path.basename(s)))
with mock.patch('builtins.open', new=open_wrapper):
with mock.patch('os.makedirs', return_value=None):
with mock.patch('os.path.exists', lambda path: os.path.basename(path) in files):
with mock.patch('os.stat', os_stat_mock):
with mock.patch('os.rename', os_rename_mock):
with mock.patch('shutil.rmtree'):
with mock.patch('os.listdir', return_value=[]):
yield files
def test_download_file(mock_data_server, random_binary_file, mock_writing_files, mock_server_config, mock_data_client):
file_id = "EGAF00000000001"
file_name = "resulting.file"
file_md5 = hashlib.md5(random_binary_file).hexdigest()
mock_data_server.file_content[file_id] = random_binary_file
file = DataFile(mock_data_client, file_id, display_file_name=file_name, file_name=file_name + ".cip",
size=len(random_binary_file) + 16, unencrypted_checksum=file_md5)
file.download_file_retry(1, output_dir=OUTPUT_DIR, genomic_range_args=None, max_retries=5, retry_wait=0)
assert random_binary_file == mock_writing_files[file_name]
def test_no_error_if_output_file_already_exists_with_correct_md5(mock_data_server, random_binary_file,
mock_writing_files, mock_server_config,
mock_data_client):
file_id = "EGAF00000000001"
file_name = "resulting.file"
file_md5 = hashlib.md5(random_binary_file).hexdigest()
mock_data_server.file_content[file_id] = random_binary_file
mock_writing_files[file_name] = random_binary_file
file = DataFile(mock_data_client, file_id, display_file_name=file_name, file_name=file_name + ".cip",
size=len(random_binary_file) + 16, unencrypted_checksum=file_md5)
file.download_file_retry(1,
output_dir=OUTPUT_DIR,
genomic_range_args=None, max_retries=5, retry_wait=0)
def test_output_file_is_removed_if_md5_was_invalid(mock_data_server, random_binary_file, mock_writing_files,
mock_server_config,
mock_data_client):
file_id = "EGAF00000000001"
file_name = "resulting.file"
wrong_md5 = "wrong_md5_exactly_32_chars_longg"
mock_data_server.file_content[file_id] = random_binary_file
file = DataFile(mock_data_client, file_id, file_name, file_name + ".cip", len(random_binary_file) + 16, wrong_md5)
with mock.patch('os.remove') as mocked_remove:
with pytest.raises(Exception):
file.download_file_retry(1, OUTPUT_DIR, genomic_range_args=None, max_retries=5, retry_wait=0)
mocked_remove.assert_has_calls(
[mock.call(os.path.join(os.getcwd(), file_id, os.path.basename(f))) for f in
list(mock_writing_files.keys()) if file_name not in f],
any_order=True)
def test_genomic_range_calls_htsget(mock_data_server, random_binary_file, mock_writing_files, mock_server_config,
mock_data_client):
file_id = "EGAF00000000001"
file_name = "resulting.file"
file_md5 = hashlib.md5(random_binary_file).hexdigest()
mock_data_server.file_content[file_id] = random_binary_file
file = DataFile(mock_data_client, file_id, file_name, file_name + ".cip", len(random_binary_file) + 16, file_md5)
with mock.patch('htsget.get') as mocked_htsget:
file.download_file_retry(
1, output_dir=OUTPUT_DIR, genomic_range_args=("chr1", None, 1, 100, None),
max_retries=5,
retry_wait=0)
args, kwargs = mocked_htsget.call_args
assert args[0] == f'{mock_server_config.url_api_ticket}/files/EGAF00000000001'
assert kwargs.get('reference_name') == 'chr1'
assert kwargs.get('reference_md5') is None
assert kwargs.get('start') == 1
assert kwargs.get('end') == 100
assert kwargs.get('data_format') is None
def test_gpg_files_not_supported(mock_data_client):
file = DataFile(mock_data_client, "", "test.gz", "test.gz.gpg", 0, "")
file.download_file_retry(1, output_dir=OUTPUT_DIR, genomic_range_args=None, max_retries=5, retry_wait=5)
def test_temporary_chunk_files_stored_in_temp_folder_with_suffix_tmp(mock_data_server, random_binary_file,
                                                                         mock_server_config,
                                                                         mock_data_client):
    """Every intermediate slice file must live under .tmp_download/ and end in .tmp."""
    file_id = "EGAF00000000001"
    file_name = "resulting.file"
    file_md5 = hashlib.md5(random_binary_file).hexdigest()
    mock_data_server.file_content[file_id] = random_binary_file
    file = DataFile(mock_data_client, file_id, file_name, file_name + ".cip", len(random_binary_file) + 16, file_md5)
    output_file = os.path.join(OUTPUT_DIR, file_id, file_name)
    md5_file = output_file + ".md5"
    # Remove leftovers from previous runs so the download is forced to happen.
    if os.path.exists(output_file):
        os.remove(output_file)
    if os.path.exists(md5_file):
        os.remove(md5_file)
    with mock.patch('builtins.open', wraps=open) as wrapped_open:
        file.download_file_retry(1, output_dir=OUTPUT_DIR, genomic_range_args=None, max_retries=5, retry_wait=0)
    temporary_folder = os.path.join(OUTPUT_DIR, file_id, ".tmp_download")
    # Keep only the open(path, mode) calls (len(args) == 2 filters out other signatures).
    slices_opened = set([call.args[0] for call in wrapped_open.mock_calls if len(call.args) == 2])
    # The final output and its .md5 are legitimately outside the temp folder.
    slices_opened.remove(output_file)
    slices_opened.remove(md5_file)
    for slice_file in slices_opened:
        assert slice_file.startswith(temporary_folder)
        assert slice_file.endswith(".tmp")
def test_the_user_specifies_a_slice_size(mock_data_client):
    """With max_slice_size=1000, a 12345-byte file is fetched in ceil(12345/1000) = 13 slices."""
    test_file = DataFile(mock_data_client, file_id="EGAF123456", size=12345, unencrypted_checksum="testChecksum")
    slice_size = 1000
    # Stub out the slice downloader, the checksum and the on-disk size check so
    # only the slicing arithmetic of download_file() is exercised.
    with mock.patch("pyega3.libs.data_file.DataFile.download_file_slice") as mock_download_slice, \
            mock.patch("pyega3.libs.utils.md5", return_value=test_file.unencrypted_checksum), \
            mock.patch("os.path.getsize", return_value=test_file.size):
        test_file.download_file(output_file="output_file", num_connections=1, max_slice_size=slice_size)
    assert mock_download_slice.call_count == 13
def test_the_user_does_not_specifies_a_slice_size(mock_data_client):
    """Without max_slice_size, download_file falls back to DataFile.DEFAULT_SLICE_SIZE."""
    test_file = DataFile(mock_data_client, file_id="EGAF123456", size=1234567890, unencrypted_checksum="testChecksum")
    # Stub out the slice downloader and both post-download checks so only the
    # default slicing arithmetic is exercised.
    with mock.patch("pyega3.libs.data_file.DataFile.download_file_slice") as mock_download_slice, \
            mock.patch("pyega3.libs.utils.md5", return_value=test_file.unencrypted_checksum), \
            mock.patch("os.path.getsize", return_value=test_file.size):
        test_file.download_file(output_file="output_file", num_connections=1)
    assert mock_download_slice.call_count == math.ceil(test_file.size / DataFile.DEFAULT_SLICE_SIZE)
def test_the_user_specifies_a_custom_slice_size_different_to_before(mock_data_client, mock_data_server, random_binary_file, caplog):
    """Changing the slice size must discard leftover slices from a previous run.

    First a real slice with a *different* size (1234) is downloaded into
    .tmp_download/, then the mocked download with slice size 1000 is run; the
    stale slice must be deleted and a "Deleting the leftover" message logged.
    """
    mock_data_server.file_content["EGAF123456"] = random_binary_file
    file = DataFile(mock_data_client, file_id="EGAF123456", size=12345, unencrypted_checksum="testChecksum")
    slice_size = 1000
    os.makedirs(".tmp_download", exist_ok=True)
    # Create a genuine leftover slice file sized 1234 (≠ slice_size).
    extra_slice = file.download_file_slice(f'.tmp_download/{file.id}', 0, 1234)
    assert os.path.exists(extra_slice)
    with mock.patch("pyega3.libs.data_file.DataFile.download_file_slice") as mock_download_slice:
        with mock.patch("pyega3.libs.utils.md5", return_value=file.unencrypted_checksum):
            with mock.patch("os.path.getsize", return_value=file.size):
                file.download_file(output_file="output_file", num_connections=1, max_slice_size=slice_size)
    # ceil(12345 / 1000) slices, and the stale one is gone.
    assert mock_download_slice.call_count == 13
    assert not os.path.exists(extra_slice)
    assert "Deleting the leftover" in caplog.text
| true | true |
f7fe5966a212cd9768a987ff1101f6223819bf6b | 25,117 | py | Python | Experiment.py | Stiltstiltstilts/Experiment-5 | b10e6bbfd0ebd5ee73c09a4ab9387ea1f9967048 | [
"MIT"
] | null | null | null | Experiment.py | Stiltstiltstilts/Experiment-5 | b10e6bbfd0ebd5ee73c09a4ab9387ea1f9967048 | [
"MIT"
] | null | null | null | Experiment.py | Stiltstiltstilts/Experiment-5 | b10e6bbfd0ebd5ee73c09a4ab9387ea1f9967048 | [
"MIT"
] | null | null | null |
################################################
################# Imports ######################
################################################
from psychopy import core, visual, logging, gui, event, prefs, data, sound, monitors
prefs.general['audioLib'] = ['pyo']
prefs.general['audioDriver'] = ['ASIO']
from numpy.random import random, randint, normal, shuffle
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import os
import sys
import numpy as np
from constants import *
from customFunctions import trialCreator
GlobalClock = core.Clock() # Track time since experiment starts
#port = parallel.ParallelPort(address=0xd050) ################################
#port.setData(0)
#port.setData(trial['ref']) #Stim starts
#core.wait(0.001)
#port.setData(0)
################################################
############### Basic checks ###################
################################################
# check relative paths correct
_thisDir = os.path.abspath(os.path.dirname(__file__))
os.chdir(_thisDir)
################################################
####### Collect experiment session info ########
################################################
# Exp name
expName = 'Rhythm words'
# Define experiment info
expInfo = {'session':'001', 'participant':'001',
'handedness':'', 'gender':'', 'native language': '', 'age': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName,)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr()
# Create filename for data file (absolute path + name)
filename = _thisDir + os.sep + 'data/{0}'.format(expInfo['participant'])
################################################
################ Setup logfile #################
################################################
# save a log file for detailed verbose info
logFile = logging.LogFile(filename+'.log', level=logging.DATA)
# this outputs to the screen, not a file
logging.console.setLevel(logging.WARNING)
################################################
################# Variables ####################
################################################
# setup window
# NOTE(review): Monitor is created with width=1920 (looks like a pixel count),
# then setWidth(80) overrides it with the physical width in cm — verify.
mon = monitors.Monitor(name = 'OptiPlex 7440',
                        width = 1920,
                        distance = 80)
mon.setWidth(80)
mon.setSizePix([1920, 1080])
win = visual.Window(fullscr=True,
                    size = [1920, 1080],
                    monitor=mon,
                    units='deg',
                    allowGUI=False)
trialClock = core.Clock()
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0 # could not measure, so guess 60Hz
# Write a one-row, tab-separated participant-info file.
with open('data/{}participant_info.txt'.format(expInfo['participant']), 'w') as log_file:
    log_file.write('Session\t' +
                   'Participant\t' +
                   'Handedness\t' +
                   'Gender\t' +
                   'Native_language\t' +
                   'Age\t' +
                   'frameRate\t' + '\n')
    log_file.write('\t'.join([str(expInfo['session']),
                              str(expInfo['participant']),
                              str(expInfo['handedness']),
                              str(expInfo['gender']),
                              str(expInfo['native language']),
                              str(expInfo['age']),
                              str(expInfo['frameRate'])]) + '\n')
    log_file.close() # redundant: the with-statement already closes the file
################################################
########## Trial list construction #############
################################################
# Main sentences
main_conditions = [sub_cong, sub_incong1, obj_cong, obj_incong1, obj_incong2,sub_neut, obj_neut]
main_probes = [probe_mc_pos, probe_mc_neg, probe_rc_subpos_objneg, probe_rc_subneg_objpos,]
sentence_list = trialCreator(main_conditions, main_probes) # using function in customFunctions.py script to randomise and assemble sentences and probes
# Combining main and assorted trials into one list
all_trials = sentence_list
all_trials = data.TrialHandler(trialList = all_trials[:], nReps = 1, method = 'random', extraInfo = expInfo, name = 'all_trials')
thisTrial = all_trials.trialList[0] # so we can initialise stimuli with some values
# Practice trials
# Merge each practice sentence dict with its matching probe dict.
prac_list = [ {**prac[i], **prac_probes[i]} for i in range(len(prac)) ]
prac_list = data.TrialHandler(trialList = prac_list[:], nReps = 1, method = 'sequential', extraInfo = expInfo, name = 'practice_trials')
thisPracTrial = prac_list.trialList[0] # so we can initialise stimuli with some values
################################################
############## Run experiment ##################
################################################
try:
# ==== SETUP TRIAL OBJECTS ==== #
message1 = visual.TextStim(win, pos=[0,+3], color=FGC, alignHoriz='center', name='topMsg', text="placeholder")
message2 = visual.TextStim(win, pos=[0,-3], color=FGC, alignHoriz='center', name='bottomMsg', text="placeholder")
fixation = visual.TextStim(win, pos=[0,0], color=FGC, alignHoriz='center', text="+")
endMessage = visual.TextStim(win, pos=[0,0], color=FGC, alignHoriz='center', text="The end! Thank you for participating :)")
space_cont = visual.TextStim(win, pos=[0,0], color=FGC, text="Press space to continue")
too_slow = visual.TextStim(win, pos=[0,0], color=FGC, text="Too slow: respond quicker next time")
check_text = visual.TextStim(win, pos=[0,0], color=FGC, text="What beat did the last word end on?")
feedback = visual.TextStim(win, pos=[0,0], color=FGC, text="placeholder")
introText = visual.TextStim(win, pos=[0,0], color=FGC, text="Placeholder")
probe_text = visual.TextStim(win, pos=[0,0], color=FGC, alignHoriz='center', name='top_probe', text="placeholder")
GSI = visual.RatingScale(win, name='GSI', marker='triangle',
textSize = 0.4, showValue = False, acceptText = 'confirm',
size=1.5, pos=[0.0, -0.4],
choices=['Completely\n Disagree', 'Strongly\n Disagree',
'Disagree', 'Neither Agree\n or Disagree', 'Agree',
'Strongly\n Agree', 'Completely\n Agree'],
tickHeight=-1)
response_keys = visual.TextStim(win, pos=[0,-5], height = .5, color=FGC, text="respond:'y' 'n' or 'd'")
response_keys_check = visual.TextStim(win, pos=[0,-5], height = .5, color=FGC, text="respond:'1', '2', or '3'")
# ==== OTHER TRIAL VARIABLES ==== #
clock = core.Clock()
# ===== LOG FILES ====== #
# File for all trial information
with open('data/{}trial_log.txt'.format(expInfo['participant']), 'w') as log_file:
log_file.write('Trial\t' +
'Beat\t' +
'Sentence\t' +
'Sentence_extraction\t' +
'Congruency\t' +
'Probe\t' +
'Probe_clause\t' +
'Response\t' +
'Accuracy\t' +
'RT' +
'Catch_response' +
'Catch Accuracy' + '\n')
log_file.close()
################################################
############## START EXPERIMENT ################
################################################
win.mouseVisible = False
"""
# ===== PRACTISE TRIALS INTRO ====== #
counter = 0
while counter < len(part2Intro):
# === set top text === #
message1.setText(part2Intro[counter])
# === set bottom text === #
if counter == 0:
message2.setText(bottom_text[0])
elif counter in range(1, (len(part2Intro) - 1)):
message2.setText(bottom_text[1])
else:
message2.setText(bottom_text[2])
# === display instructions and wait === #
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Display Instructions%d'%(counter+1))
win.flip()
# === check for a keypress === #
thisKey = event.waitKeys()
if thisKey[0] in ['q','escape']:
core.quit()
elif thisKey[0] == 'backspace' and counter > 0:
counter -= 1
else:
counter += 1
# ===== PRACTICE TRIALS ====== #
trial_num = 0
for thisPracTrial in prac_list:
trial_num += 1
# Abbeviate parameter names... e.g. thisPracTrial['beat_type'] becomes beat_type
if thisPracTrial != None:
for paramName in thisPracTrial:
exec('{} = thisPracTrial[paramName]'.format(paramName))
probe_resp = event.BuilderKeyResponse()
####====SETUP TRIAL COMPONENTS LIST====####
# initialize trial components list
trialComponents = []
audio_stim = sound.Sound( str(os.path.join('Stimuli', 'Audio', 'Practise', ('sent' + str(sent_number + 1) + '.wav'))) )
trialComponents.extend([audio_stim,]) # add audio stim to trialComponents list
# set probe text for the trial
probe_text.setText(probe)
####====BASIC ROUTINE CHECKS====####
continueRoutine = True
# keep track of which components have finished
for thisComponent in trialComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
t = 0
trialClock.reset() # clock
frameN = -1
####====START PRACTISE TRIAL ROUTINE====####
while continueRoutine:
# get current time
t = trialClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
##### 1. start/stop beat_stim #####
if t >= 0.0 and audio_stim.status == NOT_STARTED:
# keep track of start time/frame for later
audio_stim.tStart = t
audio_stim.frameNStart = frameN # exact frame index
audio_stim.play() # start the sound (it finishes automatically)
fixation.setAutoDraw(True)
##### 3. check if all components have finished #####
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
##### 4. refresh the screen #####
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
####====Ending Trial Routine====####
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
audio_stim.stop() # ensure sound has stopped at end of routine
fixation.setAutoDraw(False)
####====Probe====####
# 3. display probe text e.g. "The boy helped the girl?" #####
probe_text.tStart = t
probe_text.setAutoDraw(True)
response_keys.setAutoDraw(True)
####====check for response====#####
probe_resp.tStart = t
win.callOnFlip(probe_resp.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
thing = True
while thing:
win.flip()
theseKeys = event.getKeys(keyList=['y', 'n', 'd'])
if len(theseKeys) > 0: # at least one key was pressed
probe_text.setAutoDraw(False)
response_keys.setAutoDraw(False)
probe_resp.keys = theseKeys[-1] # just the last key pressed
probe_resp.rt = probe_resp.clock.getTime()
# was this 'correct'?
if probe_resp.keys == 'n' and (trial_num == 1 or trial_num == 2):
probe_resp.corr = 1
feedback.setText("correct")
feedback.draw()
thing = False
elif probe_resp.keys == 'y' and (trial_num == 3 or trial_num == 4):
probe_resp.corr = 1
feedback.setText("correct")
feedback.draw()
thing = False
elif probe_resp.keys == 'd':
probe_resp.corr = 0
feedback.setText("(don't know)")
feedback.draw()
thing = False
else:
probe_resp.corr = 0
feedback.setText("incorrect")
feedback.draw()
thing = False
win.flip()
core.wait(1)
####====Check if response is too slow====####
if probe_resp.rt > probe_duration:
too_slow.draw()
win.flip()
core.wait(2)
####====Space to continue====####
event.clearEvents(eventType='keyboard')
space_cont.draw()
win.flip()
thisKey = event.waitKeys(keyList=['space'])
while not 'space' in thisKey:
thisKey = event.waitKeys(keyList=['space'])
core.wait(1)
"""
    # ===== INSTRUCTIONS 2 ====== #
    # Paged instruction screens: any key advances, backspace goes back one page
    # (only when not on the first page), 'q'/escape aborts the experiment.
    counter = 0
    while counter < len(part3Intro):
        message1.setText(part3Intro[counter])
        # Bottom hint differs on first / middle / last page.
        if counter == 0:
            message2.setText(bottom_text[0])
        elif counter in range(1, (len(part3Intro) - 1)):
            message2.setText(bottom_text[1])
        else:
            message2.setText(bottom_text[2])
        #display instructions and wait
        message1.draw()
        message2.draw()
        win.logOnFlip(level=logging.EXP, msg='Display Instructions%d'%(counter+1))
        win.flip()
        #check for a keypress
        thisKey = event.waitKeys()
        if thisKey[0] in ['q','escape']:
            core.quit()
        elif thisKey[0] == 'backspace' and counter > 0:
            counter -= 1
        else:
            counter += 1
    trial_num = 0 # initialise trial number
    # ===== MAIN TRIALS ====== #
    # One iteration per (randomised) trial: play the sentence audio, show the
    # comprehension probe, optionally run the beat "catch" check, log one row.
    for thisTrial in all_trials:
        trial_num += 1
        ####====ABBREVIATE PARAMETER NAMES====####
        # NOTE: exec() injects each trial-dict entry (e.g. 'congruency',
        # 'sent_number', 'trial_type') as a local variable of the same name.
        if thisTrial != None:
            for paramName in thisTrial:
                exec('{} = thisTrial[paramName]'.format(paramName))
        probe_resp = event.BuilderKeyResponse() # initialising
        check_resp = event.BuilderKeyResponse() # initialising
        # create counter for adding the manipulation check bit every 4/5 trials
        if trial_type == 'catch':
            check_trial = True
        else:
            check_trial = False
            # non-catch trials log 'NA' for the manipulation check columns
            check_resp.corr = 'NA'
            check_resp.keys = 'NA'
        ####====SETUP TRIAL COMPONENTS LIST====####
        # initialize trial components list
        trialComponents = []
        # add auditory stimuli component
        audio_stim = sound.Sound( str(os.path.join('Stimuli', 'Audio', (extraction + '_' + congruency), ('sent' + str(sent_number + 1) + '.wav'))) )
        trialComponents.extend([audio_stim],) # add beat stim to trialComponents list
        # set probe text for the trial
        probe_text.setText(probe)
        ####====BASIC ROUTINE CHECKS====####
        continueRoutine = True
        # keep track of which components have finished
        for thisComponent in trialComponents:
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        t = 0
        tap_data = []
        trialClock.reset() # clock
        frameN = -1
        ####====START MAIN TRIAL ROUTINE====####
        while continueRoutine:
            t = trialClock.getTime()
            frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
            ##### 1. start/stop beat_stim #####
            if t >= 0.0 and audio_stim.status == NOT_STARTED:
                # keep track of start time/frame for later
                audio_stim.tStart = t
                audio_stim.frameNStart = frameN # exact frame index
                audio_stim.play() # start the sound (it finishes automatically)
                fixation.setAutoDraw(True)
            ##### 2. check if all components have finished #####
            if not continueRoutine: # a component has requested a forced-end of Routine
                break
            continueRoutine = False # will revert to True if at least one component still running
            for thisComponent in trialComponents:
                if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                    continueRoutine = True
                    break # at least one component has not yet finished
            ##### 3. refresh the screen #####
            if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                win.flip()
        ####====Ending Trial Routine====####
        audio_stim.stop() # ensure sound has stopped at end of routine
        fixation.setAutoDraw(False)
        core.wait(probe_delay)
        ####====Probe====####
        # 3. display probe text e.g. "The boy helped the girl?" #####
        probe_text.tStart = t
        probe_text.setAutoDraw(True)
        response_keys.setAutoDraw(True)
        ####====check for response====#####
        probe_resp.tStart = t
        win.callOnFlip(probe_resp.clock.reset) # t=0 on next screen flip
        event.clearEvents(eventType='keyboard')
        thing = True
        while thing:
            win.flip()
            theseKeys = event.getKeys(keyList=['y', 'n', 'd'])
            if len(theseKeys) > 0: # at least one key was pressed
                probe_text.setAutoDraw(False)
                response_keys.setAutoDraw(False)
                probe_resp.keys = theseKeys[-1] # just the last key pressed
                probe_resp.rt = probe_resp.clock.getTime()
                # was this 'correct'?
                # 'y' is correct for positive probes, and for relative-clause
                # probes whose queried clause is the positive one.
                if probe_resp.keys == 'y' and (\
                    pos_neg == 'positive' or \
                    ( \
                    (pos_neg == 'subneg_objpos' and clause == 'relative_clause' and extraction == 'object') or \
                    (pos_neg == 'subpos_objneg' and clause == 'relative_clause' and extraction == 'subject') \
                    )):
                    probe_resp.corr = 1
                    feedback.setText("correct")
                    feedback.draw()
                # 'n' is correct for the mirror-image cases.
                elif probe_resp.keys == 'n' and (\
                    pos_neg == 'negative' or \
                    ( \
                    (pos_neg == 'subpos_objneg' and clause == 'relative_clause' and extraction == 'object') or \
                    (pos_neg == 'subneg_objpos' and clause == 'relative_clause' and extraction == 'subject') \
                    )):
                    probe_resp.corr = 1
                    feedback.setText("correct")
                    feedback.draw()
                elif probe_resp.keys == 'd':
                    probe_resp.corr = 0
                    feedback.setText("(don't know)")
                    feedback.draw()
                else:
                    probe_resp.corr = 0
                    feedback.setText("incorrect")
                    feedback.draw()
                probe_text.setAutoDraw(False)
                thing = False
        win.flip()
        core.wait(.5)
        ####====Check if response is too slow====####
        if probe_resp.rt > probe_duration:
            too_slow.draw()
            win.flip()
            core.wait(2)
        ####====Catch trial prompt====####
        # Manipulation check: ask which beat the last word ended on (keys 1/2/3).
        while check_trial:
            win.flip()
            check_text.setAutoDraw(True)
            response_keys_check.setAutoDraw(True)
            theseKeys = event.getKeys(keyList=['1', '2', '3'])
            if len(theseKeys) > 0:
                check_text.setAutoDraw(False)
                response_keys_check.setAutoDraw(False)
                check_resp.keys = theseKeys[-1]
                if check_resp.keys == check_beat:
                    check_resp.corr = 1
                    feedback.setText("correct")
                    feedback.draw()
                    check_trial = False
                elif check_resp.keys != check_beat:
                    check_resp.corr = 0
                    feedback.setText("incorrect")
                    feedback.draw()
                    check_trial = False
        win.flip()
        # Append one tab-separated row per trial (12 fields).
        with open('data/{}trial_log.txt'.format(expInfo['participant']), 'a') as log_file:
            log_file.write('\t'.join([str(trial_num),
                                    str(beat_type),
                                    str(sent_stim),
                                    str(extraction),
                                    str(congruency),
                                    str(probe),
                                    str(clause),
                                    str(probe_resp.keys),
                                    str(probe_resp.corr),
                                    str(probe_resp.rt),
                                    str(check_resp.keys),
                                    str(check_resp.corr)]) + '\n')
        log_file.close() # redundant — the with-statement above already closed the file
        core.wait(.8)
        ####====Space to continue====####
        event.clearEvents(eventType='keyboard')
        space_cont.draw()
        win.flip()
        thisKey = event.waitKeys(keyList=['space'])
        while not 'space' in thisKey:
            thisKey = event.waitKeys(keyList=['space'])
        core.wait(.5)
logging.flush()
################################################
############## GSI QUESTIONNAIRE ################
################################################
# ===== INSTRUCTIONS 3 ====== #
counter = 0
while counter < len(part4Intro):
message1.setText(part4Intro[counter])
if counter == 0:
message2.setText(bottom_text[0])
elif counter in range(1, (len(part4Intro) - 1)):
message2.setText(bottom_text[1])
else:
message2.setText(bottom_text[2])
#display instructions and wait
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Display Instructions%d'%(counter+1))
win.flip()
#check for a keypress
thisKey = event.waitKeys()
if thisKey[0] in ['q','escape']:
core.quit()
elif thisKey[0] == 'backspace':
counter -= 1
else:
counter += 1
with open('data/{}questionnaire_log.txt'.format(expInfo['participant']), 'w') as log_file:
log_file.write('Question_num\t' +
'Question\t' +
'Response' + '\n')
win.mouseVisible = True
quest_num = 1 # initialising counter
for question in gsi_part1:
message1.setText(question)
while GSI.noResponse:
message1.draw()
GSI.draw()
win.flip()
response = GSI.getRating()
#======WRITE DATA TO FILE======#
log_file.write('\t'.join([str(quest_num),
str( question.replace('\n','') ),
str( response.replace('\n','') )]) + '\n')
log_file.flush()
GSI.noResponse = True
GSI.response = None
quest_num += 1
core.wait(.2)
quest_num = 1 # initialising counter
for question in gsi_part2:
message1.setText(question)
GSI = visual.RatingScale(win, name='GSI', marker='triangle',
textSize = 0.4, showValue = False, acceptText = 'confirm',
size=1.5, pos=[0.0, -0.4],
choices= gsi_part2_scales[quest_num - 1],
tickHeight=-1)
while GSI.noResponse:
message1.draw()
GSI.draw()
win.flip()
response = GSI.getRating()
#======WRITE DATA TO FILE======#
log_file.write('\t'.join([str((quest_num + 31)),
str( question.replace('\n','') ),
str( response.replace('\n','') )]) + '\n')
log_file.flush()
GSI.noResponse = True
GSI.response = None
quest_num += 1
core.wait(.2)
endMessage.draw()
win.flip()
core.wait(5)
finally:
    # Always release the window and shut PsychoPy down, even after an
    # error or a core.quit() triggered inside the try-block.
    win.close()
    core.quit()
| 40.84065 | 151 | 0.504638 | true | true | |
f7fe59a8e3c9d74b0b4ed8b384776f8f61a3d9fe | 1,848 | py | Python | plugin.video.vstream/resources/hosters/filepup.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 2 | 2018-11-02T19:55:30.000Z | 2020-08-14T02:22:20.000Z | plugin.video.vstream/resources/hosters/filepup.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | null | null | null | plugin.video.vstream/resources/hosters/filepup.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 3 | 2019-12-17T20:47:00.000Z | 2021-02-11T19:03:59.000Z | #-*- coding: utf-8 -*-
#Vstream https://github.com/Kodi-vStream/venom-xbmc-addons
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.hosters.hoster import iHoster
class cHoster(iHoster):
    """vStream hoster plugin for FilePup: resolves a page URL to a direct MP4 link."""

    def __init__(self):
        self.__sDisplayName = 'FilePup'
        self.__sFileName = self.__sDisplayName
        self.__sHD = ''

    def getDisplayName(self):
        """Return the (possibly decorated) display name."""
        return self.__sDisplayName

    def setDisplayName(self, sDisplayName):
        # Decorate the caller-supplied title with the hoster name and HD tag.
        self.__sDisplayName = '%s [COLOR skyblue]%s[/COLOR] [COLOR khaki]%s[/COLOR]' % (
            sDisplayName, self.__sDisplayName, self.__sHD)

    def setFileName(self, sFileName):
        self.__sFileName = sFileName

    def getFileName(self):
        return self.__sFileName

    def getPluginIdentifier(self):
        return 'filepup'

    def setHD(self, sHD):
        # FilePup exposes no quality variants, so the HD tag is always empty.
        self.__sHD = ''

    def getHD(self):
        return self.__sHD

    def isDownloadable(self):
        return True

    def isJDownloaderable(self):
        return True

    def getPattern(self):
        return ''

    def __getIdFromUrl(self, sUrl):
        return ''

    def setUrl(self, sUrl):
        self.__sUrl = str(sUrl)

    def checkUrl(self, sUrl):
        return True

    def __getUrl(self, media_id):
        return

    def getMediaLink(self):
        return self.__getMediaLinkForGuest()

    def __getMediaLinkForGuest(self):
        # Fetch the hosting page and scrape the direct MP4 source out of the
        # player setup; returns (True, url) on success, (False, False) otherwise.
        sHtmlContent = cRequestHandler(self.__sUrl).request()
        aResult = cParser().parse(sHtmlContent, 'type: "video\/mp4", *src: "([^<>"{}]+?)"')
        if (aResult[0] == True):
            return True, aResult[1][0]
        return False, False
| 24.315789 | 138 | 0.631494 |
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.hosters.hoster import iHoster
class cHoster(iHoster):
    # vStream hoster plugin for FilePup: resolves a page URL to a direct MP4 link.
    def __init__(self):
        self.__sDisplayName = 'FilePup'
        self.__sFileName = self.__sDisplayName
        self.__sHD = ''
    def getDisplayName(self):
        return self.__sDisplayName
    def setDisplayName(self, sDisplayName):
        # Decorate the caller-supplied title with the hoster name and HD tag.
        self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR] [COLOR khaki]' + self.__sHD + '[/COLOR]'
    def setFileName(self, sFileName):
        self.__sFileName = sFileName
    def getFileName(self):
        return self.__sFileName
    def getPluginIdentifier(self):
        return 'filepup'
    def setHD(self, sHD):
        # FilePup exposes no quality variants, so the HD tag is always empty.
        self.__sHD = ''
    def getHD(self):
        return self.__sHD
    def isDownloadable(self):
        return True
    def isJDownloaderable(self):
        return True
    def getPattern(self):
        return ''
    def __getIdFromUrl(self, sUrl):
        return ''
    def setUrl(self, sUrl):
        self.__sUrl = str(sUrl)
    def checkUrl(self, sUrl):
        return True
    def __getUrl(self, media_id):
        return
    def getMediaLink(self):
        return self.__getMediaLinkForGuest()
    def __getMediaLinkForGuest(self):
        # Fetch the hosting page and scrape the direct MP4 source out of the
        # player setup; returns (True, url) on success, (False, False) otherwise.
        url = self.__sUrl
        oRequestHandler = cRequestHandler(url)
        sHtmlContent = oRequestHandler.request()
        oParser = cParser()
        sPattern = 'type: "video\/mp4", *src: "([^<>"{}]+?)"'
        aResult = oParser.parse(sHtmlContent, sPattern)
        if (aResult[0] == True):
            return True, aResult[1][0]
        return False, False
| true | true |
f7fe5b8bf0510d3941196ee8bb5f769dededdabe | 8,462 | py | Python | 07ctc_att_mtl/attention.py | sotuken2021/s | 2b37fdcaa60c1f5ac492edddd4a0960882fbc0aa | [
"MIT"
] | 41 | 2021-05-19T15:04:19.000Z | 2022-02-11T15:24:58.000Z | 07ctc_att_mtl/attention.py | sotuken2021/s | 2b37fdcaa60c1f5ac492edddd4a0960882fbc0aa | [
"MIT"
] | 1 | 2021-07-19T08:59:36.000Z | 2021-07-25T07:54:29.000Z | 07ctc_att_mtl/attention.py | sotuken2021/s | 2b37fdcaa60c1f5ac492edddd4a0960882fbc0aa | [
"MIT"
] | 14 | 2021-05-21T05:47:22.000Z | 2022-03-31T09:11:32.000Z | # -*- coding: utf-8 -*-
#
# Implementation of attention (location-aware attention).
# References:
#   - D. Bahdanau, et al.,
#     ``End-to-end attention-based large vocabulary speech
#       recognition,''
#     in Proc. ICASSP, 2016.
#   - J. Chorowski, et al.,
#     ``Attention-based models for speech recognition,''
#     in Proc. NIPS , 2015.
#
# Import the modules required for the PyTorch-based processing.
import torch
import torch.nn as nn
import torch.nn.functional as F
class LocationAwareAttention(nn.Module):
    ''' Location aware attention

    dim_encoder:   dimensionality of the encoder RNN output
    dim_decoder:   dimensionality of the decoder RNN output
    dim_attention: dimensionality of the attention mechanism
    filter_size:   size of the location filter (the filter that is
                   convolved over the previous attention weights)
    filter_num:    number of location filters
    temperature:   temperature parameter used when computing the
                   attention weights
    '''
    def __init__(self,
                 dim_encoder,
                 dim_decoder,
                 dim_attention,
                 filter_size,
                 filter_num,
                 temperature=1.0):
        super(LocationAwareAttention, self).__init__()

        # F: convolution applied to the previous attention weights
        self.loc_conv = nn.Conv1d(in_channels=1,
                                  out_channels=filter_num,
                                  kernel_size=2*filter_size+1,
                                  stride=1,
                                  padding=filter_size,
                                  bias=False)

        # Of the following three layers, exactly one has bias=True;
        # the others have bias=False (a single shared bias term).
        # W: projection applied to the previous decoder RNN output
        self.dec_proj = nn.Linear(in_features=dim_decoder,
                                  out_features=dim_attention,
                                  bias=False)
        # V: projection applied to the encoder RNN output
        self.enc_proj = nn.Linear(in_features=dim_encoder,
                                  out_features=dim_attention,
                                  bias=False)
        # U: projection applied to the convolved attention weights
        self.att_proj = nn.Linear(in_features=filter_num,
                                  out_features=dim_attention,
                                  bias=True)

        # w: linear layer applied to Ws + Vh + Uf + b
        self.out = nn.Linear(in_features=dim_attention,
                             out_features=1)

        # dimensionalities
        self.dim_encoder = dim_encoder
        self.dim_decoder = dim_decoder
        self.dim_attention = dim_attention

        # temperature parameter
        self.temperature = temperature

        # Encoder RNN output (h) and its projection (Vh).
        # These are identical at every decoding step, so they are
        # computed once and cached.
        self.input_enc = None
        self.projected_enc = None
        # per-utterance lengths of the encoder RNN output
        self.enc_lengths = None
        # maximum encoder output length
        # (= length of the zero-padded encoder RNN output)
        self.max_enc_length = None
        # Attention mask:
        # zeroes out the weights beyond each utterance's encoder
        # length (i.e. over the zero-padded region)
        self.mask = None

    def reset(self):
        ''' Reset the cached internal state.

        This function must be called first, every time a new
        mini-batch is processed.
        '''
        self.input_enc = None
        self.projected_enc = None
        self.enc_lengths = None
        self.max_enc_length = None
        self.mask = None

    def forward(self,
                input_enc,
                enc_lengths,
                input_dec=None,
                prev_att=None):
        ''' Forward computation.

        input_enc:   encoder RNN outputs [B x Tenc x Denc]
        enc_lengths: encoder output length of each utterance
                     in the batch [B]
        input_dec:   decoder RNN output of the previous step [B x Ddec]
        prev_att:    attention weights of the previous step [B x Tenc]
        [] denotes tensor sizes
          B:    number of utterances in the mini-batch (batch size)
          Tenc: encoder output length (including zero padding)
          Denc: dimensionality of the encoder RNN output (dim_encoder)
          Ddec: dimensionality of the decoder RNN output (dim_decoder)
        '''
        # get the batch size (number of utterances)
        batch_size = input_enc.size()[0]

        #
        # Compute the encoder RNN output and its projection only once
        #
        if self.input_enc is None:
            # encoder RNN output (h)
            self.input_enc = input_enc
            # per-utterance lengths
            self.enc_lengths = enc_lengths
            # maximum length
            self.max_enc_length = input_enc.size()[1]
            # perform the projection (compute Vh)
            self.projected_enc = self.enc_proj(self.input_enc)

        #
        # Project the previous decoder RNN output (compute Ws)
        #
        # If there is no previous decoder output, use a zero matrix
        # as the initial value
        if input_dec is None:
            input_dec = torch.zeros(batch_size, self.dim_decoder)
        # Place the created tensor on the same device (GPU/CPU) as
        # the encoder RNN output
        input_dec = input_dec.to(device=self.input_enc.device,
                                 dtype=self.input_enc.dtype)
        # project the previous decoder RNN output
        projected_dec = self.dec_proj(input_dec)

        #
        # Project the previous attention-weight information
        # (compute Uf + b)
        #
        # create the attention mask
        if self.mask is None:
            self.mask = torch.zeros(batch_size,
                                    self.max_enc_length,
                                    dtype=torch.bool)
            # For each utterance in the batch, set the elements at or
            # beyond the utterance length (i.e. the zero-padded part)
            # to 1 (= to be masked)
            for i, length in enumerate(self.enc_lengths):
                length = length.item()
                self.mask[i, length:] = 1
            # Place the created tensor on the same device (GPU/CPU) as
            # the encoder RNN output
            self.mask = self.mask.to(device=self.input_enc.device)

        # If there are no previous attention weights, initialise them
        # with a uniform distribution
        if prev_att is None:
            # create a tensor with all elements set to 1
            prev_att = torch.ones(batch_size, self.max_enc_length)
            # Divide by each utterance's length. prev_att is a rank-2
            # tensor while enc_lengths is rank-1, so enc_lengths is
            # reshaped to rank 2 via view(batch_size, 1) before dividing.
            prev_att = prev_att \
                / self.enc_lengths.view(batch_size, 1)
            # Place the created tensor on the same device (GPU/CPU) as
            # the encoder RNN output
            prev_att = prev_att.to(device=self.input_enc.device,
                                   dtype=self.input_enc.dtype)

        # Apply the mask so that weights beyond the utterance length
        # become zero
        prev_att.masked_fill_(self.mask, 0)

        # Convolve the attention weights: {f} = F*a
        # Conv1d expects input of size
        # (batch_size, in_channels, self.max_enc_length)
        # (in_channels is the number of input channels; here
        # in_channels = 1), so view() is used to match that shape.
        convolved_att \
            = self.loc_conv(prev_att.view(batch_size,
                                          1, self.max_enc_length))

        # convolved_att has size
        # (batch_size, filter_num, self.max_enc_length).
        # The Linear layer expects input of size
        # (batch_size, self.max_enc_length, filter_num), so the 1st
        # and 2nd dimensions are swapped with transpose before
        # passing it through att_proj.
        projected_att = self.att_proj(convolved_att.transpose(1, 2))

        #
        # Compute the attention weights
        #
        # At this point the tensor sizes are
        #   self.projected_enc: (batch_size, self.max_enc_length,
        #                        self.dim_attention)
        #   projected_dec: (batch_size, self.dim_attention)
        #   projected_att: (batch_size, self.max_enc_length, self.dim_attention)
        # view() is used to make projected_dec broadcastable.
        projected_dec = projected_dec.view(batch_size,
                                           1,
                                           self.dim_attention)

        # To compute the score, add the projected tensors, apply tanh,
        # and project once more:
        # w tanh(Ws + Vh + Uf + b)
        score = self.out(torch.tanh(projected_dec \
                                    + self.projected_enc
                                    + projected_att))

        # The score tensor currently has size
        # (batch_size, self.max_enc_length, 1);
        # view() restores the original attention shape.
        score = score.view(batch_size, self.max_enc_length)

        # Apply masking (zero the weights over the zero-padded part of
        # the encoder RNN output). Since exp(score), computed inside
        # the softmax, must become zero, the score is filled with
        # -inf (the log of 0) rather than 0 at this stage.
        score.masked_fill_(self.mask, -float('inf'))

        # Obtain the attention weights via a temperature-scaled softmax
        att_weight = F.softmax(self.temperature * score, dim=1)

        # Use att_weight to compute the weighted sum of the encoder RNN
        # outputs, giving the context vector.
        # (view() aligns the shapes of input_enc and att_weight.)
        context \
            = torch.sum(self.input_enc * \
                        att_weight.view(batch_size, self.max_enc_length, 1),
                        dim=1)

        # return the context vector and the attention weights
        return context, att_weight
| 33.713147 | 78 | 0.549397 |
import torch
import torch.nn as nn
import torch.nn.functional as F
class LocationAwareAttention(nn.Module):
    """Location-aware attention for sequence-to-sequence decoding.

    Each encoder frame is scored with w^T tanh(W s + V h + U f + b),
    where f is a convolutional feature computed from the previous
    attention distribution.  The encoder output, its projection, the
    frame lengths and the padding mask are cached on the first
    forward() call of an utterance; call reset() before a new batch.
    """

    def __init__(self,
                 dim_encoder,
                 dim_decoder,
                 dim_attention,
                 filter_size,
                 filter_num,
                 temperature=1.0):
        """
        dim_encoder:   feature size of the encoder output h
        dim_decoder:   feature size of the decoder state s
        dim_attention: size of the shared attention space
        filter_size:   half-width of the location convolution
                       (kernel is 2*filter_size+1; padding keeps length)
        filter_num:    number of location-convolution channels
        temperature:   multiplier applied to the scores before softmax
        """
        super().__init__()
        # F (as a 1-D convolution): extracts location features f = F * a
        # from the previous attention weights.
        self.loc_conv = nn.Conv1d(in_channels=1,
                                  out_channels=filter_num,
                                  kernel_size=2 * filter_size + 1,
                                  stride=1,
                                  padding=filter_size,
                                  bias=False)
        # W: decoder-state projection (no bias).
        self.dec_proj = nn.Linear(in_features=dim_decoder,
                                  out_features=dim_attention,
                                  bias=False)
        # V: encoder-output projection (no bias).
        self.enc_proj = nn.Linear(in_features=dim_encoder,
                                  out_features=dim_attention,
                                  bias=False)
        # U: projection of the convolved attention; carries the bias b.
        self.att_proj = nn.Linear(in_features=filter_num,
                                  out_features=dim_attention,
                                  bias=True)
        # w: final projection of tanh(...) down to a scalar score.
        self.out = nn.Linear(in_features=dim_attention,
                             out_features=1)

        self.dim_encoder = dim_encoder
        self.dim_decoder = dim_decoder
        self.dim_attention = dim_attention
        self.temperature = temperature

        # Per-utterance caches, filled lazily by forward().
        self.input_enc = None       # encoder output (B, T, dim_encoder)
        self.projected_enc = None   # V h, computed once per utterance
        self.enc_lengths = None     # valid frame count per batch element
        self.max_enc_length = None  # padded length T
        self.mask = None            # True at padded positions (B, T)

    def reset(self):
        """Drop every cached per-utterance tensor."""
        for attr in ("input_enc", "projected_enc", "enc_lengths",
                     "max_enc_length", "mask"):
            setattr(self, attr, None)

    def forward(self,
                input_enc,
                enc_lengths,
                input_dec=None,
                prev_att=None):
        """Return (context, att_weight) for one decoder step.

        input_enc:   encoder output (B, T, dim_encoder); only the value
                     seen on the first call of an utterance is used.
        enc_lengths: number of valid frames per batch element.
        input_dec:   decoder state (B, dim_decoder); zeros if None.
        prev_att:    previous attention weights (B, T); defaults to a
                     uniform distribution over the valid frames.
        """
        n_batch = input_enc.size(0)

        # First step of the utterance: cache the encoder side.
        if self.input_enc is None:
            self.input_enc = input_enc
            self.enc_lengths = enc_lengths
            self.max_enc_length = input_enc.size(1)
            self.projected_enc = self.enc_proj(self.input_enc)

        # Decoder state: default to zeros, move to the encoder's
        # device/dtype, then project (W s).
        if input_dec is None:
            input_dec = torch.zeros(n_batch, self.dim_decoder)
        input_dec = input_dec.to(device=self.input_enc.device,
                                 dtype=self.input_enc.dtype)
        dec_feat = self.dec_proj(input_dec)

        # Padding mask: True wherever the frame index is at or past the
        # utterance length.
        if self.mask is None:
            pad_mask = torch.zeros(n_batch,
                                   self.max_enc_length,
                                   dtype=torch.bool)
            for b, n_frames in enumerate(self.enc_lengths):
                pad_mask[b, n_frames.item():] = 1
            self.mask = pad_mask.to(device=self.input_enc.device)

        # Previous attention: default to uniform over the valid frames,
        # with padded positions zeroed out.
        if prev_att is None:
            prev_att = torch.ones(n_batch, self.max_enc_length)
            prev_att = prev_att / self.enc_lengths.view(n_batch, 1)
            prev_att = prev_att.to(device=self.input_enc.device,
                                   dtype=self.input_enc.dtype)
            prev_att.masked_fill_(self.mask, 0)

        # Location features f = F * a: Conv1d expects (B, 1, T); the
        # following Linear expects the channel axis last, hence the
        # transpose.
        conv_feat = self.loc_conv(prev_att.view(n_batch, 1,
                                                self.max_enc_length))
        loc_feat = self.att_proj(conv_feat.transpose(1, 2))

        # Scores: w^T tanh(W s + V h + U f + b), one scalar per frame.
        dec_feat = dec_feat.view(n_batch, 1, self.dim_attention)
        score = self.out(torch.tanh(dec_feat
                                    + self.projected_enc
                                    + loc_feat))
        score = score.view(n_batch, self.max_enc_length)

        # Padded frames must end up with zero probability, so fill the
        # scores with -inf (exp(-inf) == 0) before the softmax.
        score.masked_fill_(self.mask, -float('inf'))
        att_weight = F.softmax(self.temperature * score, dim=1)

        # Context vector: attention-weighted sum of the encoder frames.
        context = torch.sum(self.input_enc
                            * att_weight.view(n_batch,
                                              self.max_enc_length, 1),
                            dim=1)
        return context, att_weight
| true | true |
f7fe5c024f10872ce3b44df612f8f69b4e0273a2 | 2,884 | py | Python | CoffeeMachine/main.py | Wilhit/Coffee-Machine-Projects | 7d1e5b02424f434c373f768594a43ec3075d5018 | [
"MIT"
] | 1 | 2022-01-28T01:09:30.000Z | 2022-01-28T01:09:30.000Z | CoffeeMachine/main.py | Wilhit/Coffee-Machine-Projects | 7d1e5b02424f434c373f768594a43ec3075d5018 | [
"MIT"
] | null | null | null | CoffeeMachine/main.py | Wilhit/Coffee-Machine-Projects | 7d1e5b02424f434c373f768594a43ec3075d5018 | [
"MIT"
] | null | null | null | MENU = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
},
"cost": 15.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 20.0,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 25.0,
}
}
resources = {
"water": 900,
"milk": 700,
"coffee": 300,
}
profit = 0
password = "Admin"
# TODO: 1. Print the report of the coffee machine resources
# TODO: 2 coin processing system
def coin_processing():
    """Prompt the user for coin counts and return the total inserted (N$).

    Reads four denominations from stdin (tens, fives, ones and 50-cent
    pieces) and returns their combined value as a float.
    Fix: corrected the "How manny" typo in the fives prompt.
    """
    print("Please insert coins.")
    total = int(input("How many tens? (N$10): ")) * 10
    total += int(input("How many fives? (N$5): ")) * 5
    total += int(input("How many ones? (N$1): ")) * 1
    total += int(input("How many 50 cents? (N$0.5): ")) * 0.5
    return total
def coffee(drink_name, order_ingredients):
    """Deduct the order's ingredients from the machine's stock and serve."""
    for ingredient, amount in order_ingredients.items():
        resources[ingredient] -= amount
    print(f"Here is your {drink_name}, Enjoy.")
def successful_transaction(money_payed, drink_cost):
    """Settle a payment: return True and bank the cost, or refund.

    Prints the change (rounded to cents) and adds the drink's cost to the
    global profit when enough money was paid; otherwise announces the
    refund and returns False.
    """
    global profit
    # Guard clause: not enough money -> refund, no sale.
    if money_payed < drink_cost:
        print("Sorry that's not enough money. Money returned.")
        return False
    change = round(money_payed - drink_cost, 2)
    print(f"Here is N${change} change")
    profit += drink_cost
    return True
def sufficient_resources(order_ingredients):
    """Return True when the machine stocks enough of every ingredient.

    Prints which ingredient is short and returns False on the first
    shortfall; otherwise returns True.
    """
    for item, required in order_ingredients.items():
        if required > resources[item]:
            print(f"Sorry there is not enough {item}.")
            return False
    return True
# Main service loop: serve drinks until an administrator turns the
# machine off with the password-protected "off" command.
is_machine_on = True
while is_machine_on:
    order = input("What would you like? (espresso/latte/cappuccino): ").lower()
    if order == "off":
        # Password-protected shutdown.
        ps = input("Enter Password: ")
        if ps == password:
            is_machine_on = False
        else:
            print("Wrong Password")
            is_machine_on = True
    elif order == "report":
        # Password-protected stock/profit report.
        ps = input("Enter Password: ")
        if ps == password:
            print(f"Water: {resources['water']}ml")
            print(f"Milk: {resources['milk']}ml")
            print(f"Coffee: {resources['coffee']}ml")
            print(f"Money: N${profit}")
        else:
            print("Wrong Password")
    elif order in MENU:
        # Valid drink: check stock, take payment, then serve.
        drink = MENU[order]
        if sufficient_resources(drink['ingredients']):
            payment = coin_processing()
            if successful_transaction(payment, drink['cost']):
                coffee(order, drink['ingredients'])
    else:
        # Fix: any unrecognised order previously raised KeyError on
        # MENU[order] and crashed the machine.
        print("Unknown selection. Please choose espresso, latte or cappuccino.")
# Drink menu: for each drink, the ingredient amounts it consumes and its
# price in Namibian dollars (N$).  Amounts use the same units as the
# machine's "report" output (printed as ml).
MENU = {
    "espresso": {
        "ingredients": {
            "water": 50,
            "coffee": 18,
        },
        "cost": 15.5,
    },
    "latte": {
        "ingredients": {
            "water": 200,
            "milk": 150,
            "coffee": 24,
        },
        "cost": 20.0,
    },
    "cappuccino": {
        "ingredients": {
            "water": 250,
            "milk": 100,
            "coffee": 24,
        },
        "cost": 25.0,
    }
}
# Current stock of the machine; depleted by coffee() as drinks are served.
resources = {
    "water": 900,
    "milk": 700,
    "coffee": 300,
}
# Running total of money earned; updated by successful_transaction().
profit = 0
# Administrator password guarding the "off" and "report" commands.
password = "Admin"
def coin_processing():
    """Prompt the user for coin counts and return the total inserted (N$).

    Reads four denominations from stdin (tens, fives, ones and 50-cent
    pieces) and returns their combined value as a float.
    Fix: corrected the "How manny" typo in the fives prompt.
    """
    print("Please insert coins.")
    total = int(input("How many tens? (N$10): ")) * 10
    total += int(input("How many fives? (N$5): ")) * 5
    total += int(input("How many ones? (N$1): ")) * 1
    total += int(input("How many 50 cents? (N$0.5): ")) * 0.5
    return total
def coffee(drink_name, order_ingredients):
    """Deduct the order's ingredients from the machine's stock and serve."""
    for ingredient, amount in order_ingredients.items():
        resources[ingredient] -= amount
    print(f"Here is your {drink_name}, Enjoy.")
def successful_transaction(money_payed, drink_cost):
    """Settle a payment: return True and bank the cost, or refund.

    Prints the change (rounded to cents) and adds the drink's cost to the
    global profit when enough money was paid; otherwise announces the
    refund and returns False.
    """
    global profit
    # Guard clause: not enough money -> refund, no sale.
    if money_payed < drink_cost:
        print("Sorry that's not enough money. Money returned.")
        return False
    change = round(money_payed - drink_cost, 2)
    print(f"Here is N${change} change")
    profit += drink_cost
    return True
def sufficient_resources(order_ingredients):
    """Return True when the machine stocks enough of every ingredient.

    Prints which ingredient is short and returns False on the first
    shortfall; otherwise returns True.
    """
    for item, required in order_ingredients.items():
        if required > resources[item]:
            print(f"Sorry there is not enough {item}.")
            return False
    return True
# Main service loop: serve drinks until an administrator turns the
# machine off with the password-protected "off" command.
is_machine_on = True
while is_machine_on:
    order = input("What would you like? (espresso/latte/cappuccino): ").lower()
    if order == "off":
        # Password-protected shutdown.
        ps = input("Enter Password: ")
        if ps == password:
            is_machine_on = False
        else:
            print("Wrong Password")
            is_machine_on = True
    elif order == "report":
        # Password-protected stock/profit report.
        ps = input("Enter Password: ")
        if ps == password:
            print(f"Water: {resources['water']}ml")
            print(f"Milk: {resources['milk']}ml")
            print(f"Coffee: {resources['coffee']}ml")
            print(f"Money: N${profit}")
        else:
            print("Wrong Password")
    elif order in MENU:
        # Valid drink: check stock, take payment, then serve.
        drink = MENU[order]
        if sufficient_resources(drink['ingredients']):
            payment = coin_processing()
            if successful_transaction(payment, drink['cost']):
                coffee(order, drink['ingredients'])
    else:
        # Fix: any unrecognised order previously raised KeyError on
        # MENU[order] and crashed the machine.
        print("Unknown selection. Please choose espresso, latte or cappuccino.")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.