Schema (one record per source file):

  field                                        type            range / classes
  -------------------------------------------  --------------  ---------------
  hexsha                                       stringlengths   40 - 40
  size                                         int64           2 - 1.02M
  ext                                          stringclasses   10 values
  lang                                         stringclasses   1 value
  max_stars_repo_path                          stringlengths   4 - 245
  max_stars_repo_name                          stringlengths   6 - 130
  max_stars_repo_head_hexsha                   stringlengths   40 - 40
  max_stars_repo_licenses                      listlengths     1 - 10
  max_stars_count                              int64           1 - 191k
  max_stars_repo_stars_event_min_datetime      stringlengths   24 - 24
  max_stars_repo_stars_event_max_datetime      stringlengths   24 - 24
  max_issues_repo_path                         stringlengths   4 - 245
  max_issues_repo_name                         stringlengths   6 - 130
  max_issues_repo_head_hexsha                  stringlengths   40 - 40
  max_issues_repo_licenses                     listlengths     1 - 10
  max_issues_count                             int64           1 - 67k
  max_issues_repo_issues_event_min_datetime    stringlengths   24 - 24
  max_issues_repo_issues_event_max_datetime    stringlengths   24 - 24
  max_forks_repo_path                          stringlengths   4 - 245
  max_forks_repo_name                          stringlengths   6 - 130
  max_forks_repo_head_hexsha                   stringlengths   40 - 40
  max_forks_repo_licenses                      listlengths     1 - 10
  max_forks_count                              int64           1 - 105k
  max_forks_repo_forks_event_min_datetime      stringlengths   24 - 24
  max_forks_repo_forks_event_max_datetime      stringlengths   24 - 24
  content                                      stringlengths   2 - 1.02M
  avg_line_length                              float64         1 - 417k
  max_line_length                              int64           1 - 987k
  alphanum_fraction                            float64         0 - 1
  content_no_comment                           stringlengths   0 - 1.01M
  is_comment_constant_removed                  bool            1 class
  is_sharp_comment_removed                     bool            1 class
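The sample records below follow this schema. As a quick way to work with such a dump, here is a minimal sketch, assuming the rows have been exported to a local Parquet file ("records.parquet" is a placeholder name; this dump does not name its source):

    # Minimal sketch, assuming the rows were exported to a local Parquet file.
    # "records.parquet" is a placeholder, not part of this dump.
    import pandas as pd

    df = pd.read_parquet("records.parquet")

    # Sanity checks implied by the schema above.
    assert df["hexsha"].str.len().eq(40).all()          # stringlengths 40 - 40
    assert df["lang"].nunique() == 1                    # stringclasses 1 value
    assert df["alphanum_fraction"].between(0, 1).all()  # float64 0 - 1

    # Rows where comment stripping actually changed the file.
    changed = df[df["content"] != df["content_no_comment"]]
    print(f"{len(changed)} of {len(df)} rows differ after comment removal")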
Record 1
  hexsha: 1c432f914ed6c58dcddeaca1299901b49420cd1b
  size: 315
  ext: py
  lang: Python
  max_stars_repo_path: sokoapp/blog/admin_forms.py
  max_stars_repo_name: Mercy-Nekesa/sokoapp
  max_stars_repo_head_hexsha: 6c7bc4c1278b7223226124a49fc33c5b8b6b617a
  max_stars_repo_licenses: [ "MIT" ]
  max_stars_count: 1
  max_stars_repo_stars_event_min_datetime: 2019-04-01T05:52:37.000Z
  max_stars_repo_stars_event_max_datetime: 2019-04-01T05:52:37.000Z
  max_issues_repo_path: sokoapp/blog/admin_forms.py
  max_issues_repo_name: Mercy-Nekesa/sokoapp
  max_issues_repo_head_hexsha: 6c7bc4c1278b7223226124a49fc33c5b8b6b617a
  max_issues_repo_licenses: [ "MIT" ]
  max_issues_count: 1
  max_issues_repo_issues_event_min_datetime: 2015-03-11T16:18:12.000Z
  max_issues_repo_issues_event_max_datetime: 2015-03-11T16:18:12.000Z
  max_forks_repo_path: sokoapp/blog/admin_forms.py
  max_forks_repo_name: Mercy-Nekesa/sokoapp
  max_forks_repo_head_hexsha: 6c7bc4c1278b7223226124a49fc33c5b8b6b617a
  max_forks_repo_licenses: [ "MIT" ]
  max_forks_count: null
  max_forks_repo_forks_event_min_datetime: null
  max_forks_repo_forks_event_max_datetime: null
  content:
    from django import forms
    from mptt.forms import TreeNodeChoiceField

    from models import Category


    class CategoryForm(forms.ModelForm):
        parent = TreeNodeChoiceField(queryset=Category.objects.all(), level_indicator=3*unichr(160), empty_label='---------', required=False)

        class Meta:
            model = Category
  avg_line_length: 28.636364
  max_line_length: 137
  alphanum_fraction: 0.749206
  content_no_comment:
    from django import forms
    from mptt.forms import TreeNodeChoiceField

    from models import Category


    class CategoryForm(forms.ModelForm):
        parent = TreeNodeChoiceField(queryset=Category.objects.all(), level_indicator=3*unichr(160), empty_label='---------', required=False)

        class Meta:
            model = Category
  is_comment_constant_removed: true
  is_sharp_comment_removed: true
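The per-record statistics fields can be checked directly against content. A hedged sketch of how they appear to be computed (the actual pipeline is not included in this dump); on record 1's 11-line, 315-byte file it reproduces avg_line_length = 315/11 = 28.636364, max_line_length = 137 (the `parent = ...` line), and alphanum_fraction = 236/315 = 0.749206, provided underscores count as word characters:

    # Hedged sketch of the statistics fields; the real pipeline is not shown
    # in this dump, but these formulas reproduce record 1's values exactly.
    import re

    def file_stats(content: str) -> dict:
        lines = content.splitlines()
        size = len(content.encode("utf-8"))  # newlines included
        return {
            "size": size,
            "avg_line_length": size / len(lines),
            "max_line_length": max(len(line) for line in lines),
            # \w counts [A-Za-z0-9_]; plain isalnum() comes out 2 short
            # on record 1 (the two underscores in keyword names).
            "alphanum_fraction": len(re.findall(r"\w", content)) / size,
        }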
Record 2
  hexsha: 1c432fd8aafbe155c419a37e9a6e28ef223f4100
  size: 2,275
  ext: py
  lang: Python
  max_stars_repo_path: tests/models/symbol/raw_data_retrieve_start_result_test.py
  max_stars_repo_name: NetApp/santricity-webapi-pythonsdk
  max_stars_repo_head_hexsha: 1d3df4a00561192f4cdcdd1890f4d27547ed2de2
  max_stars_repo_licenses: [ "BSD-3-Clause-Clear" ]
  max_stars_count: 5
  max_stars_repo_stars_event_min_datetime: 2016-08-23T17:52:22.000Z
  max_stars_repo_stars_event_max_datetime: 2019-05-16T08:45:30.000Z
  max_issues_repo_path: tests/models/symbol/raw_data_retrieve_start_result_test.py
  max_issues_repo_name: NetApp/santricity-webapi-pythonsdk
  max_issues_repo_head_hexsha: 1d3df4a00561192f4cdcdd1890f4d27547ed2de2
  max_issues_repo_licenses: [ "BSD-3-Clause-Clear" ]
  max_issues_count: 2
  max_issues_repo_issues_event_min_datetime: 2016-11-10T05:30:21.000Z
  max_issues_repo_issues_event_max_datetime: 2019-04-05T15:03:37.000Z
  max_forks_repo_path: tests/models/symbol/raw_data_retrieve_start_result_test.py
  max_forks_repo_name: NetApp/santricity-webapi-pythonsdk
  max_forks_repo_head_hexsha: 1d3df4a00561192f4cdcdd1890f4d27547ed2de2
  max_forks_repo_licenses: [ "BSD-3-Clause-Clear" ]
  max_forks_count: 7
  max_forks_repo_forks_event_min_datetime: 2016-08-25T16:11:44.000Z
  max_forks_repo_forks_event_max_datetime: 2021-02-22T05:31:25.000Z
  content:
    #!/usr/bin/env python
    # coding: utf-8
    """
    The Clear BSD License

    Copyright (c) – 2016, NetApp, Inc.
    All rights reserved.

    Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

    * Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

    NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    """

    import unittest
    from netapp.santricity.models.symbol.raw_data_retrieve_start_result import RawDataRetrieveStartResult


    class RawDataRetrieveStartResultTest(unittest.TestCase):
        """
        NOTE: This class is auto generated by the swagger code generator program.
        Do not edit the class manually.
        """

        # Try instantiating the model
        def test_raw_data_retrieve_start_result(self):
            raw_data_retrieve_start_result_obj = RawDataRetrieveStartResult()
            self.assertNotEqual(raw_data_retrieve_start_result_obj, None)
  avg_line_length: 59.868421
  max_line_length: 845
  alphanum_fraction: 0.782857
  content_no_comment:
    import unittest
    from netapp.santricity.models.symbol.raw_data_retrieve_start_result import RawDataRetrieveStartResult


    class RawDataRetrieveStartResultTest(unittest.TestCase):

        def test_raw_data_retrieve_start_result(self):
            raw_data_retrieve_start_result_obj = RawDataRetrieveStartResult()
            self.assertNotEqual(raw_data_retrieve_start_result_obj, None)
  is_comment_constant_removed: true
  is_sharp_comment_removed: true
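Record 2 shows the content to content_no_comment transformation most clearly: the shebang and coding-cookie comments, the license module docstring, the class docstring, and the "# Try instantiating the model" comment are all stripped. A minimal sketch of the two passes the flag names suggest, assuming is_sharp_comment_removed marks a "#"-comment pass and is_comment_constant_removed marks a docstring pass (the dataset's actual implementation is not part of this dump):

    # Hedged sketch of the two stripping passes implied by the flag names;
    # the dataset's real pipeline is not shown in this dump.
    import ast
    import io
    import tokenize

    def strip_sharp_comments(source: str) -> str:
        """Drop '#' comments (the pass is_sharp_comment_removed seems to flag)."""
        toks = [t for t in tokenize.generate_tokens(io.StringIO(source).readline)
                if t.type != tokenize.COMMENT]
        return tokenize.untokenize(toks)

    def strip_docstrings(source: str) -> str:
        """Drop docstrings (the pass is_comment_constant_removed seems to flag)."""
        tree = ast.parse(source)
        for node in ast.walk(tree):
            if isinstance(node, (ast.Module, ast.ClassDef,
                                 ast.FunctionDef, ast.AsyncFunctionDef)):
                body = node.body
                if (body and isinstance(body[0], ast.Expr)
                        and isinstance(body[0].value, ast.Constant)
                        and isinstance(body[0].value.value, str)):
                    node.body = body[1:] or [ast.Pass()]  # keep the block valid
        return ast.unparse(tree)  # requires Python 3.9+

Applied to record 2's content, strip_docstrings(strip_sharp_comments(...)) should yield its content_no_comment up to whitespace and line-break differences.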
Record 3
  hexsha: 1c43316ba426e186d7d74059eb7f554612fcaa26
  size: 52,235
  ext: py
  lang: Python
  max_stars_repo_path: swift/common/swob.py
  max_stars_repo_name: steveruckdashel/swift
  max_stars_repo_head_hexsha: 91d04ce611b10a9403ff36ada40899414b011d08
  max_stars_repo_licenses: [ "Apache-2.0" ]
  max_stars_count: null
  max_stars_repo_stars_event_min_datetime: null
  max_stars_repo_stars_event_max_datetime: null
  max_issues_repo_path: swift/common/swob.py
  max_issues_repo_name: steveruckdashel/swift
  max_issues_repo_head_hexsha: 91d04ce611b10a9403ff36ada40899414b011d08
  max_issues_repo_licenses: [ "Apache-2.0" ]
  max_issues_count: null
  max_issues_repo_issues_event_min_datetime: null
  max_issues_repo_issues_event_max_datetime: null
  max_forks_repo_path: swift/common/swob.py
  max_forks_repo_name: steveruckdashel/swift
  max_forks_repo_head_hexsha: 91d04ce611b10a9403ff36ada40899414b011d08
  max_forks_repo_licenses: [ "Apache-2.0" ]
  max_forks_count: null
  max_forks_repo_forks_event_min_datetime: null
  max_forks_repo_forks_event_max_datetime: null
  content:
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Implementation of WSGI Request and Response objects. This library has a very similar API to Webob. It wraps WSGI request environments and response values into objects that are more friendly to interact with. Why Swob and not just use WebOb? By Michael Barton We used webob for years. The main problem was that the interface wasn't stable. For a while, each of our several test suites required a slightly different version of webob to run, and none of them worked with the then-current version. It was a huge headache, so we just scrapped it. This is kind of a ton of code, but it's also been a huge relief to not have to scramble to add a bunch of code branches all over the place to keep Swift working every time webob decides some interface needs to change. """ from collections import defaultdict from cStringIO import StringIO import UserDict import time from functools import partial from datetime import datetime, timedelta, tzinfo from email.utils import parsedate import urlparse import urllib2 import re import random import functools import inspect from swift.common.utils import reiterate, split_path, Timestamp, pairs from swift.common.exceptions import InvalidTimestamp RESPONSE_REASONS = { 100: ('Continue', ''), 200: ('OK', ''), 201: ('Created', ''), 202: ('Accepted', 'The request is accepted for processing.'), 204: ('No Content', ''), 206: ('Partial Content', ''), 301: ('Moved Permanently', 'The resource has moved permanently.'), 302: ('Found', 'The resource has moved temporarily.'), 303: ('See Other', 'The response to the request can be found under a ' 'different URI.'), 304: ('Not Modified', ''), 307: ('Temporary Redirect', 'The resource has moved temporarily.'), 400: ('Bad Request', 'The server could not comply with the request since ' 'it is either malformed or otherwise incorrect.'), 401: ('Unauthorized', 'This server could not verify that you are ' 'authorized to access the document you requested.'), 402: ('Payment Required', 'Access was denied for financial reasons.'), 403: ('Forbidden', 'Access was denied to this resource.'), 404: ('Not Found', 'The resource could not be found.'), 405: ('Method Not Allowed', 'The method is not allowed for this ' 'resource.'), 406: ('Not Acceptable', 'The resource is not available in a format ' 'acceptable to your browser.'), 408: ('Request Timeout', 'The server has waited too long for the request ' 'to be sent by the client.'), 409: ('Conflict', 'There was a conflict when trying to complete ' 'your request.'), 410: ('Gone', 'This resource is no longer available.'), 411: ('Length Required', 'Content-Length header required.'), 412: ('Precondition Failed', 'A precondition for this request was not ' 'met.'), 413: ('Request Entity Too Large', 'The body of your request was too ' 'large for this server.'), 414: ('Request URI Too Long', 'The request URI was too long for this ' 'server.'), 415: ('Unsupported Media Type', 'The request media type is not ' 'supported by this 
server.'), 416: ('Requested Range Not Satisfiable', 'The Range requested is not ' 'available.'), 417: ('Expectation Failed', 'Expectation failed.'), 422: ('Unprocessable Entity', 'Unable to process the contained ' 'instructions'), 499: ('Client Disconnect', 'The client was disconnected during request.'), 500: ('Internal Error', 'The server has either erred or is incapable of ' 'performing the requested operation.'), 501: ('Not Implemented', 'The requested method is not implemented by ' 'this server.'), 502: ('Bad Gateway', 'Bad gateway.'), 503: ('Service Unavailable', 'The server is currently unavailable. ' 'Please try again at a later time.'), 504: ('Gateway Timeout', 'A timeout has occurred speaking to a ' 'backend server.'), 507: ('Insufficient Storage', 'There was not enough space to save the ' 'resource. Drive: %(drive)s'), } MAX_RANGE_OVERLAPS = 2 MAX_NONASCENDING_RANGES = 8 MAX_RANGES = 50 class _UTC(tzinfo): """ A tzinfo class for datetime objects that returns a 0 timedelta (UTC time) """ def dst(self, dt): return timedelta(0) utcoffset = dst def tzname(self, dt): return 'UTC' UTC = _UTC() def _datetime_property(header): """ Set and retrieve the datetime value of self.headers[header] (Used by both request and response) The header is parsed on retrieval and a datetime object is returned. The header can be set using a datetime, numeric value, or str. If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Content-Length" """ def getter(self): value = self.headers.get(header, None) if value is not None: try: parts = parsedate(self.headers[header])[:7] return datetime(*(parts + (UTC,))) except Exception: return None def setter(self, value): if isinstance(value, (float, int, long)): self.headers[header] = time.strftime( "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(value)) elif isinstance(value, datetime): self.headers[header] = value.strftime("%a, %d %b %Y %H:%M:%S GMT") else: self.headers[header] = value return property(getter, setter, doc=("Retrieve and set the %s header as a datetime, " "set it with a datetime, int, or str") % header) def _header_property(header): """ Set and retrieve the value of self.headers[header] (Used by both request and response) If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Transfer-Encoding" """ def getter(self): return self.headers.get(header, None) def setter(self, value): self.headers[header] = value return property(getter, setter, doc="Retrieve and set the %s header" % header) def _header_int_property(header): """ Set and retrieve the value of self.headers[header] (Used by both request and response) On retrieval, it converts values to integers. If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Content-Length" """ def getter(self): val = self.headers.get(header, None) if val is not None: val = int(val) return val def setter(self, value): self.headers[header] = value return property(getter, setter, doc="Retrieve and set the %s header as an int" % header) class HeaderEnvironProxy(UserDict.DictMixin): """ A dict-like object that proxies requests to a wsgi environ, rewriting header keys to environ keys. 
For example, headers['Content-Range'] sets and gets the value of headers.environ['HTTP_CONTENT_RANGE'] """ def __init__(self, environ): self.environ = environ def _normalize(self, key): key = 'HTTP_' + key.replace('-', '_').upper() if key == 'HTTP_CONTENT_LENGTH': return 'CONTENT_LENGTH' if key == 'HTTP_CONTENT_TYPE': return 'CONTENT_TYPE' return key def __getitem__(self, key): return self.environ[self._normalize(key)] def __setitem__(self, key, value): if value is None: self.environ.pop(self._normalize(key), None) elif isinstance(value, unicode): self.environ[self._normalize(key)] = value.encode('utf-8') else: self.environ[self._normalize(key)] = str(value) def __contains__(self, key): return self._normalize(key) in self.environ def __delitem__(self, key): del self.environ[self._normalize(key)] def keys(self): keys = [key[5:].replace('_', '-').title() for key in self.environ if key.startswith('HTTP_')] if 'CONTENT_LENGTH' in self.environ: keys.append('Content-Length') if 'CONTENT_TYPE' in self.environ: keys.append('Content-Type') return keys class HeaderKeyDict(dict): """ A dict that title-cases all keys on the way in, so as to be case-insensitive. """ def __init__(self, base_headers=None, **kwargs): if base_headers: self.update(base_headers) self.update(kwargs) def update(self, other): if hasattr(other, 'keys'): for key in other.keys(): self[key.title()] = other[key] else: for key, value in other: self[key.title()] = value def __getitem__(self, key): return dict.get(self, key.title()) def __setitem__(self, key, value): if value is None: self.pop(key.title(), None) elif isinstance(value, unicode): return dict.__setitem__(self, key.title(), value.encode('utf-8')) else: return dict.__setitem__(self, key.title(), str(value)) def __contains__(self, key): return dict.__contains__(self, key.title()) def __delitem__(self, key): return dict.__delitem__(self, key.title()) def get(self, key, default=None): return dict.get(self, key.title(), default) def setdefault(self, key, value=None): if key not in self: self[key] = value return self[key] def pop(self, key, default=None): return dict.pop(self, key.title(), default) def _resp_status_property(): """ Set and retrieve the value of Response.status On retrieval, it concatenates status_int and title. When set to a str, it splits status_int and title apart. When set to an integer, retrieves the correct title for that response code from the RESPONSE_REASONS dict. """ def getter(self): return '%s %s' % (self.status_int, self.title) def setter(self, value): if isinstance(value, (int, long)): self.status_int = value self.explanation = self.title = RESPONSE_REASONS[value][0] else: if isinstance(value, unicode): value = value.encode('utf-8') self.status_int = int(value.split(' ', 1)[0]) self.explanation = self.title = value.split(' ', 1)[1] return property(getter, setter, doc="Retrieve and set the Response status, e.g. '200 OK'") def _resp_body_property(): """ Set and retrieve the value of Response.body If necessary, it will consume Response.app_iter to create a body. On assignment, encodes unicode values to utf-8, and sets the content-length to the length of the str. 
""" def getter(self): if not self._body: if not self._app_iter: return '' self._body = ''.join(self._app_iter) self._app_iter = None return self._body def setter(self, value): if isinstance(value, unicode): value = value.encode('utf-8') if isinstance(value, str): self.content_length = len(value) self._app_iter = None self._body = value return property(getter, setter, doc="Retrieve and set the Response body str") def _resp_etag_property(): """ Set and retrieve Response.etag This may be broken for etag use cases other than Swift's. Quotes strings when assigned and unquotes when read, for compatibility with webob. """ def getter(self): etag = self.headers.get('etag', None) if etag: etag = etag.replace('"', '') return etag def setter(self, value): if value is None: self.headers['etag'] = None else: self.headers['etag'] = '"%s"' % value return property(getter, setter, doc="Retrieve and set the response Etag header") def _resp_content_type_property(): """ Set and retrieve Response.content_type Strips off any charset when retrieved -- that is accessible via Response.charset. """ def getter(self): if 'content-type' in self.headers: return self.headers.get('content-type').split(';')[0] def setter(self, value): self.headers['content-type'] = value return property(getter, setter, doc="Retrieve and set the response Content-Type header") def _resp_charset_property(): """ Set and retrieve Response.charset On retrieval, separates the charset from the content-type. On assignment, removes any existing charset from the content-type and appends the new one. """ def getter(self): if '; charset=' in self.headers['content-type']: return self.headers['content-type'].split('; charset=')[1] def setter(self, value): if 'content-type' in self.headers: self.headers['content-type'] = self.headers['content-type'].split( ';')[0] if value: self.headers['content-type'] += '; charset=' + value return property(getter, setter, doc="Retrieve and set the response charset") def _resp_app_iter_property(): """ Set and retrieve Response.app_iter Mostly a pass-through to Response._app_iter; it's a property so it can zero out an existing content-length on assignment. """ def getter(self): return self._app_iter def setter(self, value): if isinstance(value, (list, tuple)): self.content_length = sum(map(len, value)) elif value is not None: self.content_length = None self._body = None self._app_iter = value return property(getter, setter, doc="Retrieve and set the response app_iter") def _req_fancy_property(cls, header, even_if_nonexistent=False): """ Set and retrieve "fancy" properties. On retrieval, these properties return a class that takes the value of the header as the only argument to their constructor. For assignment, those classes should implement a __str__ that converts them back to their header values. :param header: name of the header, e.g. "Accept" :param even_if_nonexistent: Return a value even if the header does not exist. Classes using this should be prepared to accept None as a parameter. """ def getter(self): try: if header in self.headers or even_if_nonexistent: return cls(self.headers.get(header)) except ValueError: return None def setter(self, value): self.headers[header] = value return property(getter, setter, doc=("Retrieve and set the %s " "property in the WSGI environ, as a %s object") % (header, cls.__name__)) class Range(object): """ Wraps a Request's Range header as a friendly object. After initialization, "range.ranges" is populated with a list of (start, end) tuples denoting the requested ranges. 
If there were any syntactically-invalid byte-range-spec values, "range.ranges" will be an empty list, per the relevant RFC: "The recipient of a byte-range-set that includes one or more syntactically invalid byte-range-spec values MUST ignore the header field that includes that byte-range-set." According to the RFC 2616 specification, the following cases will be all considered as syntactically invalid, thus, a ValueError is thrown so that the range header will be ignored. If the range value contains at least one of the following cases, the entire range is considered invalid, ValueError will be thrown so that the header will be ignored. 1. value not starts with bytes= 2. range value start is greater than the end, eg. bytes=5-3 3. range does not have start or end, eg. bytes=- 4. range does not have hyphen, eg. bytes=45 5. range value is non numeric 6. any combination of the above Every syntactically valid range will be added into the ranges list even when some of the ranges may not be satisfied by underlying content. :param headerval: value of the header as a str """ def __init__(self, headerval): headerval = headerval.replace(' ', '') if not headerval.lower().startswith('bytes='): raise ValueError('Invalid Range header: %s' % headerval) self.ranges = [] for rng in headerval[6:].split(','): # Check if the range has required hyphen. if rng.find('-') == -1: raise ValueError('Invalid Range header: %s' % headerval) start, end = rng.split('-', 1) if start: # when start contains non numeric value, this also causes # ValueError start = int(start) else: start = None if end: # when end contains non numeric value, this also causes # ValueError end = int(end) if start is not None and end < start: raise ValueError('Invalid Range header: %s' % headerval) else: end = None if start is None: raise ValueError('Invalid Range header: %s' % headerval) self.ranges.append((start, end)) def __str__(self): string = 'bytes=' for start, end in self.ranges: if start is not None: string += str(start) string += '-' if end is not None: string += str(end) string += ',' return string.rstrip(',') def ranges_for_length(self, length): """ This method is used to return multiple ranges for a given length which should represent the length of the underlying content. The constructor method __init__ made sure that any range in ranges list is syntactically valid. So if length is None or size of the ranges is zero, then the Range header should be ignored which will eventually make the response to be 200. If an empty list is returned by this method, it indicates that there are unsatisfiable ranges found in the Range header, 416 will be returned. if a returned list has at least one element, the list indicates that there is at least one range valid and the server should serve the request with a 206 status code. The start value of each range represents the starting position in the content, the end value represents the ending position. This method purposely adds 1 to the end number because the spec defines the Range to be inclusive. 
The Range spec can be found at the following link: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1 :param length: length of the underlying content """ # not syntactically valid ranges, must ignore if length is None or not self.ranges or self.ranges == []: return None all_ranges = [] for single_range in self.ranges: begin, end = single_range # The possible values for begin and end are # None, 0, or a positive numeric number if begin is None: if end == 0: # this is the bytes=-0 case continue elif end > length: # This is the case where the end is greater than the # content length, as the RFC 2616 stated, the entire # content should be returned. all_ranges.append((0, length)) else: all_ranges.append((length - end, length)) continue # begin can only be 0 and numeric value from this point on if end is None: if begin < length: all_ranges.append((begin, length)) else: # the begin position is greater than or equal to the # content length; skip and move on to the next range continue # end can only be 0 or numeric value elif begin < length: # the begin position is valid, take the min of end + 1 or # the total length of the content all_ranges.append((begin, min(end + 1, length))) # RFC 7233 section 6.1 ("Denial-of-Service Attacks Using Range") says: # # Unconstrained multiple range requests are susceptible to denial-of- # service attacks because the effort required to request many # overlapping ranges of the same data is tiny compared to the time, # memory, and bandwidth consumed by attempting to serve the requested # data in many parts. Servers ought to ignore, coalesce, or reject # egregious range requests, such as requests for more than two # overlapping ranges or for many small ranges in a single set, # particularly when the ranges are requested out of order for no # apparent reason. Multipart range requests are not designed to # support random access. # # We're defining "egregious" here as: # # * more than 100 requested ranges OR # * more than 2 overlapping ranges OR # * more than 8 non-ascending-order ranges if len(all_ranges) > MAX_RANGES: return [] overlaps = 0 for ((start1, end1), (start2, end2)) in pairs(all_ranges): if ((start1 < start2 < end1) or (start1 < end2 < end1) or (start2 < start1 < end2) or (start2 < end1 < end2)): overlaps += 1 if overlaps > MAX_RANGE_OVERLAPS: return [] ascending = True for start1, start2 in zip(all_ranges, all_ranges[1:]): if start1 > start2: ascending = False break if not ascending and len(all_ranges) >= MAX_NONASCENDING_RANGES: return [] return all_ranges class Match(object): """ Wraps a Request's If-[None-]Match header as a friendly object. :param headerval: value of the header as a str """ def __init__(self, headerval): self.tags = set() for tag in headerval.split(', '): if tag.startswith('"') and tag.endswith('"'): self.tags.add(tag[1:-1]) else: self.tags.add(tag) def __contains__(self, val): return '*' in self.tags or val in self.tags class Accept(object): """ Wraps a Request's Accept header as a friendly object. 
:param headerval: value of the header as a str """ # RFC 2616 section 2.2 token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+' qdtext = r'[^"]' quoted_pair = r'(?:\\.)' quoted_string = r'"(?:' + qdtext + r'|' + quoted_pair + r')*"' extension = (r'(?:\s*;\s*(?:' + token + r")\s*=\s*" + r'(?:' + token + r'|' + quoted_string + r'))') acc = (r'^\s*(' + token + r')/(' + token + r')(' + extension + r'*?\s*)$') acc_pattern = re.compile(acc) def __init__(self, headerval): self.headerval = headerval def _get_types(self): types = [] if not self.headerval: return [] for typ in self.headerval.split(','): type_parms = self.acc_pattern.findall(typ) if not type_parms: raise ValueError('Invalid accept header') typ, subtype, parms = type_parms[0] parms = [p.strip() for p in parms.split(';') if p.strip()] seen_q_already = False quality = 1.0 for parm in parms: name, value = parm.split('=') name = name.strip() value = value.strip() if name == 'q': if seen_q_already: raise ValueError('Multiple "q" params') seen_q_already = True quality = float(value) pattern = '^' + \ (self.token if typ == '*' else re.escape(typ)) + '/' + \ (self.token if subtype == '*' else re.escape(subtype)) + '$' types.append((pattern, quality, '*' not in (typ, subtype))) # sort candidates by quality, then whether or not there were globs types.sort(reverse=True, key=lambda t: (t[1], t[2])) return [t[0] for t in types] def best_match(self, options): """ Returns the item from "options" that best matches the accept header. Returns None if no available options are acceptable to the client. :param options: a list of content-types the server can respond with """ try: types = self._get_types() except ValueError: return None if not types and options: return options[0] for pattern in types: for option in options: if re.match(pattern, option): return option return None def __repr__(self): return self.headerval def _req_environ_property(environ_field): """ Set and retrieve value of the environ_field entry in self.environ. (Used by both request and response) """ def getter(self): return self.environ.get(environ_field, None) def setter(self, value): if isinstance(value, unicode): self.environ[environ_field] = value.encode('utf-8') else: self.environ[environ_field] = value return property(getter, setter, doc=("Get and set the %s property " "in the WSGI environment") % environ_field) def _req_body_property(): """ Set and retrieve the Request.body parameter. It consumes wsgi.input and returns the results. On assignment, uses a StringIO to create a new wsgi.input. """ def getter(self): body = self.environ['wsgi.input'].read() self.environ['wsgi.input'] = StringIO(body) return body def setter(self, value): self.environ['wsgi.input'] = StringIO(value) self.environ['CONTENT_LENGTH'] = str(len(value)) return property(getter, setter, doc="Get and set the request body str") def _host_url_property(): """ Retrieves the best guess that can be made for an absolute location up to the path, for example: https://host.com:1234 """ def getter(self): if 'HTTP_HOST' in self.environ: host = self.environ['HTTP_HOST'] else: host = '%s:%s' % (self.environ['SERVER_NAME'], self.environ['SERVER_PORT']) scheme = self.environ.get('wsgi.url_scheme', 'http') if scheme == 'http' and host.endswith(':80'): host, port = host.rsplit(':', 1) elif scheme == 'https' and host.endswith(':443'): host, port = host.rsplit(':', 1) return '%s://%s' % (scheme, host) return property(getter, doc="Get url for request/response up to path") class Request(object): """ WSGI Request object. 
""" range = _req_fancy_property(Range, 'range') if_none_match = _req_fancy_property(Match, 'if-none-match') accept = _req_fancy_property(Accept, 'accept', True) method = _req_environ_property('REQUEST_METHOD') referrer = referer = _req_environ_property('HTTP_REFERER') script_name = _req_environ_property('SCRIPT_NAME') path_info = _req_environ_property('PATH_INFO') host = _req_environ_property('HTTP_HOST') host_url = _host_url_property() remote_addr = _req_environ_property('REMOTE_ADDR') remote_user = _req_environ_property('REMOTE_USER') user_agent = _req_environ_property('HTTP_USER_AGENT') query_string = _req_environ_property('QUERY_STRING') if_match = _req_fancy_property(Match, 'if-match') body_file = _req_environ_property('wsgi.input') content_length = _header_int_property('content-length') if_modified_since = _datetime_property('if-modified-since') if_unmodified_since = _datetime_property('if-unmodified-since') body = _req_body_property() charset = None _params_cache = None _timestamp = None acl = _req_environ_property('swob.ACL') def __init__(self, environ): self.environ = environ self.headers = HeaderEnvironProxy(self.environ) @classmethod def blank(cls, path, environ=None, headers=None, body=None, **kwargs): """ Create a new request object with the given parameters, and an environment otherwise filled in with non-surprising default values. :param path: encoded, parsed, and unquoted into PATH_INFO :param environ: WSGI environ dictionary :param headers: HTTP headers :param body: stuffed in a StringIO and hung on wsgi.input :param kwargs: any environ key with an property setter """ headers = headers or {} environ = environ or {} if isinstance(path, unicode): path = path.encode('utf-8') parsed_path = urlparse.urlparse(path) server_name = 'localhost' if parsed_path.netloc: server_name = parsed_path.netloc.split(':', 1)[0] server_port = parsed_path.port if server_port is None: server_port = {'http': 80, 'https': 443}.get(parsed_path.scheme, 80) if parsed_path.scheme and parsed_path.scheme not in ['http', 'https']: raise TypeError('Invalid scheme: %s' % parsed_path.scheme) env = { 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'QUERY_STRING': parsed_path.query, 'PATH_INFO': urllib2.unquote(parsed_path.path), 'SERVER_NAME': server_name, 'SERVER_PORT': str(server_port), 'HTTP_HOST': '%s:%d' % (server_name, server_port), 'SERVER_PROTOCOL': 'HTTP/1.0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': parsed_path.scheme or 'http', 'wsgi.errors': StringIO(''), 'wsgi.multithread': False, 'wsgi.multiprocess': False } env.update(environ) if body is not None: env['wsgi.input'] = StringIO(body) env['CONTENT_LENGTH'] = str(len(body)) elif 'wsgi.input' not in env: env['wsgi.input'] = StringIO('') req = Request(env) for key, val in headers.iteritems(): req.headers[key] = val for key, val in kwargs.items(): prop = getattr(Request, key, None) if prop and isinstance(prop, property): try: setattr(req, key, val) except AttributeError: pass else: continue raise TypeError("got unexpected keyword argument %r" % key) return req @property def params(self): "Provides QUERY_STRING parameters as a dictionary" if self._params_cache is None: if 'QUERY_STRING' in self.environ: self._params_cache = dict( urlparse.parse_qsl(self.environ['QUERY_STRING'], True)) else: self._params_cache = {} return self._params_cache str_params = params @property def timestamp(self): """ Provides HTTP_X_TIMESTAMP as a :class:`~swift.common.utils.Timestamp` """ if self._timestamp is None: try: raw_timestamp = self.environ['HTTP_X_TIMESTAMP'] except 
KeyError: raise InvalidTimestamp('Missing X-Timestamp header') try: self._timestamp = Timestamp(raw_timestamp) except ValueError: raise InvalidTimestamp('Invalid X-Timestamp header') return self._timestamp @property def path_qs(self): """The path of the request, without host but with query string.""" path = self.path if self.query_string: path += '?' + self.query_string return path @property def path(self): "Provides the full path of the request, excluding the QUERY_STRING" return urllib2.quote(self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO']) @property def swift_entity_path(self): """ Provides the account/container/object path, sans API version. This can be useful when constructing a path to send to a backend server, as that path will need everything after the "/v1". """ _ver, entity_path = self.split_path(1, 2, rest_with_last=True) if entity_path is not None: return '/' + entity_path @property def url(self): "Provides the full url of the request" return self.host_url + self.path_qs def as_referer(self): return self.method + ' ' + self.url def path_info_pop(self): """ Takes one path portion (delineated by slashes) from the path_info, and appends it to the script_name. Returns the path segment. """ path_info = self.path_info if not path_info or path_info[0] != '/': return None try: slash_loc = path_info.index('/', 1) except ValueError: slash_loc = len(path_info) self.script_name += path_info[:slash_loc] self.path_info = path_info[slash_loc:] return path_info[1:slash_loc] def copy_get(self): """ Makes a copy of the request, converting it to a GET. """ env = self.environ.copy() env.update({ 'REQUEST_METHOD': 'GET', 'CONTENT_LENGTH': '0', 'wsgi.input': StringIO(''), }) return Request(env) def call_application(self, application): """ Calls the application with this request's environment. Returns the status, headers, and app_iter for the response as a tuple. :param application: the WSGI application to call """ output = [] captured = [] def start_response(status, headers, exc_info=None): captured[:] = [status, headers, exc_info] return output.append app_iter = application(self.environ, start_response) if not app_iter: app_iter = output if not captured: app_iter = reiterate(app_iter) return (captured[0], captured[1], app_iter) def get_response(self, application): """ Calls the application with this request's environment. Returns a Response object that wraps up the application's result. :param application: the WSGI application to call """ status, headers, app_iter = self.call_application(application) return Response(status=status, headers=dict(headers), app_iter=app_iter, request=self) def split_path(self, minsegs=1, maxsegs=None, rest_with_last=False): """ Validate and split the Request's path. **Examples**:: ['a'] = split_path('/a') ['a', None] = split_path('/a', 1, 2) ['a', 'c'] = split_path('/a/c', 1, 2) ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True) :param minsegs: Minimum number of segments to be extracted :param maxsegs: Maximum number of segments to be extracted :param rest_with_last: If True, trailing data will be returned as part of last segment. If False, and there is trailing data, raises ValueError. :returns: list of segments with a length of maxsegs (non-existent segments will return as None) :raises: ValueError if given an invalid path """ return split_path( self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO'], minsegs, maxsegs, rest_with_last) def message_length(self): """ Properly determine the message length for this request. 
It will return an integer if the headers explicitly contain the message length, or None if the headers don't contain a length. The ValueError exception will be raised if the headers are invalid. :raises ValueError: if either transfer-encoding or content-length headers have bad values :raises AttributeError: if the last value of the transfer-encoding header is not "chunked" """ te = self.headers.get('transfer-encoding') if te: encodings = te.split(',') if len(encodings) > 1: raise AttributeError('Unsupported Transfer-Coding header' ' value specified in Transfer-Encoding' ' header') # If there are more than one transfer encoding value, the last # one must be chunked, see RFC 2616 Sec. 3.6 if encodings[-1].lower() == 'chunked': chunked = True else: raise ValueError('Invalid Transfer-Encoding header value') else: chunked = False if not chunked: # Because we are not using chunked transfer encoding we can pay # attention to the content-length header. fsize = self.headers.get('content-length', None) if fsize is not None: try: fsize = int(fsize) except ValueError: raise ValueError('Invalid Content-Length header value') else: fsize = None return fsize def content_range_header_value(start, stop, size): return 'bytes %s-%s/%s' % (start, (stop - 1), size) def content_range_header(start, stop, size): return "Content-Range: " + content_range_header_value(start, stop, size) def multi_range_iterator(ranges, content_type, boundary, size, sub_iter_gen): for start, stop in ranges: yield ''.join(['\r\n--', boundary, '\r\n', 'Content-Type: ', content_type, '\r\n']) yield content_range_header(start, stop, size) + '\r\n\r\n' sub_iter = sub_iter_gen(start, stop) for chunk in sub_iter: yield chunk yield '\r\n--' + boundary + '--\r\n' class Response(object): """ WSGI Response object. """ content_length = _header_int_property('content-length') content_type = _resp_content_type_property() content_range = _header_property('content-range') etag = _resp_etag_property() status = _resp_status_property() body = _resp_body_property() host_url = _host_url_property() last_modified = _datetime_property('last-modified') location = _header_property('location') accept_ranges = _header_property('accept-ranges') charset = _resp_charset_property() app_iter = _resp_app_iter_property() def __init__(self, body=None, status=200, headers=None, app_iter=None, request=None, conditional_response=False, **kw): self.headers = HeaderKeyDict( [('Content-Type', 'text/html; charset=UTF-8')]) self.conditional_response = conditional_response self.request = request self.body = body self.app_iter = app_iter self.status = status self.boundary = "%.32x" % random.randint(0, 256 ** 16) if request: self.environ = request.environ else: self.environ = {} if headers: if self._body and 'Content-Length' in headers: # If body is not empty, prioritize actual body length over # content_length in headers del headers['Content-Length'] self.headers.update(headers) if self.status_int == 401 and 'www-authenticate' not in self.headers: self.headers.update({'www-authenticate': self.www_authenticate()}) for key, value in kw.iteritems(): setattr(self, key, value) # When specifying both 'content_type' and 'charset' in the kwargs, # charset needs to be applied *after* content_type, otherwise charset # can get wiped out when content_type sorts later in dict order. if 'charset' in kw and 'content_type' in kw: self.charset = kw['charset'] def _prepare_for_ranges(self, ranges): """ Prepare the Response for multiple ranges. 
""" content_size = self.content_length content_type = self.content_type self.content_type = ''.join(['multipart/byteranges;', 'boundary=', self.boundary]) # This section calculate the total size of the targeted response # The value 12 is the length of total bytes of hyphen, new line # form feed for each section header. The value 8 is the length of # total bytes of hyphen, new line, form feed characters for the # closing boundary which appears only once section_header_fixed_len = 12 + (len(self.boundary) + len('Content-Type: ') + len(content_type) + len('Content-Range: bytes ')) body_size = 0 for start, end in ranges: body_size += section_header_fixed_len body_size += len(str(start) + '-' + str(end - 1) + '/' + str(content_size)) + (end - start) body_size += 8 + len(self.boundary) self.content_length = body_size self.content_range = None return content_size, content_type def _response_iter(self, app_iter, body): if self.conditional_response and self.request: if self.etag and self.request.if_none_match and \ self.etag in self.request.if_none_match: self.status = 304 self.content_length = 0 return [''] if self.etag and self.request.if_match and \ self.etag not in self.request.if_match: self.status = 412 self.content_length = 0 return [''] if self.status_int == 404 and self.request.if_match \ and '*' in self.request.if_match: # If none of the entity tags match, or if "*" is given and no # current entity exists, the server MUST NOT perform the # requested method, and MUST return a 412 (Precondition # Failed) response. [RFC 2616 section 14.24] self.status = 412 self.content_length = 0 return [''] if self.last_modified and self.request.if_modified_since \ and self.last_modified <= self.request.if_modified_since: self.status = 304 self.content_length = 0 return [''] if self.last_modified and self.request.if_unmodified_since \ and self.last_modified > self.request.if_unmodified_since: self.status = 412 self.content_length = 0 return [''] if self.request and self.request.method == 'HEAD': # We explicitly do NOT want to set self.content_length to 0 here return [''] if self.conditional_response and self.request and \ self.request.range and self.request.range.ranges and \ not self.content_range: ranges = self.request.range.ranges_for_length(self.content_length) if ranges == []: self.status = 416 self.content_length = 0 return [''] elif ranges: range_size = len(ranges) if range_size > 0: # There is at least one valid range in the request, so try # to satisfy the request if range_size == 1: start, end = ranges[0] if app_iter and hasattr(app_iter, 'app_iter_range'): self.status = 206 self.content_range = content_range_header_value( start, end, self.content_length) self.content_length = (end - start) return app_iter.app_iter_range(start, end) elif body: self.status = 206 self.content_range = content_range_header_value( start, end, self.content_length) self.content_length = (end - start) return [body[start:end]] elif range_size > 1: if app_iter and hasattr(app_iter, 'app_iter_ranges'): self.status = 206 content_size, content_type = \ self._prepare_for_ranges(ranges) return app_iter.app_iter_ranges(ranges, content_type, self.boundary, content_size) elif body: self.status = 206 content_size, content_type, = \ self._prepare_for_ranges(ranges) def _body_slicer(start, stop): yield body[start:stop] return multi_range_iterator(ranges, content_type, self.boundary, content_size, _body_slicer) if app_iter: return app_iter if body is not None: return [body] if self.status_int in RESPONSE_REASONS: title, exp = 
RESPONSE_REASONS[self.status_int] if exp: body = '<html><h1>%s</h1><p>%s</p></html>' % (title, exp) if '%(' in body: body = body % defaultdict(lambda: 'unknown', self.__dict__) self.content_length = len(body) return [body] return [''] def absolute_location(self): """ Attempt to construct an absolute location. """ if not self.location.startswith('/'): return self.location return self.host_url + self.location def www_authenticate(self): """ Construct a suitable value for WWW-Authenticate response header If we have a request and a valid-looking path, the realm is the account; otherwise we set it to 'unknown'. """ try: vrs, realm, rest = self.request.split_path(2, 3, True) if realm in ('v1.0', 'auth'): realm = 'unknown' except (AttributeError, ValueError): realm = 'unknown' return 'Swift realm="%s"' % urllib2.quote(realm) @property def is_success(self): return self.status_int // 100 == 2 def __call__(self, env, start_response): """ Respond to the WSGI request. .. warning:: This will translate any relative Location header value to an absolute URL using the WSGI environment's HOST_URL as a prefix, as RFC 2616 specifies. However, it is quite common to use relative redirects, especially when it is difficult to know the exact HOST_URL the browser would have used when behind several CNAMEs, CDN services, etc. All modern browsers support relative redirects. To skip over RFC enforcement of the Location header value, you may set ``env['swift.leave_relative_location'] = True`` in the WSGI environment. """ if not self.request: self.request = Request(env) self.environ = env app_iter = self._response_iter(self.app_iter, self._body) if 'location' in self.headers and \ not env.get('swift.leave_relative_location'): self.location = self.absolute_location() start_response(self.status, self.headers.items()) return app_iter class HTTPException(Response, Exception): def __init__(self, *args, **kwargs): Response.__init__(self, *args, **kwargs) Exception.__init__(self, self.status) def wsgify(func): """ A decorator for translating functions which take a swob Request object and return a Response object into WSGI callables. Also catches any raised HTTPExceptions and treats them as a returned Response. """ argspec = inspect.getargspec(func) if argspec.args and argspec.args[0] == 'self': @functools.wraps(func) def _wsgify_self(self, env, start_response): try: return func(self, Request(env))(env, start_response) except HTTPException as err_resp: return err_resp(env, start_response) return _wsgify_self else: @functools.wraps(func) def _wsgify_bare(env, start_response): try: return func(Request(env))(env, start_response) except HTTPException as err_resp: return err_resp(env, start_response) return _wsgify_bare class StatusMap(object): """ A dict-like object that returns HTTPException subclasses/factory functions where the given key is the status code. 
""" def __getitem__(self, key): return partial(HTTPException, status=key) status_map = StatusMap() HTTPOk = status_map[200] HTTPCreated = status_map[201] HTTPAccepted = status_map[202] HTTPNoContent = status_map[204] HTTPMovedPermanently = status_map[301] HTTPFound = status_map[302] HTTPSeeOther = status_map[303] HTTPNotModified = status_map[304] HTTPTemporaryRedirect = status_map[307] HTTPBadRequest = status_map[400] HTTPUnauthorized = status_map[401] HTTPForbidden = status_map[403] HTTPMethodNotAllowed = status_map[405] HTTPNotFound = status_map[404] HTTPNotAcceptable = status_map[406] HTTPRequestTimeout = status_map[408] HTTPConflict = status_map[409] HTTPLengthRequired = status_map[411] HTTPPreconditionFailed = status_map[412] HTTPRequestEntityTooLarge = status_map[413] HTTPRequestedRangeNotSatisfiable = status_map[416] HTTPUnprocessableEntity = status_map[422] HTTPClientDisconnect = status_map[499] HTTPServerError = status_map[500] HTTPInternalServerError = status_map[500] HTTPNotImplemented = status_map[501] HTTPBadGateway = status_map[502] HTTPServiceUnavailable = status_map[503] HTTPInsufficientStorage = status_map[507]
  avg_line_length: 37.606192
  max_line_length: 79
  alphanum_fraction: 0.593414
  content_no_comment:
from collections import defaultdict from cStringIO import StringIO import UserDict import time from functools import partial from datetime import datetime, timedelta, tzinfo from email.utils import parsedate import urlparse import urllib2 import re import random import functools import inspect from swift.common.utils import reiterate, split_path, Timestamp, pairs from swift.common.exceptions import InvalidTimestamp RESPONSE_REASONS = { 100: ('Continue', ''), 200: ('OK', ''), 201: ('Created', ''), 202: ('Accepted', 'The request is accepted for processing.'), 204: ('No Content', ''), 206: ('Partial Content', ''), 301: ('Moved Permanently', 'The resource has moved permanently.'), 302: ('Found', 'The resource has moved temporarily.'), 303: ('See Other', 'The response to the request can be found under a ' 'different URI.'), 304: ('Not Modified', ''), 307: ('Temporary Redirect', 'The resource has moved temporarily.'), 400: ('Bad Request', 'The server could not comply with the request since ' 'it is either malformed or otherwise incorrect.'), 401: ('Unauthorized', 'This server could not verify that you are ' 'authorized to access the document you requested.'), 402: ('Payment Required', 'Access was denied for financial reasons.'), 403: ('Forbidden', 'Access was denied to this resource.'), 404: ('Not Found', 'The resource could not be found.'), 405: ('Method Not Allowed', 'The method is not allowed for this ' 'resource.'), 406: ('Not Acceptable', 'The resource is not available in a format ' 'acceptable to your browser.'), 408: ('Request Timeout', 'The server has waited too long for the request ' 'to be sent by the client.'), 409: ('Conflict', 'There was a conflict when trying to complete ' 'your request.'), 410: ('Gone', 'This resource is no longer available.'), 411: ('Length Required', 'Content-Length header required.'), 412: ('Precondition Failed', 'A precondition for this request was not ' 'met.'), 413: ('Request Entity Too Large', 'The body of your request was too ' 'large for this server.'), 414: ('Request URI Too Long', 'The request URI was too long for this ' 'server.'), 415: ('Unsupported Media Type', 'The request media type is not ' 'supported by this server.'), 416: ('Requested Range Not Satisfiable', 'The Range requested is not ' 'available.'), 417: ('Expectation Failed', 'Expectation failed.'), 422: ('Unprocessable Entity', 'Unable to process the contained ' 'instructions'), 499: ('Client Disconnect', 'The client was disconnected during request.'), 500: ('Internal Error', 'The server has either erred or is incapable of ' 'performing the requested operation.'), 501: ('Not Implemented', 'The requested method is not implemented by ' 'this server.'), 502: ('Bad Gateway', 'Bad gateway.'), 503: ('Service Unavailable', 'The server is currently unavailable. ' 'Please try again at a later time.'), 504: ('Gateway Timeout', 'A timeout has occurred speaking to a ' 'backend server.'), 507: ('Insufficient Storage', 'There was not enough space to save the ' 'resource. 
Drive: %(drive)s'), } MAX_RANGE_OVERLAPS = 2 MAX_NONASCENDING_RANGES = 8 MAX_RANGES = 50 class _UTC(tzinfo): def dst(self, dt): return timedelta(0) utcoffset = dst def tzname(self, dt): return 'UTC' UTC = _UTC() def _datetime_property(header): def getter(self): value = self.headers.get(header, None) if value is not None: try: parts = parsedate(self.headers[header])[:7] return datetime(*(parts + (UTC,))) except Exception: return None def setter(self, value): if isinstance(value, (float, int, long)): self.headers[header] = time.strftime( "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(value)) elif isinstance(value, datetime): self.headers[header] = value.strftime("%a, %d %b %Y %H:%M:%S GMT") else: self.headers[header] = value return property(getter, setter, doc=("Retrieve and set the %s header as a datetime, " "set it with a datetime, int, or str") % header) def _header_property(header): def getter(self): return self.headers.get(header, None) def setter(self, value): self.headers[header] = value return property(getter, setter, doc="Retrieve and set the %s header" % header) def _header_int_property(header): def getter(self): val = self.headers.get(header, None) if val is not None: val = int(val) return val def setter(self, value): self.headers[header] = value return property(getter, setter, doc="Retrieve and set the %s header as an int" % header) class HeaderEnvironProxy(UserDict.DictMixin): def __init__(self, environ): self.environ = environ def _normalize(self, key): key = 'HTTP_' + key.replace('-', '_').upper() if key == 'HTTP_CONTENT_LENGTH': return 'CONTENT_LENGTH' if key == 'HTTP_CONTENT_TYPE': return 'CONTENT_TYPE' return key def __getitem__(self, key): return self.environ[self._normalize(key)] def __setitem__(self, key, value): if value is None: self.environ.pop(self._normalize(key), None) elif isinstance(value, unicode): self.environ[self._normalize(key)] = value.encode('utf-8') else: self.environ[self._normalize(key)] = str(value) def __contains__(self, key): return self._normalize(key) in self.environ def __delitem__(self, key): del self.environ[self._normalize(key)] def keys(self): keys = [key[5:].replace('_', '-').title() for key in self.environ if key.startswith('HTTP_')] if 'CONTENT_LENGTH' in self.environ: keys.append('Content-Length') if 'CONTENT_TYPE' in self.environ: keys.append('Content-Type') return keys class HeaderKeyDict(dict): def __init__(self, base_headers=None, **kwargs): if base_headers: self.update(base_headers) self.update(kwargs) def update(self, other): if hasattr(other, 'keys'): for key in other.keys(): self[key.title()] = other[key] else: for key, value in other: self[key.title()] = value def __getitem__(self, key): return dict.get(self, key.title()) def __setitem__(self, key, value): if value is None: self.pop(key.title(), None) elif isinstance(value, unicode): return dict.__setitem__(self, key.title(), value.encode('utf-8')) else: return dict.__setitem__(self, key.title(), str(value)) def __contains__(self, key): return dict.__contains__(self, key.title()) def __delitem__(self, key): return dict.__delitem__(self, key.title()) def get(self, key, default=None): return dict.get(self, key.title(), default) def setdefault(self, key, value=None): if key not in self: self[key] = value return self[key] def pop(self, key, default=None): return dict.pop(self, key.title(), default) def _resp_status_property(): def getter(self): return '%s %s' % (self.status_int, self.title) def setter(self, value): if isinstance(value, (int, long)): self.status_int = value self.explanation 
= self.title = RESPONSE_REASONS[value][0] else: if isinstance(value, unicode): value = value.encode('utf-8') self.status_int = int(value.split(' ', 1)[0]) self.explanation = self.title = value.split(' ', 1)[1] return property(getter, setter, doc="Retrieve and set the Response status, e.g. '200 OK'") def _resp_body_property(): def getter(self): if not self._body: if not self._app_iter: return '' self._body = ''.join(self._app_iter) self._app_iter = None return self._body def setter(self, value): if isinstance(value, unicode): value = value.encode('utf-8') if isinstance(value, str): self.content_length = len(value) self._app_iter = None self._body = value return property(getter, setter, doc="Retrieve and set the Response body str") def _resp_etag_property(): def getter(self): etag = self.headers.get('etag', None) if etag: etag = etag.replace('"', '') return etag def setter(self, value): if value is None: self.headers['etag'] = None else: self.headers['etag'] = '"%s"' % value return property(getter, setter, doc="Retrieve and set the response Etag header") def _resp_content_type_property(): def getter(self): if 'content-type' in self.headers: return self.headers.get('content-type').split(';')[0] def setter(self, value): self.headers['content-type'] = value return property(getter, setter, doc="Retrieve and set the response Content-Type header") def _resp_charset_property(): def getter(self): if '; charset=' in self.headers['content-type']: return self.headers['content-type'].split('; charset=')[1] def setter(self, value): if 'content-type' in self.headers: self.headers['content-type'] = self.headers['content-type'].split( ';')[0] if value: self.headers['content-type'] += '; charset=' + value return property(getter, setter, doc="Retrieve and set the response charset") def _resp_app_iter_property(): def getter(self): return self._app_iter def setter(self, value): if isinstance(value, (list, tuple)): self.content_length = sum(map(len, value)) elif value is not None: self.content_length = None self._body = None self._app_iter = value return property(getter, setter, doc="Retrieve and set the response app_iter") def _req_fancy_property(cls, header, even_if_nonexistent=False): def getter(self): try: if header in self.headers or even_if_nonexistent: return cls(self.headers.get(header)) except ValueError: return None def setter(self, value): self.headers[header] = value return property(getter, setter, doc=("Retrieve and set the %s " "property in the WSGI environ, as a %s object") % (header, cls.__name__)) class Range(object): def __init__(self, headerval): headerval = headerval.replace(' ', '') if not headerval.lower().startswith('bytes='): raise ValueError('Invalid Range header: %s' % headerval) self.ranges = [] for rng in headerval[6:].split(','): # Check if the range has required hyphen. 
if rng.find('-') == -1: raise ValueError('Invalid Range header: %s' % headerval) start, end = rng.split('-', 1) if start: # when start contains non numeric value, this also causes # ValueError start = int(start) else: start = None if end: # when end contains non numeric value, this also causes # ValueError end = int(end) if start is not None and end < start: raise ValueError('Invalid Range header: %s' % headerval) else: end = None if start is None: raise ValueError('Invalid Range header: %s' % headerval) self.ranges.append((start, end)) def __str__(self): string = 'bytes=' for start, end in self.ranges: if start is not None: string += str(start) string += '-' if end is not None: string += str(end) string += ',' return string.rstrip(',') def ranges_for_length(self, length): # not syntactically valid ranges, must ignore if length is None or not self.ranges or self.ranges == []: return None all_ranges = [] for single_range in self.ranges: begin, end = single_range # The possible values for begin and end are # None, 0, or a positive numeric number if begin is None: if end == 0: # this is the bytes=-0 case continue elif end > length: # This is the case where the end is greater than the # content length, as the RFC 2616 stated, the entire # content should be returned. all_ranges.append((0, length)) else: all_ranges.append((length - end, length)) continue # begin can only be 0 and numeric value from this point on if end is None: if begin < length: all_ranges.append((begin, length)) else: # the begin position is greater than or equal to the # content length; skip and move on to the next range continue # end can only be 0 or numeric value elif begin < length: # the begin position is valid, take the min of end + 1 or # the total length of the content all_ranges.append((begin, min(end + 1, length))) # RFC 7233 section 6.1 ("Denial-of-Service Attacks Using Range") says: # # Unconstrained multiple range requests are susceptible to denial-of- # service attacks because the effort required to request many # overlapping ranges of the same data is tiny compared to the time, # memory, and bandwidth consumed by attempting to serve the requested # data in many parts. Servers ought to ignore, coalesce, or reject # egregious range requests, such as requests for more than two # overlapping ranges or for many small ranges in a single set, # particularly when the ranges are requested out of order for no # apparent reason. Multipart range requests are not designed to # support random access. 
# # We're defining "egregious" here as: # # * more than 100 requested ranges OR # * more than 2 overlapping ranges OR # * more than 8 non-ascending-order ranges if len(all_ranges) > MAX_RANGES: return [] overlaps = 0 for ((start1, end1), (start2, end2)) in pairs(all_ranges): if ((start1 < start2 < end1) or (start1 < end2 < end1) or (start2 < start1 < end2) or (start2 < end1 < end2)): overlaps += 1 if overlaps > MAX_RANGE_OVERLAPS: return [] ascending = True for start1, start2 in zip(all_ranges, all_ranges[1:]): if start1 > start2: ascending = False break if not ascending and len(all_ranges) >= MAX_NONASCENDING_RANGES: return [] return all_ranges class Match(object): def __init__(self, headerval): self.tags = set() for tag in headerval.split(', '): if tag.startswith('"') and tag.endswith('"'): self.tags.add(tag[1:-1]) else: self.tags.add(tag) def __contains__(self, val): return '*' in self.tags or val in self.tags class Accept(object): # RFC 2616 section 2.2 token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+' qdtext = r'[^"]' quoted_pair = r'(?:\\.)' quoted_string = r'"(?:' + qdtext + r'|' + quoted_pair + r')*"' extension = (r'(?:\s*;\s*(?:' + token + r")\s*=\s*" + r'(?:' + token + r'|' + quoted_string + r'))') acc = (r'^\s*(' + token + r')/(' + token + r')(' + extension + r'*?\s*)$') acc_pattern = re.compile(acc) def __init__(self, headerval): self.headerval = headerval def _get_types(self): types = [] if not self.headerval: return [] for typ in self.headerval.split(','): type_parms = self.acc_pattern.findall(typ) if not type_parms: raise ValueError('Invalid accept header') typ, subtype, parms = type_parms[0] parms = [p.strip() for p in parms.split(';') if p.strip()] seen_q_already = False quality = 1.0 for parm in parms: name, value = parm.split('=') name = name.strip() value = value.strip() if name == 'q': if seen_q_already: raise ValueError('Multiple "q" params') seen_q_already = True quality = float(value) pattern = '^' + \ (self.token if typ == '*' else re.escape(typ)) + '/' + \ (self.token if subtype == '*' else re.escape(subtype)) + '$' types.append((pattern, quality, '*' not in (typ, subtype))) # sort candidates by quality, then whether or not there were globs types.sort(reverse=True, key=lambda t: (t[1], t[2])) return [t[0] for t in types] def best_match(self, options): try: types = self._get_types() except ValueError: return None if not types and options: return options[0] for pattern in types: for option in options: if re.match(pattern, option): return option return None def __repr__(self): return self.headerval def _req_environ_property(environ_field): def getter(self): return self.environ.get(environ_field, None) def setter(self, value): if isinstance(value, unicode): self.environ[environ_field] = value.encode('utf-8') else: self.environ[environ_field] = value return property(getter, setter, doc=("Get and set the %s property " "in the WSGI environment") % environ_field) def _req_body_property(): def getter(self): body = self.environ['wsgi.input'].read() self.environ['wsgi.input'] = StringIO(body) return body def setter(self, value): self.environ['wsgi.input'] = StringIO(value) self.environ['CONTENT_LENGTH'] = str(len(value)) return property(getter, setter, doc="Get and set the request body str") def _host_url_property(): def getter(self): if 'HTTP_HOST' in self.environ: host = self.environ['HTTP_HOST'] else: host = '%s:%s' % (self.environ['SERVER_NAME'], self.environ['SERVER_PORT']) scheme = self.environ.get('wsgi.url_scheme', 'http') if scheme == 'http' and host.endswith(':80'): 
host, port = host.rsplit(':', 1) elif scheme == 'https' and host.endswith(':443'): host, port = host.rsplit(':', 1) return '%s://%s' % (scheme, host) return property(getter, doc="Get url for request/response up to path") class Request(object): range = _req_fancy_property(Range, 'range') if_none_match = _req_fancy_property(Match, 'if-none-match') accept = _req_fancy_property(Accept, 'accept', True) method = _req_environ_property('REQUEST_METHOD') referrer = referer = _req_environ_property('HTTP_REFERER') script_name = _req_environ_property('SCRIPT_NAME') path_info = _req_environ_property('PATH_INFO') host = _req_environ_property('HTTP_HOST') host_url = _host_url_property() remote_addr = _req_environ_property('REMOTE_ADDR') remote_user = _req_environ_property('REMOTE_USER') user_agent = _req_environ_property('HTTP_USER_AGENT') query_string = _req_environ_property('QUERY_STRING') if_match = _req_fancy_property(Match, 'if-match') body_file = _req_environ_property('wsgi.input') content_length = _header_int_property('content-length') if_modified_since = _datetime_property('if-modified-since') if_unmodified_since = _datetime_property('if-unmodified-since') body = _req_body_property() charset = None _params_cache = None _timestamp = None acl = _req_environ_property('swob.ACL') def __init__(self, environ): self.environ = environ self.headers = HeaderEnvironProxy(self.environ) @classmethod def blank(cls, path, environ=None, headers=None, body=None, **kwargs): headers = headers or {} environ = environ or {} if isinstance(path, unicode): path = path.encode('utf-8') parsed_path = urlparse.urlparse(path) server_name = 'localhost' if parsed_path.netloc: server_name = parsed_path.netloc.split(':', 1)[0] server_port = parsed_path.port if server_port is None: server_port = {'http': 80, 'https': 443}.get(parsed_path.scheme, 80) if parsed_path.scheme and parsed_path.scheme not in ['http', 'https']: raise TypeError('Invalid scheme: %s' % parsed_path.scheme) env = { 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'QUERY_STRING': parsed_path.query, 'PATH_INFO': urllib2.unquote(parsed_path.path), 'SERVER_NAME': server_name, 'SERVER_PORT': str(server_port), 'HTTP_HOST': '%s:%d' % (server_name, server_port), 'SERVER_PROTOCOL': 'HTTP/1.0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': parsed_path.scheme or 'http', 'wsgi.errors': StringIO(''), 'wsgi.multithread': False, 'wsgi.multiprocess': False } env.update(environ) if body is not None: env['wsgi.input'] = StringIO(body) env['CONTENT_LENGTH'] = str(len(body)) elif 'wsgi.input' not in env: env['wsgi.input'] = StringIO('') req = Request(env) for key, val in headers.iteritems(): req.headers[key] = val for key, val in kwargs.items(): prop = getattr(Request, key, None) if prop and isinstance(prop, property): try: setattr(req, key, val) except AttributeError: pass else: continue raise TypeError("got unexpected keyword argument %r" % key) return req @property def params(self): if self._params_cache is None: if 'QUERY_STRING' in self.environ: self._params_cache = dict( urlparse.parse_qsl(self.environ['QUERY_STRING'], True)) else: self._params_cache = {} return self._params_cache str_params = params @property def timestamp(self): if self._timestamp is None: try: raw_timestamp = self.environ['HTTP_X_TIMESTAMP'] except KeyError: raise InvalidTimestamp('Missing X-Timestamp header') try: self._timestamp = Timestamp(raw_timestamp) except ValueError: raise InvalidTimestamp('Invalid X-Timestamp header') return self._timestamp @property def path_qs(self): path = self.path if 
self.query_string: path += '?' + self.query_string return path @property def path(self): return urllib2.quote(self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO']) @property def swift_entity_path(self): _ver, entity_path = self.split_path(1, 2, rest_with_last=True) if entity_path is not None: return '/' + entity_path @property def url(self): return self.host_url + self.path_qs def as_referer(self): return self.method + ' ' + self.url def path_info_pop(self): path_info = self.path_info if not path_info or path_info[0] != '/': return None try: slash_loc = path_info.index('/', 1) except ValueError: slash_loc = len(path_info) self.script_name += path_info[:slash_loc] self.path_info = path_info[slash_loc:] return path_info[1:slash_loc] def copy_get(self): env = self.environ.copy() env.update({ 'REQUEST_METHOD': 'GET', 'CONTENT_LENGTH': '0', 'wsgi.input': StringIO(''), }) return Request(env) def call_application(self, application): output = [] captured = [] def start_response(status, headers, exc_info=None): captured[:] = [status, headers, exc_info] return output.append app_iter = application(self.environ, start_response) if not app_iter: app_iter = output if not captured: app_iter = reiterate(app_iter) return (captured[0], captured[1], app_iter) def get_response(self, application): status, headers, app_iter = self.call_application(application) return Response(status=status, headers=dict(headers), app_iter=app_iter, request=self) def split_path(self, minsegs=1, maxsegs=None, rest_with_last=False): return split_path( self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO'], minsegs, maxsegs, rest_with_last) def message_length(self): te = self.headers.get('transfer-encoding') if te: encodings = te.split(',') if len(encodings) > 1: raise AttributeError('Unsupported Transfer-Coding header' ' value specified in Transfer-Encoding' ' header') # If there are more than one transfer encoding value, the last # one must be chunked, see RFC 2616 Sec. 3.6 if encodings[-1].lower() == 'chunked': chunked = True else: raise ValueError('Invalid Transfer-Encoding header value') else: chunked = False if not chunked: # Because we are not using chunked transfer encoding we can pay # attention to the content-length header. 
fsize = self.headers.get('content-length', None) if fsize is not None: try: fsize = int(fsize) except ValueError: raise ValueError('Invalid Content-Length header value') else: fsize = None return fsize def content_range_header_value(start, stop, size): return 'bytes %s-%s/%s' % (start, (stop - 1), size) def content_range_header(start, stop, size): return "Content-Range: " + content_range_header_value(start, stop, size) def multi_range_iterator(ranges, content_type, boundary, size, sub_iter_gen): for start, stop in ranges: yield ''.join(['\r\n--', boundary, '\r\n', 'Content-Type: ', content_type, '\r\n']) yield content_range_header(start, stop, size) + '\r\n\r\n' sub_iter = sub_iter_gen(start, stop) for chunk in sub_iter: yield chunk yield '\r\n--' + boundary + '--\r\n' class Response(object): content_length = _header_int_property('content-length') content_type = _resp_content_type_property() content_range = _header_property('content-range') etag = _resp_etag_property() status = _resp_status_property() body = _resp_body_property() host_url = _host_url_property() last_modified = _datetime_property('last-modified') location = _header_property('location') accept_ranges = _header_property('accept-ranges') charset = _resp_charset_property() app_iter = _resp_app_iter_property() def __init__(self, body=None, status=200, headers=None, app_iter=None, request=None, conditional_response=False, **kw): self.headers = HeaderKeyDict( [('Content-Type', 'text/html; charset=UTF-8')]) self.conditional_response = conditional_response self.request = request self.body = body self.app_iter = app_iter self.status = status self.boundary = "%.32x" % random.randint(0, 256 ** 16) if request: self.environ = request.environ else: self.environ = {} if headers: if self._body and 'Content-Length' in headers: # If body is not empty, prioritize actual body length over # content_length in headers del headers['Content-Length'] self.headers.update(headers) if self.status_int == 401 and 'www-authenticate' not in self.headers: self.headers.update({'www-authenticate': self.www_authenticate()}) for key, value in kw.iteritems(): setattr(self, key, value) # When specifying both 'content_type' and 'charset' in the kwargs, # charset needs to be applied *after* content_type, otherwise charset # can get wiped out when content_type sorts later in dict order. if 'charset' in kw and 'content_type' in kw: self.charset = kw['charset'] def _prepare_for_ranges(self, ranges): content_size = self.content_length content_type = self.content_type self.content_type = ''.join(['multipart/byteranges;', 'boundary=', self.boundary]) # This section calculate the total size of the targeted response # The value 12 is the length of total bytes of hyphen, new line # form feed for each section header. 
The value 8 is the length of # total bytes of hyphen, new line, form feed characters for the # closing boundary which appears only once section_header_fixed_len = 12 + (len(self.boundary) + len('Content-Type: ') + len(content_type) + len('Content-Range: bytes ')) body_size = 0 for start, end in ranges: body_size += section_header_fixed_len body_size += len(str(start) + '-' + str(end - 1) + '/' + str(content_size)) + (end - start) body_size += 8 + len(self.boundary) self.content_length = body_size self.content_range = None return content_size, content_type def _response_iter(self, app_iter, body): if self.conditional_response and self.request: if self.etag and self.request.if_none_match and \ self.etag in self.request.if_none_match: self.status = 304 self.content_length = 0 return [''] if self.etag and self.request.if_match and \ self.etag not in self.request.if_match: self.status = 412 self.content_length = 0 return [''] if self.status_int == 404 and self.request.if_match \ and '*' in self.request.if_match: # If none of the entity tags match, or if "*" is given and no # current entity exists, the server MUST NOT perform the # requested method, and MUST return a 412 (Precondition # Failed) response. [RFC 2616 section 14.24] self.status = 412 self.content_length = 0 return [''] if self.last_modified and self.request.if_modified_since \ and self.last_modified <= self.request.if_modified_since: self.status = 304 self.content_length = 0 return [''] if self.last_modified and self.request.if_unmodified_since \ and self.last_modified > self.request.if_unmodified_since: self.status = 412 self.content_length = 0 return [''] if self.request and self.request.method == 'HEAD': # We explicitly do NOT want to set self.content_length to 0 here return [''] if self.conditional_response and self.request and \ self.request.range and self.request.range.ranges and \ not self.content_range: ranges = self.request.range.ranges_for_length(self.content_length) if ranges == []: self.status = 416 self.content_length = 0 return [''] elif ranges: range_size = len(ranges) if range_size > 0: # There is at least one valid range in the request, so try # to satisfy the request if range_size == 1: start, end = ranges[0] if app_iter and hasattr(app_iter, 'app_iter_range'): self.status = 206 self.content_range = content_range_header_value( start, end, self.content_length) self.content_length = (end - start) return app_iter.app_iter_range(start, end) elif body: self.status = 206 self.content_range = content_range_header_value( start, end, self.content_length) self.content_length = (end - start) return [body[start:end]] elif range_size > 1: if app_iter and hasattr(app_iter, 'app_iter_ranges'): self.status = 206 content_size, content_type = \ self._prepare_for_ranges(ranges) return app_iter.app_iter_ranges(ranges, content_type, self.boundary, content_size) elif body: self.status = 206 content_size, content_type, = \ self._prepare_for_ranges(ranges) def _body_slicer(start, stop): yield body[start:stop] return multi_range_iterator(ranges, content_type, self.boundary, content_size, _body_slicer) if app_iter: return app_iter if body is not None: return [body] if self.status_int in RESPONSE_REASONS: title, exp = RESPONSE_REASONS[self.status_int] if exp: body = '<html><h1>%s</h1><p>%s</p></html>' % (title, exp) if '%(' in body: body = body % defaultdict(lambda: 'unknown', self.__dict__) self.content_length = len(body) return [body] return [''] def absolute_location(self): if not self.location.startswith('/'): return self.location return 
self.host_url + self.location def www_authenticate(self): try: vrs, realm, rest = self.request.split_path(2, 3, True) if realm in ('v1.0', 'auth'): realm = 'unknown' except (AttributeError, ValueError): realm = 'unknown' return 'Swift realm="%s"' % urllib2.quote(realm) @property def is_success(self): return self.status_int // 100 == 2 def __call__(self, env, start_response): if not self.request: self.request = Request(env) self.environ = env app_iter = self._response_iter(self.app_iter, self._body) if 'location' in self.headers and \ not env.get('swift.leave_relative_location'): self.location = self.absolute_location() start_response(self.status, self.headers.items()) return app_iter class HTTPException(Response, Exception): def __init__(self, *args, **kwargs): Response.__init__(self, *args, **kwargs) Exception.__init__(self, self.status) def wsgify(func): argspec = inspect.getargspec(func) if argspec.args and argspec.args[0] == 'self': @functools.wraps(func) def _wsgify_self(self, env, start_response): try: return func(self, Request(env))(env, start_response) except HTTPException as err_resp: return err_resp(env, start_response) return _wsgify_self else: @functools.wraps(func) def _wsgify_bare(env, start_response): try: return func(Request(env))(env, start_response) except HTTPException as err_resp: return err_resp(env, start_response) return _wsgify_bare class StatusMap(object): def __getitem__(self, key): return partial(HTTPException, status=key) status_map = StatusMap() HTTPOk = status_map[200] HTTPCreated = status_map[201] HTTPAccepted = status_map[202] HTTPNoContent = status_map[204] HTTPMovedPermanently = status_map[301] HTTPFound = status_map[302] HTTPSeeOther = status_map[303] HTTPNotModified = status_map[304] HTTPTemporaryRedirect = status_map[307] HTTPBadRequest = status_map[400] HTTPUnauthorized = status_map[401] HTTPForbidden = status_map[403] HTTPMethodNotAllowed = status_map[405] HTTPNotFound = status_map[404] HTTPNotAcceptable = status_map[406] HTTPRequestTimeout = status_map[408] HTTPConflict = status_map[409] HTTPLengthRequired = status_map[411] HTTPPreconditionFailed = status_map[412] HTTPRequestEntityTooLarge = status_map[413] HTTPRequestedRangeNotSatisfiable = status_map[416] HTTPUnprocessableEntity = status_map[422] HTTPClientDisconnect = status_map[499] HTTPServerError = status_map[500] HTTPInternalServerError = status_map[500] HTTPNotImplemented = status_map[501] HTTPBadGateway = status_map[502] HTTPServiceUnavailable = status_map[503] HTTPInsufficientStorage = status_map[507]
true
true
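A quick illustration of how the Range helper defined in the file above behaves. This is a sketch, not code from the repository: it assumes the complete module is saved locally as swob.py with its imports resolvable (it is Python 2 era code), and the header strings below are made up.

# Sketch only; assumes the full module above is importable as swob (Python 2 era code).
from swob import Range

rng = Range('bytes=0-99,200-')          # two requested byte ranges
print(rng.ranges_for_length(150))       # [(0, 100)]  (the second range starts past the end and is dropped)
print(str(rng))                         # 'bytes=0-99,200-'

suffix = Range('bytes=-50')             # "last 50 bytes" suffix range
print(suffix.ranges_for_length(200))    # [(150, 200)]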
1c4331b3340c76fc2bb61a1cbefe73cd7b1a46bd
1,663
py
Python
dvc/render/image_converter.py
itcarroll/dvc
55219e9089005ac15d668ecf735aeaf31a771d0b
[ "Apache-2.0" ]
null
null
null
dvc/render/image_converter.py
itcarroll/dvc
55219e9089005ac15d668ecf735aeaf31a771d0b
[ "Apache-2.0" ]
41
2021-11-16T15:38:50.000Z
2022-03-30T10:32:14.000Z
dvc/render/image_converter.py
jhhuh/dvc
fecc81e951efeaa8130264f726c27e92876422ae
[ "Apache-2.0" ]
null
null
null
import base64 import os from typing import TYPE_CHECKING, Dict, List, Optional, Tuple from dvc.render import FILENAME_FIELD, REVISION_FIELD, SRC_FIELD if TYPE_CHECKING: from dvc.types import StrPath class ImageConverter: def __init__(self, plot_properties: Optional[Dict] = None): self.plot_properties = plot_properties or {} @staticmethod def _write_image( path: "StrPath", revision: str, filename: str, image_data: bytes, ) -> "StrPath": img_path = os.path.join( path, f"{revision}_{filename.replace(os.sep, '_')}" ) with open(img_path, "wb") as fd: fd.write(image_data) return img_path @staticmethod def _encode_image( image_data: bytes, ) -> str: base64_str = base64.b64encode(image_data).decode() return f"data:image;base64,{base64_str}" def convert( self, data: bytes, revision, filename ) -> Tuple[List[Dict], Dict]: """ Convert the DVC Plots content to DVC Render datapoints. Return both generated datapoints and updated properties. """ path = self.plot_properties.get("out") if path: if not os.path.isdir(path): os.makedirs(path, exist_ok=True) src = self._write_image( os.path.abspath(path), revision, filename, data ) else: src = self._encode_image(data) datapoint = { REVISION_FIELD: revision, FILENAME_FIELD: filename, SRC_FIELD: src, } return [datapoint], self.plot_properties
28.186441
64
0.594708
import base64 import os from typing import TYPE_CHECKING, Dict, List, Optional, Tuple from dvc.render import FILENAME_FIELD, REVISION_FIELD, SRC_FIELD if TYPE_CHECKING: from dvc.types import StrPath class ImageConverter: def __init__(self, plot_properties: Optional[Dict] = None): self.plot_properties = plot_properties or {} @staticmethod def _write_image( path: "StrPath", revision: str, filename: str, image_data: bytes, ) -> "StrPath": img_path = os.path.join( path, f"{revision}_{filename.replace(os.sep, '_')}" ) with open(img_path, "wb") as fd: fd.write(image_data) return img_path @staticmethod def _encode_image( image_data: bytes, ) -> str: base64_str = base64.b64encode(image_data).decode() return f"data:image;base64,{base64_str}" def convert( self, data: bytes, revision, filename ) -> Tuple[List[Dict], Dict]: path = self.plot_properties.get("out") if path: if not os.path.isdir(path): os.makedirs(path, exist_ok=True) src = self._write_image( os.path.abspath(path), revision, filename, data ) else: src = self._encode_image(data) datapoint = { REVISION_FIELD: revision, FILENAME_FIELD: filename, SRC_FIELD: src, } return [datapoint], self.plot_properties
true
true
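A brief usage sketch for ImageConverter, not taken from the dvc repository: it assumes a dvc source tree matching this record is importable, and the image file name, output directory and revision label are placeholders.

# Sketch only; assumes a dvc checkout matching this record is on the import path.
from dvc.render.image_converter import ImageConverter

with open('plot.png', 'rb') as fh:      # placeholder image file
    png_bytes = fh.read()

# With an 'out' property the image is written to disk and the datapoint's src
# field holds the file path; without it, src holds a base64 data URI instead.
converter = ImageConverter({'out': 'rendered_plots'})
datapoints, props = converter.convert(png_bytes, 'HEAD', 'plot.png')
print(datapoints[0], props)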
1c4331f767c85c11f71d344660c6f4ec8a44f48d
636
py
Python
zeitsprung/base.py
munterfinger/zeitsprung
8b1a539069cd0d5508b5ce419fc2ba8c26aaecf2
[ "MIT" ]
null
null
null
zeitsprung/base.py
munterfinger/zeitsprung
8b1a539069cd0d5508b5ce419fc2ba8c26aaecf2
[ "MIT" ]
109
2020-10-03T16:41:30.000Z
2021-09-16T21:03:34.000Z
zeitsprung/base.py
munterfinger/zeitsprung
8b1a539069cd0d5508b5ce419fc2ba8c26aaecf2
[ "MIT" ]
null
null
null
from datetime import datetime, timezone class Base: """Base class for 'zeitsprung-fm' package classes.""" def __init__(self, verbose: bool = True) -> None: """ Class constructor for the Base class. Parameters ---------- verbose : bool, default True Print messages about the activities conducted by a class instance. Returns ------- None """ self.verbose = verbose def _print(self, message: str) -> None: if self.verbose: print(f'{datetime.now(timezone.utc).replace(microsecond=0).isoformat()} {message}')
24.461538
95
0.575472
from datetime import datetime, timezone class Base: def __init__(self, verbose: bool = True) -> None: self.verbose = verbose def _print(self, message: str) -> None: if self.verbose: print(f'{datetime.now(timezone.utc).replace(microsecond=0).isoformat()} {message}')
true
true
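Base above is a small verbosity helper meant to be subclassed; the sketch below shows the intended pattern. It assumes the zeitsprung package from this record is installed; the Downloader subclass is made up.

# Sketch only; assumes the zeitsprung package from this record is importable.
from zeitsprung.base import Base

class Downloader(Base):                 # made-up subclass for illustration
    def run(self):
        # _print prefixes the message with a UTC ISO-8601 timestamp and honours the verbose flag
        self._print('fetching episode list')

Downloader(verbose=True).run()          # prints '<timestamp> fetching episode list'
Downloader(verbose=False).run()         # prints nothing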
1c43340b8e297014f5f5b70a00221b88898d6631
835
py
Python
predict_orbit_BCBF.py
janismac/ksp_rtls_launch_to_rendezvous
195ebfb5aacf1a857aaaf0a69bf071d93d887efd
[ "Apache-2.0" ]
1
2020-11-07T15:53:19.000Z
2020-11-07T15:53:19.000Z
predict_orbit_BCBF.py
janismac/ksp_rtls_launch_to_rendezvous
195ebfb5aacf1a857aaaf0a69bf071d93d887efd
[ "Apache-2.0" ]
null
null
null
predict_orbit_BCBF.py
janismac/ksp_rtls_launch_to_rendezvous
195ebfb5aacf1a857aaaf0a69bf071d93d887efd
[ "Apache-2.0" ]
1
2020-11-07T15:56:06.000Z
2020-11-07T15:56:06.000Z
import numpy as np import scipy.integrate def predict_orbit_BCBF(vessel, frame): r0 = vessel.position(frame) v0 = vessel.velocity(frame) omega = vessel.orbit.body.rotational_speed mu = vessel.orbit.body.gravitational_parameter y0 = list(r0)+list(v0) t_grid = np.arange(0.0,60.0*20,5.0) result = scipy.integrate.odeint(lambda y,t: vacuum_coast_BCBF_ODE(y,omega,mu), y0, t_grid, atol=1e-5, rtol=1e-5) # return vessel position for the next 10 to 20 minutes return result[t_grid >= 600.0] def vacuum_coast_BCBF_ODE(y,omega,mu): r = y[0:3] v = y[3:6] w = np.array([0.0,-omega,0.0]) a_gravity = -r * mu * np.dot(r,r)**(-1.5) a_coriolis = -2 * np.cross(w,v) a_centrifugal = -np.cross(w,np.cross(w,r)) a = a_gravity + a_coriolis + a_centrifugal return np.hstack((v,a))
29.821429
116
0.657485
import numpy as np import scipy.integrate def predict_orbit_BCBF(vessel, frame): r0 = vessel.position(frame) v0 = vessel.velocity(frame) omega = vessel.orbit.body.rotational_speed mu = vessel.orbit.body.gravitational_parameter y0 = list(r0)+list(v0) t_grid = np.arange(0.0,60.0*20,5.0) result = scipy.integrate.odeint(lambda y,t: vacuum_coast_BCBF_ODE(y,omega,mu), y0, t_grid, atol=1e-5, rtol=1e-5) return result[t_grid >= 600.0] def vacuum_coast_BCBF_ODE(y,omega,mu): r = y[0:3] v = y[3:6] w = np.array([0.0,-omega,0.0]) a_gravity = -r * mu * np.dot(r,r)**(-1.5) a_coriolis = -2 * np.cross(w,v) a_centrifugal = -np.cross(w,np.cross(w,r)) a = a_gravity + a_coriolis + a_centrifugal return np.hstack((v,a))
true
true
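predict_orbit_BCBF() needs a live kRPC vessel, but the right-hand-side function is plain NumPy, so it can be exercised on its own. The sketch below assumes the file is importable as predict_orbit_BCBF; the gravitational parameter, rotation rate and initial state are rough Kerbin-like numbers used only for illustration.

# Sketch only; assumes predict_orbit_BCBF.py from this record is on the import path.
import numpy as np
import scipy.integrate
from predict_orbit_BCBF import vacuum_coast_BCBF_ODE

mu = 3.5316e12      # m^3/s^2, roughly Kerbin's gravitational parameter (illustrative value)
omega = 2.9158e-4   # rad/s, roughly Kerbin's rotation rate (illustrative value)

# Position 700 km from the body centre, moving tangentially at 2.2 km/s,
# expressed in the body-centred body-fixed frame the ODE expects.
y0 = [700e3, 0.0, 0.0, 0.0, 0.0, 2200.0]
t_grid = np.arange(0.0, 600.0, 5.0)

states = scipy.integrate.odeint(
    lambda y, t: vacuum_coast_BCBF_ODE(y, omega, mu), y0, t_grid)
print(states.shape)                     # (120, 6): position and velocity samples
print(np.linalg.norm(states[-1, 0:3]))  # radial distance after ten minutes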
1c4334625377c3c0e7ac7312e2837d4a6d25667d
1,151
py
Python
tests/utils.py
SE2020Fall-Group1/BaiTuanTong-Backend
8e2575332ff9c3d8c7b7412376b5105c4c68cc95
[ "MIT" ]
null
null
null
tests/utils.py
SE2020Fall-Group1/BaiTuanTong-Backend
8e2575332ff9c3d8c7b7412376b5105c4c68cc95
[ "MIT" ]
1
2020-12-17T02:09:29.000Z
2020-12-17T02:09:29.000Z
tests/utils.py
SE2020Fall-Group1/BaiTuanTong-Backend
8e2575332ff9c3d8c7b7412376b5105c4c68cc95
[ "MIT" ]
null
null
null
from exts import db from app.models import User, Preference, Club, Post, Like, Comment def add_items(): u1 = User(id=1, username='jhc', password='hehehe', email='jhc@pku.edu.cn') u2 = User(id=2, username='gf', password='gagaga', email='gf@stu.pku.edu.cn') u3 = User(id=3, username='zhp', password='hailjd', email='zhp@pku.edu.cn') ad = User(id=4, username='amdno', password='it0803', email='admno@pku.edu.cn') pr1 = Preference(preference_name='kfc') c1 = Club(id=1, club_name='yuanhuo', president_id=1) c2 = Club(id=2, club_name='feiying', president_id=2) po1 = Post(id=1, title='one', text='jd is too strong', club_id=1) po2 = Post(id=2, title='two', text="let's compliment jd", club_id=2) po3 = Post(id=3, title='three', text="why not compliment jd", club_id=2) co1 = Comment(user_id=3, post_id=1, content='i think so.') like1 = Like(user_id=1, post_id=1) u1.preferences.append(pr1) u1.followed_clubs.append(c1) u2.managed_clubs.append(c1) u2.collected_posts.append(po3) db.session.add_all([ad, u1, u2, u3, pr1, po1, po2, po3, co1, like1, c1, c2]) db.session.commit()
37.129032
82
0.655951
from exts import db from app.models import User, Preference, Club, Post, Like, Comment def add_items(): u1 = User(id=1, username='jhc', password='hehehe', email='jhc@pku.edu.cn') u2 = User(id=2, username='gf', password='gagaga', email='gf@stu.pku.edu.cn') u3 = User(id=3, username='zhp', password='hailjd', email='zhp@pku.edu.cn') ad = User(id=4, username='amdno', password='it0803', email='admno@pku.edu.cn') pr1 = Preference(preference_name='kfc') c1 = Club(id=1, club_name='yuanhuo', president_id=1) c2 = Club(id=2, club_name='feiying', president_id=2) po1 = Post(id=1, title='one', text='jd is too strong', club_id=1) po2 = Post(id=2, title='two', text="let's compliment jd", club_id=2) po3 = Post(id=3, title='three', text="why not compliment jd", club_id=2) co1 = Comment(user_id=3, post_id=1, content='i think so.') like1 = Like(user_id=1, post_id=1) u1.preferences.append(pr1) u1.followed_clubs.append(c1) u2.managed_clubs.append(c1) u2.collected_posts.append(po3) db.session.add_all([ad, u1, u2, u3, pr1, po1, po2, po3, co1, like1, c1, c2]) db.session.commit()
true
true
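add_items() above is a seeding routine; the sketch below shows one way it could be wrapped in a pytest fixture. Only db and add_items come from the record; the app fixture and the seeded_db name are assumptions, not part of the project.

# Sketch only; 'app' is an assumed Flask application fixture defined elsewhere in the suite.
import pytest
from exts import db
from tests.utils import add_items

@pytest.fixture
def seeded_db(app):
    with app.app_context():
        db.create_all()
        add_items()          # four users, two clubs, three posts, one comment, one like
        yield db
        db.session.remove()
        db.drop_all()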
1c433609b1e3a76fc08d0fd1cbd6dafcf08efe6a
11,179
py
Python
myems-api/reports/meterrealtime.py
MyEMS/MyEMS
00263969a32e02293025880403b6657a65c1585b
[ "MIT" ]
2
2021-09-04T03:39:26.000Z
2021-09-05T00:29:20.000Z
myems-api/reports/meterrealtime.py
MyEMS/MyEMS
00263969a32e02293025880403b6657a65c1585b
[ "MIT" ]
4
2021-09-04T07:50:41.000Z
2021-09-05T09:35:20.000Z
myems-api/reports/meterrealtime.py
MyEMS/MyEMS
00263969a32e02293025880403b6657a65c1585b
[ "MIT" ]
2
2021-09-03T00:01:17.000Z
2021-09-06T06:38:20.000Z
import re import falcon import simplejson as json import mysql.connector import config from datetime import datetime, timedelta, timezone class Reporting: @staticmethod def __init__(): """"Initializes Reporting""" pass @staticmethod def on_options(req, resp): resp.status = falcon.HTTP_200 #################################################################################################################### # PROCEDURES # Step 1: valid parameters # Step 2: query the meter and energy category # Step 3: query associated points # Step 4: query reporting period points trends # Step 5: query tariff data # Step 6: construct the report #################################################################################################################### @staticmethod def on_get(req, resp): print(req.params) meter_id = req.params.get('meterid') meter_uuid = req.params.get('meteruuid') ################################################################################################################ # Step 1: valid parameters ################################################################################################################ if meter_id is None and meter_uuid is None: raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_METER_ID') if meter_id is not None: meter_id = str.strip(meter_id) if not meter_id.isdigit() or int(meter_id) <= 0: raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_METER_ID') if meter_uuid is not None: regex = re.compile('^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z', re.I) match = regex.match(str.strip(meter_uuid)) if not bool(match): raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_METER_UUID') timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6]) if config.utc_offset[0] == '-': timezone_offset = -timezone_offset reporting_end_datetime_utc = datetime.utcnow() reporting_start_datetime_utc = reporting_end_datetime_utc - timedelta(minutes=60) ################################################################################################################ # Step 2: query the meter and energy category ################################################################################################################ cnx_system = mysql.connector.connect(**config.myems_system_db) cursor_system = cnx_system.cursor() cnx_historical = mysql.connector.connect(**config.myems_historical_db) cursor_historical = cnx_historical.cursor() if meter_id is not None: cursor_system.execute(" SELECT m.id, m.name, m.cost_center_id, m.energy_category_id, " " ec.name, ec.unit_of_measure, ec.kgce, ec.kgco2e " " FROM tbl_meters m, tbl_energy_categories ec " " WHERE m.id = %s AND m.energy_category_id = ec.id ", (meter_id,)) row_meter = cursor_system.fetchone() elif meter_uuid is not None: cursor_system.execute(" SELECT m.id, m.name, m.cost_center_id, m.energy_category_id, " " ec.name, ec.unit_of_measure, ec.kgce, ec.kgco2e " " FROM tbl_meters m, tbl_energy_categories ec " " WHERE m.uuid = %s AND m.energy_category_id = ec.id ", (meter_uuid,)) row_meter = cursor_system.fetchone() if row_meter is None: if cursor_system: cursor_system.close() if cnx_system: cnx_system.disconnect() if cursor_historical: cursor_historical.close() if cnx_historical: cnx_historical.disconnect() raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND', description='API.METER_NOT_FOUND') meter = dict() meter['id'] = row_meter[0] meter['name'] = row_meter[1] 
meter['cost_center_id'] = row_meter[2] meter['energy_category_id'] = row_meter[3] meter['energy_category_name'] = row_meter[4] meter['unit_of_measure'] = row_meter[5] ################################################################################################################ # Step 3: query associated points ################################################################################################################ point_list = list() cursor_system.execute(" SELECT p.id, p.name, p.units, p.object_type " " FROM tbl_meters m, tbl_meters_points mp, tbl_points p " " WHERE m.id = %s AND m.id = mp.meter_id AND mp.point_id = p.id " " ORDER BY p.id ", (meter['id'],)) rows_points = cursor_system.fetchall() if rows_points is not None and len(rows_points) > 0: for row in rows_points: point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]}) ################################################################################################################ # Step 7: query associated points data ################################################################################################################ energy_value_data = dict() energy_value_data['name'] = None energy_value_data['timestamps'] = list() energy_value_data['values'] = list() parameters_data = dict() parameters_data['names'] = list() parameters_data['timestamps'] = list() parameters_data['values'] = list() for point in point_list: point_values = [] point_timestamps = [] if point['object_type'] == 'ANALOG_VALUE': query = (" SELECT utc_date_time, actual_value " " FROM tbl_analog_value " " WHERE point_id = %s " " AND utc_date_time BETWEEN %s AND %s " " ORDER BY utc_date_time ") cursor_historical.execute(query, (point['id'], reporting_start_datetime_utc, reporting_end_datetime_utc)) rows = cursor_historical.fetchall() if rows is not None and len(rows) > 0: for row in rows: current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \ timedelta(minutes=timezone_offset) current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S') point_timestamps.append(current_datetime) point_values.append(row[1]) parameters_data['names'].append(point['name'] + ' (' + point['units'] + ')') parameters_data['timestamps'].append(point_timestamps) parameters_data['values'].append(point_values) elif point['object_type'] == 'ENERGY_VALUE': energy_value_data['name'] = point['name'] query = (" SELECT utc_date_time, actual_value " " FROM tbl_energy_value " " WHERE point_id = %s " " AND utc_date_time BETWEEN %s AND %s " " ORDER BY utc_date_time ") cursor_historical.execute(query, (point['id'], reporting_start_datetime_utc, reporting_end_datetime_utc)) rows = cursor_historical.fetchall() if rows is not None and len(rows) > 0: for row in rows: current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \ timedelta(minutes=timezone_offset) current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S') energy_value_data['timestamps'].append(current_datetime) energy_value_data['values'].append(row[1]) elif point['object_type'] == 'DIGITAL_VALUE': query = (" SELECT utc_date_time, actual_value " " FROM tbl_digital_value " " WHERE point_id = %s " " AND utc_date_time BETWEEN %s AND %s " " ORDER BY utc_date_time ") cursor_historical.execute(query, (point['id'], reporting_start_datetime_utc, reporting_end_datetime_utc)) rows = cursor_historical.fetchall() if rows is not None and len(rows) > 0: for row in rows: current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \ timedelta(minutes=timezone_offset) 
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S') point_timestamps.append(current_datetime) point_values.append(row[1]) parameters_data['names'].append(point['name'] + ' (' + point['units'] + ')') parameters_data['timestamps'].append(point_timestamps) parameters_data['values'].append(point_values) ################################################################################################################ # Step 6: construct the report ################################################################################################################ if cursor_system: cursor_system.close() if cnx_system: cnx_system.disconnect() if cursor_historical: cursor_historical.close() if cnx_historical: cnx_historical.disconnect() result = { "meter": { "cost_center_id": meter['cost_center_id'], "energy_category_id": meter['energy_category_id'], "energy_category_name": meter['energy_category_name'], "unit_of_measure": meter['unit_of_measure'], }, "energy_value": energy_value_data, "parameters": { "names": parameters_data['names'], "timestamps": parameters_data['timestamps'], "values": parameters_data['values'] }, } resp.text = json.dumps(result)
49.684444
120
0.469631
import re import falcon import simplejson as json import mysql.connector import config from datetime import datetime, timedelta, timezone class Reporting: @staticmethod def __init__(): pass @staticmethod def on_options(req, resp): resp.status = falcon.HTTP_200
true
true
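One detail of the resource above worth noting: config.utc_offset (a string such as '+08:00') is turned into a signed minute offset before the historical timestamps are shifted to local time. The restatement below is a standalone sketch of that arithmetic; the function name is not part of the project.

# Standalone restatement of the UTC-offset handling used in the record above.
def offset_minutes(utc_offset):
    minutes = int(utc_offset[1:3]) * 60 + int(utc_offset[4:6])
    return -minutes if utc_offset[0] == '-' else minutes

print(offset_minutes('+08:00'))   # 480
print(offset_minutes('-05:30'))   # -330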
1c4336b105b492bd501735393b32c1a2be626606
5,514
py
Python
dags/acona_notifications.py
acolono/ACONA-scheduler-intelligence
1ccaaca21a2b4e10b30242294d5fc8dc087dbd8d
[ "Apache-2.0" ]
1
2022-01-27T14:51:18.000Z
2022-01-27T14:51:18.000Z
dags/acona_notifications.py
acolono/ACONA-scheduler-intelligence
1ccaaca21a2b4e10b30242294d5fc8dc087dbd8d
[ "Apache-2.0" ]
null
null
null
dags/acona_notifications.py
acolono/ACONA-scheduler-intelligence
1ccaaca21a2b4e10b30242294d5fc8dc087dbd8d
[ "Apache-2.0" ]
null
null
null
from airflow.decorators import dag, task from airflow.utils.dates import days_ago from airflow.operators.bash import BashOperator from airflow.providers.postgres.operators.postgres import PostgresOperator from airflow.hooks.postgres_hook import PostgresHook from airflow.models import Variable from datetime import datetime, timedelta from acona_postgres_tools import acona_truncate_table, acona_data_write, acona_fetch_data, acona_fetch_one # [END import_module] # [START default_args] # These args will get passed on to each operator # You can override them on a per-task basis during operator initialization default_args = { 'owner': 'airflow' } # [END default_args] # [START instantiate_dag] @dag( default_args=default_args, start_date=days_ago(2), tags=['notifications'], schedule_interval='30 4 * * 0') def acona_notifications(): # [END instantiate_dag] # [START notify] @task() def notify(metric): """ #### Get data for current date from Warehouse """ import json import requests import os import urllib.parse import pandas as pd import numpy as np import re WAREHOUSE_TOKEN = Variable.get("WAREHOUSE_TOKEN") WAREHOUSE_URL = Variable.get("WAREHOUSE_URL") # Load urls (for specific domain only?) urls = os.popen('curl ' + WAREHOUSE_URL + '/rpc/acona_urls -H "Authorization: Bearer ' + WAREHOUSE_TOKEN + '"').read() notifications = {} date = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d') #date = '2021-10-22' #Load metric data sql = "select * from internal.variables where var_data_table = '{}'".format(metric) values = acona_fetch_one(sql) metric_id = values[0] #TODO: Add metric title to internal.variables table and use it here. metric_mn = values[1] for url in json.loads(urls): #Load historic data url = url['url'] sql = "select value from api." + metric + " where date = '{}' and url = '{}'".format(date, url) values = acona_fetch_one(sql) if values: value = values[0] # Load forecasted upper value. sql = "select value from api." + metric + "_f_upper where date = '{}' and url = '{}'".format(date, url) upper_values = acona_fetch_one(sql) upper_value = None if upper_values: upper_value = upper_values[0] # Load forecasted lower value. sql = "select value from api." + metric + "_f_lower where date = '{}' and url = '{}'".format(date, url) lower_values = acona_fetch_one(sql) lower_value = None if lower_values: lower_value = lower_values[0] # Compare values. notification_id = re.sub('[^A-Za-z0-9]+', '', str(date)) + '_' + re.sub('[^A-Za-z0-9]+', '', str(url)) + str(metric) + '_lower' if lower_value != None and value < lower_value: #write notification. data = {'notification_id': [notification_id], 'url': [url], 'date': [date], 'variable_id': [metric_id]} dataf = pd.DataFrame(data) acona_data_write('api.notifications', dataf) text_en = 'Attention, the value for ' + metric_mn + ' is lower than expected. Please check if something is wrong.' title_en = 'Value is lower as expected' data = {'notification_id': [notification_id], 'langcode': ['en'], 'title': [title_en], 'text': [text_en]} dataf = pd.DataFrame(data) acona_data_write('api.notification_texts', dataf) if upper_value != None and value > upper_value: #write notification. data = {'notification_id': [notification_id], 'url': [url], 'date': [date], 'variable_id': [metric_id]} dataf = pd.DataFrame(data) acona_data_write('api.notifications', dataf) text_en = 'Attention, the value for ' + metric_mn + ' is higher than expected.' 
title_en = 'Value is higher as expected' data = {'notification_id': [notification_id], 'langcode': ['en'], 'title': [title_en], 'text': [text_en]} dataf = pd.DataFrame(data) acona_data_write('api.notification_texts', dataf) # write calc dates data = {'variable': 'api.notifications', 'date': date, 'url': url} dataf = pd.DataFrame(data) acona_data_write('internal.var_calc_dates', dataf) # [END notify] # [START main_flow] # Supported metrics. Todo: Load from data warehouse. metrics = { 'metric_d_page_views', 'metric_d_bounces', 'metric_d_page_views', 'metric_d_visits', 'metric_d_unique_visits', 'metric_d_conversions', 'metric_d_visit_time_total', 'metric_d_visit_time_average', 'metric_d_visits_converted', 'metric_d_bounce_rate' } #metrics = { #'metric_d_page_views' #} # Loop over metrics, forecast values and save in data warehouse for metric in metrics: notify(metric) # [END main_flow] # [START dag_invocation] acona_notifications = acona_notifications() # [END dag_invocation]
38.830986
143
0.591222
from airflow.decorators import dag, task from airflow.utils.dates import days_ago from airflow.operators.bash import BashOperator from airflow.providers.postgres.operators.postgres import PostgresOperator from airflow.hooks.postgres_hook import PostgresHook from airflow.models import Variable from datetime import datetime, timedelta from acona_postgres_tools import acona_truncate_table, acona_data_write, acona_fetch_data, acona_fetch_one default_args = { 'owner': 'airflow' } @dag( default_args=default_args, start_date=days_ago(2), tags=['notifications'], schedule_interval='30 4 * * 0') def acona_notifications(): @task() def notify(metric): import json import requests import os import urllib.parse import pandas as pd import numpy as np import re WAREHOUSE_TOKEN = Variable.get("WAREHOUSE_TOKEN") WAREHOUSE_URL = Variable.get("WAREHOUSE_URL") urls = os.popen('curl ' + WAREHOUSE_URL + '/rpc/acona_urls -H "Authorization: Bearer ' + WAREHOUSE_TOKEN + '"').read() notifications = {} date = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d') sql = "select * from internal.variables where var_data_table = '{}'".format(metric) values = acona_fetch_one(sql) metric_id = values[0] metric_mn = values[1] for url in json.loads(urls): url = url['url'] sql = "select value from api." + metric + " where date = '{}' and url = '{}'".format(date, url) values = acona_fetch_one(sql) if values: value = values[0] sql = "select value from api." + metric + "_f_upper where date = '{}' and url = '{}'".format(date, url) upper_values = acona_fetch_one(sql) upper_value = None if upper_values: upper_value = upper_values[0] sql = "select value from api." + metric + "_f_lower where date = '{}' and url = '{}'".format(date, url) lower_values = acona_fetch_one(sql) lower_value = None if lower_values: lower_value = lower_values[0] notification_id = re.sub('[^A-Za-z0-9]+', '', str(date)) + '_' + re.sub('[^A-Za-z0-9]+', '', str(url)) + str(metric) + '_lower' if lower_value != None and value < lower_value: data = {'notification_id': [notification_id], 'url': [url], 'date': [date], 'variable_id': [metric_id]} dataf = pd.DataFrame(data) acona_data_write('api.notifications', dataf) text_en = 'Attention, the value for ' + metric_mn + ' is lower than expected. Please check if something is wrong.' title_en = 'Value is lower as expected' data = {'notification_id': [notification_id], 'langcode': ['en'], 'title': [title_en], 'text': [text_en]} dataf = pd.DataFrame(data) acona_data_write('api.notification_texts', dataf) if upper_value != None and value > upper_value: data = {'notification_id': [notification_id], 'url': [url], 'date': [date], 'variable_id': [metric_id]} dataf = pd.DataFrame(data) acona_data_write('api.notifications', dataf) text_en = 'Attention, the value for ' + metric_mn + ' is higher than expected.' title_en = 'Value is higher as expected' data = {'notification_id': [notification_id], 'langcode': ['en'], 'title': [title_en], 'text': [text_en]} dataf = pd.DataFrame(data) acona_data_write('api.notification_texts', dataf) data = {'variable': 'api.notifications', 'date': date, 'url': url} dataf = pd.DataFrame(data) acona_data_write('internal.var_calc_dates', dataf) metrics = { 'metric_d_page_views', 'metric_d_bounces', 'metric_d_page_views', 'metric_d_visits', 'metric_d_unique_visits', 'metric_d_conversions', 'metric_d_visit_time_total', 'metric_d_visit_time_average', 'metric_d_visits_converted', 'metric_d_bounce_rate' } for metric in metrics: notify(metric) acona_notifications = acona_notifications()
true
true
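The heart of notify() above is a band check: yesterday's observed value is compared with the forecast lower and upper bounds, and a notification is written only when the value leaves the band. The sketch below restates that comparison on its own; the function name and sample numbers are not from the project.

# Standalone restatement of the forecast-band comparison used in notify().
def band_breach(value, lower=None, upper=None):
    # Mirrors the checks above: a missing bound is simply skipped.
    if lower is not None and value < lower:
        return 'lower'   # triggers the "lower than expected" notification
    if upper is not None and value > upper:
        return 'upper'   # triggers the "higher than expected" notification
    return None          # value inside the band: no notification

print(band_breach(80, lower=100, upper=300))    # 'lower'
print(band_breach(350, lower=100, upper=300))   # 'upper'
print(band_breach(200, lower=100, upper=300))   # None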
1c4336c8355df8a2828caa6afe949180f0053eaa
3,088
py
Python
Beginner Python projects/Tip Calculator/tip.py
manish1822510059/build-10-python-beginner-projects
999363f332069a8266dfab1504f16c3193272ea0
[ "MIT" ]
2
2021-01-15T15:58:44.000Z
2021-04-15T15:29:04.000Z
Beginner Python projects/Tip Calculator/tip.py
manish1822510059/build-10-python-beginner-projects
999363f332069a8266dfab1504f16c3193272ea0
[ "MIT" ]
null
null
null
Beginner Python projects/Tip Calculator/tip.py
manish1822510059/build-10-python-beginner-projects
999363f332069a8266dfab1504f16c3193272ea0
[ "MIT" ]
null
null
null
from tkinter import Tk,Radiobutton, Button,Label,StringVar,IntVar,Entry class TipCalculator(): def __init__(self): window = Tk() window.title("Tip Calculator App") window.configure(background="sky blue") window.geometry("375x250") window.resizable(width=False,height=False) self.meal_cost = StringVar() self.tip_percent = IntVar() self.tip = StringVar() self.total_cost = StringVar() tip_percents = Label(window,text ="Tip Percentages",bg="purple",fg="white") tip_percents.grid(column=0,row=0,padx=15) bill_amount = Label(window,text ="Bill Amount",bg="black",fg="white") bill_amount.grid(column=1,row=0,padx=15) bill_amount_entry = Entry(window,textvariable =self.meal_cost,width =14) bill_amount_entry.grid(column=2,row=0) five_percent_tip = Radiobutton(window,text="0.5%",variable = self.tip_percent,value = 5) five_percent_tip.grid(column=0,row=1) ten_percent_tip = Radiobutton(window,text="10%",variable = self.tip_percent,value = 10) ten_percent_tip.grid(column=0,row=2) fifteen_percent_tip = Radiobutton(window,text="15%",variable = self.tip_percent,value = 15) fifteen_percent_tip.grid(column=0,row=3) twenty_percent_tip = Radiobutton(window,text="20%",variable = self.tip_percent,value = 20) twenty_percent_tip.grid(column=0,row=4) twentyfive_percent_tip = Radiobutton(window,text="25%",variable = self.tip_percent,value = 25) twentyfive_percent_tip.grid(column=0,row=5) thirty_percent_tip = Radiobutton(window,text="30%",variable = self.tip_percent,value = 30) thirty_percent_tip.grid(column=0,row=6) tip_amount_lbl = Label(window,text ="Tip Amount",bg="brown",fg="white") tip_amount_lbl.grid(column=1,row=3,padx=15) tip_amount_entry = Entry(window,textvariable =self.tip,width =14) tip_amount_entry.grid(column=2,row=3) bill_total_lbl = Label(window,text ="Bill Total",bg="blue",fg="white") bill_total_lbl.grid(column=1,row=5,padx=15) bill_total_entry = Entry(window,textvariable =self.total_cost,width =14) bill_total_entry.grid(column=2,row=5) calculate_btn = Button(window,text ="Calculate",bg="green",fg="white",command=self.calculate) calculate_btn.grid(column=1,row=7,padx=15) clear_btn = Button(window,text ="Clear",bg="black",fg="white",command=self.clear) clear_btn.grid(column=2,row=7) window.mainloop() def calculate(self): pre_tip = float(self.meal_cost.get()) percentage = self.tip_percent.get() / 100 tip_amount_entry = pre_tip * percentage self.tip.set(tip_amount_entry) final_bill = pre_tip + tip_amount_entry self.total_cost.set(final_bill) def clear(self) : self.total_cost.set("") self.meal_cost.set("") self.tip.set("") TipCalculator()
35.906977
103
0.647668
from tkinter import Tk,Radiobutton, Button,Label,StringVar,IntVar,Entry class TipCalculator(): def __init__(self): window = Tk() window.title("Tip Calculator App") window.configure(background="sky blue") window.geometry("375x250") window.resizable(width=False,height=False) self.meal_cost = StringVar() self.tip_percent = IntVar() self.tip = StringVar() self.total_cost = StringVar() tip_percents = Label(window,text ="Tip Percentages",bg="purple",fg="white") tip_percents.grid(column=0,row=0,padx=15) bill_amount = Label(window,text ="Bill Amount",bg="black",fg="white") bill_amount.grid(column=1,row=0,padx=15) bill_amount_entry = Entry(window,textvariable =self.meal_cost,width =14) bill_amount_entry.grid(column=2,row=0) five_percent_tip = Radiobutton(window,text="0.5%",variable = self.tip_percent,value = 5) five_percent_tip.grid(column=0,row=1) ten_percent_tip = Radiobutton(window,text="10%",variable = self.tip_percent,value = 10) ten_percent_tip.grid(column=0,row=2) fifteen_percent_tip = Radiobutton(window,text="15%",variable = self.tip_percent,value = 15) fifteen_percent_tip.grid(column=0,row=3) twenty_percent_tip = Radiobutton(window,text="20%",variable = self.tip_percent,value = 20) twenty_percent_tip.grid(column=0,row=4) twentyfive_percent_tip = Radiobutton(window,text="25%",variable = self.tip_percent,value = 25) twentyfive_percent_tip.grid(column=0,row=5) thirty_percent_tip = Radiobutton(window,text="30%",variable = self.tip_percent,value = 30) thirty_percent_tip.grid(column=0,row=6) tip_amount_lbl = Label(window,text ="Tip Amount",bg="brown",fg="white") tip_amount_lbl.grid(column=1,row=3,padx=15) tip_amount_entry = Entry(window,textvariable =self.tip,width =14) tip_amount_entry.grid(column=2,row=3) bill_total_lbl = Label(window,text ="Bill Total",bg="blue",fg="white") bill_total_lbl.grid(column=1,row=5,padx=15) bill_total_entry = Entry(window,textvariable =self.total_cost,width =14) bill_total_entry.grid(column=2,row=5) calculate_btn = Button(window,text ="Calculate",bg="green",fg="white",command=self.calculate) calculate_btn.grid(column=1,row=7,padx=15) clear_btn = Button(window,text ="Clear",bg="black",fg="white",command=self.clear) clear_btn.grid(column=2,row=7) window.mainloop() def calculate(self): pre_tip = float(self.meal_cost.get()) percentage = self.tip_percent.get() / 100 tip_amount_entry = pre_tip * percentage self.tip.set(tip_amount_entry) final_bill = pre_tip + tip_amount_entry self.total_cost.set(final_bill) def clear(self) : self.total_cost.set("") self.meal_cost.set("") self.tip.set("") TipCalculator()
true
true
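TipCalculator.calculate() above computes tip = bill * percent / 100 and total = bill + tip; the sketch below restates that arithmetic standalone (the function name is not from the project). Note that in the listing the radio button labelled "0.5%" carries value=5, so it is effectively treated as 5%.

# Standalone restatement of the tip arithmetic in TipCalculator.calculate().
def split_bill(meal_cost, tip_percent):
    tip = meal_cost * (tip_percent / 100)
    return tip, meal_cost + tip

tip, total = split_bill(40.00, 25)
print(tip, total)    # 10.0 50.0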
1c4336de70a1d80e4e3fa9569e8ca3984e4bbef7
5,855
py
Python
tests/build_test_db.py
zhongxinghong/EECS-Volunteer-Reservation-2-Backend
e468aff0f33760432326a8b355ef8ed028b96bdf
[ "MIT" ]
null
null
null
tests/build_test_db.py
zhongxinghong/EECS-Volunteer-Reservation-2-Backend
e468aff0f33760432326a8b355ef8ed028b96bdf
[ "MIT" ]
null
null
null
tests/build_test_db.py
zhongxinghong/EECS-Volunteer-Reservation-2-Backend
e468aff0f33760432326a8b355ef8ed028b96bdf
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # filename: build_test_db.py # modified: 2019-10-29 import sys sys.path.append("../") from pprint import pprint from flask import current_app from requests.sessions import Session from requests.compat import json from app import create_app, db from app.models import Admin, User from app.core.const import SAFETY_PROFILE from app.core.exceptions import Success BASE_URL = "http://127.0.0.1:7073" APP_CONFIG = "development" TEST_DATA_JSON = "./test_data.json" INTERNAL_TOKEN = SAFETY_PROFILE["internal_token"] ctx = None data = None root_token = None worker_token = None user_tokens = [] def _hook_check_status_code(r, **kwargs): r.raise_for_status() def _hook_check_errcode(r, **kwargs): respJson = r.json() if respJson["errcode"] != Success.code: raise Exception("[%d] %s" % (respJson["errcode"], respJson["errmsg"])) class TestClient(object): def __init__(self): self._session = Session() self._session.hooks["response"] = [ _hook_check_status_code, _hook_check_errcode, ] def _request(self, method, path, **kwargs): return self._session.request(method, BASE_URL + path, **kwargs) def _get(self, path, params={}, **kwargs): return self._request('GET', path, params=params, **kwargs) def _post(self, path, data={}, **kwargs): return self._request('POST', path, data=data, **kwargs) def create_admin(self, username, password, type, internal_token): return self._post("/_internal/create_admin", { "username": username, "password": password, "type": type, }, headers={ "Authorization": internal_token }) def create_activity(self, date, begin, end, site, root_token): return self._post("/activity/create", { "date": date, "begin": begin, "end": end, "site": site, }, headers={ "Authorization": root_token }) def create_order(self, model, description, repairType, period, email, user_token): return self._post("/order/create", { "model": model, "description": description, "repairType": repairType, "period": period, "email": email, }, headers={ "Authorization": user_token }) def get_existed_order(self, user_token): return self._get("/order/get_existed", headers={ "Authorization": user_token, }) def get_register_token(self, admin_token): return self._get("/order/get_register_token", headers={ "Authorization": admin_token, }) def register_order(self, oid, token, user_token): return self._post("/order/register", { "oid": oid, "token": token, }, headers={ "Authorization": user_token }) def task_init_app(): global ctx app = create_app(APP_CONFIG) ctx = app.app_context() ctx.push() def task_rebuild_db(): db.drop_all() db.create_all() def task_load_test_data(): global data with open(TEST_DATA_JSON, "r", encoding="utf-8-sig") as fp: data = json.load(fp) def task_create_roots(): roots = data["roots"] client = TestClient() for root in roots: client.create_admin( username=root["username"], password=root["password"], type=Admin.TYPE_ROOT, internal_token=INTERNAL_TOKEN, ) def task_get_root_token(): global root_token admin = Admin.query.filter_by(type=Admin.TYPE_ROOT).first() root_token = admin.token def task_create_workers(): workers = data["workers"] client = TestClient() for worker in workers: client.create_admin( username=worker["username"], password=worker["password"], type=Admin.TYPE_WORKER, internal_token=INTERNAL_TOKEN, ) def task_get_worker_token(): global worker_token admin = Admin.query.filter_by(type=Admin.TYPE_WORKER).first() worker_token = admin.token def task_create_users(): users = data["users"] client = TestClient() with db.session.transaction_start(): for user in users: 
openid = user["openid"] user = User(openid) db.session.add(user) def task_get_user_tokens(): global user_tokens user_tokens = [ u.token for u in User.query.all() ] def task_create_activities(): activities = data["activities"] client = TestClient() for activity in activities: client.create_activity(**activity, root_token=root_token) def task_create_orders(): orders = data["orders"] client = TestClient() for idx, order in enumerate(orders): client.create_order(**order, user_token=user_tokens[idx]) def task_register_orders(): orders = data["orders"] client = TestClient() register_token = client.get_register_token(root_token).json()["detail"]["token"] for idx, _ in enumerate(orders): user_token = user_tokens[idx] order = client.get_existed_order(user_token).json()["detail"]["order"] client.register_order(order["oid"], register_token, user_token) def task_deinit_app(): ctx.pop() def main(): task_init_app() task_rebuild_db() task_load_test_data() task_create_roots() task_create_workers() task_create_users() task_get_root_token() task_get_worker_token() task_get_user_tokens() task_create_activities() task_create_orders() task_register_orders() task_deinit_app() if __name__ == '__main__': main()
25.34632
86
0.616225
import sys

sys.path.append("../")

from pprint import pprint

from flask import current_app
from requests.sessions import Session
from requests.compat import json

from app import create_app, db
from app.models import Admin, User
from app.core.const import SAFETY_PROFILE
from app.core.exceptions import Success

BASE_URL = "http://127.0.0.1:7073"
APP_CONFIG = "development"
TEST_DATA_JSON = "./test_data.json"
INTERNAL_TOKEN = SAFETY_PROFILE["internal_token"]

ctx = None
data = None
root_token = None
worker_token = None
user_tokens = []


def _hook_check_status_code(r, **kwargs):
    r.raise_for_status()


def _hook_check_errcode(r, **kwargs):
    respJson = r.json()
    if respJson["errcode"] != Success.code:
        raise Exception("[%d] %s" % (respJson["errcode"], respJson["errmsg"]))


class TestClient(object):

    def __init__(self):
        self._session = Session()
        self._session.hooks["response"] = [
            _hook_check_status_code,
            _hook_check_errcode,
        ]

    def _request(self, method, path, **kwargs):
        return self._session.request(method, BASE_URL + path, **kwargs)

    def _get(self, path, params={}, **kwargs):
        return self._request('GET', path, params=params, **kwargs)

    def _post(self, path, data={}, **kwargs):
        return self._request('POST', path, data=data, **kwargs)

    def create_admin(self, username, password, type, internal_token):
        return self._post("/_internal/create_admin", {
            "username": username,
            "password": password,
            "type": type,
        }, headers={
            "Authorization": internal_token
        })

    def create_activity(self, date, begin, end, site, root_token):
        return self._post("/activity/create", {
            "date": date,
            "begin": begin,
            "end": end,
            "site": site,
        }, headers={
            "Authorization": root_token
        })

    def create_order(self, model, description, repairType, period, email, user_token):
        return self._post("/order/create", {
            "model": model,
            "description": description,
            "repairType": repairType,
            "period": period,
            "email": email,
        }, headers={
            "Authorization": user_token
        })

    def get_existed_order(self, user_token):
        return self._get("/order/get_existed", headers={
            "Authorization": user_token,
        })

    def get_register_token(self, admin_token):
        return self._get("/order/get_register_token", headers={
            "Authorization": admin_token,
        })

    def register_order(self, oid, token, user_token):
        return self._post("/order/register", {
            "oid": oid,
            "token": token,
        }, headers={
            "Authorization": user_token
        })


def task_init_app():
    global ctx
    app = create_app(APP_CONFIG)
    ctx = app.app_context()
    ctx.push()


def task_rebuild_db():
    db.drop_all()
    db.create_all()


def task_load_test_data():
    global data
    with open(TEST_DATA_JSON, "r", encoding="utf-8-sig") as fp:
        data = json.load(fp)


def task_create_roots():
    roots = data["roots"]
    client = TestClient()
    for root in roots:
        client.create_admin(
            username=root["username"],
            password=root["password"],
            type=Admin.TYPE_ROOT,
            internal_token=INTERNAL_TOKEN,
        )


def task_get_root_token():
    global root_token
    admin = Admin.query.filter_by(type=Admin.TYPE_ROOT).first()
    root_token = admin.token


def task_create_workers():
    workers = data["workers"]
    client = TestClient()
    for worker in workers:
        client.create_admin(
            username=worker["username"],
            password=worker["password"],
            type=Admin.TYPE_WORKER,
            internal_token=INTERNAL_TOKEN,
        )


def task_get_worker_token():
    global worker_token
    admin = Admin.query.filter_by(type=Admin.TYPE_WORKER).first()
    worker_token = admin.token


def task_create_users():
    users = data["users"]
    client = TestClient()
    with db.session.transaction_start():
        for user in users:
            openid = user["openid"]
            user = User(openid)
            db.session.add(user)


def task_get_user_tokens():
    global user_tokens
    user_tokens = [u.token for u in User.query.all()]


def task_create_activities():
    activities = data["activities"]
    client = TestClient()
    for activity in activities:
        client.create_activity(**activity, root_token=root_token)


def task_create_orders():
    orders = data["orders"]
    client = TestClient()
    for idx, order in enumerate(orders):
        client.create_order(**order, user_token=user_tokens[idx])


def task_register_orders():
    orders = data["orders"]
    client = TestClient()
    register_token = client.get_register_token(root_token).json()["detail"]["token"]
    for idx, _ in enumerate(orders):
        user_token = user_tokens[idx]
        order = client.get_existed_order(user_token).json()["detail"]["order"]
        client.register_order(order["oid"], register_token, user_token)


def task_deinit_app():
    ctx.pop()


def main():
    task_init_app()
    task_rebuild_db()
    task_load_test_data()
    task_create_roots()
    task_create_workers()
    task_create_users()
    task_get_root_token()
    task_get_worker_token()
    task_get_user_tokens()
    task_create_activities()
    task_create_orders()
    task_register_orders()
    task_deinit_app()


if __name__ == '__main__':
    main()
true
true
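The test client in the record above centralizes error handling in `requests` response hooks instead of checking every call site. For illustration, a self-contained sketch of that pattern; the URL is a placeholder, not the app's endpoint:

from requests.sessions import Session

def check_status(resp, **kwargs):
    resp.raise_for_status()  # raises requests.HTTPError on any 4xx/5xx

session = Session()
session.hooks["response"] = [check_status]  # runs after every response on this session

resp = session.get("https://httpbin.org/status/200")  # placeholder URL
print(resp.status_code)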
1c433737047e09d318ff08a65f9b1dc95a9604fc
2,229
py
Python
src/continuous_scheduler.py
tdm-project/tdm-energy-consumption-report
a36c42b87f9eccd6feeb7f4049537a0ce41007d2
[ "Apache-2.0" ]
null
null
null
src/continuous_scheduler.py
tdm-project/tdm-energy-consumption-report
a36c42b87f9eccd6feeb7f4049537a0ce41007d2
[ "Apache-2.0" ]
null
null
null
src/continuous_scheduler.py
tdm-project/tdm-energy-consumption-report
a36c42b87f9eccd6feeb7f4049537a0ce41007d2
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
#
#  Copyright 2018-2022, CRS4 - Center for Advanced Studies, Research and Development in Sardinia
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
# ---------------------------------------------------------------------------- #
import sched
from time import sleep, time


# ---------------------------------------------------------------------------- #
class TaskWrapper(object):

    def __init__(self, task, period, priority, scheduler, *args, **kwargs):
        self._task = task
        self._period = period
        self._priority = priority
        self._scheduler = scheduler
        self._args = args
        self._kwargs = kwargs

    # ------------------------------------------------------------------------ #
    def __call__(self, *args, **kwargs):
        self._task(*self._args, **self._kwargs)
        self._scheduler.enter(self._period, self._priority, self, *self._args, **self._kwargs)


# ---------------------------------------------------------------------------- #
class MainScheduler(object):

    def __init__(self):
        self._scheduler = sched.scheduler(time, sleep)

    # ------------------------------------------------------------------------ #
    def add_task(self, task, delay, period, priority, *args, **kwargs):
        _task = TaskWrapper(task, period, priority, self._scheduler, *args, **kwargs)
        self._scheduler.enter(delay, priority, _task, *args, **kwargs)

    # ------------------------------------------------------------------------ #
    def start(self):
        self._scheduler.run()

# ---------------------------------------------------------------------------- #
35.380952
96
0.493046
import sched
from time import sleep, time


class TaskWrapper(object):

    def __init__(self, task, period, priority, scheduler, *args, **kwargs):
        self._task = task
        self._period = period
        self._priority = priority
        self._scheduler = scheduler
        self._args = args
        self._kwargs = kwargs

    def __call__(self, *args, **kwargs):
        self._task(*self._args, **self._kwargs)
        self._scheduler.enter(self._period, self._priority, self, *self._args, **self._kwargs)


class MainScheduler(object):

    def __init__(self):
        self._scheduler = sched.scheduler(time, sleep)

    def add_task(self, task, delay, period, priority, *args, **kwargs):
        _task = TaskWrapper(task, period, priority, self._scheduler, *args, **kwargs)
        self._scheduler.enter(delay, priority, _task, *args, **kwargs)

    def start(self):
        self._scheduler.run()
true
true
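For illustration, a minimal usage sketch of the `MainScheduler` above, assuming the two classes are in scope; the tick function, the 1 s delay, and the 2 s period are invented. Note that `start()` blocks, and each `TaskWrapper.__call__` re-enters itself, so the task repeats until it raises:

import itertools

counter = itertools.count(1)

def tick():
    n = next(counter)
    print("tick", n)
    if n >= 3:  # let the demo terminate after three runs
        raise SystemExit

scheduler = MainScheduler()
scheduler.add_task(tick, delay=1, period=2, priority=1)
scheduler.start()  # blocks; SystemExit from the task ends the run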
1c4337947826be9ce90962a0fce2550f49d15f6b
3,284
py
Python
python3/koans/about_strings.py
StevenLeighton21/mentoring-python-koans
944275bd4d0e86455faf18135245ad3343b1bb0d
[ "MIT" ]
null
null
null
python3/koans/about_strings.py
StevenLeighton21/mentoring-python-koans
944275bd4d0e86455faf18135245ad3343b1bb0d
[ "MIT" ]
null
null
null
python3/koans/about_strings.py
StevenLeighton21/mentoring-python-koans
944275bd4d0e86455faf18135245ad3343b1bb0d
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from runner.koan import *


class AboutStrings(Koan):

    # def test_show_us_what_strings_do(self):
    #     a = 1
    #     print(string1 = "a == {a}")
    #     print(string2 = 'a == {a}')
    #     print(string3 = """a == {a}""")

    def test_double_quoted_strings_are_strings(self):
        string = "Hello, world."
        self.assertEqual(True, isinstance(string, str))

    def test_single_quoted_strings_are_also_strings(self):
        string = 'Goodbye, world.'
        self.assertEqual(True, isinstance(string, str))

    def test_triple_quote_strings_are_also_strings(self):
        string = """Howdy, world!"""
        self.assertEqual(True, isinstance(string, str))

    def test_triple_single_quotes_work_too(self):
        string = '''Bonjour tout le monde!'''
        self.assertEqual(True, isinstance(string, str))

    def test_raw_strings_are_also_strings(self):
        string = r"Konnichi wa, world!"
        self.assertEqual(True, isinstance(string, str))

    def test_use_single_quotes_to_create_string_with_double_quotes(self):
        string = 'He said, "Go Away."'
        self.assertEqual('He said, "Go Away."', string)

    def test_use_double_quotes_to_create_strings_with_single_quotes(self):
        string = "Don't"
        self.assertEqual("Don't", string)

    def test_use_backslash_for_escaping_quotes_in_strings(self):
        a = "He said, \"Don't\""
        b = 'He said, "Don\'t"'
        self.assertEqual(True, (a == b))

    def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
        string = "It was the best of times,\n\
It was the worst of times."
        self.assertEqual(52, len(string))

    def test_triple_quoted_strings_can_span_lines(self):
        string = """
Howdy,
world!
"""
        self.assertEqual(15, len(string))

    def test_triple_quoted_strings_need_less_escaping(self):
        a = "Hello \"world\"."
        b = """Hello "world"."""
        self.assertEqual(True, (a == b))

    def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
        string = """Hello "world\""""
        self.assertEqual("""Hello "world\"""", string)

    def test_plus_concatenates_strings(self):
        string = "Hello, " + "world"
        self.assertEqual("Hello, world", string)

    def test_adjacent_strings_are_concatenated_automatically(self):
        string = "Hello" ", " "world"
        self.assertEqual("Hello, world", string)

    def test_plus_will_not_modify_original_strings(self):
        hi = "Hello, "
        there = "world"
        string = hi + there
        self.assertEqual("Hello, ", hi)
        self.assertEqual("world", there)

    def test_plus_equals_will_append_to_end_of_string(self):
        hi = "Hello, "
        there = "world"
        hi += there
        self.assertEqual("Hello, world", hi)

    def test_plus_equals_also_leaves_original_string_unmodified(self):
        original = "Hello, "
        hi = original
        there = "world"
        hi += there
        self.assertEqual("Hello, ", original)

    def test_most_strings_interpret_escape_characters(self):
        string = "\n"
        self.assertEqual('\n', string)
        self.assertEqual("""\n""", string)
        self.assertEqual(1, len(string))
32.84
85
0.637028
from runner.koan import *


class AboutStrings(Koan):

    def test_double_quoted_strings_are_strings(self):
        string = "Hello, world."
        self.assertEqual(True, isinstance(string, str))

    def test_single_quoted_strings_are_also_strings(self):
        string = 'Goodbye, world.'
        self.assertEqual(True, isinstance(string, str))

    def test_triple_quote_strings_are_also_strings(self):
        string = """Howdy, world!"""
        self.assertEqual(True, isinstance(string, str))

    def test_triple_single_quotes_work_too(self):
        string = '''Bonjour tout le monde!'''
        self.assertEqual(True, isinstance(string, str))

    def test_raw_strings_are_also_strings(self):
        string = r"Konnichi wa, world!"
        self.assertEqual(True, isinstance(string, str))

    def test_use_single_quotes_to_create_string_with_double_quotes(self):
        string = 'He said, "Go Away."'
        self.assertEqual('He said, "Go Away."', string)

    def test_use_double_quotes_to_create_strings_with_single_quotes(self):
        string = "Don't"
        self.assertEqual("Don't", string)

    def test_use_backslash_for_escaping_quotes_in_strings(self):
        a = "He said, \"Don't\""
        b = 'He said, "Don\'t"'
        self.assertEqual(True, (a == b))

    def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
        string = "It was the best of times,\n\
It was the worst of times."
        self.assertEqual(52, len(string))

    def test_triple_quoted_strings_can_span_lines(self):
        string = """
Howdy,
world!
"""
        self.assertEqual(15, len(string))

    def test_triple_quoted_strings_need_less_escaping(self):
        a = "Hello \"world\"."
        b = """Hello "world"."""
        self.assertEqual(True, (a == b))

    def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
        string = """Hello "world\""""
        self.assertEqual("""Hello "world\"""", string)

    def test_plus_concatenates_strings(self):
        string = "Hello, " + "world"
        self.assertEqual("Hello, world", string)

    def test_adjacent_strings_are_concatenated_automatically(self):
        string = "Hello" ", " "world"
        self.assertEqual("Hello, world", string)

    def test_plus_will_not_modify_original_strings(self):
        hi = "Hello, "
        there = "world"
        string = hi + there
        self.assertEqual("Hello, ", hi)
        self.assertEqual("world", there)

    def test_plus_equals_will_append_to_end_of_string(self):
        hi = "Hello, "
        there = "world"
        hi += there
        self.assertEqual("Hello, world", hi)

    def test_plus_equals_also_leaves_original_string_unmodified(self):
        original = "Hello, "
        hi = original
        there = "world"
        hi += there
        self.assertEqual("Hello, ", original)

    def test_most_strings_interpret_escape_characters(self):
        string = "\n"
        self.assertEqual('\n', string)
        self.assertEqual("""\n""", string)
        self.assertEqual(1, len(string))
true
true
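The koan answers above (52, 15, compile-time concatenation) can be verified outside the test harness; a standalone sketch of the two least obvious cases:

# Backslash continuation keeps the string on one logical line;
# only the explicit \n counts toward the length (25 + 1 + 26 = 52).
s = "It was the best of times,\n\
It was the worst of times."
assert len(s) == 52

# A triple-quoted string spanning lines keeps every newline,
# including the leading and trailing ones (1 + 7 + 7 = 15).
t = """
Howdy,
world!
"""
assert len(t) == 15

# Adjacent literals are concatenated at compile time.
assert "Hello" ", " "world" == "Hello, world"
print("all checks passed")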
1c433794e4e123470798b01d9ce643005c276372
3,411
py
Python
examples/color/colormaps_reference.py
mkcor/matplotlib
016a4e350cf48aa6ebb969abfcb3cee1904969e4
[ "MIT", "BSD-3-Clause" ]
16
2016-06-14T19:45:35.000Z
2020-11-30T19:02:58.000Z
Documentation/matplotlib/mpl_examples/color/colormaps_reference.py
leesavide/pythonista-docs
9ec3363f07e328bde0a58738a16907f11dfd06e1
[ "Apache-2.0" ]
1
2016-06-15T07:10:27.000Z
2016-06-15T07:10:27.000Z
Documentation/matplotlib/mpl_examples/color/colormaps_reference.py
leesavide/pythonista-docs
9ec3363f07e328bde0a58738a16907f11dfd06e1
[ "Apache-2.0" ]
null
null
null
""" Reference for colormaps included with Matplotlib. This reference example shows all colormaps included with Matplotlib. Note that any colormap listed here can be reversed by appending "_r" (e.g., "pink_r"). These colormaps are divided into the following categories: Sequential: These colormaps are approximately monochromatic colormaps varying smoothly between two color tones---usually from low saturation (e.g. white) to high saturation (e.g. a bright blue). Sequential colormaps are ideal for representing most scientific data since they show a clear progression from low-to-high values. Diverging: These colormaps have a median value (usually light in color) and vary smoothly to two different color tones at high and low values. Diverging colormaps are ideal when your data has a median value that is significant (e.g. 0, such that positive and negative values are represented by different colors of the colormap). Qualitative: These colormaps vary rapidly in color. Qualitative colormaps are useful for choosing a set of discrete colors. For example:: color_list = plt.cm.Set3(np.linspace(0, 1, 12)) gives a list of RGB colors that are good for plotting a series of lines on a dark background. Miscellaneous: Colormaps that don't fit into the categories above. """ import numpy as np import matplotlib.pyplot as plt cmaps = [('Sequential', ['binary', 'Blues', 'BuGn', 'BuPu', 'gist_yarg', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']), ('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool', 'copper', 'gist_gray', 'gist_heat', 'gray', 'hot', 'pink', 'spring', 'summer', 'winter']), ('Diverging', ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'seismic']), ('Qualitative', ['Accent', 'Dark2', 'hsv', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'spectral']), ('Miscellaneous', ['gist_earth', 'gist_ncar', 'gist_rainbow', 'gist_stern', 'jet', 'brg', 'CMRmap', 'cubehelix', 'gnuplot', 'gnuplot2', 'ocean', 'rainbow', 'terrain', 'flag', 'prism'])] nrows = max(len(cmap_list) for cmap_category, cmap_list in cmaps) gradient = np.linspace(0, 1, 256) gradient = np.vstack((gradient, gradient)) def plot_color_gradients(cmap_category, cmap_list): fig, axes = plt.subplots(nrows=nrows) fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99) axes[0].set_title(cmap_category + ' colormaps', fontsize=14) for ax, name in zip(axes, cmap_list): ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name)) pos = list(ax.get_position().bounds) x_text = pos[0] - 0.01 y_text = pos[1] + pos[3]/2. fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10) # Turn off *all* ticks & spines, not just the ones with colormaps. for ax in axes: ax.set_axis_off() for cmap_category, cmap_list in cmaps: plot_color_gradients(cmap_category, cmap_list) plt.show()
42.6375
79
0.620053
import numpy as np
import matplotlib.pyplot as plt

cmaps = [('Sequential', ['binary', 'Blues', 'BuGn', 'BuPu', 'gist_yarg',
                         'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',
                         'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
                         'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),
         ('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool', 'copper',
                             'gist_gray', 'gist_heat', 'gray', 'hot', 'pink',
                             'spring', 'summer', 'winter']),
         ('Diverging', ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
                        'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'seismic']),
         ('Qualitative', ['Accent', 'Dark2', 'hsv', 'Paired', 'Pastel1',
                          'Pastel2', 'Set1', 'Set2', 'Set3', 'spectral']),
         ('Miscellaneous', ['gist_earth', 'gist_ncar', 'gist_rainbow',
                            'gist_stern', 'jet', 'brg', 'CMRmap', 'cubehelix',
                            'gnuplot', 'gnuplot2', 'ocean', 'rainbow',
                            'terrain', 'flag', 'prism'])]

nrows = max(len(cmap_list) for cmap_category, cmap_list in cmaps)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))


def plot_color_gradients(cmap_category, cmap_list):
    fig, axes = plt.subplots(nrows=nrows)
    fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
    axes[0].set_title(cmap_category + ' colormaps', fontsize=14)

    for ax, name in zip(axes, cmap_list):
        ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
        pos = list(ax.get_position().bounds)
        x_text = pos[0] - 0.01
        y_text = pos[1] + pos[3]/2.
        fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)

    for ax in axes:
        ax.set_axis_off()

for cmap_category, cmap_list in cmaps:
    plot_color_gradients(cmap_category, cmap_list)

plt.show()
true
true
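As the record's docstring notes, appending "_r" reverses any colormap. A small independent sketch using 'Blues'/'Blues_r' (picked arbitrarily from the Sequential list):

import numpy as np
import matplotlib.pyplot as plt

gradient = np.vstack([np.linspace(0, 1, 256)] * 2)

fig, (ax1, ax2) = plt.subplots(nrows=2)
ax1.imshow(gradient, aspect='auto', cmap=plt.get_cmap('Blues'))
ax2.imshow(gradient, aspect='auto', cmap=plt.get_cmap('Blues_r'))  # reversed
for ax in (ax1, ax2):
    ax.set_axis_off()
plt.show()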
1c4337a1a0edfb82064a32346d4a3f22552cfa5d
900
py
Python
LintCode/1282.py
RENHANFEI/LintCode
d572dee248ba4c2a95b52cd737d76c7297f4e7b4
[ "CNRI-Python" ]
null
null
null
LintCode/1282.py
RENHANFEI/LintCode
d572dee248ba4c2a95b52cd737d76c7297f4e7b4
[ "CNRI-Python" ]
null
null
null
LintCode/1282.py
RENHANFEI/LintCode
d572dee248ba4c2a95b52cd737d76c7297f4e7b4
[ "CNRI-Python" ]
null
null
null
class Solution: """ @param s: a string @return: reverse only the vowels of a string """ def reverseVowels(self, s): vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'] vowels_positions = [] vowels_in_string = [] for i, ss in enumerate(s): if ss in vowels: vowels_positions.append(i) vowels_in_string.append(ss) len_vowels = len(vowels_positions) if len_vowels <= 1: return s for i, v1 in enumerate(vowels_in_string[0:len_vowels // 2]): pos1 = vowels_positions[i] pos2 = vowels_positions[len_vowels - i -1 ] v2 = vowels_in_string[len_vowels - i - 1] s = s[:pos1] + v2 + s[pos1 + 1:] s = s[:pos2] + v1 + s[pos2 + 1:] return s
28.125
68
0.465556
class Solution:
    def reverseVowels(self, s):
        vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
        vowels_positions = []
        vowels_in_string = []
        for i, ss in enumerate(s):
            if ss in vowels:
                vowels_positions.append(i)
                vowels_in_string.append(ss)
        len_vowels = len(vowels_positions)
        if len_vowels <= 1:
            return s
        for i, v1 in enumerate(vowels_in_string[0:len_vowels // 2]):
            pos1 = vowels_positions[i]
            pos2 = vowels_positions[len_vowels - i - 1]
            v2 = vowels_in_string[len_vowels - i - 1]
            s = s[:pos1] + v2 + s[pos1 + 1:]
            s = s[:pos2] + v1 + s[pos2 + 1:]
        return s
true
true
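The record's solution rebuilds the string twice per swap, which is quadratic on long inputs. A common linear-time alternative, sketched here for comparison (not the record's code):

def reverse_vowels(s):
    vowels = set('aeiouAEIOU')
    chars = list(s)
    i, j = 0, len(chars) - 1
    while i < j:
        if chars[i] not in vowels:
            i += 1
        elif chars[j] not in vowels:
            j -= 1
        else:
            # Both pointers sit on vowels: swap and advance both.
            chars[i], chars[j] = chars[j], chars[i]
            i, j = i + 1, j - 1
    return ''.join(chars)

assert reverse_vowels("hello") == "holle"
assert reverse_vowels("leetcode") == "leotcede"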
1c433806301ab740218f064066f85c2ca5542cfa
1,283
py
Python
custom_components/volkswagencarnet/switch.py
hwikene/homeassistant-volkswagencarnet
9eb180c5246abd1d52de3243fdf96cfdb5e4d776
[ "Apache-2.0" ]
null
null
null
custom_components/volkswagencarnet/switch.py
hwikene/homeassistant-volkswagencarnet
9eb180c5246abd1d52de3243fdf96cfdb5e4d776
[ "Apache-2.0" ]
null
null
null
custom_components/volkswagencarnet/switch.py
hwikene/homeassistant-volkswagencarnet
9eb180c5246abd1d52de3243fdf96cfdb5e4d776
[ "Apache-2.0" ]
null
null
null
""" Support for Volkswagen Carnet Platform """ import logging from homeassistant.helpers.entity import ToggleEntity from . import DATA_KEY, VolkswagenEntity _LOGGER = logging.getLogger(__name__) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """ Setup the volkswagen switch.""" if discovery_info is None: return async_add_entities([VolkswagenSwitch(hass.data[DATA_KEY], *discovery_info)]) class VolkswagenSwitch(VolkswagenEntity, ToggleEntity): """Representation of a Volkswagen Carnet Switch.""" @property def is_on(self): """Return true if switch is on.""" _LOGGER.debug("Getting state of %s" % self.instrument.attr) return self.instrument.state async def async_turn_on(self, **kwargs): """Turn the switch on.""" _LOGGER.debug("Turning ON %s." % self.instrument.attr) await self.instrument.turn_on() self.async_write_ha_state() async def async_turn_off(self, **kwargs): """Turn the switch off.""" _LOGGER.debug("Turning OFF %s." % self.instrument.attr) await self.instrument.turn_off() self.async_write_ha_state() @property def assumed_state(self): return self.instrument.assumed_state
29.159091
86
0.691348
import logging

from homeassistant.helpers.entity import ToggleEntity

from . import DATA_KEY, VolkswagenEntity

_LOGGER = logging.getLogger(__name__)


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    if discovery_info is None:
        return
    async_add_entities([VolkswagenSwitch(hass.data[DATA_KEY], *discovery_info)])


class VolkswagenSwitch(VolkswagenEntity, ToggleEntity):

    @property
    def is_on(self):
        _LOGGER.debug("Getting state of %s" % self.instrument.attr)
        return self.instrument.state

    async def async_turn_on(self, **kwargs):
        _LOGGER.debug("Turning ON %s." % self.instrument.attr)
        await self.instrument.turn_on()
        self.async_write_ha_state()

    async def async_turn_off(self, **kwargs):
        _LOGGER.debug("Turning OFF %s." % self.instrument.attr)
        await self.instrument.turn_off()
        self.async_write_ha_state()

    @property
    def assumed_state(self):
        return self.instrument.assumed_state
true
true
1c4338ae38a6705f96e83955c78a4df816be2bda
915
py
Python
git_contributions_importer/generators/Generator.py
kennydukor/mock-repo
cb780885b32b48171285c7502b616635546d23b8
[ "MIT" ]
null
null
null
git_contributions_importer/generators/Generator.py
kennydukor/mock-repo
cb780885b32b48171285c7502b616635546d23b8
[ "MIT" ]
null
null
null
git_contributions_importer/generators/Generator.py
kennydukor/mock-repo
cb780885b32b48171285c7502b616635546d23b8
[ "MIT" ]
null
null
null
#!/usr/bin/python3

import random


class Generator:

    def __init__(self):
        pass

    def random_string(self, length=10):
        return ''.join([chr(int(random.random() * (ord('z') - ord('a'))) + ord('a')) for c in range(length)])

    def random_phrase(self, length=10, word_length=10):
        return ' '.join([self.random_string(length=int(word_length)) for _ in range(int(length))])

    '''
    insert num lines of code/text inside content.
    content is a list of strings that represent the file
    '''
    def insert(self, content, num):
        for i in range(num):
            content.append(self.random_phrase(random.random() * 10 + 1))

    '''
    delete num lines of code/text from content.
    content is a list of strings that represent the file
    '''
    def delete(self, content, num):
        for i in range(min(num, len(content))):
            content.pop()
32.678571
110
0.602186
import random


class Generator:

    def __init__(self):
        pass

    def random_string(self, length=10):
        return ''.join([chr(int(random.random() * (ord('z') - ord('a'))) + ord('a')) for c in range(length)])

    def random_phrase(self, length=10, word_length=10):
        return ' '.join([self.random_string(length=int(word_length)) for _ in range(int(length))])

    def insert(self, content, num):
        for i in range(num):
            content.append(self.random_phrase(random.random() * 10 + 1))

    def delete(self, content, num):
        for i in range(min(num, len(content))):
            content.pop()
true
true
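A quick standalone check of how `insert` and `delete` mutate the `content` list in place, assuming the `Generator` class above is in scope (the seed only makes the demo reproducible):

import random

random.seed(42)  # reproducibility for the demo only

gen = Generator()
content = ["first line"]
gen.insert(content, 3)   # appends 3 random phrases
print(len(content))      # -> 4
gen.delete(content, 2)   # pops 2 lines off the end
print(content[0])        # -> "first line" is untouched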
1c43394790c2be2b49bba1d33554a2876abc4543
529
py
Python
diventi/landing/migrations/0078_auto_20200215_1909.py
flavoi/diven
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
[ "Apache-2.0" ]
2
2019-06-27T16:00:17.000Z
2020-08-14T07:46:05.000Z
diventi/landing/migrations/0078_auto_20200215_1909.py
flavoi/diven
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
[ "Apache-2.0" ]
26
2020-02-15T22:39:35.000Z
2022-02-19T21:09:01.000Z
diventi/landing/migrations/0078_auto_20200215_1909.py
flavoi/diven
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
[ "Apache-2.0" ]
1
2021-11-12T22:30:15.000Z
2021-11-12T22:30:15.000Z
# Generated by Django 2.2.10 on 2020-02-15 18:09

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('landing', '0077_auto_20190618_0829'),
    ]

    operations = [
        migrations.AlterField(
            model_name='section',
            name='featured_template',
            field=models.CharField(choices=[('standard_header.html', 'standard header'), ('search_header.html', 'search header')], max_length=50, verbose_name='featured template'),
        ),
    ]
27.842105
180
0.644612
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('landing', '0077_auto_20190618_0829'),
    ]

    operations = [
        migrations.AlterField(
            model_name='section',
            name='featured_template',
            field=models.CharField(choices=[('standard_header.html', 'standard header'), ('search_header.html', 'search header')], max_length=50, verbose_name='featured template'),
        ),
    ]
true
true
1c433ac930cb19d2d759684e1136fef5a382b3db
3,261
py
Python
unikastaroak/settings.py
bipoza/unikastaroak
4044d3ff3eaa4172275a8f46d9765a3840a51d7b
[ "Apache-2.0" ]
null
null
null
unikastaroak/settings.py
bipoza/unikastaroak
4044d3ff3eaa4172275a8f46d9765a3840a51d7b
[ "Apache-2.0" ]
null
null
null
unikastaroak/settings.py
bipoza/unikastaroak
4044d3ff3eaa4172275a8f46d9765a3840a51d7b
[ "Apache-2.0" ]
null
null
null
""" Django settings for unikastaroak project. Generated by 'django-admin startproject' using Django 1.11.7. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ #coding=utf-8 import os SECURE_SSL_REDIRECT = True # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '1(o*qoluw*3i1brm0(+lum22k7v@jc+l(com94g5up$-*e_9bd' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'tutorials', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'unikastaroak.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'unikastaroak.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'eu-en' TIME_ZONE = 'Europe/Madrid' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') LOGIN_REDIRECT_URL = '/'
26.088
91
0.701625
import os

SECURE_SSL_REDIRECT = True

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

SECRET_KEY = '1(o*qoluw*3i1brm0(+lum22k7v@jc+l(com94g5up$-*e_9bd'

DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'tutorials',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'unikastaroak.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'unikastaroak.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'eu-en'

TIME_ZONE = 'Europe/Madrid'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

LOGIN_REDIRECT_URL = '/'
true
true
1c433b4735f0971086fcc7a912331746ead9bc23
507
py
Python
clear_pay/cloudfix.py
utterclarity/clear-pay
45632839d9bca21e8f5760e683f027e72b3f9abf
[ "MIT" ]
null
null
null
clear_pay/cloudfix.py
utterclarity/clear-pay
45632839d9bca21e8f5760e683f027e72b3f9abf
[ "MIT" ]
null
null
null
clear_pay/cloudfix.py
utterclarity/clear-pay
45632839d9bca21e8f5760e683f027e72b3f9abf
[ "MIT" ]
1
2020-01-05T21:34:25.000Z
2020-01-05T21:34:25.000Z
# -*- coding: utf-8 -*-


class CloudFix(object):
    """
    Fixes the REMOTE_ADDR given if the app is run behind CloudFlare w/ nginx
    setting CF-Connecting-IP.
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        connecting_ip = environ.get('HTTP_CF_CONNECTING_IP', '')
        if connecting_ip:
            environ['REMOTE_ADDR'] = connecting_ip
        return self.app(environ, start_response)

# app.wsgi_app = CloudFix(app.wsgi_app)
28.166667
64
0.64497
class CloudFix(object):

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        connecting_ip = environ.get('HTTP_CF_CONNECTING_IP', '')
        if connecting_ip:
            environ['REMOTE_ADDR'] = connecting_ip
        return self.app(environ, start_response)
true
true
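The commented-out last line of the record hints at the intended wiring; a hedged sketch with a minimal Flask app, assuming `CloudFix` is in scope (the route is invented):

from flask import Flask, request

app = Flask(__name__)
app.wsgi_app = CloudFix(app.wsgi_app)  # rewrite REMOTE_ADDR before Flask sees it

@app.route("/")
def index():
    # Behind CloudFlare, this now reflects the CF-Connecting-IP header.
    return "client: %s" % request.remote_addr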
1c433bc11c80dadd9785e0089ecbc09c6ace7239
1,465
py
Python
test.py
martinwe001/U-Net-Building-Segmentation
8bd32a8a2bca25a4c0bd9b22fee5a09d77bffbad
[ "MIT" ]
3
2021-12-14T09:12:08.000Z
2022-03-14T11:31:13.000Z
test.py
martinwe001/CNNs-for-Building-Segmentation
8bd32a8a2bca25a4c0bd9b22fee5a09d77bffbad
[ "MIT" ]
null
null
null
test.py
martinwe001/CNNs-for-Building-Segmentation
8bd32a8a2bca25a4c0bd9b22fee5a09d77bffbad
[ "MIT" ]
null
null
null
import os os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" import tensorflow as tf import numpy as np import cv2 from glob import glob from tqdm import tqdm import tensorflow_addons as tfa if __name__ == "__main__": """ Load the test images """ test_images = glob("building-segmentation/test/test_64/*") """ Load the model """ model = 'unet' epochs = 300 res = 64 model = tf.keras.models.load_model(f"{model}_models/{model}_{epochs}_epochs_{res}.h5", custom_objects={'MaxUnpooling2D': tfa.layers.MaxUnpooling2D}) for path in tqdm(test_images, total=len(test_images)): x = cv2.imread(path, cv2.IMREAD_COLOR) original_image = x h, w, _ = x.shape x = cv2.resize(x, (64, 64)) #x = x/255.0 x = x.astype(np.float32) x = np.expand_dims(x, axis=0) pred_mask = model.predict(x)[0] pred_mask = np.concatenate( [ pred_mask, pred_mask, pred_mask ], axis=2) pred_mask = (pred_mask > 0.5) * 255 pred_mask = pred_mask.astype(np.float32) pred_mask = cv2.resize(pred_mask, (w, h)) original_image = original_image.astype(np.float32) alpha_image = 0.8 alpha_mask = 1 cv2.addWeighted(pred_mask, alpha_mask, original_image, alpha_image, 0, original_image) name = path.split("/")[-1] cv2.imwrite(f"save_images/{name}", original_image)
27.641509
152
0.606826
import os os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" import tensorflow as tf import numpy as np import cv2 from glob import glob from tqdm import tqdm import tensorflow_addons as tfa if __name__ == "__main__": test_images = glob("building-segmentation/test/test_64/*") model = 'unet' epochs = 300 res = 64 model = tf.keras.models.load_model(f"{model}_models/{model}_{epochs}_epochs_{res}.h5", custom_objects={'MaxUnpooling2D': tfa.layers.MaxUnpooling2D}) for path in tqdm(test_images, total=len(test_images)): x = cv2.imread(path, cv2.IMREAD_COLOR) original_image = x h, w, _ = x.shape x = cv2.resize(x, (64, 64)) x = x.astype(np.float32) x = np.expand_dims(x, axis=0) pred_mask = model.predict(x)[0] pred_mask = np.concatenate( [ pred_mask, pred_mask, pred_mask ], axis=2) pred_mask = (pred_mask > 0.5) * 255 pred_mask = pred_mask.astype(np.float32) pred_mask = cv2.resize(pred_mask, (w, h)) original_image = original_image.astype(np.float32) alpha_image = 0.8 alpha_mask = 1 cv2.addWeighted(pred_mask, alpha_mask, original_image, alpha_image, 0, original_image) name = path.split("/")[-1] cv2.imwrite(f"save_images/{name}", original_image)
true
true
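The loop's post-processing (binarize, triple the channel, alpha-blend) can be exercised without the model or image files; a tiny numpy sketch with an invented 4x4 "prediction":

import numpy as np

pred = np.random.rand(4, 4, 1).astype(np.float32)   # stand-in for model.predict(...)[0]
mask = np.concatenate([pred, pred, pred], axis=2)   # 1 channel -> 3 channels
mask = ((mask > 0.5) * 255).astype(np.float32)      # binarize to {0, 255}

image = np.full((4, 4, 3), 128, dtype=np.float32)   # stand-in for the input photo
blended = 1.0 * mask + 0.8 * image                  # same weights as cv2.addWeighted above
print(blended.shape, blended.max())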
1c433bffa908cc42bff4c65440b2b56678682692
1,975
py
Python
onnx_export/export_mobilenetv3_block.py
AndrewZhaoLuo/OnnxSnippets
40e2231da8dc3d9152bc71daff5f4b154b97a5e4
[ "BSD-3-Clause" ]
1
2022-01-08T01:38:57.000Z
2022-01-08T01:38:57.000Z
onnx_export/export_mobilenetv3_block.py
AndrewZhaoLuo/OnnxSnippets
40e2231da8dc3d9152bc71daff5f4b154b97a5e4
[ "BSD-3-Clause" ]
null
null
null
onnx_export/export_mobilenetv3_block.py
AndrewZhaoLuo/OnnxSnippets
40e2231da8dc3d9152bc71daff5f4b154b97a5e4
[ "BSD-3-Clause" ]
null
null
null
import torch

from pytorch.common.blocks import mobilenetv3_block
from onnx_export import common


class ExportMobilenetV3:
    default_conditions = {
        "in_channels": 64,
        "spatial_dimension": 128,
    }

    sequential_conditions = {
        "in_channels": [32, 64, 128],
    }

    def get_all_conditions(self):
        conditions = set()
        for condition_name in self.sequential_conditions:
            for v in self.sequential_conditions[condition_name]:
                new_condition = self.default_conditions.copy()
                new_condition[condition_name] = v
                conditions.add(tuple(new_condition.items()))
        return conditions

    def export_model(
        self, torch_model, ndim, features_in, spatial_dimensions, name, dir="export/"
    ):
        dims = [1, features_in] + [spatial_dimensions] * ndim

        # Input to the model
        x = torch.randn(*dims, requires_grad=True)
        common.export_model(
            torch_model,
            x,
            name,
            dir=dir,
            dynamic_axes={
                "input": {
                    0: "batch_size",
                    2: "height_in",
                    3: "width_in",
                },  # variable length axes
                "output": {0: "batch_size", 2: "height_out", 3: "width_out"},
            },
        )

    def export_mobilenetv3_block(
        self,
        in_channels,
        spatial_dimension,
        dir="./export",
    ):
        model = mobilenetv3_block.MobilenetV3Block(in_channels)
        name = f"mobilenetv3_block_inc={in_channels}_outc={in_channels}"
        self.export_model(model, 2, in_channels, spatial_dimension, name, dir=dir)


if __name__ == "__main__":
    exporter = ExportMobilenetV3()
    conds = exporter.get_all_conditions()
    for cond in conds:
        print("Exporting:", cond)
        cond = dict(cond)
        exporter.export_mobilenetv3_block(**cond, dir="export/mobilenetv3_block")
28.623188
85
0.585316
import torch

from pytorch.common.blocks import mobilenetv3_block
from onnx_export import common


class ExportMobilenetV3:
    default_conditions = {
        "in_channels": 64,
        "spatial_dimension": 128,
    }

    sequential_conditions = {
        "in_channels": [32, 64, 128],
    }

    def get_all_conditions(self):
        conditions = set()
        for condition_name in self.sequential_conditions:
            for v in self.sequential_conditions[condition_name]:
                new_condition = self.default_conditions.copy()
                new_condition[condition_name] = v
                conditions.add(tuple(new_condition.items()))
        return conditions

    def export_model(
        self, torch_model, ndim, features_in, spatial_dimensions, name, dir="export/"
    ):
        dims = [1, features_in] + [spatial_dimensions] * ndim

        x = torch.randn(*dims, requires_grad=True)
        common.export_model(
            torch_model,
            x,
            name,
            dir=dir,
            dynamic_axes={
                "input": {
                    0: "batch_size",
                    2: "height_in",
                    3: "width_in",
                },
                "output": {0: "batch_size", 2: "height_out", 3: "width_out"},
            },
        )

    def export_mobilenetv3_block(
        self,
        in_channels,
        spatial_dimension,
        dir="./export",
    ):
        model = mobilenetv3_block.MobilenetV3Block(in_channels)
        name = f"mobilenetv3_block_inc={in_channels}_outc={in_channels}"
        self.export_model(model, 2, in_channels, spatial_dimension, name, dir=dir)


if __name__ == "__main__":
    exporter = ExportMobilenetV3()
    conds = exporter.get_all_conditions()
    for cond in conds:
        print("Exporting:", cond)
        cond = dict(cond)
        exporter.export_mobilenetv3_block(**cond, dir="export/mobilenetv3_block")
true
true
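`common.export_model` is internal to the repo, but it presumably wraps `torch.onnx.export`; a minimal hedged sketch with the same dynamic-axes scheme (the stand-in Conv2d block and the output file name are invented):

import torch

model = torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)  # stand-in for a MobilenetV3 block
x = torch.randn(1, 64, 128, 128)

torch.onnx.export(
    model, x, "block.onnx",
    input_names=["input"], output_names=["output"],
    dynamic_axes={
        "input": {0: "batch_size", 2: "height_in", 3: "width_in"},
        "output": {0: "batch_size", 2: "height_out", 3: "width_out"},
    },
)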
1c433e01186700b7be39cc948171e3292588ea2d
302
py
Python
34/digit_factorials.py
redfast00/euler
98fc49a1fcb8b49415cc4384952a6447378bd4f4
[ "MIT" ]
null
null
null
34/digit_factorials.py
redfast00/euler
98fc49a1fcb8b49415cc4384952a6447378bd4f4
[ "MIT" ]
null
null
null
34/digit_factorials.py
redfast00/euler
98fc49a1fcb8b49415cc4384952a6447378bd4f4
[ "MIT" ]
null
null
null
import math


def is_digit_factorial_sum(number):
    return number == sum((math.factorial(int(i)) for i in str(number)))


def find_digit_factorials_below(number):
    for i in range(10, number):
        if is_digit_factorial_sum(i):
            yield(i)

print(sum(find_digit_factorials_below(10**7)))
25.166667
71
0.701987
import math


def is_digit_factorial_sum(number):
    return number == sum((math.factorial(int(i)) for i in str(number)))


def find_digit_factorials_below(number):
    for i in range(10, number):
        if is_digit_factorial_sum(i):
            yield(i)

print(sum(find_digit_factorials_below(10**7)))
true
true
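A cheap sanity check of the predicate on the two known curious numbers before paying for the full 10**7 scan, assuming the functions above are in scope:

assert is_digit_factorial_sum(145)      # 1! + 4! + 5! = 1 + 24 + 120 = 145
assert is_digit_factorial_sum(40585)    # 24 + 1 + 120 + 40320 + 120 = 40585
assert not is_digit_factorial_sum(100)
print(sum(find_digit_factorials_below(10**5)))  # -> 40730 (= 145 + 40585)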
1c433fa7868fe1c11080ceaef1de8eec4bb09f43
974
py
Python
applications/schemas/admin_power.py
jobeckham7/pear-admin-flask
c2042d5f8054be289a363283345ea05f27e00ec4
[ "MIT" ]
null
null
null
applications/schemas/admin_power.py
jobeckham7/pear-admin-flask
c2042d5f8054be289a363283345ea05f27e00ec4
[ "MIT" ]
null
null
null
applications/schemas/admin_power.py
jobeckham7/pear-admin-flask
c2042d5f8054be289a363283345ea05f27e00ec4
[ "MIT" ]
null
null
null
from applications.extensions import ma
from marshmallow import fields


# Serialization schema for the permission (power) model
class PowerSchema(ma.Schema):
    id = fields.Integer()
    title = fields.Str(attribute="name")
    type = fields.Str()
    code = fields.Str()
    href = fields.Str(attribute="url")
    openType = fields.Str(attribute="open_type")
    parent_id = fields.Integer()
    icon = fields.Str()
    sort = fields.Integer()
    create_time = fields.DateTime()
    update_time = fields.DateTime()
    enable = fields.Integer()


class PowerSchema2(ma.Schema):  # serialization schema
    powerId = fields.Str(attribute="id")
    powerName = fields.Str(attribute="name")
    powerType = fields.Str(attribute="type")
    powerUrl = fields.Str(attribute="url")
    openType = fields.Str(attribute="open_type")
    parentId = fields.Str(attribute="parent_id")
    icon = fields.Str()
    sort = fields.Integer()
    create_time = fields.DateTime()
    update_time = fields.DateTime()
    enable = fields.Integer()
29.515152
48
0.679671
from applications.extensions import ma
from marshmallow import fields


class PowerSchema(ma.Schema):
    id = fields.Integer()
    title = fields.Str(attribute="name")
    type = fields.Str()
    code = fields.Str()
    href = fields.Str(attribute="url")
    openType = fields.Str(attribute="open_type")
    parent_id = fields.Integer()
    icon = fields.Str()
    sort = fields.Integer()
    create_time = fields.DateTime()
    update_time = fields.DateTime()
    enable = fields.Integer()


class PowerSchema2(ma.Schema):
    powerId = fields.Str(attribute="id")
    powerName = fields.Str(attribute="name")
    powerType = fields.Str(attribute="type")
    powerUrl = fields.Str(attribute="url")
    openType = fields.Str(attribute="open_type")
    parentId = fields.Str(attribute="parent_id")
    icon = fields.Str()
    sort = fields.Integer()
    create_time = fields.DateTime()
    update_time = fields.DateTime()
    enable = fields.Integer()
true
true
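A hedged sketch of how such a schema renames attributes on dump, assuming `PowerSchema2` is importable; the `SimpleNamespace` below is a stand-in, not the project's real Power model:

from types import SimpleNamespace

power = SimpleNamespace(id=1, name="user:list", type="menu", code="u1",
                        url="/users", open_type="_self", parent_id=0,
                        icon="layui-icon", sort=1, enable=1,
                        create_time=None, update_time=None)

schema = PowerSchema2()
print(schema.dump(power))
# attribute="name" means the model's `name` shows up under the key `powerName`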
1c4340253aae3fe1b54ac0df1ebeba8097c29baa
9,152
py
Python
tensorlayer/app/computer_vision_object_detection/yolov4.py
dalonsoa/tensorlayer
066c09be1eea1b49914b2a6e806329a599edce58
[ "Apache-2.0" ]
null
null
null
tensorlayer/app/computer_vision_object_detection/yolov4.py
dalonsoa/tensorlayer
066c09be1eea1b49914b2a6e806329a599edce58
[ "Apache-2.0" ]
null
null
null
tensorlayer/app/computer_vision_object_detection/yolov4.py
dalonsoa/tensorlayer
066c09be1eea1b49914b2a6e806329a599edce58
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""YOLOv4 for MS COCO.

# Reference:
- [tensorflow-yolov4-tflite](
    https://github.com/hunglc007/tensorflow-yolov4-tflite)

"""

import tensorflow as tf
import numpy as np
import tensorlayer as tl
from tensorlayer.activation import mish
from tensorlayer.layers import Conv2d, MaxPool2d, BatchNorm2d, ZeroPad2d, UpSampling2d, Concat, Input, Elementwise
from tensorlayer.models import Model
from tensorlayer import logging

INPUT_SIZE = 416
weights_url = {'link': 'https://pan.baidu.com/s/1MC1dmEwpxsdgHO1MZ8fYRQ', 'password': 'idsz'}


def upsample(input_layer):
    return UpSampling2d(scale=2)(input_layer)


def convolutional(
    input_layer, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky', name=None
):
    if downsample:
        input_layer = ZeroPad2d(((1, 0), (1, 0)))(input_layer)
        padding = 'VALID'
        strides = 2
    else:
        strides = 1
        padding = 'SAME'
    if bn:
        b_init = None
    else:
        b_init = tl.initializers.constant(value=0.0)

    conv = Conv2d(
        n_filter=filters_shape[-1], filter_size=(filters_shape[0], filters_shape[1]), strides=(strides, strides),
        padding=padding, b_init=b_init, name=name
    )(input_layer)

    if bn:
        if activate == True:
            if activate_type == 'leaky':
                conv = BatchNorm2d(act='lrelu0.1')(conv)
            elif activate_type == 'mish':
                conv = BatchNorm2d(act=mish)(conv)
        else:
            conv = BatchNorm2d()(conv)
    return conv


def residual_block(input_layer, input_channel, filter_num1, filter_num2, activate_type='leaky'):
    short_cut = input_layer
    conv = convolutional(input_layer, filters_shape=(1, 1, input_channel, filter_num1), activate_type=activate_type)
    conv = convolutional(conv, filters_shape=(3, 3, filter_num1, filter_num2), activate_type=activate_type)
    residual_output = Elementwise(tf.add)([short_cut, conv])
    return residual_output


def cspdarknet53(input_data=None):
    input_data = convolutional(input_data, (3, 3, 3, 32), activate_type='mish')
    input_data = convolutional(input_data, (3, 3, 32, 64), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 64, 64), activate_type='mish', name='conv_rote_block_1')
    input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')
    for i in range(1):
        input_data = residual_block(input_data, 64, 32, 64, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 128, 64), activate_type='mish')
    input_data = convolutional(input_data, (3, 3, 64, 128), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 128, 64), activate_type='mish', name='conv_rote_block_2')
    input_data = convolutional(input_data, (1, 1, 128, 64), activate_type='mish')
    for i in range(2):
        input_data = residual_block(input_data, 64, 64, 64, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 128, 128), activate_type='mish')
    input_data = convolutional(input_data, (3, 3, 128, 256), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 256, 128), activate_type='mish', name='conv_rote_block_3')
    input_data = convolutional(input_data, (1, 1, 256, 128), activate_type='mish')
    for i in range(8):
        input_data = residual_block(input_data, 128, 128, 128, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 128, 128), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 256, 256), activate_type='mish')
    route_1 = input_data
    input_data = convolutional(input_data, (3, 3, 256, 512), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 512, 256), activate_type='mish', name='conv_rote_block_4')
    input_data = convolutional(input_data, (1, 1, 512, 256), activate_type='mish')
    for i in range(8):
        input_data = residual_block(input_data, 256, 256, 256, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 256, 256), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 512, 512), activate_type='mish')
    route_2 = input_data
    input_data = convolutional(input_data, (3, 3, 512, 1024), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 1024, 512), activate_type='mish', name='conv_rote_block_5')
    input_data = convolutional(input_data, (1, 1, 1024, 512), activate_type='mish')
    for i in range(4):
        input_data = residual_block(input_data, 512, 512, 512, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 512, 512), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 1024, 1024), activate_type='mish')
    input_data = convolutional(input_data, (1, 1, 1024, 512))
    input_data = convolutional(input_data, (3, 3, 512, 1024))
    input_data = convolutional(input_data, (1, 1, 1024, 512))

    maxpool1 = MaxPool2d(filter_size=(13, 13), strides=(1, 1))(input_data)
    maxpool2 = MaxPool2d(filter_size=(9, 9), strides=(1, 1))(input_data)
    maxpool3 = MaxPool2d(filter_size=(5, 5), strides=(1, 1))(input_data)
    input_data = Concat()([maxpool1, maxpool2, maxpool3, input_data])

    input_data = convolutional(input_data, (1, 1, 2048, 512))
    input_data = convolutional(input_data, (3, 3, 512, 1024))
    input_data = convolutional(input_data, (1, 1, 1024, 512))

    return route_1, route_2, input_data


def YOLOv4(NUM_CLASS, pretrained=False):
    input_layer = Input([None, INPUT_SIZE, INPUT_SIZE, 3])
    route_1, route_2, conv = cspdarknet53(input_layer)

    route = conv
    conv = convolutional(conv, (1, 1, 512, 256))
    conv = upsample(conv)
    route_2 = convolutional(route_2, (1, 1, 512, 256), name='conv_yolo_1')
    conv = Concat()([route_2, conv])

    conv = convolutional(conv, (1, 1, 512, 256))
    conv = convolutional(conv, (3, 3, 256, 512))
    conv = convolutional(conv, (1, 1, 512, 256))
    conv = convolutional(conv, (3, 3, 256, 512))
    conv = convolutional(conv, (1, 1, 512, 256))

    route_2 = conv
    conv = convolutional(conv, (1, 1, 256, 128))
    conv = upsample(conv)
    route_1 = convolutional(route_1, (1, 1, 256, 128), name='conv_yolo_2')
    conv = Concat()([route_1, conv])

    conv = convolutional(conv, (1, 1, 256, 128))
    conv = convolutional(conv, (3, 3, 128, 256))
    conv = convolutional(conv, (1, 1, 256, 128))
    conv = convolutional(conv, (3, 3, 128, 256))
    conv = convolutional(conv, (1, 1, 256, 128))

    route_1 = conv
    conv = convolutional(conv, (3, 3, 128, 256), name='conv_route_1')
    conv_sbbox = convolutional(conv, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)

    conv = convolutional(route_1, (3, 3, 128, 256), downsample=True, name='conv_route_2')
    conv = Concat()([conv, route_2])

    conv = convolutional(conv, (1, 1, 512, 256))
    conv = convolutional(conv, (3, 3, 256, 512))
    conv = convolutional(conv, (1, 1, 512, 256))
    conv = convolutional(conv, (3, 3, 256, 512))
    conv = convolutional(conv, (1, 1, 512, 256))

    route_2 = conv
    conv = convolutional(conv, (3, 3, 256, 512), name='conv_route_3')
    conv_mbbox = convolutional(conv, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)

    conv = convolutional(route_2, (3, 3, 256, 512), downsample=True, name='conv_route_4')
    conv = Concat()([conv, route])

    conv = convolutional(conv, (1, 1, 1024, 512))
    conv = convolutional(conv, (3, 3, 512, 1024))
    conv = convolutional(conv, (1, 1, 1024, 512))
    conv = convolutional(conv, (3, 3, 512, 1024))
    conv = convolutional(conv, (1, 1, 1024, 512))
    conv = convolutional(conv, (3, 3, 512, 1024))
    conv_lbbox = convolutional(conv, (1, 1, 1024, 3 * (NUM_CLASS + 5)), activate=False, bn=False)

    network = Model(input_layer, [conv_sbbox, conv_mbbox, conv_lbbox])

    if pretrained:
        restore_params(network, model_path='model/model.npz')

    return network


def restore_params(network, model_path='models.npz'):
    logging.info("Restore pre-trained weights")

    try:
        npz = np.load(model_path, allow_pickle=True)
    except:
        print("Download the model file, placed in the /model ")
        print("Weights download: ", weights_url['link'], "password:", weights_url['password'])

    txt_path = 'model/yolov4_config.txt'
    f = open(txt_path, "r")
    line = f.readlines()
    for i in range(len(line)):
        network.all_weights[i].assign(npz[line[i].strip()])
        logging.info(" Loading weights %s in %s" % (network.all_weights[i].shape, network.all_weights[i].name))
41.225225
116
0.666412
import tensorflow as tf
import numpy as np
import tensorlayer as tl
from tensorlayer.activation import mish
from tensorlayer.layers import Conv2d, MaxPool2d, BatchNorm2d, ZeroPad2d, UpSampling2d, Concat, Input, Elementwise
from tensorlayer.models import Model
from tensorlayer import logging

INPUT_SIZE = 416
weights_url = {'link': 'https://pan.baidu.com/s/1MC1dmEwpxsdgHO1MZ8fYRQ', 'password': 'idsz'}


def upsample(input_layer):
    return UpSampling2d(scale=2)(input_layer)


def convolutional(
    input_layer, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky', name=None
):
    if downsample:
        input_layer = ZeroPad2d(((1, 0), (1, 0)))(input_layer)
        padding = 'VALID'
        strides = 2
    else:
        strides = 1
        padding = 'SAME'
    if bn:
        b_init = None
    else:
        b_init = tl.initializers.constant(value=0.0)

    conv = Conv2d(
        n_filter=filters_shape[-1], filter_size=(filters_shape[0], filters_shape[1]), strides=(strides, strides),
        padding=padding, b_init=b_init, name=name
    )(input_layer)

    if bn:
        if activate == True:
            if activate_type == 'leaky':
                conv = BatchNorm2d(act='lrelu0.1')(conv)
            elif activate_type == 'mish':
                conv = BatchNorm2d(act=mish)(conv)
        else:
            conv = BatchNorm2d()(conv)
    return conv


def residual_block(input_layer, input_channel, filter_num1, filter_num2, activate_type='leaky'):
    short_cut = input_layer
    conv = convolutional(input_layer, filters_shape=(1, 1, input_channel, filter_num1), activate_type=activate_type)
    conv = convolutional(conv, filters_shape=(3, 3, filter_num1, filter_num2), activate_type=activate_type)
    residual_output = Elementwise(tf.add)([short_cut, conv])
    return residual_output


def cspdarknet53(input_data=None):
    input_data = convolutional(input_data, (3, 3, 3, 32), activate_type='mish')
    input_data = convolutional(input_data, (3, 3, 32, 64), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 64, 64), activate_type='mish', name='conv_rote_block_1')
    input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')
    for i in range(1):
        input_data = residual_block(input_data, 64, 32, 64, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 128, 64), activate_type='mish')
    input_data = convolutional(input_data, (3, 3, 64, 128), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 128, 64), activate_type='mish', name='conv_rote_block_2')
    input_data = convolutional(input_data, (1, 1, 128, 64), activate_type='mish')
    for i in range(2):
        input_data = residual_block(input_data, 64, 64, 64, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 128, 128), activate_type='mish')
    input_data = convolutional(input_data, (3, 3, 128, 256), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 256, 128), activate_type='mish', name='conv_rote_block_3')
    input_data = convolutional(input_data, (1, 1, 256, 128), activate_type='mish')
    for i in range(8):
        input_data = residual_block(input_data, 128, 128, 128, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 128, 128), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 256, 256), activate_type='mish')
    route_1 = input_data
    input_data = convolutional(input_data, (3, 3, 256, 512), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 512, 256), activate_type='mish', name='conv_rote_block_4')
    input_data = convolutional(input_data, (1, 1, 512, 256), activate_type='mish')
    for i in range(8):
        input_data = residual_block(input_data, 256, 256, 256, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 256, 256), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 512, 512), activate_type='mish')
    route_2 = input_data
    input_data = convolutional(input_data, (3, 3, 512, 1024), downsample=True, activate_type='mish')
    route = input_data
    route = convolutional(route, (1, 1, 1024, 512), activate_type='mish', name='conv_rote_block_5')
    input_data = convolutional(input_data, (1, 1, 1024, 512), activate_type='mish')
    for i in range(4):
        input_data = residual_block(input_data, 512, 512, 512, activate_type="mish")
    input_data = convolutional(input_data, (1, 1, 512, 512), activate_type='mish')
    input_data = Concat()([input_data, route])
    input_data = convolutional(input_data, (1, 1, 1024, 1024), activate_type='mish')
    input_data = convolutional(input_data, (1, 1, 1024, 512))
    input_data = convolutional(input_data, (3, 3, 512, 1024))
    input_data = convolutional(input_data, (1, 1, 1024, 512))

    maxpool1 = MaxPool2d(filter_size=(13, 13), strides=(1, 1))(input_data)
    maxpool2 = MaxPool2d(filter_size=(9, 9), strides=(1, 1))(input_data)
    maxpool3 = MaxPool2d(filter_size=(5, 5), strides=(1, 1))(input_data)
    input_data = Concat()([maxpool1, maxpool2, maxpool3, input_data])

    input_data = convolutional(input_data, (1, 1, 2048, 512))
    input_data = convolutional(input_data, (3, 3, 512, 1024))
    input_data = convolutional(input_data, (1, 1, 1024, 512))

    return route_1, route_2, input_data


def YOLOv4(NUM_CLASS, pretrained=False):
    input_layer = Input([None, INPUT_SIZE, INPUT_SIZE, 3])
    route_1, route_2, conv = cspdarknet53(input_layer)

    route = conv
    conv = convolutional(conv, (1, 1, 512, 256))
    conv = upsample(conv)
    route_2 = convolutional(route_2, (1, 1, 512, 256), name='conv_yolo_1')
    conv = Concat()([route_2, conv])

    conv = convolutional(conv, (1, 1, 512, 256))
    conv = convolutional(conv, (3, 3, 256, 512))
    conv = convolutional(conv, (1, 1, 512, 256))
    conv = convolutional(conv, (3, 3, 256, 512))
    conv = convolutional(conv, (1, 1, 512, 256))

    route_2 = conv
    conv = convolutional(conv, (1, 1, 256, 128))
    conv = upsample(conv)
    route_1 = convolutional(route_1, (1, 1, 256, 128), name='conv_yolo_2')
    conv = Concat()([route_1, conv])

    conv = convolutional(conv, (1, 1, 256, 128))
    conv = convolutional(conv, (3, 3, 128, 256))
    conv = convolutional(conv, (1, 1, 256, 128))
    conv = convolutional(conv, (3, 3, 128, 256))
    conv = convolutional(conv, (1, 1, 256, 128))

    route_1 = conv
    conv = convolutional(conv, (3, 3, 128, 256), name='conv_route_1')
    conv_sbbox = convolutional(conv, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)

    conv = convolutional(route_1, (3, 3, 128, 256), downsample=True, name='conv_route_2')
    conv = Concat()([conv, route_2])

    conv = convolutional(conv, (1, 1, 512, 256))
    conv = convolutional(conv, (3, 3, 256, 512))
    conv = convolutional(conv, (1, 1, 512, 256))
    conv = convolutional(conv, (3, 3, 256, 512))
    conv = convolutional(conv, (1, 1, 512, 256))

    route_2 = conv
    conv = convolutional(conv, (3, 3, 256, 512), name='conv_route_3')
    conv_mbbox = convolutional(conv, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)

    conv = convolutional(route_2, (3, 3, 256, 512), downsample=True, name='conv_route_4')
    conv = Concat()([conv, route])

    conv = convolutional(conv, (1, 1, 1024, 512))
    conv = convolutional(conv, (3, 3, 512, 1024))
    conv = convolutional(conv, (1, 1, 1024, 512))
    conv = convolutional(conv, (3, 3, 512, 1024))
    conv = convolutional(conv, (1, 1, 1024, 512))
    conv = convolutional(conv, (3, 3, 512, 1024))
    conv_lbbox = convolutional(conv, (1, 1, 1024, 3 * (NUM_CLASS + 5)), activate=False, bn=False)

    network = Model(input_layer, [conv_sbbox, conv_mbbox, conv_lbbox])

    if pretrained:
        restore_params(network, model_path='model/model.npz')

    return network


def restore_params(network, model_path='models.npz'):
    logging.info("Restore pre-trained weights")

    try:
        npz = np.load(model_path, allow_pickle=True)
    except:
        print("Download the model file, placed in the /model ")
        print("Weights download: ", weights_url['link'], "password:", weights_url['password'])

    txt_path = 'model/yolov4_config.txt'
    f = open(txt_path, "r")
    line = f.readlines()
    for i in range(len(line)):
        network.all_weights[i].assign(npz[line[i].strip()])
        logging.info(" Loading weights %s in %s" % (network.all_weights[i].shape, network.all_weights[i].name))
true
true
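A hedged construction sketch for the model above, assuming `YOLOv4` is importable and tensorlayer is installed (80 classes as in MS COCO; `pretrained=False` skips the Baidu weight download — both choices are illustrative):

net = YOLOv4(NUM_CLASS=80, pretrained=False)
print(len(net.all_weights))  # count of weight tensors across the three detection heads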
1c4340270dcdef2713ee8a18152ba3aebc09a59b
2,836
py
Python
pyventskalender/tag20.py
kopp/pyventskalender
6f6455f3c1db07f65a772b2716e4be95fbcd1804
[ "MIT" ]
null
null
null
pyventskalender/tag20.py
kopp/pyventskalender
6f6455f3c1db07f65a772b2716e4be95fbcd1804
[ "MIT" ]
null
null
null
pyventskalender/tag20.py
kopp/pyventskalender
6f6455f3c1db07f65a772b2716e4be95fbcd1804
[ "MIT" ]
null
null
null
# Today we want to install an external package.
#
# In Python, `pip` is used for that.
#
# Requires the file virtualenv_und_pip.md.

# %% Creating a venv -- Tests 10 20

# So that packages can easily be removed again later, it is best to create a
# virtual environment.
# The easiest way to do this is in the powershell console.
# In Visual Studio Code you can open one by going to Terminal at the top
# and then New Terminal.
# Please make sure you are in the right folder.
# For that you can run `ls`, and it should show you, among others,
# `ich_will_meine_belohnung.py`.
# Now, to create the virtual environment, type:
#    python -m venv --system-site-packages venv
# The folder `venv` should then be created.
# If you are lucky, Visual Studio Code will ask you right away whether you
# want to activate the virtual environment -- say "Yes".
# If not, you can activate the virtual environment now.
# The easiest way is to close the console (`exit`) and then press
# Ctrl+Shift+P, which opens an input box at the top.
# Enter "Python: Select Interpreter" there and select that item (Enter).
# Choose "Enter Interpreter Path" and enter ".\venv\Scripts\python.exe"
# (Linux/Mac: "./venv/bin/python").
# If you open a new console now, Visual Studio Code makes sure that you are
# using the virtual environment.

# %% Installing via pip -- Test 30
# To actually install something there, open a console.
# To check whether it is using the virtual environment, type
#    Get-Command python
# (Linux/Mac users: `which python`).
# This should print the current folder with venv\Scripts\python
# (Linux/Mac: "venv/bin/python").
# If that is not the case, close the console (`exit`), reopen it and hope.
# If it is still not the case, activate the virtual environment once more.
# Now, finally, the installation:
#    pip install cat_fact

# %%
# If it was successful, you can type
#    catFact
# and you will get an important fact about cats.

# %%
# We can do the same directly in code now:
try:
    import requests
    from cat_fact.client import CatClient
    cat_client = CatClient(requests.Session(), "http://cat-fact.herokuapp.com")
    cat_client.get_random_fact("cat")
except ImportError:
    # Apparently we cannot import what we want
    "Katzen sind tolle Tiere"
except ModuleNotFoundError:
    # Apparently we cannot import what we want
    "Katzen sind tolle Tiere"

# %%
# That is quite a long output, but the part in "text" is the interesting one.
# Store the output of `get_random_fact` (a `dict`) and print the value for
# the key "text".

# %%
# You can find the most important steps for virtualenv in `virtualenv_und_pip.md`.
41.101449
98
0.744711
try: import requests from cat_fact.client import CatClient cat_client = CatClient(requests.Session(), "http://cat-fact.herokuapp.com") cat_client.get_random_fact("cat") except ImportError: "Katzen sind tolle Tiere"
true
true
1c43409cd5f286e144c46827ab418d817bfc839f
9,301
bzl
Python
experimental/python/wheel.bzl
Antobiotics/rules_python
4b84ad270387a7c439ebdccfd530e2339601ef27
[ "Apache-2.0" ]
2
2020-01-13T19:37:29.000Z
2021-01-18T05:52:44.000Z
experimental/python/wheel.bzl
Antobiotics/rules_python
4b84ad270387a7c439ebdccfd530e2339601ef27
[ "Apache-2.0" ]
null
null
null
experimental/python/wheel.bzl
Antobiotics/rules_python
4b84ad270387a7c439ebdccfd530e2339601ef27
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rules for building wheels.""" def _path_inside_wheel(input_file): # input_file.short_path is relative ("../${repository_root}/foobar") # so it can't be a valid path within a zip file. Thus strip out the root # manually instead of using short_path here. root = input_file.root.path if root != "": # TODO: '/' is wrong on windows, but the path separator is not available in skylark. # Fix this once ctx.configuration has directory separator information. root += "/" if not input_file.path.startswith(root): fail("input_file.path '%s' does not start with expected root '%s'" % (input_file.path, root)) return input_file.path[len(root):] def _input_file_to_arg(input_file): """Converts a File object to string for --input_file argument to wheelmaker""" return "%s;%s" % (_path_inside_wheel(input_file), input_file.path) def _py_package_impl(ctx): inputs = depset( transitive = [dep[DefaultInfo].data_runfiles.files for dep in ctx.attr.deps] + [dep[DefaultInfo].default_runfiles.files for dep in ctx.attr.deps], ) # TODO: '/' is wrong on windows, but the path separator is not available in skylark. # Fix this once ctx.configuration has directory separator information. packages = [p.replace(".", "/") for p in ctx.attr.packages] if not packages: filtered_inputs = inputs else: filtered_files = [] # TODO: flattening depset to list gives poor performance, for input_file in inputs.to_list(): wheel_path = _path_inside_wheel(input_file) for package in packages: if wheel_path.startswith(package): filtered_files.append(input_file) filtered_inputs = depset(direct = filtered_files) return [DefaultInfo( files = filtered_inputs, )] py_package = rule( implementation = _py_package_impl, doc = """ A rule to select all files in transitive dependencies of deps which belong to given set of Python packages. This rule is intended to be used as data dependency to py_wheel rule """, attrs = { "deps": attr.label_list(), "packages": attr.string_list( mandatory = False, allow_empty = True, doc = """\ List of Python packages to include in the distribution. Sub-packages are automatically included. """, ), }, ) def _py_wheel_impl(ctx): outfile = ctx.actions.declare_file("-".join([ ctx.attr.distribution, ctx.attr.version, ctx.attr.python_tag, ctx.attr.abi, ctx.attr.platform, ]) + ".whl") inputs_to_package = depset( direct = ctx.files.deps, ) # Inputs to this rule which are not to be packaged. # Currently this is only the description file (if used). 
other_inputs = [] args = ctx.actions.args() args.add("--name", ctx.attr.distribution) args.add("--version", ctx.attr.version) args.add("--python_tag", ctx.attr.python_tag) args.add("--abi", ctx.attr.abi) args.add("--platform", ctx.attr.platform) args.add("--out", outfile.path) args.add_all(ctx.attr.strip_path_prefixes, format_each = "--strip_path_prefix=%s") args.add_all(inputs_to_package, format_each = "--input_file=%s", map_each = _input_file_to_arg) extra_headers = [] if ctx.attr.author: extra_headers.append("Author: %s" % ctx.attr.author) if ctx.attr.author_email: extra_headers.append("Author-email: %s" % ctx.attr.author_email) if ctx.attr.homepage: extra_headers.append("Home-page: %s" % ctx.attr.homepage) if ctx.attr.license: extra_headers.append("License: %s" % ctx.attr.license) for h in extra_headers: args.add("--header", h) for c in ctx.attr.classifiers: args.add("--classifier", c) for r in ctx.attr.requires: args.add("--requires", r) for option, requirements in ctx.attr.extra_requires.items(): for r in requirements: args.add("--extra_requires", r + ";" + option) for name, ref in ctx.attr.console_scripts.items(): args.add("--console_script", name + " = " + ref) if ctx.attr.description_file: description_file = ctx.file.description_file args.add("--description_file", description_file) other_inputs.append(description_file) ctx.actions.run( inputs = depset(direct = other_inputs, transitive = [inputs_to_package]), outputs = [outfile], arguments = [args], executable = ctx.executable._wheelmaker, progress_message = "Building wheel", ) return [DefaultInfo( files = depset([outfile]), data_runfiles = ctx.runfiles(files = [outfile]), )] py_wheel = rule( implementation = _py_wheel_impl, doc = """ A rule for building Python Wheels. Wheels are a Python distribution format defined in https://www.python.org/dev/peps/pep-0427/. This rule packages a set of targets into a single wheel. Currently only pure-python wheels are supported. Examples: <code> # Package just specific py_library targets, without their dependencies py_wheel( name = "minimal_with_py_library", # Package data. We're building "example_minimal_library-0.0.1-py3-none-any.whl" distribution = "example_minimal_library", python_tag = "py3", version = "0.0.1", deps = [ "//experimental/examples/wheel/lib:module_with_data", "//experimental/examples/wheel/lib:simple_module", ], ) # Use py_package to collect all transitive dependencies of a target, # selecting just the files within a specific python package. py_package( name = "example_pkg", # Only include these Python packages. packages = ["experimental.examples.wheel"], deps = [":main"], ) py_wheel( name = "minimal_with_py_package", # Package data. We're building "example_minimal_package-0.0.1-py3-none-any.whl" distribution = "example_minimal_package", python_tag = "py3", version = "0.0.1", deps = [":example_pkg"], ) </code> """, attrs = { "deps": attr.label_list( doc = """\ Targets to be included in the distribution. The targets to package are usually `py_library` rules or filesets (for packaging data files). Note it's usually better to package `py_library` targets and use `console_scripts` attribute to specify entry points than to package `py_binary` rules. `py_binary` targets would wrap an executable script that tries to locate the `.runfiles` directory, which is not packaged in the wheel. """, ), # Attributes defining the distribution "distribution": attr.string( mandatory = True, doc = """ Name of the distribution. This should match the project name on PyPI. 
It's also the name that is used to refer to the package in other packages' dependencies. """, ), "version": attr.string( mandatory = True, doc = "Version number of the package", ), "python_tag": attr.string( default = "py3", doc = "Supported Python major version. 'py2' or 'py3'", values = ["py2", "py3"], ), "abi": attr.string( default = "none", doc = "Python ABI tag. 'none' for pure-Python wheels.", ), # TODO(pstradomski): Support non-pure wheels "platform": attr.string( default = "any", doc = "Supported platforms. 'any' for pure-Python wheel.", ), # Other attributes "author": attr.string(default = ""), "author_email": attr.string(default = ""), "homepage": attr.string(default = ""), "license": attr.string(default = ""), "classifiers": attr.string_list(), "description_file": attr.label(allow_single_file = True), "strip_path_prefixes": attr.string_list( default = [], doc = "path prefixes to strip from files added to the generated package", ), # Requirements "requires": attr.string_list( doc = "List of requirements for this package", ), "extra_requires": attr.string_list_dict( doc = "List of optional requirements for this package", ), # Entry points "console_scripts": attr.string_dict( doc = """\ console_script entry points, e.g. 'experimental.examples.wheel.main:main'. """, ), # Implementation details. "_wheelmaker": attr.label( executable = True, cfg = "host", default = "//experimental/rules_python:wheelmaker", ), }, )
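# Editor's hedged aside (illustration only, not part of the original rules):
# _path_inside_wheel above strips the configuration root from a file's path.
# A plain-Python mimic of that logic, handy as a mental model (kept commented
# out because .bzl files are Starlark, which has no assert statement):
#
#   def _strip_root(path, root):
#       if root != "":
#           root += "/"   # same '/'-separator caveat as the TODOs above
#       if not path.startswith(root):
#           raise ValueError("path %r does not start with root %r" % (path, root))
#       return path[len(root):]
#
#   _strip_root("bazel-out/k8-fastbuild/bin/pkg/mod.py",
#               "bazel-out/k8-fastbuild/bin")   # -> "pkg/mod.py"
#   _strip_root("pkg/mod.py", "")               # -> "pkg/mod.py" (source files)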
34.321033
101
0.641006
def _path_inside_wheel(input_file): # manually instead of using short_path here. root = input_file.root.path if root != "": # TODO: '/' is wrong on windows, but the path separator is not available in skylark. # Fix this once ctx.configuration has directory separator information. root += "/" if not input_file.path.startswith(root): fail("input_file.path '%s' does not start with expected root '%s'" % (input_file.path, root)) return input_file.path[len(root):] def _input_file_to_arg(input_file): return "%s;%s" % (_path_inside_wheel(input_file), input_file.path) def _py_package_impl(ctx): inputs = depset( transitive = [dep[DefaultInfo].data_runfiles.files for dep in ctx.attr.deps] + [dep[DefaultInfo].default_runfiles.files for dep in ctx.attr.deps], ) # TODO: '/' is wrong on windows, but the path separator is not available in skylark. # Fix this once ctx.configuration has directory separator information. packages = [p.replace(".", "/") for p in ctx.attr.packages] if not packages: filtered_inputs = inputs else: filtered_files = [] # TODO: flattening depset to list gives poor performance, for input_file in inputs.to_list(): wheel_path = _path_inside_wheel(input_file) for package in packages: if wheel_path.startswith(package): filtered_files.append(input_file) filtered_inputs = depset(direct = filtered_files) return [DefaultInfo( files = filtered_inputs, )] py_package = rule( implementation = _py_package_impl, doc = """ A rule to select all files in transitive dependencies of deps which belong to given set of Python packages. This rule is intended to be used as data dependency to py_wheel rule """, attrs = { "deps": attr.label_list(), "packages": attr.string_list( mandatory = False, allow_empty = True, doc = """\ List of Python packages to include in the distribution. Sub-packages are automatically included. """, ), }, ) def _py_wheel_impl(ctx): outfile = ctx.actions.declare_file("-".join([ ctx.attr.distribution, ctx.attr.version, ctx.attr.python_tag, ctx.attr.abi, ctx.attr.platform, ]) + ".whl") inputs_to_package = depset( direct = ctx.files.deps, ) # Inputs to this rule which are not to be packaged. # Currently this is only the description file (if used). 
other_inputs = [] args = ctx.actions.args() args.add("--name", ctx.attr.distribution) args.add("--version", ctx.attr.version) args.add("--python_tag", ctx.attr.python_tag) args.add("--abi", ctx.attr.abi) args.add("--platform", ctx.attr.platform) args.add("--out", outfile.path) args.add_all(ctx.attr.strip_path_prefixes, format_each = "--strip_path_prefix=%s") args.add_all(inputs_to_package, format_each = "--input_file=%s", map_each = _input_file_to_arg) extra_headers = [] if ctx.attr.author: extra_headers.append("Author: %s" % ctx.attr.author) if ctx.attr.author_email: extra_headers.append("Author-email: %s" % ctx.attr.author_email) if ctx.attr.homepage: extra_headers.append("Home-page: %s" % ctx.attr.homepage) if ctx.attr.license: extra_headers.append("License: %s" % ctx.attr.license) for h in extra_headers: args.add("--header", h) for c in ctx.attr.classifiers: args.add("--classifier", c) for r in ctx.attr.requires: args.add("--requires", r) for option, requirements in ctx.attr.extra_requires.items(): for r in requirements: args.add("--extra_requires", r + ";" + option) for name, ref in ctx.attr.console_scripts.items(): args.add("--console_script", name + " = " + ref) if ctx.attr.description_file: description_file = ctx.file.description_file args.add("--description_file", description_file) other_inputs.append(description_file) ctx.actions.run( inputs = depset(direct = other_inputs, transitive = [inputs_to_package]), outputs = [outfile], arguments = [args], executable = ctx.executable._wheelmaker, progress_message = "Building wheel", ) return [DefaultInfo( files = depset([outfile]), data_runfiles = ctx.runfiles(files = [outfile]), )] py_wheel = rule( implementation = _py_wheel_impl, doc = """ A rule for building Python Wheels. Wheels are a Python distribution format defined in https://www.python.org/dev/peps/pep-0427/. This rule packages a set of targets into a single wheel. Currently only pure-python wheels are supported. Examples: <code> # Package just specific py_library targets, without their dependencies py_wheel( name = "minimal_with_py_library", # Package data. We're building "example_minimal_library-0.0.1-py3-none-any.whl" distribution = "example_minimal_library", python_tag = "py3", version = "0.0.1", deps = [ "//experimental/examples/wheel/lib:module_with_data", "//experimental/examples/wheel/lib:simple_module", ], ) # Use py_package to collect all transitive dependencies of a target, # selecting just the files within a specific python package. py_package( name = "example_pkg", # Only include these Python packages. packages = ["experimental.examples.wheel"], deps = [":main"], ) py_wheel( name = "minimal_with_py_package", # Package data. We're building "example_minimal_package-0.0.1-py3-none-any.whl" distribution = "example_minimal_package", python_tag = "py3", version = "0.0.1", deps = [":example_pkg"], ) </code> """, attrs = { "deps": attr.label_list( doc = """\ Targets to be included in the distribution. The targets to package are usually `py_library` rules or filesets (for packaging data files). Note it's usually better to package `py_library` targets and use `console_scripts` attribute to specify entry points than to package `py_binary` rules. `py_binary` targets would wrap an executable script that tries to locate the `.runfiles` directory, which is not packaged in the wheel. """, ), "distribution": attr.string( mandatory = True, doc = """ Name of the distribution. This should match the project name on PyPI. 
It's also the name that is used to refer to the package in other packages' dependencies. """, ), "version": attr.string( mandatory = True, doc = "Version number of the package", ), "python_tag": attr.string( default = "py3", doc = "Supported Python major version. 'py2' or 'py3'", values = ["py2", "py3"], ), "abi": attr.string( default = "none", doc = "Python ABI tag. 'none' for pure-Python wheels.", ), "platform": attr.string( default = "any", doc = "Supported platforms. 'any' for pure-Python wheel.", ), "author": attr.string(default = ""), "author_email": attr.string(default = ""), "homepage": attr.string(default = ""), "license": attr.string(default = ""), "classifiers": attr.string_list(), "description_file": attr.label(allow_single_file = True), "strip_path_prefixes": attr.string_list( default = [], doc = "path prefixes to strip from files added to the generated package", ), "requires": attr.string_list( doc = "List of requirements for this package", ), "extra_requires": attr.string_list_dict( doc = "List of optional requirements for this package", ), "console_scripts": attr.string_dict( doc = """\ console_script entry points, e.g. 'experimental.examples.wheel.main:main'. """, ), "_wheelmaker": attr.label( executable = True, cfg = "host", default = "//experimental/rules_python:wheelmaker", ), }, )
true
true
1c43435b0db90baabc818eeeb1b256f1f625d861
838
py
Python
app.py
ericsouza/flask-restful
906301b27fa9968b3a88db04820d437ebbc3767c
[ "MIT" ]
null
null
null
app.py
ericsouza/flask-restful
906301b27fa9968b3a88db04820d437ebbc3767c
[ "MIT" ]
null
null
null
app.py
ericsouza/flask-restful
906301b27fa9968b3a88db04820d437ebbc3767c
[ "MIT" ]
null
null
null
import os from flask import Flask from flask_restful import Api from flask_jwt import JWT from security import authenticate, identity from resources.user import UserRegister from resources.item import Item, ItemList from resources.store import Store, StoreList app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db') app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['PROPAGATE_EXCEPTIONS'] = True app.secret_key = 'jose' api = Api(app) jwt = JWT(app, authenticate, identity) # /auth api.add_resource(Store, '/store/<string:name>') api.add_resource(StoreList, '/stores') api.add_resource(Item, '/item/<string:name>') api.add_resource(ItemList, '/items') api.add_resource(UserRegister, '/register') if __name__ == '__main__': app.run(port=5000, debug=True)
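# Editor's hedged client sketch (illustration; host, port and credentials are
# assumptions). flask_jwt serves its login endpoint at /auth by default and
# expects an "Authorization: JWT <token>" header on protected resources:
#
#   import requests
#   r = requests.post("http://localhost:5000/auth",
#                     json={"username": "user1", "password": "abcd"})
#   token = r.json()["access_token"]
#   requests.get("http://localhost:5000/items",
#                headers={"Authorization": "JWT " + token})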
29.928571
91
0.768496
import os from flask import Flask from flask_restful import Api from flask_jwt import JWT from security import authenticate, identity from resources.user import UserRegister from resources.item import Item, ItemList from resources.store import Store, StoreList app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db') app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['PROPAGATE_EXCEPTIONS'] = True app.secret_key = 'jose' api = Api(app) jwt = JWT(app, authenticate, identity) api.add_resource(Store, '/store/<string:name>') api.add_resource(StoreList, '/stores') api.add_resource(Item, '/item/<string:name>') api.add_resource(ItemList, '/items') api.add_resource(UserRegister, '/register') if __name__ == '__main__': app.run(port=5000, debug=True)
true
true
1c4343837fc4e55a0a92d392000573c4581268cb
2,673
py
Python
networkx/linalg/tests/test_spectrum.py
bjedwards/NetworkX_fork
6cb4465d73b8adc4692206fdbc8e1a3934d94fe6
[ "BSD-3-Clause" ]
3
2016-02-06T01:18:41.000Z
2020-11-10T08:15:33.000Z
networkx/linalg/tests/test_spectrum.py
tomzhang/NetworkX_fork
6cb4465d73b8adc4692206fdbc8e1a3934d94fe6
[ "BSD-3-Clause" ]
null
null
null
networkx/linalg/tests/test_spectrum.py
tomzhang/NetworkX_fork
6cb4465d73b8adc4692206fdbc8e1a3934d94fe6
[ "BSD-3-Clause" ]
7
2015-04-28T19:19:30.000Z
2022-02-06T11:46:29.000Z
from nose import SkipTest import networkx as nx from networkx.generators.degree_seq import havel_hakimi_graph class TestSpectrum(object): @classmethod def setupClass(cls): global numpy global assert_equal global assert_almost_equal try: import numpy from numpy.testing import assert_equal,assert_almost_equal except ImportError: raise SkipTest('NumPy not available.') def setUp(self): deg=[3,2,2,1,0] self.G=havel_hakimi_graph(deg) self.P=nx.path_graph(3) self.A=numpy.array([[0, 1, 1, 1, 0], [1, 0, 1, 0, 0], [1, 1, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) def test_adjacency_matrix(self): "Conversion to adjacency matrix" assert_equal(nx.adj_matrix(self.G),self.A) def test_laplacian(self): "Graph Laplacian" NL=numpy.array([[ 3, -1, -1, -1, 0], [-1, 2, -1, 0, 0], [-1, -1, 2, 0, 0], [-1, 0, 0, 1, 0], [ 0, 0, 0, 0, 0]]) assert_equal(nx.laplacian(self.G),NL) def test_generalized_laplacian(self): "Generalized Graph Laplacian" GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00], [-0.408, 1.00, -0.50, 0.00 , 0.00], [-0.408, -0.50, 1.00, 0.00, 0.00], [-0.577, 0.00, 0.00, 1.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00]]) assert_almost_equal(nx.generalized_laplacian(self.G),GL,decimal=3) def test_normalized_laplacian(self): "Normalized Graph Laplacian" GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00], [-0.408, 1.00, -0.50, 0.00 , 0.00], [-0.408, -0.50, 1.00, 0.00, 0.00], [-0.577, 0.00, 0.00, 1.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00]]) assert_almost_equal(nx.normalized_laplacian(self.G),GL,decimal=3) def test_laplacian_spectrum(self): "Laplacian eigenvalues" evals=numpy.array([0, 0, 1, 3, 4]) e=sorted(nx.laplacian_spectrum(self.G)) assert_almost_equal(e,evals) def test_adjacency_spectrum(self): "Adjacency eigenvalues" evals=numpy.array([-numpy.sqrt(2), 0, numpy.sqrt(2)]) e=sorted(nx.adjacency_spectrum(self.P)) assert_almost_equal(e,evals)
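# Editor's hedged numeric aside (illustration, not part of the original test
# suite): the expected eigenvalues in test_adjacency_spectrum follow directly
# from the 3x3 adjacency matrix of the path graph P3. Guarded so the module
# still imports cleanly when NumPy is absent:
try:
    import numpy as _np
    _P3 = _np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
    assert _np.allclose(sorted(_np.linalg.eigvalsh(_P3)),
                        [-_np.sqrt(2), 0.0, _np.sqrt(2)])
except ImportError:
    pass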
36.616438
74
0.483726
from nose import SkipTest import networkx as nx from networkx.generators.degree_seq import havel_hakimi_graph class TestSpectrum(object): @classmethod def setupClass(cls): global numpy global assert_equal global assert_almost_equal try: import numpy from numpy.testing import assert_equal,assert_almost_equal except ImportError: raise SkipTest('NumPy not available.') def setUp(self): deg=[3,2,2,1,0] self.G=havel_hakimi_graph(deg) self.P=nx.path_graph(3) self.A=numpy.array([[0, 1, 1, 1, 0], [1, 0, 1, 0, 0], [1, 1, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) def test_adjacency_matrix(self): assert_equal(nx.adj_matrix(self.G),self.A) def test_laplacian(self): NL=numpy.array([[ 3, -1, -1, -1, 0], [-1, 2, -1, 0, 0], [-1, -1, 2, 0, 0], [-1, 0, 0, 1, 0], [ 0, 0, 0, 0, 0]]) assert_equal(nx.laplacian(self.G),NL) def test_generalized_laplacian(self): GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00], [-0.408, 1.00, -0.50, 0.00 , 0.00], [-0.408, -0.50, 1.00, 0.00, 0.00], [-0.577, 0.00, 0.00, 1.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00]]) assert_almost_equal(nx.generalized_laplacian(self.G),GL,decimal=3) def test_normalized_laplacian(self): GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00], [-0.408, 1.00, -0.50, 0.00 , 0.00], [-0.408, -0.50, 1.00, 0.00, 0.00], [-0.577, 0.00, 0.00, 1.00, 0.00], [ 0.00, 0.00, 0.00, 0.00, 0.00]]) assert_almost_equal(nx.normalized_laplacian(self.G),GL,decimal=3) def test_laplacian_spectrum(self): evals=numpy.array([0, 0, 1, 3, 4]) e=sorted(nx.laplacian_spectrum(self.G)) assert_almost_equal(e,evals) def test_adjacency_spectrum(self): evals=numpy.array([-numpy.sqrt(2), 0, numpy.sqrt(2)]) e=sorted(nx.adjacency_spectrum(self.P)) assert_almost_equal(e,evals)
true
true
1c4343ae4b115bd8ea517c39a6fe9b057f3ad62d
443
py
Python
randjsongenerator.py
vajgi90/Python_Scripting
b9852878f458b9ed75f0e1a7804cd5328df9a058
[ "MIT" ]
null
null
null
randjsongenerator.py
vajgi90/Python_Scripting
b9852878f458b9ed75f0e1a7804cd5328df9a058
[ "MIT" ]
null
null
null
randjsongenerator.py
vajgi90/Python_Scripting
b9852878f458b9ed75f0e1a7804cd5328df9a058
[ "MIT" ]
null
null
null
import random import os import json count = int(os.getenv("FILE_COUNT") or 100) words = [word.strip() for word in open('/usr/share/dict/words').readlines()] os.makedirs('./new', exist_ok=True) for identifier in range(count): amount = random.uniform(1.0, 1000) content = { 'topic': random.choice(words), 'value': "%.2f" % amount } # Write into ./new, which is created above if it does not already exist with open(f'./new/receipt-{identifier}.json', 'w') as f: json.dump(content, f)
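# Editor's hedged aside (illustration): reading one generated receipt back.
# Topic and value are random, so only the shape is predictable:
#
#   with open('./new/receipt-0.json') as f:
#       receipt = json.load(f)   # e.g. {'topic': 'maple', 'value': '412.07'}
#       print(receipt['topic'], receipt['value'])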
24.611111
76
0.623025
import random import os import json count = int(os.getenv("FILE_COUNT") or 100) words = [word.strip() for word in open('/usr/share/dict/words').readlines()] os.makedirs('./new', exist_ok=True) for identifier in range(count): amount = random.uniform(1.0, 1000) content = { 'topic': random.choice(words), 'value': "%.2f" % amount } with open(f'./new/receipt-{identifier}.json', 'w') as f: json.dump(content, f)
true
true
1c43444f6c249fc8b371b96fac7f8df903b98549
89
py
Python
src/fixture/apps.py
vineethvanga18/gymkhana_portal
9dcb108dab0fd9fe163c72013b6de3210d5c9cd5
[ "MIT" ]
16
2018-04-12T22:38:28.000Z
2020-08-19T16:06:33.000Z
src/fixture/apps.py
vineethvanga18/gymkhana_portal
9dcb108dab0fd9fe163c72013b6de3210d5c9cd5
[ "MIT" ]
36
2018-03-23T15:40:26.000Z
2021-06-10T17:51:23.000Z
src/fixture/apps.py
vineethvanga18/gymkhana_portal
9dcb108dab0fd9fe163c72013b6de3210d5c9cd5
[ "MIT" ]
46
2018-04-06T21:03:36.000Z
2021-11-03T04:39:27.000Z
from django.apps import AppConfig class FixtureConfig(AppConfig): name = 'fixture'
14.833333
33
0.752809
from django.apps import AppConfig class FixtureConfig(AppConfig): name = 'fixture'
true
true
1c4345873e6851e4aef8c0d19674b52cae299711
11,920
py
Python
grr/server/grr_response_server/databases/mysql_users.py
ahmednofal/grr
08a57f6873ee13f425d0106e4143663bc6dbdd60
[ "Apache-2.0" ]
null
null
null
grr/server/grr_response_server/databases/mysql_users.py
ahmednofal/grr
08a57f6873ee13f425d0106e4143663bc6dbdd60
[ "Apache-2.0" ]
null
null
null
grr/server/grr_response_server/databases/mysql_users.py
ahmednofal/grr
08a57f6873ee13f425d0106e4143663bc6dbdd60
[ "Apache-2.0" ]
2
2020-08-24T00:22:03.000Z
2020-11-14T08:34:43.000Z
#!/usr/bin/env python """The MySQL database methods for GRR users and approval handling.""" from __future__ import absolute_import from __future__ import unicode_literals import MySQLdb from grr_response_core.lib import rdfvalue from grr_response_core.lib.util import random from grr_response_server import db from grr_response_server.databases import mysql_utils from grr_response_server.rdfvalues import objects as rdf_objects def _IntToApprovalID(approval_id): return u"%016x" % approval_id def _ApprovalIDToInt(approval_id): return int(approval_id, 16) def _ResponseToApprovalsWithGrants(response): """Converts a generator with approval rows into ApprovalRequest objects.""" prev_triplet = None cur_approval_request = None for (approval_id_int, approval_timestamp, approval_request_bytes, grantor_username, grant_timestamp) in response: cur_triplet = (approval_id_int, approval_timestamp, approval_request_bytes) if cur_triplet != prev_triplet: prev_triplet = cur_triplet if cur_approval_request: yield cur_approval_request cur_approval_request = mysql_utils.StringToRDFProto( rdf_objects.ApprovalRequest, approval_request_bytes) cur_approval_request.approval_id = _IntToApprovalID(approval_id_int) cur_approval_request.timestamp = mysql_utils.MysqlToRDFDatetime( approval_timestamp) if grantor_username and grant_timestamp: cur_approval_request.grants.append( rdf_objects.ApprovalGrant( grantor_username=grantor_username, timestamp=mysql_utils.MysqlToRDFDatetime(grant_timestamp))) if cur_approval_request: yield cur_approval_request class MySQLDBUsersMixin(object): """MySQLDB mixin for GRR users and approval related functions.""" @mysql_utils.WithTransaction() def WriteGRRUser(self, username, password=None, ui_mode=None, canary_mode=None, user_type=None, cursor=None): """Writes user object for a user with a given name.""" columns = ["username"] values = [username] if password is not None: columns.append("password") values.append(password.SerializeToString()) if ui_mode is not None: columns.append("ui_mode") values.append(int(ui_mode)) if canary_mode is not None: columns.append("canary_mode") # TODO(amoser): This int conversion is dirty but necessary with # the current MySQL driver. values.append(int(bool(canary_mode))) if user_type is not None: columns.append("user_type") values.append(int(user_type)) query = "INSERT INTO grr_users ({cols}) VALUES ({vals})".format( cols=", ".join(columns), vals=", ".join(["%s"] * len(columns))) if len(values) > 1: updates = ", ".join( ["{c} = VALUES ({c})".format(c=col) for col in columns[1:]]) query += "ON DUPLICATE KEY UPDATE " + updates cursor.execute(query, values) def _RowToGRRUser(self, row): """Creates a GRR user object from a database result row.""" username, password, ui_mode, canary_mode, user_type = row result = rdf_objects.GRRUser( username=username, ui_mode=ui_mode, canary_mode=canary_mode, user_type=user_type) if password: result.password.ParseFromString(password) return result @mysql_utils.WithTransaction(readonly=True) def ReadGRRUser(self, username, cursor=None): """Reads a user object corresponding to a given name.""" cursor.execute( "SELECT username, password, ui_mode, canary_mode, user_type " "FROM grr_users WHERE username=%s", [username]) row = cursor.fetchone() if row is None: raise db.UnknownGRRUserError("User '%s' not found." 
% username) return self._RowToGRRUser(row) @mysql_utils.WithTransaction(readonly=True) def ReadAllGRRUsers(self, cursor=None): cursor.execute("SELECT username, password, ui_mode, canary_mode, user_type " "FROM grr_users") res = [] for row in cursor.fetchall(): res.append(self._RowToGRRUser(row)) return res @mysql_utils.WithTransaction() def WriteApprovalRequest(self, approval_request, cursor=None): """Writes an approval request object.""" # Copy the approval_request to ensure we don't modify the source object. approval_request = approval_request.Copy() # Generate random approval id. approval_id_int = random.UInt64() now_str = mysql_utils.RDFDatetimeToMysqlString(rdfvalue.RDFDatetime.Now()) grants = approval_request.grants approval_request.grants = None query = ("INSERT INTO approval_request (username, approval_type, " "subject_id, approval_id, timestamp, expiration_time, " "approval_request) VALUES (%s, %s, %s, %s, %s, %s, %s)") args = [ approval_request.requestor_username, int(approval_request.approval_type), approval_request.subject_id, approval_id_int, now_str, mysql_utils.RDFDatetimeToMysqlString(approval_request.expiration_time), approval_request.SerializeToString() ] cursor.execute(query, args) for grant in grants: grant_query = ("INSERT INTO approval_grant (username, approval_id, " "grantor_username, timestamp) VALUES (%s, %s, %s, %s)") grant_args = [ approval_request.requestor_username, approval_id_int, grant.grantor_username, now_str ] cursor.execute(grant_query, grant_args) return _IntToApprovalID(approval_id_int) @mysql_utils.WithTransaction() def GrantApproval(self, requestor_username, approval_id, grantor_username, cursor=None): """Grants approval for a given request using given username.""" now_str = mysql_utils.RDFDatetimeToMysqlString(rdfvalue.RDFDatetime.Now()) grant_query = ("INSERT INTO approval_grant (username, approval_id, " "grantor_username, timestamp) VALUES (%s, %s, %s, %s)") grant_args = [ requestor_username, _ApprovalIDToInt(approval_id), grantor_username, now_str ] cursor.execute(grant_query, grant_args) @mysql_utils.WithTransaction(readonly=True) def ReadApprovalRequest(self, requestor_username, approval_id, cursor=None): """Reads an approval request object with a given id.""" query = ("SELECT approval_request.approval_id, approval_request.timestamp, " "approval_request.approval_request, " "approval_grant.grantor_username, approval_grant.timestamp " "FROM approval_request " "LEFT JOIN approval_grant USING (username, approval_id) " "WHERE approval_request.approval_id=%s " "AND approval_request.username=%s") cursor.execute(query, [_ApprovalIDToInt(approval_id), requestor_username]) res = cursor.fetchall() if not res: raise db.UnknownApprovalRequestError( "Approval '%s' not found." % approval_id) approval_id_int, timestamp, approval_request_bytes, _, _ = res[0] approval_request = mysql_utils.StringToRDFProto(rdf_objects.ApprovalRequest, approval_request_bytes) approval_request.approval_id = _IntToApprovalID(approval_id_int) approval_request.timestamp = mysql_utils.MysqlToRDFDatetime(timestamp) for _, _, _, grantor_username, timestamp in res: if not grantor_username: continue # Note: serialized approval_request objects are guaranteed to not # have any grants. 
approval_request.grants.append( rdf_objects.ApprovalGrant( grantor_username=grantor_username, timestamp=mysql_utils.MysqlToRDFDatetime(timestamp))) return approval_request @mysql_utils.WithTransaction(readonly=True) def ReadApprovalRequests(self, requestor_username, approval_type, subject_id=None, include_expired=False, cursor=None): """Reads approval requests of a given type for a given user.""" query = ("SELECT ar.approval_id, ar.timestamp, ar.approval_request, " "ag.grantor_username, ag.timestamp " "FROM approval_request ar " "LEFT JOIN approval_grant AS ag USING (username, approval_id) " "WHERE ar.username=%s AND ar.approval_type=%s") args = [requestor_username, int(approval_type)] if subject_id: query += " AND ar.subject_id = %s" args.append(subject_id) query += " ORDER BY ar.approval_id" ret = [] now = rdfvalue.RDFDatetime.Now() cursor.execute(query, args) for approval_request in _ResponseToApprovalsWithGrants(cursor.fetchall()): if include_expired or approval_request.expiration_time >= now: ret.append(approval_request) return ret @mysql_utils.WithTransaction() def WriteUserNotification(self, notification, cursor=None): """Writes a notification for a given user.""" # Copy the notification to ensure we don't modify the source object. notification = notification.Copy() if not notification.timestamp: notification.timestamp = rdfvalue.RDFDatetime.Now() query = ("INSERT INTO user_notification (username, timestamp, " "notification_state, notification) " "VALUES (%s, %s, %s, %s)") args = [ notification.username, mysql_utils.RDFDatetimeToMysqlString(notification.timestamp), int(notification.state), notification.SerializeToString() ] try: cursor.execute(query, args) except MySQLdb.IntegrityError: raise db.UnknownGRRUserError("User %s not found!" % notification.username) @mysql_utils.WithTransaction(readonly=True) def ReadUserNotifications(self, username, state=None, timerange=None, cursor=None): """Reads notifications scheduled for a user within a given timerange.""" query = ("SELECT timestamp, notification_state, notification " "FROM user_notification " "WHERE username=%s ") args = [username] if state is not None: query += "AND notification_state = %s " args.append(int(state)) if timerange is not None: time_from, time_to = timerange # pylint: disable=unpacking-non-sequence if time_from is not None: query += "AND timestamp >= %s " args.append(mysql_utils.RDFDatetimeToMysqlString(time_from)) if time_to is not None: query += "AND timestamp <= %s " args.append(mysql_utils.RDFDatetimeToMysqlString(time_to)) query += "ORDER BY timestamp DESC " ret = [] cursor.execute(query, args) for timestamp, state, notification_ser in cursor.fetchall(): n = rdf_objects.UserNotification.FromSerializedString(notification_ser) n.timestamp = mysql_utils.MysqlToRDFDatetime(timestamp) n.state = state ret.append(n) return ret @mysql_utils.WithTransaction() def UpdateUserNotifications(self, username, timestamps, state=None, cursor=None): """Updates existing user notification objects.""" query = ("UPDATE user_notification n " "SET n.notification_state = %s " "WHERE n.username = %s AND n.timestamp IN ({})").format(", ".join( ["%s"] * len(timestamps))) args = [ int(state), username, ] + [mysql_utils.RDFDatetimeToMysqlString(t) for t in timestamps] cursor.execute(query, args)
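# Editor's hedged self-check (illustration, not part of the original module):
# the approval-id helpers above form a 16-hex-digit round trip over 64-bit
# integers.
assert _IntToApprovalID(255) == u"00000000000000ff"
assert _ApprovalIDToInt(_IntToApprovalID(2 ** 64 - 1)) == 2 ** 64 - 1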
35.58209
80
0.660906
from __future__ import absolute_import from __future__ import unicode_literals import MySQLdb from grr_response_core.lib import rdfvalue from grr_response_core.lib.util import random from grr_response_server import db from grr_response_server.databases import mysql_utils from grr_response_server.rdfvalues import objects as rdf_objects def _IntToApprovalID(approval_id): return u"%016x" % approval_id def _ApprovalIDToInt(approval_id): return int(approval_id, 16) def _ResponseToApprovalsWithGrants(response): prev_triplet = None cur_approval_request = None for (approval_id_int, approval_timestamp, approval_request_bytes, grantor_username, grant_timestamp) in response: cur_triplet = (approval_id_int, approval_timestamp, approval_request_bytes) if cur_triplet != prev_triplet: prev_triplet = cur_triplet if cur_approval_request: yield cur_approval_request cur_approval_request = mysql_utils.StringToRDFProto( rdf_objects.ApprovalRequest, approval_request_bytes) cur_approval_request.approval_id = _IntToApprovalID(approval_id_int) cur_approval_request.timestamp = mysql_utils.MysqlToRDFDatetime( approval_timestamp) if grantor_username and grant_timestamp: cur_approval_request.grants.append( rdf_objects.ApprovalGrant( grantor_username=grantor_username, timestamp=mysql_utils.MysqlToRDFDatetime(grant_timestamp))) if cur_approval_request: yield cur_approval_request class MySQLDBUsersMixin(object): @mysql_utils.WithTransaction() def WriteGRRUser(self, username, password=None, ui_mode=None, canary_mode=None, user_type=None, cursor=None): columns = ["username"] values = [username] if password is not None: columns.append("password") values.append(password.SerializeToString()) if ui_mode is not None: columns.append("ui_mode") values.append(int(ui_mode)) if canary_mode is not None: columns.append("canary_mode") values.append(int(bool(canary_mode))) if user_type is not None: columns.append("user_type") values.append(int(user_type)) query = "INSERT INTO grr_users ({cols}) VALUES ({vals})".format( cols=", ".join(columns), vals=", ".join(["%s"] * len(columns))) if len(values) > 1: updates = ", ".join( ["{c} = VALUES ({c})".format(c=col) for col in columns[1:]]) query += "ON DUPLICATE KEY UPDATE " + updates cursor.execute(query, values) def _RowToGRRUser(self, row): username, password, ui_mode, canary_mode, user_type = row result = rdf_objects.GRRUser( username=username, ui_mode=ui_mode, canary_mode=canary_mode, user_type=user_type) if password: result.password.ParseFromString(password) return result @mysql_utils.WithTransaction(readonly=True) def ReadGRRUser(self, username, cursor=None): cursor.execute( "SELECT username, password, ui_mode, canary_mode, user_type " "FROM grr_users WHERE username=%s", [username]) row = cursor.fetchone() if row is None: raise db.UnknownGRRUserError("User '%s' not found." % username) return self._RowToGRRUser(row) @mysql_utils.WithTransaction(readonly=True) def ReadAllGRRUsers(self, cursor=None): cursor.execute("SELECT username, password, ui_mode, canary_mode, user_type " "FROM grr_users") res = [] for row in cursor.fetchall(): res.append(self._RowToGRRUser(row)) return res @mysql_utils.WithTransaction() def WriteApprovalRequest(self, approval_request, cursor=None): approval_request = approval_request.Copy() # Generate random approval id. 
approval_id_int = random.UInt64() now_str = mysql_utils.RDFDatetimeToMysqlString(rdfvalue.RDFDatetime.Now()) grants = approval_request.grants approval_request.grants = None query = ("INSERT INTO approval_request (username, approval_type, " "subject_id, approval_id, timestamp, expiration_time, " "approval_request) VALUES (%s, %s, %s, %s, %s, %s, %s)") args = [ approval_request.requestor_username, int(approval_request.approval_type), approval_request.subject_id, approval_id_int, now_str, mysql_utils.RDFDatetimeToMysqlString(approval_request.expiration_time), approval_request.SerializeToString() ] cursor.execute(query, args) for grant in grants: grant_query = ("INSERT INTO approval_grant (username, approval_id, " "grantor_username, timestamp) VALUES (%s, %s, %s, %s)") grant_args = [ approval_request.requestor_username, approval_id_int, grant.grantor_username, now_str ] cursor.execute(grant_query, grant_args) return _IntToApprovalID(approval_id_int) @mysql_utils.WithTransaction() def GrantApproval(self, requestor_username, approval_id, grantor_username, cursor=None): now_str = mysql_utils.RDFDatetimeToMysqlString(rdfvalue.RDFDatetime.Now()) grant_query = ("INSERT INTO approval_grant (username, approval_id, " "grantor_username, timestamp) VALUES (%s, %s, %s, %s)") grant_args = [ requestor_username, _ApprovalIDToInt(approval_id), grantor_username, now_str ] cursor.execute(grant_query, grant_args) @mysql_utils.WithTransaction(readonly=True) def ReadApprovalRequest(self, requestor_username, approval_id, cursor=None): query = ("SELECT approval_request.approval_id, approval_request.timestamp, " "approval_request.approval_request, " "approval_grant.grantor_username, approval_grant.timestamp " "FROM approval_request " "LEFT JOIN approval_grant USING (username, approval_id) " "WHERE approval_request.approval_id=%s " "AND approval_request.username=%s") cursor.execute(query, [_ApprovalIDToInt(approval_id), requestor_username]) res = cursor.fetchall() if not res: raise db.UnknownApprovalRequestError( "Approval '%s' not found." % approval_id) approval_id_int, timestamp, approval_request_bytes, _, _ = res[0] approval_request = mysql_utils.StringToRDFProto(rdf_objects.ApprovalRequest, approval_request_bytes) approval_request.approval_id = _IntToApprovalID(approval_id_int) approval_request.timestamp = mysql_utils.MysqlToRDFDatetime(timestamp) for _, _, _, grantor_username, timestamp in res: if not grantor_username: continue # Note: serialized approval_request objects are guaranteed to not # have any grants. 
approval_request.grants.append( rdf_objects.ApprovalGrant( grantor_username=grantor_username, timestamp=mysql_utils.MysqlToRDFDatetime(timestamp))) return approval_request @mysql_utils.WithTransaction(readonly=True) def ReadApprovalRequests(self, requestor_username, approval_type, subject_id=None, include_expired=False, cursor=None): query = ("SELECT ar.approval_id, ar.timestamp, ar.approval_request, " "ag.grantor_username, ag.timestamp " "FROM approval_request ar " "LEFT JOIN approval_grant AS ag USING (username, approval_id) " "WHERE ar.username=%s AND ar.approval_type=%s") args = [requestor_username, int(approval_type)] if subject_id: query += " AND ar.subject_id = %s" args.append(subject_id) query += " ORDER BY ar.approval_id" ret = [] now = rdfvalue.RDFDatetime.Now() cursor.execute(query, args) for approval_request in _ResponseToApprovalsWithGrants(cursor.fetchall()): if include_expired or approval_request.expiration_time >= now: ret.append(approval_request) return ret @mysql_utils.WithTransaction() def WriteUserNotification(self, notification, cursor=None): # Copy the notification to ensure we don't modify the source object. notification = notification.Copy() if not notification.timestamp: notification.timestamp = rdfvalue.RDFDatetime.Now() query = ("INSERT INTO user_notification (username, timestamp, " "notification_state, notification) " "VALUES (%s, %s, %s, %s)") args = [ notification.username, mysql_utils.RDFDatetimeToMysqlString(notification.timestamp), int(notification.state), notification.SerializeToString() ] try: cursor.execute(query, args) except MySQLdb.IntegrityError: raise db.UnknownGRRUserError("User %s not found!" % notification.username) @mysql_utils.WithTransaction(readonly=True) def ReadUserNotifications(self, username, state=None, timerange=None, cursor=None): query = ("SELECT timestamp, notification_state, notification " "FROM user_notification " "WHERE username=%s ") args = [username] if state is not None: query += "AND notification_state = %s " args.append(int(state)) if timerange is not None: time_from, time_to = timerange if time_from is not None: query += "AND timestamp >= %s " args.append(mysql_utils.RDFDatetimeToMysqlString(time_from)) if time_to is not None: query += "AND timestamp <= %s " args.append(mysql_utils.RDFDatetimeToMysqlString(time_to)) query += "ORDER BY timestamp DESC " ret = [] cursor.execute(query, args) for timestamp, state, notification_ser in cursor.fetchall(): n = rdf_objects.UserNotification.FromSerializedString(notification_ser) n.timestamp = mysql_utils.MysqlToRDFDatetime(timestamp) n.state = state ret.append(n) return ret @mysql_utils.WithTransaction() def UpdateUserNotifications(self, username, timestamps, state=None, cursor=None): query = ("UPDATE user_notification n " "SET n.notification_state = %s " "WHERE n.username = %s AND n.timestamp IN ({})").format(", ".join( ["%s"] * len(timestamps))) args = [ int(state), username, ] + [mysql_utils.RDFDatetimeToMysqlString(t) for t in timestamps] cursor.execute(query, args)
true
true
1c434822c8627f3057f2aef1288f8299aa05dbf6
1,931
py
Python
mmtbx/command_line/find_residue_in_pdb.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
155
2016-11-23T12:52:16.000Z
2022-03-31T15:35:44.000Z
mmtbx/command_line/find_residue_in_pdb.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
590
2016-12-10T11:31:18.000Z
2022-03-30T23:10:09.000Z
mmtbx/command_line/find_residue_in_pdb.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
115
2016-11-15T08:17:28.000Z
2022-02-09T15:30:14.000Z
from __future__ import absolute_import, division, print_function from libtbx.utils import Sorry, Usage import libtbx.phil.command_line import sys master_phil = libtbx.phil.parse(""" resname = None .type = str d_max = None .type = float polymeric_type = *Any Free Polymeric .type = choice xray_only = True .type = bool data_only = False .type = bool identity_cutoff = None .type = int quiet = False .type = bool """) def run(args, out=sys.stdout): if (len(args) == 0) or ("--help" in args): raise Usage("""mmtbx.find_residue_in_pdb RESNAME [options] Use the RCSB web services to retrieve a list of PDB structures containing the specified chemical ID. Full parameters: %s """ % master_phil.as_str(prefix=" ")) sources = [] def process_unknown(arg): if (1 <= len(arg) <= 3) and (arg.isalnum()): return libtbx.phil.parse("resname=%s" % arg) cai = libtbx.phil.command_line.argument_interpreter(master_phil=master_phil) working_phil = cai.process_and_fetch(args=args, custom_processor=process_unknown) params = working_phil.extract() if (params.resname is None): raise Sorry("No residue ID specified.") from mmtbx.wwpdb import rcsb_web_services pdb_ids = rcsb_web_services.chemical_id_search( resname=params.resname, d_max=params.d_max, polymeric_type=params.polymeric_type, xray_only=params.xray_only, data_only=params.data_only, identity_cutoff=params.identity_cutoff) pdb_ids = [ id.lower() for id in pdb_ids ] if (len(pdb_ids) == 0): raise Sorry("No structures found matching the specified criteria.") else : if (not params.quiet): print("%d PDB IDs retrieved:" % len(pdb_ids), file=out) i = 0 while (i < len(pdb_ids)): print(" %s" % " ".join(pdb_ids[i:i+16]), file=out) i += 16 else : print("%d PDB IDs matching" % len(pdb_ids), file=out) if (__name__ == "__main__"): run(sys.argv[1:])
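# Editor's hedged usage note (illustration; ATP is just an example code):
# any bare 1-3 character alphanumeric argument is routed through
# process_unknown() above and treated as the residue name, so
#   mmtbx.find_residue_in_pdb ATP d_max=2.0
# behaves the same as
#   mmtbx.find_residue_in_pdb resname=ATP d_max=2.0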
28.820896
78
0.688762
from __future__ import absolute_import, division, print_function from libtbx.utils import Sorry, Usage import libtbx.phil.command_line import sys master_phil = libtbx.phil.parse(""" resname = None .type = str d_max = None .type = float polymeric_type = *Any Free Polymeric .type = choice xray_only = True .type = bool data_only = False .type = bool identity_cutoff = None .type = int quiet = False .type = bool """) def run(args, out=sys.stdout): if (len(args) == 0) or ("--help" in args): raise Usage("""mmtbx.find_residue_in_pdb RESNAME [options] Use the RCSB web services to retrieve a list of PDB structures containing the specified chemical ID. Full parameters: %s """ % master_phil.as_str(prefix=" ")) sources = [] def process_unknown(arg): if (1 <= len(arg) <= 3) and (arg.isalnum()): return libtbx.phil.parse("resname=%s" % arg) cai = libtbx.phil.command_line.argument_interpreter(master_phil=master_phil) working_phil = cai.process_and_fetch(args=args, custom_processor=process_unknown) params = working_phil.extract() if (params.resname is None): raise Sorry("No residue ID specified.") from mmtbx.wwpdb import rcsb_web_services pdb_ids = rcsb_web_services.chemical_id_search( resname=params.resname, d_max=params.d_max, polymeric_type=params.polymeric_type, xray_only=params.xray_only, data_only=params.data_only, identity_cutoff=params.identity_cutoff) pdb_ids = [ id.lower() for id in pdb_ids ] if (len(pdb_ids) == 0): raise Sorry("No structures found matching the specified criteria.") else : if (not params.quiet): print("%d PDB IDs retrieved:" % len(pdb_ids), file=out) i = 0 while (i < len(pdb_ids)): print(" %s" % " ".join(pdb_ids[i:i+16]), file=out) i += 16 else : print("%d PDB IDs matching" % len(pdb_ids), file=out) if (__name__ == "__main__"): run(sys.argv[1:])
true
true
1c4349ace41d27f9430e4589be24e3400d7e2a91
500
py
Python
settings.py
Nehal90/Conference_Central
ac49f36a0e628a066988b7e6393bed29d4aecf23
[ "Apache-2.0" ]
null
null
null
settings.py
Nehal90/Conference_Central
ac49f36a0e628a066988b7e6393bed29d4aecf23
[ "Apache-2.0" ]
null
null
null
settings.py
Nehal90/Conference_Central
ac49f36a0e628a066988b7e6393bed29d4aecf23
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """settings.py Udacity conference server-side Python App Engine app user settings $Id$ created/forked from conference.py by wesc on 2014 may 24 """ # Replace the following lines with client IDs obtained from the APIs # Console or Cloud Console. WEB_CLIENT_ID = '72691056799-4vk1buhvl0r5hkkh04v7r26tv2m29p7k.apps.googleusercontent.com' # ANDROID_CLIENT_ID = 'replace with Android client ID' # IOS_CLIENT_ID = 'replace with iOS client ID' # ANDROID_AUDIENCE = WEB_CLIENT_ID
26.315789
89
0.788
WEB_CLIENT_ID = '72691056799-4vk1buhvl0r5hkkh04v7r26tv2m29p7k.apps.googleusercontent.com'
true
true
1c4349b3af412aec59ad5423ae460850b44d434f
2,807
py
Python
options/train_options.py
masontchen/GANPOP
235f1cb7bb14a7b62197114fbe39bfdd4736f5e3
[ "MIT" ]
2
2019-10-08T18:59:15.000Z
2021-06-28T09:22:39.000Z
options/train_options.py
masontchen/GANPOP
235f1cb7bb14a7b62197114fbe39bfdd4736f5e3
[ "MIT" ]
null
null
null
options/train_options.py
masontchen/GANPOP
235f1cb7bb14a7b62197114fbe39bfdd4736f5e3
[ "MIT" ]
2
2019-10-03T06:35:20.000Z
2020-08-18T12:44:23.000Z
from .base_options import BaseOptions class TrainOptions(BaseOptions): def initialize(self, parser): parser = BaseOptions.initialize(self, parser) parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen') parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...') parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp].') # parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau') parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') self.isTrain = True return parser
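# Editor's hedged usage sketch (illustration; the training script name and
# the --name flag come from the wider project, not from this file):
#   python train.py --name experiment1 --niter 100 --niter_decay 100 \
#       --lr 0.0002 --gan_mode lsgan --continue_train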
90.548387
175
0.705023
from .base_options import BaseOptions class TrainOptions(BaseOptions): def initialize(self, parser): parser = BaseOptions.initialize(self, parser) parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen') parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...') parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp].') parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau') parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') self.isTrain = True return parser
true
true
1c434aa48f3265b229e2a4c8296bfe89aec06143
811
py
Python
nginx_rtmp_wizard/urls.py
Gerhut/nginx-rtmp-wizard
c821c3bb262503ee26408b8b3bf4a252b49a29d6
[ "Unlicense" ]
null
null
null
nginx_rtmp_wizard/urls.py
Gerhut/nginx-rtmp-wizard
c821c3bb262503ee26408b8b3bf4a252b49a29d6
[ "Unlicense" ]
1
2021-06-10T20:32:59.000Z
2021-06-10T20:32:59.000Z
nginx_rtmp_wizard/urls.py
Gerhut/nginx-rtmp-wizard
c821c3bb262503ee26408b8b3bf4a252b49a29d6
[ "Unlicense" ]
null
null
null
"""nginx_rtmp_wizard URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from . import views urlpatterns = [ path('', views.rtmp_conf), path('admin/', admin.site.urls), ]
32.44
77
0.707768
from django.contrib import admin from django.urls import path from . import views urlpatterns = [ path('', views.rtmp_conf), path('admin/', admin.site.urls), ]
true
true
1c434ae9bf00b2693b91079d22ca9150a3986139
5,459
gyp
Python
src/third_party/skia/gyp/images.gyp
neeker/chromium_extract
0f9a0206a1876e98cf69e03869983e573138284c
[ "BSD-3-Clause" ]
27
2016-04-27T01:02:03.000Z
2021-12-13T08:53:19.000Z
src/third_party/skia/gyp/images.gyp
neeker/chromium_extract
0f9a0206a1876e98cf69e03869983e573138284c
[ "BSD-3-Clause" ]
2
2017-03-09T09:00:50.000Z
2017-09-21T15:48:20.000Z
src/third_party/skia/gyp/images.gyp
neeker/chromium_extract
0f9a0206a1876e98cf69e03869983e573138284c
[ "BSD-3-Clause" ]
17
2016-04-27T02:06:39.000Z
2019-12-18T08:07:00.000Z
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# GYP file for images project.
{
  'targets': [
    {
      'target_name': 'images',
      'product_name': 'skia_images',
      'type': 'static_library',
      'standalone_static_library': 1,
      'dependencies': [
        'core.gyp:*',
        'giflib.gyp:giflib',
        'libjpeg-turbo-selector.gyp:libjpeg-turbo-selector',
        'etc1.gyp:libetc1',
        'ktx.gyp:libSkKTX',
        'libwebp.gyp:libwebp',
        'utils.gyp:utils',
      ],
      'include_dirs': [
        '../include/images',
        '../include/private',
        '../src/lazy',
        # for access to SkErrorInternals.h
        '../src/core/',
        # for access to SkImagePriv.h
        '../src/image/',
      ],
      'sources': [
        '../include/images/SkDecodingImageGenerator.h',
        '../include/images/SkForceLinking.h',
        '../src/images/SkJpegUtility.h',
        '../include/images/SkMovie.h',
        '../include/images/SkPageFlipper.h',
        '../src/images/bmpdecoderhelper.cpp',
        '../src/images/bmpdecoderhelper.h',
        '../src/images/SkDecodingImageGenerator.cpp',
        '../src/images/SkForceLinking.cpp',
        '../src/images/SkImageDecoder.cpp',
        '../src/images/SkImageDecoder_FactoryDefault.cpp',
        '../src/images/SkImageDecoder_FactoryRegistrar.cpp',
        # If decoders are added/removed to/from (all/individual)
        # platform(s), be sure to update SkForceLinking.cpp
        # so the right decoders will be forced to link.
        # IMPORTANT: The build order of the SkImageDecoder_*.cpp files
        # defines the order image decoders are tested when decoding a
        # stream. The last decoder is the first one tested, so the .cpp
        # files should be in listed in order from the least likely to be
        # used, to the most likely (jpeg and png should be the last two
        # for instance.) As a result, they are deliberately not in
        # alphabetical order.
        '../src/images/SkImageDecoder_wbmp.cpp',
        '../src/images/SkImageDecoder_pkm.cpp',
        '../src/images/SkImageDecoder_ktx.cpp',
        '../src/images/SkImageDecoder_astc.cpp',
        '../src/images/SkImageDecoder_libbmp.cpp',
        '../src/images/SkImageDecoder_libgif.cpp',
        '../src/images/SkImageDecoder_libico.cpp',
        '../src/images/SkImageDecoder_libwebp.cpp',
        '../src/images/SkImageDecoder_libjpeg.cpp',
        '../src/images/SkImageDecoder_libpng.cpp',
        '../src/images/SkImageEncoder.cpp',
        '../src/images/SkImageEncoder_Factory.cpp',
        '../src/images/SkImageEncoder_argb.cpp',
        '../src/images/SkJpegUtility.cpp',
        '../src/images/SkMovie.cpp',
        '../src/images/SkMovie_gif.cpp',
        '../src/images/SkPageFlipper.cpp',
        '../src/images/SkScaledBitmapSampler.cpp',
        '../src/images/SkScaledBitmapSampler.h',
        '../src/ports/SkImageGenerator_skia.cpp',
        '../src/ports/SkImageDecoder_CG.cpp',
        '../src/ports/SkImageDecoder_WIC.cpp',
      ],
      'conditions': [
        [ 'skia_os == "win"', {
          'sources!': [
            '../src/images/SkImageDecoder_FactoryDefault.cpp',
            '../src/images/SkImageDecoder_libgif.cpp',
            '../src/images/SkImageDecoder_libpng.cpp',
            '../src/images/SkMovie_gif.cpp',
          ],
          'dependencies!': [
            'giflib.gyp:giflib'
          ],
          'link_settings': {
            'libraries': [
              '-lwindowscodecs.lib',
            ],
          },
        },{ #else if skia_os != win
          'sources!': [
            '../src/ports/SkImageDecoder_WIC.cpp',
          ],
        }],
        [ 'skia_os in ["mac", "ios"]', {
          'sources!': [
            '../src/images/SkImageDecoder_FactoryDefault.cpp',
            '../src/images/SkImageDecoder_libpng.cpp',
            '../src/images/SkImageDecoder_libgif.cpp',
            '../src/images/SkMovie_gif.cpp',
          ],
        },{ #else if skia_os != mac
          'sources!': [
            '../src/ports/SkImageDecoder_CG.cpp',
          ],
        }],
        [ 'skia_os in ["linux", "freebsd", "openbsd", "solaris"]', {
          'dependencies': [
            'libpng.gyp:libpng',
          ],
          # end libpng stuff
        }],
        [ 'skia_os == "android"', {
          'include_dirs': [
            '../src/utils',
          ],
          'dependencies': [
            'libpng.gyp:libpng',
          ],
          'conditions': [
            [ 'skia_android_framework == 1', {
              # The android framework disables these decoders as they are of little use to
              # Java applications that can't take advantage of the compressed formats.
              'sources!': [
                '../src/images/SkImageDecoder_pkm.cpp',
                '../src/images/SkImageDecoder_ktx.cpp',
                '../src/images/SkImageDecoder_astc.cpp',
              ],
            }],
          ],
        }],
        [ 'skia_os == "chromeos"', {
          'dependencies': [
            'libpng.gyp:libpng',
          ],
        }],
        [ 'skia_os == "ios"', {
          'include_dirs': [
            '../include/utils/mac',
          ],
        }],
      ],
      'direct_dependent_settings': {
        'include_dirs': [
          '../include/images',
        ],
      },
    },
  ],
}
33.697531
90
0.532881
{
  'targets': [
    {
      'target_name': 'images',
      'product_name': 'skia_images',
      'type': 'static_library',
      'standalone_static_library': 1,
      'dependencies': [
        'core.gyp:*',
        'giflib.gyp:giflib',
        'libjpeg-turbo-selector.gyp:libjpeg-turbo-selector',
        'etc1.gyp:libetc1',
        'ktx.gyp:libSkKTX',
        'libwebp.gyp:libwebp',
        'utils.gyp:utils',
      ],
      'include_dirs': [
        '../include/images',
        '../include/private',
        '../src/lazy',
        '../src/core/',
        '../src/image/',
      ],
      'sources': [
        '../include/images/SkDecodingImageGenerator.h',
        '../include/images/SkForceLinking.h',
        '../src/images/SkJpegUtility.h',
        '../include/images/SkMovie.h',
        '../include/images/SkPageFlipper.h',
        '../src/images/bmpdecoderhelper.cpp',
        '../src/images/bmpdecoderhelper.h',
        '../src/images/SkDecodingImageGenerator.cpp',
        '../src/images/SkForceLinking.cpp',
        '../src/images/SkImageDecoder.cpp',
        '../src/images/SkImageDecoder_FactoryDefault.cpp',
        '../src/images/SkImageDecoder_FactoryRegistrar.cpp',
        '../src/images/SkImageDecoder_wbmp.cpp',
        '../src/images/SkImageDecoder_pkm.cpp',
        '../src/images/SkImageDecoder_ktx.cpp',
        '../src/images/SkImageDecoder_astc.cpp',
        '../src/images/SkImageDecoder_libbmp.cpp',
        '../src/images/SkImageDecoder_libgif.cpp',
        '../src/images/SkImageDecoder_libico.cpp',
        '../src/images/SkImageDecoder_libwebp.cpp',
        '../src/images/SkImageDecoder_libjpeg.cpp',
        '../src/images/SkImageDecoder_libpng.cpp',
        '../src/images/SkImageEncoder.cpp',
        '../src/images/SkImageEncoder_Factory.cpp',
        '../src/images/SkImageEncoder_argb.cpp',
        '../src/images/SkJpegUtility.cpp',
        '../src/images/SkMovie.cpp',
        '../src/images/SkMovie_gif.cpp',
        '../src/images/SkPageFlipper.cpp',
        '../src/images/SkScaledBitmapSampler.cpp',
        '../src/images/SkScaledBitmapSampler.h',
        '../src/ports/SkImageGenerator_skia.cpp',
        '../src/ports/SkImageDecoder_CG.cpp',
        '../src/ports/SkImageDecoder_WIC.cpp',
      ],
      'conditions': [
        [ 'skia_os == "win"', {
          'sources!': [
            '../src/images/SkImageDecoder_FactoryDefault.cpp',
            '../src/images/SkImageDecoder_libgif.cpp',
            '../src/images/SkImageDecoder_libpng.cpp',
            '../src/images/SkMovie_gif.cpp',
          ],
          'dependencies!': [
            'giflib.gyp:giflib'
          ],
          'link_settings': {
            'libraries': [
              '-lwindowscodecs.lib',
            ],
          },
        },{
          'sources!': [
            '../src/ports/SkImageDecoder_WIC.cpp',
          ],
        }],
        [ 'skia_os in ["mac", "ios"]', {
          'sources!': [
            '../src/images/SkImageDecoder_FactoryDefault.cpp',
            '../src/images/SkImageDecoder_libpng.cpp',
            '../src/images/SkImageDecoder_libgif.cpp',
            '../src/images/SkMovie_gif.cpp',
          ],
        },{
          'sources!': [
            '../src/ports/SkImageDecoder_CG.cpp',
          ],
        }],
        [ 'skia_os in ["linux", "freebsd", "openbsd", "solaris"]', {
          'dependencies': [
            'libpng.gyp:libpng',
          ],
        }],
        [ 'skia_os == "android"', {
          'include_dirs': [
            '../src/utils',
          ],
          'dependencies': [
            'libpng.gyp:libpng',
          ],
          'conditions': [
            [ 'skia_android_framework == 1', {
              'sources!': [
                '../src/images/SkImageDecoder_pkm.cpp',
                '../src/images/SkImageDecoder_ktx.cpp',
                '../src/images/SkImageDecoder_astc.cpp',
              ],
            }],
          ],
        }],
        [ 'skia_os == "chromeos"', {
          'dependencies': [
            'libpng.gyp:libpng',
          ],
        }],
        [ 'skia_os == "ios"', {
          'include_dirs': [
            '../include/utils/mac',
          ],
        }],
      ],
      'direct_dependent_settings': {
        'include_dirs': [
          '../include/images',
        ],
      },
    },
  ],
}
true
true
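The .gyp file in the record above is a Python-literal dictionary, so its structure can be inspected directly once the '#' comments are stripped. A minimal sketch, assuming the record's content is saved locally as images.gyp (the file name and the comment-stripping regex are illustrative, not part of the record):

import ast
import re

# Read the .gyp file and drop '#' comments so ast.literal_eval can parse
# the remaining Python-literal dict (no string in this file contains '#').
with open("images.gyp") as f:
    text = re.sub(r"#.*", "", f.read())

gyp = ast.literal_eval(text)
target = gyp["targets"][0]
print(target["target_name"])  # -> images

# The SkImageDecoder_*.cpp ordering matters: per the file's own comment, the
# last-listed decoder is tried first at runtime, so jpeg/png come last.
print([s for s in target["sources"] if "SkImageDecoder_lib" in s])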
1c434b5af497473b6ccd24c93ac7686c7ecb97f5
27530
py
Python
airflow/providers/google/cloud/operators/workflows.py
Piatachock/airflow
6dd0a0df7e6a2f025e9234bdbf97b41e9b8f6257
[ "Apache-2.0" ]
3
2020-12-25T04:09:44.000Z
2021-04-02T13:37:42.000Z
airflow/providers/google/cloud/operators/workflows.py
XiangchunChen/airflow
56bdfe7a840c25360d596ca94fd11d2ccfadb4ba
[ "Apache-2.0" ]
5
2021-06-16T11:41:36.000Z
2022-01-27T17:20:37.000Z
airflow/providers/google/cloud/operators/workflows.py
XiangchunChen/airflow
56bdfe7a840c25360d596ca94fd11d2ccfadb4ba
[ "Apache-2.0" ]
2
2021-12-28T22:46:07.000Z
2022-01-08T13:29:00.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import hashlib import json import re import uuid from datetime import datetime, timedelta from typing import Dict, Optional, Sequence, Tuple, Union import pytz from google.api_core.exceptions import AlreadyExists from google.api_core.retry import Retry from google.cloud.workflows.executions_v1beta import Execution from google.cloud.workflows_v1beta import Workflow from google.protobuf.field_mask_pb2 import FieldMask from airflow.models import BaseOperator from airflow.providers.google.cloud.hooks.workflows import WorkflowsHook class WorkflowsCreateWorkflowOperator(BaseOperator): """ Creates a new workflow. If a workflow with the specified name already exists in the specified project and location, the long running operation will return [ALREADY_EXISTS][google.rpc.Code.ALREADY_EXISTS] error. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsCreateWorkflowOperator` :param workflow: Required. Workflow to be created. :type workflow: Dict :param workflow_id: Required. The ID of the workflow to be created. :type workflow_id: str :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. 
:type metadata: Sequence[Tuple[str, str]] """ template_fields = ("location", "workflow", "workflow_id") template_fields_renderers = {"workflow": "json"} def __init__( self, *, workflow: Dict, workflow_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", force_rerun: bool = False, impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow = workflow self.workflow_id = workflow_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain self.force_rerun = force_rerun def _workflow_id(self, context): if self.workflow_id and not self.force_rerun: # If users provide workflow id then assuring the idempotency # is on their side return self.workflow_id if self.force_rerun: hash_base = str(uuid.uuid4()) else: hash_base = json.dumps(self.workflow, sort_keys=True) # We are limited by allowed length of workflow_id so # we use hash of whole information exec_date = context['execution_date'].isoformat() base = f"airflow_{self.dag_id}_{self.task_id}_{exec_date}_{hash_base}" workflow_id = hashlib.md5(base.encode()).hexdigest() return re.sub(r"[:\-+.]", "_", workflow_id) def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) workflow_id = self._workflow_id(context) self.log.info("Creating workflow") try: operation = hook.create_workflow( workflow=self.workflow, workflow_id=workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) workflow = operation.result() except AlreadyExists: workflow = hook.get_workflow( workflow_id=workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return Workflow.to_dict(workflow) class WorkflowsUpdateWorkflowOperator(BaseOperator): """ Updates an existing workflow. Running this method has no impact on already running executions of the workflow. A new revision of the workflow may be created as a result of a successful update operation. In that case, such revision will be used in new workflow executions. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsUpdateWorkflowOperator` :param workflow_id: Required. The ID of the workflow to be updated. :type workflow_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param update_mask: List of fields to be updated. If not present, the entire workflow will be updated. :type update_mask: FieldMask :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. 
:type metadata: Sequence[Tuple[str, str]] """ template_fields = ("workflow_id", "update_mask") template_fields_renderers = {"update_mask": "json"} def __init__( self, *, workflow_id: str, location: str, project_id: Optional[str] = None, update_mask: Optional[FieldMask] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.location = location self.project_id = project_id self.update_mask = update_mask self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) workflow = hook.get_workflow( workflow_id=self.workflow_id, project_id=self.project_id, location=self.location, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) self.log.info("Updating workflow") operation = hook.update_workflow( workflow=workflow, update_mask=self.update_mask, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) workflow = operation.result() return Workflow.to_dict(workflow) class WorkflowsDeleteWorkflowOperator(BaseOperator): """ Deletes a workflow with the specified name. This method also cancels and deletes all running executions of the workflow. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsDeleteWorkflowOperator` :param workflow_id: Required. The ID of the workflow to be created. :type workflow_id: str :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. :type metadata: Sequence[Tuple[str, str]] """ template_fields = ("location", "workflow_id") def __init__( self, *, workflow_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Deleting workflow %s", self.workflow_id) operation = hook.delete_workflow( workflow_id=self.workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) operation.result() class WorkflowsListWorkflowsOperator(BaseOperator): """ Lists Workflows in a given project and location. 
The default order is not specified. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsListWorkflowsOperator` :param filter_: Filter to restrict results to specific workflows. :type filter_: str :param order_by: Comma-separated list of fields that specifies the order of the results. Default sorting order for a field is ascending. To specify descending order for a field, append a "desc" suffix. If not specified, the results will be returned in an unspecified order. :type order_by: str :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. :type metadata: Sequence[Tuple[str, str]] """ template_fields = ("location", "order_by", "filter_") def __init__( self, *, location: str, project_id: Optional[str] = None, filter_: Optional[str] = None, order_by: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.filter_ = filter_ self.order_by = order_by self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Retrieving workflows") workflows_iter = hook.list_workflows( filter_=self.filter_, order_by=self.order_by, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return [Workflow.to_dict(w) for w in workflows_iter] class WorkflowsGetWorkflowOperator(BaseOperator): """ Gets details of a single Workflow. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsGetWorkflowOperator` :param workflow_id: Required. The ID of the workflow to be created. :type workflow_id: str :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. 
:type metadata: Sequence[Tuple[str, str]] """ template_fields = ("location", "workflow_id") def __init__( self, *, workflow_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Retrieving workflow") workflow = hook.get_workflow( workflow_id=self.workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return Workflow.to_dict(workflow) class WorkflowsCreateExecutionOperator(BaseOperator): """ Creates a new execution using the latest revision of the given workflow. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsCreateExecutionOperator` :param execution: Required. Execution to be created. :type execution: Dict :param workflow_id: Required. The ID of the workflow. :type workflow_id: str :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. :type metadata: Sequence[Tuple[str, str]] """ template_fields = ("location", "workflow_id", "execution") template_fields_renderers = {"execution": "json"} def __init__( self, *, workflow_id: str, execution: Dict, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.execution = execution self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Creating execution") execution = hook.create_execution( workflow_id=self.workflow_id, execution=self.execution, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) execution_id = execution.name.split("/")[-1] self.xcom_push(context, key="execution_id", value=execution_id) return Execution.to_dict(execution) class WorkflowsCancelExecutionOperator(BaseOperator): """ Cancels an execution using the given ``workflow_id`` and ``execution_id``. .. 
seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsCancelExecutionOperator` :param workflow_id: Required. The ID of the workflow. :type workflow_id: str :param execution_id: Required. The ID of the execution. :type execution_id: str :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. :type metadata: Sequence[Tuple[str, str]] """ template_fields = ("location", "workflow_id", "execution_id") def __init__( self, *, workflow_id: str, execution_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.execution_id = execution_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Canceling execution %s", self.execution_id) execution = hook.cancel_execution( workflow_id=self.workflow_id, execution_id=self.execution_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return Execution.to_dict(execution) class WorkflowsListExecutionsOperator(BaseOperator): """ Returns a list of executions which belong to the workflow with the given name. The method returns executions of all workflow revisions. Returned executions are ordered by their start time (newest first). .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsListExecutionsOperator` :param workflow_id: Required. The ID of the workflow to be created. :type workflow_id: str :param start_date_filter: If passed only executions older that this date will be returned. By default operators return executions from last 60 minutes :type start_date_filter: datetime :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. 
:type metadata: Sequence[Tuple[str, str]] """ template_fields = ("location", "workflow_id") def __init__( self, *, workflow_id: str, location: str, start_date_filter: Optional[datetime] = None, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.location = location self.start_date_filter = start_date_filter or datetime.now(tz=pytz.UTC) - timedelta(minutes=60) self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Retrieving executions for workflow %s", self.workflow_id) execution_iter = hook.list_executions( workflow_id=self.workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return [Execution.to_dict(e) for e in execution_iter if e.start_time > self.start_date_filter] class WorkflowsGetExecutionOperator(BaseOperator): """ Returns an execution for the given ``workflow_id`` and ``execution_id``. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:WorkflowsGetExecutionOperator` :param workflow_id: Required. The ID of the workflow. :type workflow_id: str :param execution_id: Required. The ID of the execution. :type execution_id: str :param project_id: Required. The ID of the Google Cloud project the cluster belongs to. :type project_id: str :param location: Required. The GCP region in which to handle the request. :type location: str :param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Additional metadata that is provided to the method. :type metadata: Sequence[Tuple[str, str]] """ template_fields = ("location", "workflow_id", "execution_id") def __init__( self, *, workflow_id: str, execution_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.execution_id = execution_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Retrieving execution %s for workflow %s", self.execution_id, self.workflow_id) execution = hook.get_execution( workflow_id=self.workflow_id, execution_id=self.execution_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return Execution.to_dict(execution)
38.720113
104
0.663967
import hashlib import json import re import uuid from datetime import datetime, timedelta from typing import Dict, Optional, Sequence, Tuple, Union import pytz from google.api_core.exceptions import AlreadyExists from google.api_core.retry import Retry from google.cloud.workflows.executions_v1beta import Execution from google.cloud.workflows_v1beta import Workflow from google.protobuf.field_mask_pb2 import FieldMask from airflow.models import BaseOperator from airflow.providers.google.cloud.hooks.workflows import WorkflowsHook class WorkflowsCreateWorkflowOperator(BaseOperator): template_fields = ("location", "workflow", "workflow_id") template_fields_renderers = {"workflow": "json"} def __init__( self, *, workflow: Dict, workflow_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", force_rerun: bool = False, impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow = workflow self.workflow_id = workflow_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain self.force_rerun = force_rerun def _workflow_id(self, context): if self.workflow_id and not self.force_rerun: return self.workflow_id if self.force_rerun: hash_base = str(uuid.uuid4()) else: hash_base = json.dumps(self.workflow, sort_keys=True) exec_date = context['execution_date'].isoformat() base = f"airflow_{self.dag_id}_{self.task_id}_{exec_date}_{hash_base}" workflow_id = hashlib.md5(base.encode()).hexdigest() return re.sub(r"[:\-+.]", "_", workflow_id) def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) workflow_id = self._workflow_id(context) self.log.info("Creating workflow") try: operation = hook.create_workflow( workflow=self.workflow, workflow_id=workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) workflow = operation.result() except AlreadyExists: workflow = hook.get_workflow( workflow_id=workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return Workflow.to_dict(workflow) class WorkflowsUpdateWorkflowOperator(BaseOperator): template_fields = ("workflow_id", "update_mask") template_fields_renderers = {"update_mask": "json"} def __init__( self, *, workflow_id: str, location: str, project_id: Optional[str] = None, update_mask: Optional[FieldMask] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.location = location self.project_id = project_id self.update_mask = update_mask self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) workflow = hook.get_workflow( workflow_id=self.workflow_id, project_id=self.project_id, location=self.location, retry=self.retry, 
timeout=self.timeout, metadata=self.metadata, ) self.log.info("Updating workflow") operation = hook.update_workflow( workflow=workflow, update_mask=self.update_mask, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) workflow = operation.result() return Workflow.to_dict(workflow) class WorkflowsDeleteWorkflowOperator(BaseOperator): template_fields = ("location", "workflow_id") def __init__( self, *, workflow_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Deleting workflow %s", self.workflow_id) operation = hook.delete_workflow( workflow_id=self.workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) operation.result() class WorkflowsListWorkflowsOperator(BaseOperator): template_fields = ("location", "order_by", "filter_") def __init__( self, *, location: str, project_id: Optional[str] = None, filter_: Optional[str] = None, order_by: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.filter_ = filter_ self.order_by = order_by self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Retrieving workflows") workflows_iter = hook.list_workflows( filter_=self.filter_, order_by=self.order_by, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return [Workflow.to_dict(w) for w in workflows_iter] class WorkflowsGetWorkflowOperator(BaseOperator): template_fields = ("location", "workflow_id") def __init__( self, *, workflow_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Retrieving workflow") workflow = hook.get_workflow( workflow_id=self.workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, 
) return Workflow.to_dict(workflow) class WorkflowsCreateExecutionOperator(BaseOperator): template_fields = ("location", "workflow_id", "execution") template_fields_renderers = {"execution": "json"} def __init__( self, *, workflow_id: str, execution: Dict, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.execution = execution self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Creating execution") execution = hook.create_execution( workflow_id=self.workflow_id, execution=self.execution, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) execution_id = execution.name.split("/")[-1] self.xcom_push(context, key="execution_id", value=execution_id) return Execution.to_dict(execution) class WorkflowsCancelExecutionOperator(BaseOperator): template_fields = ("location", "workflow_id", "execution_id") def __init__( self, *, workflow_id: str, execution_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.execution_id = execution_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Canceling execution %s", self.execution_id) execution = hook.cancel_execution( workflow_id=self.workflow_id, execution_id=self.execution_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return Execution.to_dict(execution) class WorkflowsListExecutionsOperator(BaseOperator): template_fields = ("location", "workflow_id") def __init__( self, *, workflow_id: str, location: str, start_date_filter: Optional[datetime] = None, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.location = location self.start_date_filter = start_date_filter or datetime.now(tz=pytz.UTC) - timedelta(minutes=60) self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Retrieving executions for workflow %s", 
self.workflow_id) execution_iter = hook.list_executions( workflow_id=self.workflow_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return [Execution.to_dict(e) for e in execution_iter if e.start_time > self.start_date_filter] class WorkflowsGetExecutionOperator(BaseOperator): template_fields = ("location", "workflow_id", "execution_id") def __init__( self, *, workflow_id: str, execution_id: str, location: str, project_id: Optional[str] = None, retry: Optional[Retry] = None, timeout: Optional[float] = None, metadata: Optional[Sequence[Tuple[str, str]]] = None, gcp_conn_id: str = "google_cloud_default", impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ): super().__init__(**kwargs) self.workflow_id = workflow_id self.execution_id = execution_id self.location = location self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context): hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) self.log.info("Retrieving execution %s for workflow %s", self.execution_id, self.workflow_id) execution = hook.get_execution( workflow_id=self.workflow_id, execution_id=self.execution_id, location=self.location, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) return Execution.to_dict(execution)
true
true
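The operator classes in the record above compose naturally in a DAG: create a workflow, then trigger an execution of it. A minimal sketch under assumed placeholder values (the DAG id, project, location and the workflow source below are invented for illustration, not taken from the record):

from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.operators.workflows import (
    WorkflowsCreateWorkflowOperator,
    WorkflowsCreateExecutionOperator,
)

# Placeholder workflow definition; source_contents would hold the real
# Workflows program.
WORKFLOW = {"source_contents": '{"main": {"steps": []}}'}

with DAG("example_workflows", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    create_workflow = WorkflowsCreateWorkflowOperator(
        task_id="create_workflow",
        workflow=WORKFLOW,
        workflow_id="demo-workflow",      # placeholder
        location="us-central1",           # placeholder
        project_id="my-project",          # placeholder
    )
    create_execution = WorkflowsCreateExecutionOperator(
        task_id="create_execution",
        execution={},                     # empty Execution -> run with no arguments
        workflow_id="demo-workflow",
        location="us-central1",
        project_id="my-project",
    )
    # The execution id is pushed to XCom by the operator, so downstream
    # tasks (e.g. WorkflowsGetExecutionOperator) can pull it.
    create_workflow >> create_execution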
1c434bce107c02897258cb87e66f2cdcbcbad14c
1804
py
Python
mushroom_rl/algorithms/value/td/sarsa_lambda_continuous.py
PuzeLiu/mushroom-rl
99942b425e66b4ddcc26009d7105dde23841e95d
[ "MIT" ]
344
2020-01-10T09:45:02.000Z
2022-03-30T09:48:28.000Z
mushroom_rl/algorithms/value/td/sarsa_lambda_continuous.py
PuzeLiu/mushroom-rl
99942b425e66b4ddcc26009d7105dde23841e95d
[ "MIT" ]
44
2020-01-23T03:00:56.000Z
2022-03-25T17:14:22.000Z
mushroom_rl/algorithms/value/td/sarsa_lambda_continuous.py
PuzeLiu/mushroom-rl
99942b425e66b4ddcc26009d7105dde23841e95d
[ "MIT" ]
93
2020-01-10T21:17:58.000Z
2022-03-31T17:58:52.000Z
import numpy as np

from mushroom_rl.algorithms.value.td import TD
from mushroom_rl.approximators import Regressor
from mushroom_rl.utils.parameters import to_parameter


class SARSALambdaContinuous(TD):
    """
    Continuous version of SARSA(lambda) algorithm.

    """
    def __init__(self, mdp_info, policy, approximator, learning_rate,
                 lambda_coeff, features, approximator_params=None):
        """
        Constructor.

        Args:
            lambda_coeff ([float, Parameter]): eligibility trace coefficient.

        """
        approximator_params = dict() if approximator_params is None else \
            approximator_params

        Q = Regressor(approximator, **approximator_params)
        self.e = np.zeros(Q.weights_size)
        self._lambda = to_parameter(lambda_coeff)

        self._add_save_attr(
            _lambda='primitive',
            e='numpy'
        )

        super().__init__(mdp_info, policy, Q, learning_rate, features)

    def _update(self, state, action, reward, next_state, absorbing):
        phi_state = self.phi(state)
        q_current = self.Q.predict(phi_state, action)

        alpha = self._alpha(state, action)

        self.e = self.mdp_info.gamma * self._lambda() * self.e + self.Q.diff(
            phi_state, action)

        self.next_action = self.draw_action(next_state)
        phi_next_state = self.phi(next_state)
        q_next = self.Q.predict(phi_next_state,
                                self.next_action) if not absorbing else 0.

        delta = reward + self.mdp_info.gamma * q_next - q_current

        theta = self.Q.get_weights()
        theta += alpha * delta * self.e
        self.Q.set_weights(theta)

    def episode_start(self):
        self.e = np.zeros(self.Q.weights_size)

        super().episode_start()
30.066667
77
0.634701
import numpy as np

from mushroom_rl.algorithms.value.td import TD
from mushroom_rl.approximators import Regressor
from mushroom_rl.utils.parameters import to_parameter


class SARSALambdaContinuous(TD):
    def __init__(self, mdp_info, policy, approximator, learning_rate,
                 lambda_coeff, features, approximator_params=None):
        approximator_params = dict() if approximator_params is None else \
            approximator_params

        Q = Regressor(approximator, **approximator_params)
        self.e = np.zeros(Q.weights_size)
        self._lambda = to_parameter(lambda_coeff)

        self._add_save_attr(
            _lambda='primitive',
            e='numpy'
        )

        super().__init__(mdp_info, policy, Q, learning_rate, features)

    def _update(self, state, action, reward, next_state, absorbing):
        phi_state = self.phi(state)
        q_current = self.Q.predict(phi_state, action)

        alpha = self._alpha(state, action)

        self.e = self.mdp_info.gamma * self._lambda() * self.e + self.Q.diff(
            phi_state, action)

        self.next_action = self.draw_action(next_state)
        phi_next_state = self.phi(next_state)
        q_next = self.Q.predict(phi_next_state,
                                self.next_action) if not absorbing else 0.

        delta = reward + self.mdp_info.gamma * q_next - q_current

        theta = self.Q.get_weights()
        theta += alpha * delta * self.e
        self.Q.set_weights(theta)

    def episode_start(self):
        self.e = np.zeros(self.Q.weights_size)

        super().episode_start()
true
true
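The _update method in the record above implements the accumulating eligibility trace e <- gamma * lambda * e + dQ/dtheta followed by the TD update theta <- theta + alpha * delta * e. A self-contained numpy sketch of one such step for a linear Q approximator; all constants and features below are made up for illustration, and for linear features the gradient dQ/dtheta is simply phi(s, a):

import numpy as np

gamma, lam, alpha = 0.99, 0.9, 0.1

theta = np.zeros(4)                       # weights of a linear Q approximator
e = np.zeros_like(theta)                  # eligibility trace

phi_sa = np.array([1.0, 0.5, 0.0, 2.0])   # features of (state, action), made up
q_current = theta @ phi_sa

e = gamma * lam * e + phi_sa              # accumulate trace; dQ/dtheta == phi
q_next = 0.0                              # absorbing next state in this toy step
delta = 1.0 + gamma * q_next - q_current  # TD error for a made-up reward of 1.0

theta += alpha * delta * e                # same update the record applies to Q's weights
print(theta)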
1c434c195eeea6ea401b7813cdfd0c50091f02a0
1219
py
Python
Tools/LyTestTools/tests/unit/test_ly_process_killer.py
cypherdotXd/o3de
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
[ "Apache-2.0", "MIT" ]
11
2021-07-08T09:58:26.000Z
2022-03-17T17:59:26.000Z
Tools/LyTestTools/tests/unit/test_ly_process_killer.py
RoddieKieley/o3de
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
[ "Apache-2.0", "MIT" ]
29
2021-07-06T19:33:52.000Z
2022-03-22T10:27:49.000Z
Tools/LyTestTools/tests/unit/test_ly_process_killer.py
RoddieKieley/o3de
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
[ "Apache-2.0", "MIT" ]
4
2021-07-06T19:24:43.000Z
2022-03-31T12:42:27.000Z
""" Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT Unit tests for ly_test_tools._internal.managers.ly_process_killer """ import unittest.mock as mock import pytest import ly_test_tools._internal.managers.ly_process_killer pytestmark = pytest.mark.SUITE_smoke class TestProcessKiller(object): @mock.patch('ly_test_tools.environment.process_utils.process_exists') def test_DetectLumberyardProcesses_ValidProcessesList_ReturnsDetectedProcessesList(self, mock_process_exists): mock_process_exists.side_effect = [True, False] mock_process_list = ['foo', 'bar'] under_test = ly_test_tools._internal.managers.ly_process_killer.detect_lumberyard_processes( processes_list=mock_process_list) assert under_test == ['foo'] def test_KillProcesses_ProcessesListIsNotList_RaisesLyProcessKillerException(self): with pytest.raises(ly_test_tools._internal.managers.ly_process_killer.LyProcessKillerException): ly_test_tools._internal.managers.ly_process_killer.kill_processes(processes_list={})
34.828571
114
0.794914
import unittest.mock as mock

import pytest

import ly_test_tools._internal.managers.ly_process_killer

pytestmark = pytest.mark.SUITE_smoke


class TestProcessKiller(object):

    @mock.patch('ly_test_tools.environment.process_utils.process_exists')
    def test_DetectLumberyardProcesses_ValidProcessesList_ReturnsDetectedProcessesList(self, mock_process_exists):
        mock_process_exists.side_effect = [True, False]
        mock_process_list = ['foo', 'bar']

        under_test = ly_test_tools._internal.managers.ly_process_killer.detect_lumberyard_processes(
            processes_list=mock_process_list)

        assert under_test == ['foo']

    def test_KillProcesses_ProcessesListIsNotList_RaisesLyProcessKillerException(self):
        with pytest.raises(ly_test_tools._internal.managers.ly_process_killer.LyProcessKillerException):
            ly_test_tools._internal.managers.ly_process_killer.kill_processes(processes_list={})
true
true
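The first test in the record above leans on mock.patch with a list-valued side_effect: each successive call to the patched process_exists consumes the next value, so 'foo' is reported as running and 'bar' is not. The same mock behaviour in isolation (the names below are illustrative):

import unittest.mock as mock

# A Mock with a list side_effect returns one item per call, in order.
probe = mock.Mock(side_effect=[True, False])

# First call -> True, second call -> False, mirroring the patched
# process_exists in the test above.
detected = [name for name in ['foo', 'bar'] if probe(name)]
assert detected == ['foo']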
1c434c4469fea787812685fd4db156c4149e329e
4,815
py
Python
h2o-bindings/bin/custom/python/gen_stackedensemble.py
13927729580/h2o-3
850ecb214f01340edb62c45242c76212f4b60381
[ "Apache-2.0" ]
1
2019-09-15T18:50:36.000Z
2019-09-15T18:50:36.000Z
h2o-bindings/bin/custom/python/gen_stackedensemble.py
13927729580/h2o-3
850ecb214f01340edb62c45242c76212f4b60381
[ "Apache-2.0" ]
null
null
null
h2o-bindings/bin/custom/python/gen_stackedensemble.py
13927729580/h2o-3
850ecb214f01340edb62c45242c76212f4b60381
[ "Apache-2.0" ]
null
null
null
rest_api_version = 99


def update_param(name, param):
    if name == 'metalearner_params':
        param['type'] = 'KeyValue'
        param['default_value'] = None
        return param
    return None  # param untouched


def class_extensions():
    def metalearner(self):
        """Print the metalearner of an H2OStackedEnsembleEstimator."""
        model = self._model_json["output"]
        if "metalearner" in model and model["metalearner"] is not None:
            return model["metalearner"]
        print("No metalearner for this model")

    def levelone_frame_id(self):
        """Fetch the levelone_frame_id for an H2OStackedEnsembleEstimator."""
        model = self._model_json["output"]
        if "levelone_frame_id" in model and model["levelone_frame_id"] is not None:
            return model["levelone_frame_id"]
        print("No levelone_frame_id for this model")

    def stacking_strategy(self):
        model = self._model_json["output"]
        if "stacking_strategy" in model and model["stacking_strategy"] is not None:
            return model["stacking_strategy"]
        print("No stacking strategy for this model")

    # Override train method to support blending
    def train(self, x=None, y=None, training_frame=None, blending_frame=None, **kwargs):
        blending_frame = H2OFrame._validate(blending_frame, 'blending_frame', required=False)

        def extend_parms(parms):
            if blending_frame is not None:
                parms['blending_frame'] = blending_frame
            if self.metalearner_fold_column is not None:
                parms['ignored_columns'].remove(quoted(self.metalearner_fold_column))

        super(self.__class__, self)._train(x, y, training_frame, extend_parms_fn=extend_parms, **kwargs)


extensions = dict(
    __imports__="""
from h2o.utils.shared_utils import quoted
from h2o.utils.typechecks import is_type
import json
import ast
""",
    __class__=class_extensions
)

overrides = dict(
    base_models=dict(
        setter="""
if is_type(base_models, {ptype}):
    {pname} = [b.model_id for b in {pname}]
    self._parms["{sname}"] = {pname}
else:
    assert_is_type({pname}, None, [str])
    self._parms["{sname}"] = {pname}
"""
    ),
    metalearner_params=dict(
        getter="""
if self._parms.get("{sname}") != None:
    metalearner_params_dict = ast.literal_eval(self._parms.get("{sname}"))
    for k in metalearner_params_dict:
        if len(metalearner_params_dict[k]) == 1: #single parameter
            metalearner_params_dict[k] = metalearner_params_dict[k][0]
    return metalearner_params_dict
else:
    return self._parms.get("{sname}")
""",
        setter="""
assert_is_type({pname}, None, {ptype})
if {pname} is not None and {pname} != "":
    for k in {pname}:
        if ("[" and "]") not in str(metalearner_params[k]):
            metalearner_params[k] = [metalearner_params[k]]
    self._parms["{sname}"] = str(json.dumps({pname}))
else:
    self._parms["{sname}"] = None
"""
    ),
)

doc = dict(
    __class__="""
Builds a stacked ensemble (aka "super learner") machine learning method that uses two or more
H2O learning algorithms to improve predictive performance. It is a loss-based supervised learning
method that finds the optimal combination of a collection of prediction algorithms.This method
supports regression and binary classification.
""",
)

examples = dict(
    __class__="""
>>> import h2o
>>> h2o.init()
>>> from h2o.estimators.random_forest import H2ORandomForestEstimator
>>> from h2o.estimators.gbm import H2OGradientBoostingEstimator
>>> from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
>>> col_types = ["numeric", "numeric", "numeric", "enum", "enum", "numeric", "numeric", "numeric", "numeric"]
>>> data = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv", col_types=col_types)
>>> train, test = data.split_frame(ratios=[.8], seed=1)
>>> x = ["CAPSULE","GLEASON","RACE","DPROS","DCAPS","PSA","VOL"]
>>> y = "AGE"
>>> nfolds = 5
>>> my_gbm = H2OGradientBoostingEstimator(nfolds=nfolds, fold_assignment="Modulo", keep_cross_validation_predictions=True)
>>> my_gbm.train(x=x, y=y, training_frame=train)
>>> my_rf = H2ORandomForestEstimator(nfolds=nfolds, fold_assignment="Modulo", keep_cross_validation_predictions=True)
>>> my_rf.train(x=x, y=y, training_frame=train)
>>> stack = H2OStackedEnsembleEstimator(model_id="my_ensemble", training_frame=train, validation_frame=test, base_models=[my_gbm.model_id, my_rf.model_id])
>>> stack.train(x=x, y=y, training_frame=train, validation_frame=test)
>>> stack.model_performance()
""",
    metalearner_params="""
>>> metalearner_params = {'max_depth': 2, 'col_sample_rate': 0.3}
""",
)
37.038462
155
0.675389
rest_api_version = 99


def update_param(name, param):
    if name == 'metalearner_params':
        param['type'] = 'KeyValue'
        param['default_value'] = None
        return param
    return None


def class_extensions():
    def metalearner(self):
        model = self._model_json["output"]
        if "metalearner" in model and model["metalearner"] is not None:
            return model["metalearner"]
        print("No metalearner for this model")

    def levelone_frame_id(self):
        model = self._model_json["output"]
        if "levelone_frame_id" in model and model["levelone_frame_id"] is not None:
            return model["levelone_frame_id"]
        print("No levelone_frame_id for this model")

    def stacking_strategy(self):
        model = self._model_json["output"]
        if "stacking_strategy" in model and model["stacking_strategy"] is not None:
            return model["stacking_strategy"]
        print("No stacking strategy for this model")

    def train(self, x=None, y=None, training_frame=None, blending_frame=None, **kwargs):
        blending_frame = H2OFrame._validate(blending_frame, 'blending_frame', required=False)

        def extend_parms(parms):
            if blending_frame is not None:
                parms['blending_frame'] = blending_frame
            if self.metalearner_fold_column is not None:
                parms['ignored_columns'].remove(quoted(self.metalearner_fold_column))

        super(self.__class__, self)._train(x, y, training_frame, extend_parms_fn=extend_parms, **kwargs)


extensions = dict(
    __imports__="""
from h2o.utils.shared_utils import quoted
from h2o.utils.typechecks import is_type
import json
import ast
""",
    __class__=class_extensions
)

overrides = dict(
    base_models=dict(
        setter="""
if is_type(base_models, {ptype}):
    {pname} = [b.model_id for b in {pname}]
    self._parms["{sname}"] = {pname}
else:
    assert_is_type({pname}, None, [str])
    self._parms["{sname}"] = {pname}
"""
    ),
    metalearner_params=dict(
        getter="""
if self._parms.get("{sname}") != None:
    metalearner_params_dict = ast.literal_eval(self._parms.get("{sname}"))
    for k in metalearner_params_dict:
        if len(metalearner_params_dict[k]) == 1: #single parameter
            metalearner_params_dict[k] = metalearner_params_dict[k][0]
    return metalearner_params_dict
else:
    return self._parms.get("{sname}")
""",
        setter="""
assert_is_type({pname}, None, {ptype})
if {pname} is not None and {pname} != "":
    for k in {pname}:
        if ("[" and "]") not in str(metalearner_params[k]):
            metalearner_params[k] = [metalearner_params[k]]
    self._parms["{sname}"] = str(json.dumps({pname}))
else:
    self._parms["{sname}"] = None
"""
    ),
)

doc = dict(
    __class__="""
Builds a stacked ensemble (aka "super learner") machine learning method that uses two or more
H2O learning algorithms to improve predictive performance. It is a loss-based supervised learning
method that finds the optimal combination of a collection of prediction algorithms.This method
supports regression and binary classification.
""",
)

examples = dict(
    __class__="""
>>> import h2o
>>> h2o.init()
>>> from h2o.estimators.random_forest import H2ORandomForestEstimator
>>> from h2o.estimators.gbm import H2OGradientBoostingEstimator
>>> from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
>>> col_types = ["numeric", "numeric", "numeric", "enum", "enum", "numeric", "numeric", "numeric", "numeric"]
>>> data = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/prostate/prostate.csv", col_types=col_types)
>>> train, test = data.split_frame(ratios=[.8], seed=1)
>>> x = ["CAPSULE","GLEASON","RACE","DPROS","DCAPS","PSA","VOL"]
>>> y = "AGE"
>>> nfolds = 5
>>> my_gbm = H2OGradientBoostingEstimator(nfolds=nfolds, fold_assignment="Modulo", keep_cross_validation_predictions=True)
>>> my_gbm.train(x=x, y=y, training_frame=train)
>>> my_rf = H2ORandomForestEstimator(nfolds=nfolds, fold_assignment="Modulo", keep_cross_validation_predictions=True)
>>> my_rf.train(x=x, y=y, training_frame=train)
>>> stack = H2OStackedEnsembleEstimator(model_id="my_ensemble", training_frame=train, validation_frame=test, base_models=[my_gbm.model_id, my_rf.model_id])
>>> stack.train(x=x, y=y, training_frame=train, validation_frame=test)
>>> stack.model_performance()
""",
    metalearner_params="""
>>> metalearner_params = {'max_depth': 2, 'col_sample_rate': 0.3}
""",
)
true
true
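The metalearner_params getter/setter templates in the record above round-trip the parameter dict through a JSON string: scalars are wrapped in single-element lists on the way in and unwrapped on the way out. Note that the original guard ("[" and "]") not in ... evaluates to just "]" not in ... in Python, which is how the sketch below writes it; the placeholders ({pname}, {sname}) are filled in by hand here:

import json
import ast

metalearner_params = {'max_depth': 2, 'col_sample_rate': 0.3}

# Setter side: wrap scalars in lists, then store as a JSON string.
for k in metalearner_params:
    if "]" not in str(metalearner_params[k]):
        metalearner_params[k] = [metalearner_params[k]]
stored = str(json.dumps(metalearner_params))

# Getter side: parse the string back and unwrap single-element lists.
parsed = ast.literal_eval(stored)
for k in parsed:
    if len(parsed[k]) == 1:  # single parameter
        parsed[k] = parsed[k][0]

assert parsed == {'max_depth': 2, 'col_sample_rate': 0.3}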
1c434cc6b89ca7223fc6db90e76c420ed7ec4e36
388
py
Python
pip_services_logging/container/__init__.py
pip-services-infrastructure/pip-services-logging-python
5b1f6eb4e0204004fb7b4affa527d9d3325bb3c7
[ "MIT" ]
null
null
null
pip_services_logging/container/__init__.py
pip-services-infrastructure/pip-services-logging-python
5b1f6eb4e0204004fb7b4affa527d9d3325bb3c7
[ "MIT" ]
null
null
null
pip_services_logging/container/__init__.py
pip-services-infrastructure/pip-services-logging-python
5b1f6eb4e0204004fb7b4affa527d9d3325bb3c7
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
    pip_services_logging.container.__init__
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Container module initialization

    :copyright: Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.
    :license: MIT, see LICENSE for more details.
"""

__all__ = [
    'LoggingProcess'
]

from .LoggingProcess import LoggingProcess
22.823529
89
0.615979
__all__ = [
    'LoggingProcess'
]

from .LoggingProcess import LoggingProcess
true
true
1c434d23a2e332d1479e4fcf2ae23a2194938eee
655
py
Python
migrations/versions/373b28e97f76_.py
yuraist/model_agency
64ce5a03fc1a919465c040d5adb939fc2bda1965
[ "MIT" ]
null
null
null
migrations/versions/373b28e97f76_.py
yuraist/model_agency
64ce5a03fc1a919465c040d5adb939fc2bda1965
[ "MIT" ]
null
null
null
migrations/versions/373b28e97f76_.py
yuraist/model_agency
64ce5a03fc1a919465c040d5adb939fc2bda1965
[ "MIT" ]
null
null
null
"""empty message Revision ID: 373b28e97f76 Revises: 4cf0021cdcd0 Create Date: 2018-08-19 17:24:58.766750 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '373b28e97f76' down_revision = '4cf0021cdcd0' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('models', sa.Column('next_date', sa.Date(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('models', 'next_date') # ### end Alembic commands ###
22.586207
77
0.691603
from alembic import op
import sqlalchemy as sa

revision = '373b28e97f76'
down_revision = '4cf0021cdcd0'
branch_labels = None
depends_on = None

def upgrade():
true
true
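A hedged sketch of driving the migration in the record above programmatically rather than through the alembic CLI; the alembic.ini path is an assumption, while the revision identifiers come from the record itself.

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # assumed project config pointing at env.py and the versions/ directory
command.upgrade(cfg, "373b28e97f76")    # runs upgrade(): adds the nullable models.next_date column
command.downgrade(cfg, "4cf0021cdcd0")  # runs downgrade(): drops models.next_date again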
1c434e6a0fbec6dbbca77f04a60a8550e6411695
2,210
py
Python
intake_azure_blob/azure_blob.py
hamed2005/intake-azure-blob
a9dfb8021d1fc0b354d9f08c0c86ed39e3a16993
[ "BSD-2-Clause" ]
null
null
null
intake_azure_blob/azure_blob.py
hamed2005/intake-azure-blob
a9dfb8021d1fc0b354d9f08c0c86ed39e3a16993
[ "BSD-2-Clause" ]
3
2019-08-13T10:20:32.000Z
2019-08-23T14:52:04.000Z
intake_azure_blob/azure_blob.py
hamed2005/intake-azure-blob
a9dfb8021d1fc0b354d9f08c0c86ed39e3a16993
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*-

from . import __version__

from intake.source.base import DataSource, Schema

import json
import dask.dataframe as dd
from datetime import datetime, timedelta
from azureblobfs.dask import DaskAzureBlobFileSystem


class AzureBlobSource(DataSource):
    """Common behaviours for plugins in this repo"""
    name = 'azure_blob'
    version = __version__
    container = 'dataframe'
    partition_access = True

    def __init__(self, blob_uri, storage_account_name=None, access_key=None,
                 azure_blob_prefix='abfs://', kwargs=None, metadata=None):
        """
        Parameters
        ----------
        blob_uri : str
            The Azure Blob URI.
        storage_account_name : str
            Azure storage account name.
        access_key : str
            Access key to authorize access to the Azure storage account.
        azure_blob_prefix: str
            The prefix for accessing Azure Blob Storage. Defaults to the
            `abfs://` protocol.
        """
        self._blob_uri = blob_uri
        self._storage_account_name = storage_account_name
        self._access_key = access_key
        self._azure_blob_prefix = azure_blob_prefix
        self._dataframe = None

    def _open_dataset(self):
        df = dd.read_csv(self._blob_uri, storage_options={"account_name": self._storage_account_name, "account_key": self._access_key}, blocksize=None)
        self._dataframe = df

    def _get_schema(self):
        if self._dataframe is None:
            self._open_dataset()

        dtypes = self._dataframe._meta.dtypes.to_dict()
        dtypes = {n: str(t) for (n, t) in dtypes.items()}
        return Schema(datashape=None,
                      dtype=dtypes,
                      shape=(None, len(dtypes)),
                      npartitions=self._dataframe.npartitions,
                      extra_metadata={})

    def _get_partition(self, i):
        self._get_schema()
        return self._dataframe.get_partition(i).compute()

    def read(self):
        self._get_schema()
        return self._dataframe.compute()

    def to_dask(self):
        self._get_schema()
        return self._dataframe

    def _close(self):
        self._dataframe = None
32.028986
151
0.631222
from . import __version__

from intake.source.base import DataSource, Schema

import json
import dask.dataframe as dd
from datetime import datetime, timedelta
from azureblobfs.dask import DaskAzureBlobFileSystem


class AzureBlobSource(DataSource):
    name = 'azure_blob'
    version = __version__
    container = 'dataframe'
    partition_access = True

    def __init__(self, blob_uri, storage_account_name=None, access_key=None,
                 azure_blob_prefix='abfs://', kwargs=None, metadata=None):
        self._blob_uri = blob_uri
        self._storage_account_name = storage_account_name
        self._access_key = access_key
        self._azure_blob_prefix = azure_blob_prefix
        self._dataframe = None

    def _open_dataset(self):
        df = dd.read_csv(self._blob_uri, storage_options={"account_name": self._storage_account_name, "account_key": self._access_key}, blocksize=None)
        self._dataframe = df

    def _get_schema(self):
        if self._dataframe is None:
            self._open_dataset()

        dtypes = self._dataframe._meta.dtypes.to_dict()
        dtypes = {n: str(t) for (n, t) in dtypes.items()}
        return Schema(datashape=None,
                      dtype=dtypes,
                      shape=(None, len(dtypes)),
                      npartitions=self._dataframe.npartitions,
                      extra_metadata={})

    def _get_partition(self, i):
        self._get_schema()
        return self._dataframe.get_partition(i).compute()

    def read(self):
        self._get_schema()
        return self._dataframe.compute()

    def to_dask(self):
        self._get_schema()
        return self._dataframe

    def _close(self):
        self._dataframe = None
true
true
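A hedged usage sketch for the AzureBlobSource record above; the account name, access key and blob path are placeholders. Because dd.read_csv is called with blocksize=None, the source yields one partition per matched blob, which is why the record advertises partition_access.

from intake_azure_blob.azure_blob import AzureBlobSource

src = AzureBlobSource(
    blob_uri="abfs://mycontainer/data/*.csv",  # placeholder container/path
    storage_account_name="myaccount",
    access_key="<storage-access-key>",
)
ddf = src.to_dask()            # lazy dask.dataframe, one partition per matched blob
first = src._get_partition(0)  # materialize a single partition (method defined in the record)
df = src.read()                # or compute the full frame as pandas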
1c434e783336bce9850898aa4594fc81c24c9d64
5,197
py
Python
src/dominh/connection.py
gavanderhoorn/dominh
318ca25d2095bc86007e2f9e87bd8dd8b06aec43
[ "Apache-2.0" ]
25
2020-02-14T17:16:31.000Z
2022-02-25T14:58:42.000Z
src/dominh/connection.py
gavanderhoorn/dominh
318ca25d2095bc86007e2f9e87bd8dd8b06aec43
[ "Apache-2.0" ]
15
2020-02-17T12:13:30.000Z
2021-11-30T09:12:38.000Z
src/dominh/connection.py
gavanderhoorn/dominh
318ca25d2095bc86007e2f9e87bd8dd8b06aec43
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2020-2021, G.A. vd. Hoorn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# author: G.A. vd. Hoorn


import typing as t


class Connection(object):
    def __init__(
        self,
        host: str,
        base_path: str,
        helpers_uploaded: bool,
        skipped_helpers_upload: bool,
        request_timeout: float = 5,
        kcl_auth: t.Optional[t.Tuple[str, str]] = None,
        karel_auth: t.Optional[t.Tuple[str, str]] = None,
        ftp_auth: t.Optional[t.Tuple[str, str]] = None,
    ) -> None:
        """Stores information about an active connection.

        :param host: Hostname or IP address of the controller
        :type host: str
        :param base_path: Path to the directory (on the controller) which
          stores the helpers
        :type base_path: str
        :param helpers_uploaded: Whether or not the helpers were uploaded to
          the controller as part of the initialisation of this connection
        :type helpers_uploaded: bool
        :param skipped_helpers_upload: Whether or not skipping upload of the
          helpers was requested (because they were uploaded as part of
          initialisation of a prior session for instance)
        :type skipped_helpers_upload: bool
        :param request_timeout: Time after which requests should time out
          (default: 5 sec)
        :type request_timeout: float
        :param kcl_auth: A tuple (username, password) providing the
          credentials for access to KCL resources. If not set, the KCL
          resource is assumed to be accessible by anonymous users and such
          access will fail if the controller does have authentication
          configured for that resource. Default: None
        :type kcl_auth: tuple(str, str)
        :param karel_auth: A tuple (username, password) providing the
          credentials for access to Karel resources. If not set, the Karel
          resource is assumed to be accessible by anonymous users and such
          access will fail if the controller does have authentication
          configured for that resource. Default: None
        :type karel_auth: tuple(str, str)
        :param ftp_auth: A tuple (username, password) providing the
          credentials for access to FTP resources. If not set, the FTP
          resource is assumed to be accessible by anonymous users and such
          access will fail if the controller does have authentication
          configured for that resource. Default: None
        :type ftp_auth: tuple(str, str)
        """
        # which remote system are we connected to?
        self._host = host
        # where are the helpers stored?
        self._base_path = base_path
        # have the helpers been uploaded?
        self._helpers_uploaded = helpers_uploaded
        # were we asked not to upload the helpers?
        self._skipped_helpers_upload = skipped_helpers_upload
        # when should we consider a request to have timed-out?
        self._request_timeout = request_timeout
        # authentication data
        self._kcl_auth = kcl_auth
        self._karel_auth = karel_auth
        self._ftp_auth = ftp_auth

    @property
    def host(self) -> str:
        """Hostname or IP address of the controller"""
        return self._host

    @property
    def base_path(self) -> str:
        """Path to the directory (on the controller) which stores the helpers"""
        return self._base_path

    @property
    def request_timeout(self) -> float:
        """Time after which requests should time out"""
        return self._request_timeout

    @property
    def helpers_uploaded(self) -> bool:
        """Whether or not the helpers were uploaded to the controller as part
        of the initialisation of this connection
        """
        return self._helpers_uploaded

    @property
    def skipped_helpers_upload(self) -> bool:
        """Whether or not skipping upload of the helpers was requested

        Because they were uploaded as part of initialisation of a prior
        session for instance.
        """
        return self._skipped_helpers_upload

    @property
    def kcl_auth(self) -> t.Optional[t.Tuple[str, str]]:
        """Credentials allowing access to KCL resources

        If not provided, returns None.
        """
        return self._kcl_auth

    @property
    def karel_auth(self) -> t.Optional[t.Tuple[str, str]]:
        """Credentials allowing access to Karel resources

        If not provided, returns None.
        """
        return self._karel_auth

    @property
    def ftp_auth(self) -> t.Optional[t.Tuple[str, str]]:
        """Credentials allowing access to FTP resources

        If not provided, returns None.
        """
        return self._ftp_auth
37.121429
80
0.666538
import typing as t


class Connection(object):
    def __init__(
        self,
        host: str,
        base_path: str,
        helpers_uploaded: bool,
        skipped_helpers_upload: bool,
        request_timeout: float = 5,
        kcl_auth: t.Optional[t.Tuple[str, str]] = None,
        karel_auth: t.Optional[t.Tuple[str, str]] = None,
        ftp_auth: t.Optional[t.Tuple[str, str]] = None,
    ) -> None:
        self._host = host
        self._base_path = base_path
        self._helpers_uploaded = helpers_uploaded
        self._skipped_helpers_upload = skipped_helpers_upload
        self._request_timeout = request_timeout
        self._kcl_auth = kcl_auth
        self._karel_auth = karel_auth
        self._ftp_auth = ftp_auth

    @property
    def host(self) -> str:
        return self._host

    @property
    def base_path(self) -> str:
        return self._base_path

    @property
    def request_timeout(self) -> float:
        return self._request_timeout

    @property
    def helpers_uploaded(self) -> bool:
        return self._helpers_uploaded

    @property
    def skipped_helpers_upload(self) -> bool:
        return self._skipped_helpers_upload

    @property
    def kcl_auth(self) -> t.Optional[t.Tuple[str, str]]:
        return self._kcl_auth

    @property
    def karel_auth(self) -> t.Optional[t.Tuple[str, str]]:
        return self._karel_auth

    @property
    def ftp_auth(self) -> t.Optional[t.Tuple[str, str]]:
        return self._ftp_auth
true
true
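A hedged sketch constructing the Connection value object from the record above and reading back a few of its properties; the host, base path and credentials are placeholders, not a real controller.

from dominh.connection import Connection  # import path inferred from src/dominh/connection.py

conn = Connection(
    host="192.168.0.10",         # placeholder controller address
    base_path="/md:/dominh",     # placeholder helper directory
    helpers_uploaded=True,
    skipped_helpers_upload=False,
    request_timeout=5,
    ftp_auth=("admin", "secret"),  # optional (username, password) tuple
)
assert conn.host == "192.168.0.10"
assert conn.ftp_auth == ("admin", "secret")
assert conn.kcl_auth is None  # auth tuples default to None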
1c434ea6ac31675f1989083e696631dbcb369f69
43,613
py
Python
gamestonk_terminal/stocks/behavioural_analysis/ba_controller.py
DidierRLopes/GST-discordbot
8ff7f7557f5db62ea33d63cfc11ee7ae5f9de56c
[ "MIT" ]
1
2021-12-31T04:10:42.000Z
2021-12-31T04:10:42.000Z
gamestonk_terminal/stocks/behavioural_analysis/ba_controller.py
DidierRLopes/GST-discordbot
8ff7f7557f5db62ea33d63cfc11ee7ae5f9de56c
[ "MIT" ]
null
null
null
gamestonk_terminal/stocks/behavioural_analysis/ba_controller.py
DidierRLopes/GST-discordbot
8ff7f7557f5db62ea33d63cfc11ee7ae5f9de56c
[ "MIT" ]
null
null
null
"""Behavioural Analysis Controller Module""" __docformat__ = "numpy" import argparse import difflib from typing import List, Union from datetime import datetime, timedelta import textwrap from prompt_toolkit.completion import NestedCompleter from colorama import Style from gamestonk_terminal import feature_flags as gtff from gamestonk_terminal.helper_funcs import ( EXPORT_BOTH_RAW_DATA_AND_FIGURES, EXPORT_ONLY_RAW_DATA_ALLOWED, get_flair, parse_known_args_and_warn, check_int_range, valid_date, check_positive, try_except, system_clear, ) from gamestonk_terminal.menu import session from gamestonk_terminal.common.behavioural_analysis import ( google_view, reddit_view, stocktwits_view, finbrain_view, finnhub_view, twitter_view, ) from gamestonk_terminal.stocks import stocks_helper # pylint:disable=R0904,C0302 class BehaviouralAnalysisController: """Behavioural Analysis Controller class""" # Command choices CHOICES = [ "cls", "home", "h", "?", "help", "q", "quit", "..", "exit", "r", "reset", ] CHOICES_COMMANDS = [ "load", "watchlist", "spac", "spac_c", "wsb", "popular", "bullbear", "messages", "trending", "stalker", "infer", "sentiment", "mentions", "regions", "queries", "rise", "headlines", "stats", "metrics", "social", "historical", "emerging", "popular", "popularsi", "getdd", ] CHOICES += CHOICES_COMMANDS historical_sort = ["date", "value"] historical_direction = ["asc", "desc"] historical_metric = ["sentiment", "AHI", "RHI", "SGP"] def __init__(self, ticker: str, start: datetime, queue: List[str] = None): """Constructor""" self.ba_parser = argparse.ArgumentParser(add_help=False, prog="ba") self.ba_parser.add_argument( "cmd", choices=self.CHOICES + self.CHOICES_COMMANDS, ) self.completer: Union[None, NestedCompleter] = None if session and gtff.USE_PROMPT_TOOLKIT: choices: dict = {c: {} for c in self.CHOICES} choices["historical"]["-s"] = {c: None for c in self.historical_sort} choices["historical"]["--sort"] = {c: None for c in self.historical_sort} choices["historical"]["-d"] = {c: None for c in self.historical_direction} choices["historical"]["--direction"] = { c: None for c in self.historical_direction } choices["historical"]["-m"] = {c: None for c in self.historical_metric} choices["historical"]["--metric"] = { c: None for c in self.historical_metric } choices["historical"] = {c: None for c in self.historical_metric} self.completer = NestedCompleter.from_nested_dict(choices) self.ticker = ticker self.start = start if queue: self.queue = queue else: self.queue = list() def print_help(self): dim = Style.DIM if not self.ticker else "" res = Style.RESET_ALL help_txt = f""" load load a specific stock ticker for analysis Ticker: {self.ticker.upper() or None} Finbrain:{dim} headlines sentiment from 15+ major news headlines {res} Finnhub:{dim} stats sentiment stats including comparison with sector{res} Reddit: wsb show what WSB gang is up to in subreddit wallstreetbets watchlist show other users watchlist popular show popular tickers spac_c show other users spacs announcements from subreddit SPACs community spac show other users spacs announcements from other subs{dim} getdd gets due diligence from another user's post{res} Stocktwits:{dim} bullbear estimate quick sentiment from last 30 messages on board messages output up to the 30 last messages on the board{res} trending trending stocks stalker stalk stocktwits user's last messages Twitter:{dim} infer infer about stock's sentiment from latest tweets sentiment in-depth sentiment prediction from tweets over time{res} Google:{dim} mentions 
interest over time based on stock's mentions regions regions that show highest interest in stock queries top related queries with this stock rise top rising related queries with stock{res} SentimentInvestor: popularsi show most popular stocks on social media right now emerging show stocks that are being talked about more than usual{dim} metrics core social sentiment metrics for this stock social social media figures for stock popularity historical plot the past week of data for a selected metric{res} """ print(help_txt) def switch(self, an_input: str): """Process and dispatch input Returns ------- List[str] List of commands in the queue to execute """ # Empty command if not an_input: print("") return self.queue # Navigation slash is being used if "/" in an_input: actions = an_input.split("/") # Absolute path is specified if not actions[0]: an_input = "home" # Relative path so execute first instruction else: an_input = actions[0] # Add all instructions to the queue for cmd in actions[1:][::-1]: if cmd: self.queue.insert(0, cmd) (known_args, other_args) = self.ba_parser.parse_known_args(an_input.split()) # Redirect commands to their correct functions if known_args.cmd: if known_args.cmd in ("..", "q"): known_args.cmd = "quit" elif known_args.cmd in ("?", "h"): known_args.cmd = "help" elif known_args.cmd == "r": known_args.cmd = "reset" getattr( self, "call_" + known_args.cmd, lambda _: "Command not recognized!", )(other_args) return self.queue def call_cls(self, _): """Process cls command""" system_clear() def call_home(self, _): """Process home command""" self.queue.insert(0, "quit") self.queue.insert(0, "quit") def call_help(self, _): """Process help command""" self.print_help() def call_quit(self, _): """Process quit menu command""" print("") self.queue.insert(0, "quit") def call_exit(self, _): """Process exit terminal command""" self.queue.insert(0, "quit") self.queue.insert(0, "quit") self.queue.insert(0, "quit") def call_reset(self, _): """Process reset command""" self.queue.insert(0, "ba") self.queue.insert(0, "stocks") self.queue.insert(0, "reset") self.queue.insert(0, "quit") self.queue.insert(0, "quit") @try_except def call_load(self, other_args: List[str]): """Process load command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="load", description="Load stock ticker to perform analysis on. When the data source is 'yf', an Indian ticker can be" " loaded by using '.NS' at the end, e.g. 'SBIN.NS'. See available market in" " https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html.", ) parser.add_argument( "-t", "--ticker", action="store", dest="ticker", required="-h" not in other_args, help="Stock ticker", ) parser.add_argument( "-s", "--start", type=valid_date, default=(datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"), dest="start", help="The starting date (format YYYY-MM-DD) of the stock", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-t") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: df_stock_candidate = stocks_helper.load( ns_parser.ticker, ns_parser.start, ) if not df_stock_candidate.empty: self.start = ns_parser.start if "." 
in ns_parser.ticker: self.ticker = ns_parser.ticker.upper().split(".")[0] else: self.ticker = ns_parser.ticker.upper() else: print("Provide a valid ticker") @try_except def call_watchlist(self, other_args: List[str]): """Process watchlist command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="watchlist", description="""Print other users watchlist. [Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=5, help="limit of posts with watchlists retrieved.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_watchlist(num=ns_parser.limit) @try_except def call_spac(self, other_args: List[str]): """Process spac command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="spac", description="""Show other users SPACs announcement. [Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="n_limit", type=check_positive, default=5, help="limit of posts with SPACs retrieved.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_spac(limit=ns_parser.n_limit) @try_except def call_spac_c(self, other_args: List[str]): """Process spac_c command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="spac_c", description="""Print other users SPACs announcement under subreddit 'SPACs'. [Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="n_limit", type=check_positive, default=10, help="limit of posts with SPACs retrieved", ) parser.add_argument( "-p", "--popular", action="store_true", default=False, dest="b_popular", help="popular flag, if true the posts retrieved are based on score rather than time", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_spac_community( limit=ns_parser.n_limit, popular=ns_parser.b_popular ) @try_except def call_wsb(self, other_args: List[str]): """Process wsb command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="wsb", description="""Print what WSB gang are up to in subreddit wallstreetbets. [Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="n_limit", type=check_positive, default=10, help="limit of posts to print.", ) parser.add_argument( "--new", action="store_true", default=False, dest="b_new", help="new flag, if true the posts retrieved are based on being more recent rather than their score.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_wsb_community( limit=ns_parser.n_limit, new=ns_parser.b_new ) @try_except def call_popular(self, other_args: List[str]): """Process popular command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="popular", description="""Print latest popular tickers. 
[Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=10, help="limit of top tickers to retrieve", ) parser.add_argument( "-n", "--num", action="store", dest="num", type=check_positive, default=50, help="number of posts retrieved per sub reddit.", ) parser.add_argument( "-s", "--sub", action="store", dest="s_subreddit", type=str, help=""" subreddits to look for tickers, e.g. pennystocks,stocks. Default: pennystocks, RobinHoodPennyStocks, Daytrading, StockMarket, stocks, investing, wallstreetbets """, ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_popular_tickers( n_top=ns_parser.limit, posts_to_look_at=ns_parser.num, subreddits=ns_parser.s_subreddit, ) @try_except def call_getdd(self, other_args: List[str]): """Process getdd command""" parser = argparse.ArgumentParser( add_help=False, prog="getdd", description=""" Print top stock's due diligence from other users. [Source: Reddit] """, ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=5, help="limit of posts to retrieve.", ) parser.add_argument( "-d", "--days", action="store", dest="days", type=check_positive, default=3, help="number of prior days to look for.", ) parser.add_argument( "-a", "--all", action="store_true", dest="all", default=False, help=""" search through all flairs (apart from Yolo and Meme), otherwise we focus on specific flairs: DD, technical analysis, Catalyst, News, Advice, Chart""", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: reddit_view.display_due_diligence( ticker=self.ticker, limit=ns_parser.limit, n_days=ns_parser.days, show_all_flairs=ns_parser.all, ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_bullbear(self, other_args: List[str]): """Process bullbear command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="bullbear", description=""" Print bullbear sentiment based on last 30 messages on the board. Also prints the watchlist_count. [Source: Stocktwits] """, ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: stocktwits_view.display_bullbear(ticker=self.ticker) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_messages(self, other_args: List[str]): """Process messages command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="messages", description="""Print up to 30 of the last messages on the board. [Source: Stocktwits]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=30, help="limit messages shown.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: stocktwits_view.display_messages( ticker=self.ticker, limit=ns_parser.limit ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_trending(self, other_args: List[str]): """Process trending command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="trending", description="""Stocks trending. 
[Source: Stocktwits]""", ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: stocktwits_view.display_trending() @try_except def call_stalker(self, other_args: List[str]): """Process stalker command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="stalker", description="""Print up to the last 30 messages of a user. [Source: Stocktwits]""", ) parser.add_argument( "-u", "--user", action="store", dest="s_user", type=str, default="Newsfilter", help="username.", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=30, help="limit messages shown.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: stocktwits_view.display_stalker( user=ns_parser.s_user, limit=ns_parser.limit ) @try_except def call_mentions(self, other_args: List[str]): """Process mentions command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="mentions", description=""" Plot weekly bars of stock's interest over time. other users watchlist. [Source: Google] """, ) parser.add_argument( "-s", "--start", type=valid_date, dest="start", default=self.start, help="starting date (format YYYY-MM-DD) from when we are interested in stock's mentions.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-s") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: google_view.display_mentions( ticker=self.ticker, start=ns_parser.start, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_regions(self, other_args: List[str]): """Process regions command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="regions", description="""Plot bars of regions based on stock's interest. [Source: Google]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=10, help="limit of regions to plot that show highest interest.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: google_view.display_regions( ticker=self.ticker, num=ns_parser.limit, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_queries(self, other_args: List[str]): """Process queries command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="queries", description="""Print top related queries with this stock's query. [Source: Google]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=10, help="limit of top related queries to print.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: google_view.display_queries( ticker=self.ticker, num=ns_parser.limit, export=ns_parser.export ) else: print("No ticker loaded. 
Please load using 'load <ticker>'\n") @try_except def call_rise(self, other_args: List[str]): """Process rise command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="rise", description="""Print top rising related queries with this stock's query. [Source: Google]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=10, help="limit of top rising related queries to print.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: google_view.display_rise( ticker=self.ticker, num=ns_parser.limit, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_infer(self, other_args: List[str]): """Process infer command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="infer", description=""" Print quick sentiment inference from last tweets that contain the ticker. This model splits the text into character-level tokens and uses vader sentiment analysis. [Source: Twitter] """, ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_int_range(10, 100), default=100, help="limit of latest tweets to infer from.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: twitter_view.display_inference(ticker=self.ticker, num=ns_parser.limit) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_sentiment(self, other_args: List[str]): """Process sentiment command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="sentiment", description=""" Plot in-depth sentiment predicted from tweets from last days that contain pre-defined ticker. [Source: Twitter] """, ) # in reality this argument could be 100, but after testing it takes too long # to compute which may not be acceptable # TODO: use https://github.com/twintproject/twint instead of twitter API parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_int_range(10, 62), default=15, help="limit of tweets to extract per hour.", ) parser.add_argument( "-d", "--days", action="store", dest="n_days_past", type=check_int_range(1, 6), default=6, help="number of days in the past to extract tweets.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: twitter_view.display_sentiment( ticker=self.ticker, n_tweets=ns_parser.limit, n_days_past=ns_parser.n_days_past, export=ns_parser.export, ) else: print("No ticker loaded. 
Please load using 'load <ticker>'\n") @try_except def call_headlines(self, other_args: List[str]): """Process finbrain command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="headlines", description="""FinBrain collects the news headlines from 15+ major financial news sources on a daily basis and analyzes them to generate sentiment scores for more than 4500 US stocks.FinBrain Technologies develops deep learning algorithms for financial analysis and prediction, which currently serves traders from more than 150 countries all around the world. [Source: https://finbrain.tech]""", ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: finbrain_view.display_sentiment_analysis( ticker=self.ticker, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_stats(self, other_args: List[str]): """Process stats command""" parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="stats", description=""" Sentiment stats which displays buzz, news score, articles last week, articles weekly average, bullish vs bearish percentages, sector average bullish percentage, and sector average news score. [Source: https://finnhub.io] """, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: finnhub_view.display_sentiment_stats( ticker=self.ticker, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_metrics(self, other_args: List[str]): """Process metrics command""" command_description = f""" {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. The {Style.BRIGHT}metrics{Style.RESET_ALL} command prints the following realtime metrics: {Style.BRIGHT}AHI (Absolute Hype Index){Style.RESET_ALL} --- AHI is a measure of how much people are talking about a stock on social media. It is calculated by dividing the total number of mentions for the chosen stock on a social network by the mean number of mentions any stock receives on that social medium. {Style.BRIGHT}RHI (Relative Hype Index){Style.RESET_ALL} --- RHI is a measure of whether people are talking about a stock more or less than usual, calculated by dividing the mean AHI for the past day by the mean AHI for for the past week for that stock. {Style.BRIGHT}Sentiment Score{Style.RESET_ALL} --- Sentiment score is the percentage of people talking positively about the stock. For each social network the number of positive posts/comments is divided by the total number of both positive and negative posts/comments. {Style.BRIGHT}SGP (Standard General Perception){Style.RESET_ALL} --- SGP is a measure of whether people are more or less positive about a stock than usual. It is calculated by averaging the past day of sentiment values and then dividing it by the average of the past week of sentiment values. 
""" parser = argparse.ArgumentParser( add_help=False, prog="metrics", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: print( "Currently under maintenance by the new Sentiment Investor team.\n" ) # sentimentinvestor_view.display_metrics(ticker=self.ticker) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_social(self, other_args: List[str]): """Process social command""" command_description = f""" {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. The {Style.BRIGHT}social{Style.RESET_ALL} command prints the raw data for a given stock, including the number of mentions it has received on social media in the last hour and the sentiment score of those comments. """ parser = argparse.ArgumentParser( add_help=False, prog="social", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: print( "Currently under maintenance by the new Sentiment Investor team.\n" ) # sentimentinvestor_view.display_social(ticker=self.ticker) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_historical(self, other_args: List[str]): """Process historical command""" command_description = f""" {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. The {Style.BRIGHT}historical{Style.RESET_ALL} command plots the past week of data for a selected metric, one of: {Style.BRIGHT}AHI (Absolute Hype Index){Style.RESET_ALL} --- AHI is a measure of how much people are talking about a stock on social media. It is calculated by dividing the total number of mentions for the chosen stock on a social network by the mean number of mentions any stock receives on that social medium. {Style.BRIGHT}RHI (Relative Hype Index){Style.RESET_ALL} --- RHI is a measure of whether people are talking about a stock more or less than usual, calculated by dividing the mean AHI for the past day by the mean AHI for for the past week for that stock. {Style.BRIGHT}Sentiment Score{Style.RESET_ALL} --- Sentiment score is the percentage of people talking positively about the stock. For each social network the number of positive posts/comments is divided by the total number of both positive and negative posts/comments. {Style.BRIGHT}SGP (Standard General Perception){Style.RESET_ALL} --- SGP is a measure of whether people are more or less positive about a stock than usual. It is calculated by averaging the past day of sentiment values and then dividing it by the average of the past week of sentiment values. 
""" parser = argparse.ArgumentParser( add_help=False, prog="historical", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) parser.add_argument( "-s", "--sort", action="store", type=str, default="date", help="the parameter to sort output table by", dest="sort_param", choices=self.historical_sort, ) parser.add_argument( "-d", "--direction", action="store", type=str, default="desc", help="the direction to sort the output table", dest="sort_dir", choices=self.historical_direction, ) parser.add_argument( "-m", "--metric", type=str, action="store", default="sentiment", dest="metric", choices=self.historical_metric, help="the metric to plot", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-m") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: print( "Currently under maintenance by the new Sentiment Investor team.\n" ) # sentimentinvestor_view.display_historical( # ticker=self.ticker, # sort_param=ns_parser.sort_param, # metric=ns_parser.metric, # sort_dir=ns_parser.sort_dir, # ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_popularsi(self, other_args: List[str]): """Process popular command""" command_description = f""" The {Style.BRIGHT}popular{Style.RESET_ALL} command prints the stocks with highest Average Hype Index right now. {Style.BRIGHT}AHI (Absolute Hype Index){Style.RESET_ALL} --- AHI is a measure of how much people are talking about a stock on social media. It is calculated by dividing the total number of mentions for the chosen stock on a social network by the mean number of mentions any stock receives on that social medium. === {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. """ parser = argparse.ArgumentParser( add_help=False, prog="popularsi", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=int, default=10, help="the maximum number of stocks to retrieve", ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: print("Currently under maintenance by the new Sentiment Investor team.\n") # sentimentinvestor_view.display_top(metric="AHI", limit=ns_parser.limit) @try_except def call_emerging(self, other_args: List[str]): """Process emerging command""" command_description = f""" The {Style.BRIGHT}emerging{Style.RESET_ALL} command prints the stocks with highest Relative Hype Index right now. {Style.BRIGHT}RHI (Relative Hype Index){Style.RESET_ALL} --- RHI is a measure of whether people are talking about a stock more or less than usual, calculated by dividing the mean AHI for the past day by the mean AHI for for the past week for that stock. === {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. 
""" parser = argparse.ArgumentParser( add_help=False, prog="popular", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=int, default=10, help="the maximum number of stocks to retrieve", ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: print("Currently under maintenance by the new Sentiment Investor team.\n") # sentimentinvestor_view.display_top(metric="RHI", limit=ns_parser.limit) def menu(ticker: str, start: datetime, queue: List[str] = None): """Behavioural Analysis Menu""" ba_controller = BehaviouralAnalysisController(ticker, start, queue) an_input = "HELP_ME" while True: # There is a command in the queue if ba_controller.queue and len(ba_controller.queue) > 0: # If the command is quitting the menu we want to return in here if ba_controller.queue[0] in ("q", "..", "quit"): print("") if len(ba_controller.queue) > 1: return ba_controller.queue[1:] return [] # Consume 1 element from the queue an_input = ba_controller.queue[0] ba_controller.queue = ba_controller.queue[1:] # Print the current location because this was an instruction and we want user to know what was the action if an_input and an_input.split(" ")[0] in ba_controller.CHOICES_COMMANDS: print(f"{get_flair()} /stocks/ba/ $ {an_input}") # Get input command from user else: # Display help menu when entering on this menu from a level above if an_input == "HELP_ME": ba_controller.print_help() # Get input from user using auto-completion if session and gtff.USE_PROMPT_TOOLKIT and ba_controller.completer: an_input = session.prompt( f"{get_flair()} /stocks/ba/ $ ", completer=ba_controller.completer, search_ignore_case=True, ) # Get input from user without auto-completion else: an_input = input(f"{get_flair()} /stocks/ba/ $ ") try: # Process the input command ba_controller.queue = ba_controller.switch(an_input) except SystemExit: print( f"\nThe command '{an_input}' doesn't exist on the /stocks/ba menu.", end="", ) similar_cmd = difflib.get_close_matches( an_input.split(" ")[0] if " " in an_input else an_input, ba_controller.CHOICES, n=1, cutoff=0.7, ) if similar_cmd: if " " in an_input: candidate_input = ( f"{similar_cmd[0]} {' '.join(an_input.split(' ')[1:])}" ) if candidate_input == an_input: an_input = "" ba_controller.queue = [] print("\n") continue an_input = candidate_input else: an_input = similar_cmd[0] print(f" Replacing by '{an_input}'.") ba_controller.queue.insert(0, an_input) else: print("\n")
37.117447
121
0.570243
__docformat__ = "numpy" import argparse import difflib from typing import List, Union from datetime import datetime, timedelta import textwrap from prompt_toolkit.completion import NestedCompleter from colorama import Style from gamestonk_terminal import feature_flags as gtff from gamestonk_terminal.helper_funcs import ( EXPORT_BOTH_RAW_DATA_AND_FIGURES, EXPORT_ONLY_RAW_DATA_ALLOWED, get_flair, parse_known_args_and_warn, check_int_range, valid_date, check_positive, try_except, system_clear, ) from gamestonk_terminal.menu import session from gamestonk_terminal.common.behavioural_analysis import ( google_view, reddit_view, stocktwits_view, finbrain_view, finnhub_view, twitter_view, ) from gamestonk_terminal.stocks import stocks_helper class BehaviouralAnalysisController: CHOICES = [ "cls", "home", "h", "?", "help", "q", "quit", "..", "exit", "r", "reset", ] CHOICES_COMMANDS = [ "load", "watchlist", "spac", "spac_c", "wsb", "popular", "bullbear", "messages", "trending", "stalker", "infer", "sentiment", "mentions", "regions", "queries", "rise", "headlines", "stats", "metrics", "social", "historical", "emerging", "popular", "popularsi", "getdd", ] CHOICES += CHOICES_COMMANDS historical_sort = ["date", "value"] historical_direction = ["asc", "desc"] historical_metric = ["sentiment", "AHI", "RHI", "SGP"] def __init__(self, ticker: str, start: datetime, queue: List[str] = None): self.ba_parser = argparse.ArgumentParser(add_help=False, prog="ba") self.ba_parser.add_argument( "cmd", choices=self.CHOICES + self.CHOICES_COMMANDS, ) self.completer: Union[None, NestedCompleter] = None if session and gtff.USE_PROMPT_TOOLKIT: choices: dict = {c: {} for c in self.CHOICES} choices["historical"]["-s"] = {c: None for c in self.historical_sort} choices["historical"]["--sort"] = {c: None for c in self.historical_sort} choices["historical"]["-d"] = {c: None for c in self.historical_direction} choices["historical"]["--direction"] = { c: None for c in self.historical_direction } choices["historical"]["-m"] = {c: None for c in self.historical_metric} choices["historical"]["--metric"] = { c: None for c in self.historical_metric } choices["historical"] = {c: None for c in self.historical_metric} self.completer = NestedCompleter.from_nested_dict(choices) self.ticker = ticker self.start = start if queue: self.queue = queue else: self.queue = list() def print_help(self): dim = Style.DIM if not self.ticker else "" res = Style.RESET_ALL help_txt = f""" load load a specific stock ticker for analysis Ticker: {self.ticker.upper() or None} Finbrain:{dim} headlines sentiment from 15+ major news headlines {res} Finnhub:{dim} stats sentiment stats including comparison with sector{res} Reddit: wsb show what WSB gang is up to in subreddit wallstreetbets watchlist show other users watchlist popular show popular tickers spac_c show other users spacs announcements from subreddit SPACs community spac show other users spacs announcements from other subs{dim} getdd gets due diligence from another user's post{res} Stocktwits:{dim} bullbear estimate quick sentiment from last 30 messages on board messages output up to the 30 last messages on the board{res} trending trending stocks stalker stalk stocktwits user's last messages Twitter:{dim} infer infer about stock's sentiment from latest tweets sentiment in-depth sentiment prediction from tweets over time{res} Google:{dim} mentions interest over time based on stock's mentions regions regions that show highest interest in stock queries top related queries with this stock rise top rising 
related queries with stock{res} SentimentInvestor: popularsi show most popular stocks on social media right now emerging show stocks that are being talked about more than usual{dim} metrics core social sentiment metrics for this stock social social media figures for stock popularity historical plot the past week of data for a selected metric{res} """ print(help_txt) def switch(self, an_input: str): if not an_input: print("") return self.queue if "/" in an_input: actions = an_input.split("/") if not actions[0]: an_input = "home" else: an_input = actions[0] for cmd in actions[1:][::-1]: if cmd: self.queue.insert(0, cmd) (known_args, other_args) = self.ba_parser.parse_known_args(an_input.split()) if known_args.cmd: if known_args.cmd in ("..", "q"): known_args.cmd = "quit" elif known_args.cmd in ("?", "h"): known_args.cmd = "help" elif known_args.cmd == "r": known_args.cmd = "reset" getattr( self, "call_" + known_args.cmd, lambda _: "Command not recognized!", )(other_args) return self.queue def call_cls(self, _): system_clear() def call_home(self, _): self.queue.insert(0, "quit") self.queue.insert(0, "quit") def call_help(self, _): self.print_help() def call_quit(self, _): print("") self.queue.insert(0, "quit") def call_exit(self, _): self.queue.insert(0, "quit") self.queue.insert(0, "quit") self.queue.insert(0, "quit") def call_reset(self, _): self.queue.insert(0, "ba") self.queue.insert(0, "stocks") self.queue.insert(0, "reset") self.queue.insert(0, "quit") self.queue.insert(0, "quit") @try_except def call_load(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="load", description="Load stock ticker to perform analysis on. When the data source is 'yf', an Indian ticker can be" " loaded by using '.NS' at the end, e.g. 'SBIN.NS'. See available market in" " https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html.", ) parser.add_argument( "-t", "--ticker", action="store", dest="ticker", required="-h" not in other_args, help="Stock ticker", ) parser.add_argument( "-s", "--start", type=valid_date, default=(datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"), dest="start", help="The starting date (format YYYY-MM-DD) of the stock", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-t") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: df_stock_candidate = stocks_helper.load( ns_parser.ticker, ns_parser.start, ) if not df_stock_candidate.empty: self.start = ns_parser.start if "." in ns_parser.ticker: self.ticker = ns_parser.ticker.upper().split(".")[0] else: self.ticker = ns_parser.ticker.upper() else: print("Provide a valid ticker") @try_except def call_watchlist(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="watchlist", description="""Print other users watchlist. 
[Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=5, help="limit of posts with watchlists retrieved.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_watchlist(num=ns_parser.limit) @try_except def call_spac(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="spac", description="""Show other users SPACs announcement. [Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="n_limit", type=check_positive, default=5, help="limit of posts with SPACs retrieved.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_spac(limit=ns_parser.n_limit) @try_except def call_spac_c(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="spac_c", description="""Print other users SPACs announcement under subreddit 'SPACs'. [Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="n_limit", type=check_positive, default=10, help="limit of posts with SPACs retrieved", ) parser.add_argument( "-p", "--popular", action="store_true", default=False, dest="b_popular", help="popular flag, if true the posts retrieved are based on score rather than time", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_spac_community( limit=ns_parser.n_limit, popular=ns_parser.b_popular ) @try_except def call_wsb(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="wsb", description="""Print what WSB gang are up to in subreddit wallstreetbets. [Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="n_limit", type=check_positive, default=10, help="limit of posts to print.", ) parser.add_argument( "--new", action="store_true", default=False, dest="b_new", help="new flag, if true the posts retrieved are based on being more recent rather than their score.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_wsb_community( limit=ns_parser.n_limit, new=ns_parser.b_new ) @try_except def call_popular(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="popular", description="""Print latest popular tickers. [Source: Reddit]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=10, help="limit of top tickers to retrieve", ) parser.add_argument( "-n", "--num", action="store", dest="num", type=check_positive, default=50, help="number of posts retrieved per sub reddit.", ) parser.add_argument( "-s", "--sub", action="store", dest="s_subreddit", type=str, help=""" subreddits to look for tickers, e.g. pennystocks,stocks. 
Default: pennystocks, RobinHoodPennyStocks, Daytrading, StockMarket, stocks, investing, wallstreetbets """, ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: reddit_view.display_popular_tickers( n_top=ns_parser.limit, posts_to_look_at=ns_parser.num, subreddits=ns_parser.s_subreddit, ) @try_except def call_getdd(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, prog="getdd", description=""" Print top stock's due diligence from other users. [Source: Reddit] """, ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=5, help="limit of posts to retrieve.", ) parser.add_argument( "-d", "--days", action="store", dest="days", type=check_positive, default=3, help="number of prior days to look for.", ) parser.add_argument( "-a", "--all", action="store_true", dest="all", default=False, help=""" search through all flairs (apart from Yolo and Meme), otherwise we focus on specific flairs: DD, technical analysis, Catalyst, News, Advice, Chart""", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: reddit_view.display_due_diligence( ticker=self.ticker, limit=ns_parser.limit, n_days=ns_parser.days, show_all_flairs=ns_parser.all, ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_bullbear(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="bullbear", description=""" Print bullbear sentiment based on last 30 messages on the board. Also prints the watchlist_count. [Source: Stocktwits] """, ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: stocktwits_view.display_bullbear(ticker=self.ticker) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_messages(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="messages", description="""Print up to 30 of the last messages on the board. [Source: Stocktwits]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=30, help="limit messages shown.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: stocktwits_view.display_messages( ticker=self.ticker, limit=ns_parser.limit ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_trending(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="trending", description="""Stocks trending. [Source: Stocktwits]""", ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: stocktwits_view.display_trending() @try_except def call_stalker(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="stalker", description="""Print up to the last 30 messages of a user. 
[Source: Stocktwits]""", ) parser.add_argument( "-u", "--user", action="store", dest="s_user", type=str, default="Newsfilter", help="username.", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=30, help="limit messages shown.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: stocktwits_view.display_stalker( user=ns_parser.s_user, limit=ns_parser.limit ) @try_except def call_mentions(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="mentions", description=""" Plot weekly bars of stock's interest over time. other users watchlist. [Source: Google] """, ) parser.add_argument( "-s", "--start", type=valid_date, dest="start", default=self.start, help="starting date (format YYYY-MM-DD) from when we are interested in stock's mentions.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-s") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: google_view.display_mentions( ticker=self.ticker, start=ns_parser.start, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_regions(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="regions", description="""Plot bars of regions based on stock's interest. [Source: Google]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=10, help="limit of regions to plot that show highest interest.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: google_view.display_regions( ticker=self.ticker, num=ns_parser.limit, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_queries(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="queries", description="""Print top related queries with this stock's query. [Source: Google]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=10, help="limit of top related queries to print.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: google_view.display_queries( ticker=self.ticker, num=ns_parser.limit, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_rise(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="rise", description="""Print top rising related queries with this stock's query. 
[Source: Google]""", ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_positive, default=10, help="limit of top rising related queries to print.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: google_view.display_rise( ticker=self.ticker, num=ns_parser.limit, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_infer(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="infer", description=""" Print quick sentiment inference from last tweets that contain the ticker. This model splits the text into character-level tokens and uses vader sentiment analysis. [Source: Twitter] """, ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_int_range(10, 100), default=100, help="limit of latest tweets to infer from.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: twitter_view.display_inference(ticker=self.ticker, num=ns_parser.limit) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_sentiment(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="sentiment", description=""" Plot in-depth sentiment predicted from tweets from last days that contain pre-defined ticker. [Source: Twitter] """, ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=check_int_range(10, 62), default=15, help="limit of tweets to extract per hour.", ) parser.add_argument( "-d", "--days", action="store", dest="n_days_past", type=check_int_range(1, 6), default=6, help="number of days in the past to extract tweets.", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-l") ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: twitter_view.display_sentiment( ticker=self.ticker, n_tweets=ns_parser.limit, n_days_past=ns_parser.n_days_past, export=ns_parser.export, ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_headlines(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="headlines", description="""FinBrain collects the news headlines from 15+ major financial news sources on a daily basis and analyzes them to generate sentiment scores for more than 4500 US stocks.FinBrain Technologies develops deep learning algorithms for financial analysis and prediction, which currently serves traders from more than 150 countries all around the world. [Source: https://finbrain.tech]""", ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: finbrain_view.display_sentiment_analysis( ticker=self.ticker, export=ns_parser.export ) else: print("No ticker loaded. 
Please load using 'load <ticker>'\n") @try_except def call_stats(self, other_args: List[str]): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="stats", description=""" Sentiment stats which displays buzz, news score, articles last week, articles weekly average, bullish vs bearish percentages, sector average bullish percentage, and sector average news score. [Source: https://finnhub.io] """, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: if self.ticker: finnhub_view.display_sentiment_stats( ticker=self.ticker, export=ns_parser.export ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_metrics(self, other_args: List[str]): command_description = f""" {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. The {Style.BRIGHT}metrics{Style.RESET_ALL} command prints the following realtime metrics: {Style.BRIGHT}AHI (Absolute Hype Index){Style.RESET_ALL} --- AHI is a measure of how much people are talking about a stock on social media. It is calculated by dividing the total number of mentions for the chosen stock on a social network by the mean number of mentions any stock receives on that social medium. {Style.BRIGHT}RHI (Relative Hype Index){Style.RESET_ALL} --- RHI is a measure of whether people are talking about a stock more or less than usual, calculated by dividing the mean AHI for the past day by the mean AHI for for the past week for that stock. {Style.BRIGHT}Sentiment Score{Style.RESET_ALL} --- Sentiment score is the percentage of people talking positively about the stock. For each social network the number of positive posts/comments is divided by the total number of both positive and negative posts/comments. {Style.BRIGHT}SGP (Standard General Perception){Style.RESET_ALL} --- SGP is a measure of whether people are more or less positive about a stock than usual. It is calculated by averaging the past day of sentiment values and then dividing it by the average of the past week of sentiment values. """ parser = argparse.ArgumentParser( add_help=False, prog="metrics", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: print( "Currently under maintenance by the new Sentiment Investor team.\n" ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_social(self, other_args: List[str]): command_description = f""" {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. The {Style.BRIGHT}social{Style.RESET_ALL} command prints the raw data for a given stock, including the number of mentions it has received on social media in the last hour and the sentiment score of those comments. 
""" parser = argparse.ArgumentParser( add_help=False, prog="social", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: print( "Currently under maintenance by the new Sentiment Investor team.\n" ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_historical(self, other_args: List[str]): command_description = f""" {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. The {Style.BRIGHT}historical{Style.RESET_ALL} command plots the past week of data for a selected metric, one of: {Style.BRIGHT}AHI (Absolute Hype Index){Style.RESET_ALL} --- AHI is a measure of how much people are talking about a stock on social media. It is calculated by dividing the total number of mentions for the chosen stock on a social network by the mean number of mentions any stock receives on that social medium. {Style.BRIGHT}RHI (Relative Hype Index){Style.RESET_ALL} --- RHI is a measure of whether people are talking about a stock more or less than usual, calculated by dividing the mean AHI for the past day by the mean AHI for for the past week for that stock. {Style.BRIGHT}Sentiment Score{Style.RESET_ALL} --- Sentiment score is the percentage of people talking positively about the stock. For each social network the number of positive posts/comments is divided by the total number of both positive and negative posts/comments. {Style.BRIGHT}SGP (Standard General Perception){Style.RESET_ALL} --- SGP is a measure of whether people are more or less positive about a stock than usual. It is calculated by averaging the past day of sentiment values and then dividing it by the average of the past week of sentiment values. """ parser = argparse.ArgumentParser( add_help=False, prog="historical", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) parser.add_argument( "-s", "--sort", action="store", type=str, default="date", help="the parameter to sort output table by", dest="sort_param", choices=self.historical_sort, ) parser.add_argument( "-d", "--direction", action="store", type=str, default="desc", help="the direction to sort the output table", dest="sort_dir", choices=self.historical_direction, ) parser.add_argument( "-m", "--metric", type=str, action="store", default="sentiment", dest="metric", choices=self.historical_metric, help="the metric to plot", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-m") ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if self.ticker: print( "Currently under maintenance by the new Sentiment Investor team.\n" ) else: print("No ticker loaded. Please load using 'load <ticker>'\n") @try_except def call_popularsi(self, other_args: List[str]): command_description = f""" The {Style.BRIGHT}popular{Style.RESET_ALL} command prints the stocks with highest Average Hype Index right now. {Style.BRIGHT}AHI (Absolute Hype Index){Style.RESET_ALL} --- AHI is a measure of how much people are talking about a stock on social media. It is calculated by dividing the total number of mentions for the chosen stock on a social network by the mean number of mentions any stock receives on that social medium. 
=== {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. """ parser = argparse.ArgumentParser( add_help=False, prog="popularsi", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=int, default=10, help="the maximum number of stocks to retrieve", ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: print("Currently under maintenance by the new Sentiment Investor team.\n") @try_except def call_emerging(self, other_args: List[str]): command_description = f""" The {Style.BRIGHT}emerging{Style.RESET_ALL} command prints the stocks with highest Relative Hype Index right now. {Style.BRIGHT}RHI (Relative Hype Index){Style.RESET_ALL} --- RHI is a measure of whether people are talking about a stock more or less than usual, calculated by dividing the mean AHI for the past day by the mean AHI for for the past week for that stock. === {Style.BRIGHT}Sentiment Investor{Style.RESET_ALL} analyzes data from four major social media platforms to generate hourly metrics on over 2,000 stocks. Sentiment provides volume and sentiment metrics powered by proprietary NLP models. """ parser = argparse.ArgumentParser( add_help=False, prog="popular", formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(command_description), ) parser.add_argument( "-l", "--limit", action="store", dest="limit", type=int, default=10, help="the maximum number of stocks to retrieve", ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: print("Currently under maintenance by the new Sentiment Investor team.\n") def menu(ticker: str, start: datetime, queue: List[str] = None): ba_controller = BehaviouralAnalysisController(ticker, start, queue) an_input = "HELP_ME" while True: if ba_controller.queue and len(ba_controller.queue) > 0: if ba_controller.queue[0] in ("q", "..", "quit"): print("") if len(ba_controller.queue) > 1: return ba_controller.queue[1:] return [] an_input = ba_controller.queue[0] ba_controller.queue = ba_controller.queue[1:] if an_input and an_input.split(" ")[0] in ba_controller.CHOICES_COMMANDS: print(f"{get_flair()} /stocks/ba/ $ {an_input}") else: if an_input == "HELP_ME": ba_controller.print_help() if session and gtff.USE_PROMPT_TOOLKIT and ba_controller.completer: an_input = session.prompt( f"{get_flair()} /stocks/ba/ $ ", completer=ba_controller.completer, search_ignore_case=True, ) else: an_input = input(f"{get_flair()} /stocks/ba/ $ ") try: ba_controller.queue = ba_controller.switch(an_input) except SystemExit: print( f"\nThe command '{an_input}' doesn't exist on the /stocks/ba menu.", end="", ) similar_cmd = difflib.get_close_matches( an_input.split(" ")[0] if " " in an_input else an_input, ba_controller.CHOICES, n=1, cutoff=0.7, ) if similar_cmd: if " " in an_input: candidate_input = ( f"{similar_cmd[0]} {' '.join(an_input.split(' ')[1:])}" ) if candidate_input == an_input: an_input = "" ba_controller.queue = [] print("\n") continue an_input = candidate_input else: an_input = similar_cmd[0] print(f" Replacing by '{an_input}'.") ba_controller.queue.insert(0, an_input) else: print("\n")
true
true
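The help text for `call_metrics` and `call_historical` in the record above defines AHI, RHI, sentiment score, and SGP as simple ratios. The sketch below restates those four formulas in plain Python, purely as illustration; every function name and sample number here is hypothetical, and this is not Sentiment Investor's actual implementation.

# Sketch of the four ratios described in the help text; all names and numbers
# are hypothetical, not Sentiment Investor's code.
from statistics import mean

def ahi(ticker_mentions, mean_mentions_per_stock):
    # Absolute Hype Index: one stock's mentions vs. the average stock's mentions
    return ticker_mentions / mean_mentions_per_stock

def rhi(ahi_past_day, ahi_past_week):
    # Relative Hype Index: day-average AHI divided by week-average AHI
    return mean(ahi_past_day) / mean(ahi_past_week)

def sentiment_score(n_positive, n_negative):
    # share of positive posts among all positive + negative posts
    return n_positive / (n_positive + n_negative)

def sgp(sentiment_past_day, sentiment_past_week):
    # Standard General Perception: day-average sentiment over week-average sentiment
    return mean(sentiment_past_day) / mean(sentiment_past_week)

print(ahi(120, 40.0))                    # 3.0 -> mentioned 3x more than the average stock
print(rhi([3.0, 2.5], [1.0, 1.5, 2.0]))  # > 1.0 -> hype is rising
print(sentiment_score(80, 20))           # 0.8 -> 80% of posts are positive

Keeping each metric a one-line ratio makes the relationship in the help text explicit: values above 1.0 mean more hype (or more positive sentiment) than the recent baseline.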
1c43500ab565f603474d3551abc7e2a246e9e65f
1,243
py
Python
src/rudiments/reamed/__init__.py
jhermann/rudiments
7852dae498ec6ba6a7ec4cd2978ac13c05995d71
[ "Apache-2.0" ]
9
2015-04-05T00:18:32.000Z
2020-05-10T03:21:19.000Z
src/rudiments/reamed/__init__.py
jhermann/rudiments
7852dae498ec6ba6a7ec4cd2978ac13c05995d71
[ "Apache-2.0" ]
3
2015-04-11T13:31:13.000Z
2015-06-19T13:45:48.000Z
src/rudiments/reamed/__init__.py
jhermann/rudiments
7852dae498ec6ba6a7ec4cd2978ac13c05995d71
[ "Apache-2.0" ]
3
2016-09-01T19:20:57.000Z
2021-04-20T08:57:45.000Z
# -*- coding: utf-8 -*- # pylint: disable=bad-continuation """ Extensions to third-party libraries. Note that you need to add the underlying package to your dependencies in addition to ``rudiments``, in case you use one of the modules in here. ``rudiments`` itself does not publish any dependencies on them. Where the extended package has a condensed public API (i.e. names are usually only imported from the package name), these modules can serve as a drop-in replacement, so you just have to change the import statement a little. """ # Copyright © 2015 - 2019 Jürgen Hermann <jh@web.de> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, unicode_literals, print_function
42.862069
74
0.745776
from __future__ import absolute_import, unicode_literals, print_function
true
true
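The docstring of this record says the `rudiments.reamed` modules can act as drop-in replacements where the wrapped package has a condensed public API, so only the import statement changes. A minimal sketch of that usage pattern, assuming a wrapper module named `rudiments.reamed.click` is available (the specific wrapped module is an assumption, not something this record confirms):

# Hypothetical drop-in usage; assumes a wrapper module 'rudiments.reamed.click'
# exists alongside an installed 'click' package (see the note on dependencies).
#
#   before:  import click
#   after:
from rudiments.reamed import click  # same condensed public API, plus extensions

@click.command()
def hello():
    click.echo("Hello from the wrapped API!")

if __name__ == '__main__':
    hello()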
1c4350bca861826476082837fad153d4b6705c52
7,792
py
Python
steam/ext/commands/help.py
Gobot1234/steam
ffcd3a6c4d531fbf174f359f0b66d9a9525d62cb
[ "MIT" ]
79
2020-02-26T19:20:07.000Z
2022-03-24T11:12:57.000Z
steam/ext/commands/help.py
Gobot1234/steam
ffcd3a6c4d531fbf174f359f0b66d9a9525d62cb
[ "MIT" ]
34
2020-04-26T01:55:31.000Z
2022-03-15T17:38:34.000Z
steam/ext/commands/help.py
Gobot1234/steam
ffcd3a6c4d531fbf174f359f0b66d9a9525d62cb
[ "MIT" ]
14
2020-07-15T14:50:14.000Z
2022-01-26T21:51:30.000Z
""" The MIT License (MIT) Copyright (c) 2020 James Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations import abc import sys import traceback from collections.abc import Mapping from typing import TYPE_CHECKING, Any from typing_extensions import final from .commands import Command, Group from .context import Context if TYPE_CHECKING: from steam.ext import commands __all__ = ( "HelpCommand", "DefaultHelpCommand", ) class HelpCommand(Command): """The base implementation of the help command.""" context: Context #: The context for the command's invocation. def __init__(self, **kwargs: Any): default = dict(name="help", help="Shows this message.", cog=self) default.update(kwargs) super().__init__(self.command_callback, **default) @final async def command_callback(self, ctx: Context, *, content: str = None) -> None: """The actual implementation of the help command. This method should not directly subclassed instead you should change the behaviour through the methods that actually get dispatched: - :meth:`send_cog_help` - :meth:`send_command_help` - :meth:`send_group_help` - :meth:`command_not_found` """ self.context = ctx bot = ctx.bot if content is None: mapping = self.get_bot_mapping() return await self.send_help(mapping) # check if it's a cog cog = bot.get_cog(content) if cog is not None: return await self.send_cog_help(cog) command = bot.get_command(content) if command is not None: return await ( self.send_group_help(command) if isinstance(command, Group) else self.send_command_help(command) ) await self.command_not_found(content) def get_bot_mapping(self) -> "Mapping[str | None, list[commands.Command]]": """ Generate a mapping of the bot's commands. It's not normally necessary to subclass this. This is passed to :meth:`send_help`. """ bot = self.context.bot mapping = {cog.qualified_name: list(cog.commands) for name, cog in bot.cogs.items() if cog.commands} categorized_commands = [command for c in mapping.values() for command in c] mapping[None] = [c for c in bot.commands if c not in categorized_commands] return mapping @abc.abstractmethod async def send_help(self, mapping: "Mapping[str | None, list[commands.Command]]") -> None: """Send the basic help message for the bot's command. Parameters ---------- mapping The mapping from :meth:`get_bot_mapping`. """ @abc.abstractmethod async def send_cog_help(self, cog: "commands.Cog") -> None: """The method called with a cog is passed as an argument. Note ---- Cog names are case-sensitive. Parameters ---------- cog The cog that was passed as an argument. 
""" @abc.abstractmethod async def send_command_help(self, command: "commands.Command") -> None: """The method called when a normal command is passed as an argument. Parameters ---------- command The command that was passed as an argument. """ @abc.abstractmethod async def send_group_help(self, command: "commands.Group") -> None: """The method called when a group command is passed as an argument. Parameters ---------- command The command that was passed as an argument. """ @abc.abstractmethod async def command_not_found(self, command: str) -> None: """The default implementation for when a command isn't found. This by default sends "The command {command} was not found." Parameters ---------- command The command that was not found. """ async def on_error(self, ctx: "commands.Context", error: Exception) -> None: """The default error handler for the help command. This performs the functionality as :meth:`steam.ext.commands.Bot.on_command_error`. Parameters ---------- ctx The context for the invocation. error The error that was raised. """ print(f"Ignoring exception in command {ctx.command}:", file=sys.stderr) traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr) cog_command_error = on_error class DefaultHelpCommand(HelpCommand): """The default implementation of the help command.""" def __repr__(self) -> str: return "<default_help_command>" def _get_doc(self, command: Command) -> str: try: return command.help.splitlines()[0] except (IndexError, AttributeError): return "" async def send_help(self, mapping: "Mapping[str | None, list[commands.Command]]") -> None: message = ["/pre"] for cog_name, commands in mapping.items(): ( message.append(f"\n{cog_name}'s commands") if cog_name is not None else message.append("\nUn-categorized commands") ) for command in commands: message.append(f'{command.name}{f": {self._get_doc(command)}" if command.help else ""}') await self.context.send("\n".join(message)) async def send_cog_help(self, cog: "commands.Cog") -> None: message = [f"/pre {cog.qualified_name}'s commands"] for name in sorted(c.name for c in cog.commands): command = cog.__commands__[name] message.append(f'{name}{f": {self._get_doc(command)}" if command.help else ""}') await self.context.send("\n".join(message)) async def send_command_help(self, command: "commands.Command") -> None: await self.context.send(f"/pre Help with {command.name}:\n\n{command.help}") async def send_group_help(self, command: "commands.Group") -> None: msg = [f"/pre Help with {command.name}:\n\n{command.help}"] sub_commands = "\n".join(c.name for c in command.children) if sub_commands: msg.append(f"\nAnd its sub commands:\n{sub_commands}") await self.context.send("\n".join(msg)) async def command_not_found(self, command: str) -> None: """The default implementation for when a command isn't found. This by default sends "The command {command} was not found." Parameters ---------- command The command that was not found. """ await self.context.send(f"The command {command!r} was not found.")
35.099099
115
0.63963
from __future__ import annotations import abc import sys import traceback from collections.abc import Mapping from typing import TYPE_CHECKING, Any from typing_extensions import final from .commands import Command, Group from .context import Context if TYPE_CHECKING: from steam.ext import commands __all__ = ( "HelpCommand", "DefaultHelpCommand", ) class HelpCommand(Command): context: Context def __init__(self, **kwargs: Any): default = dict(name="help", help="Shows this message.", cog=self) default.update(kwargs) super().__init__(self.command_callback, **default) @final async def command_callback(self, ctx: Context, *, content: str = None) -> None: self.context = ctx bot = ctx.bot if content is None: mapping = self.get_bot_mapping() return await self.send_help(mapping) # check if it's a cog cog = bot.get_cog(content) if cog is not None: return await self.send_cog_help(cog) command = bot.get_command(content) if command is not None: return await ( self.send_group_help(command) if isinstance(command, Group) else self.send_command_help(command) ) await self.command_not_found(content) def get_bot_mapping(self) -> "Mapping[str | None, list[commands.Command]]": bot = self.context.bot mapping = {cog.qualified_name: list(cog.commands) for name, cog in bot.cogs.items() if cog.commands} categorized_commands = [command for c in mapping.values() for command in c] mapping[None] = [c for c in bot.commands if c not in categorized_commands] return mapping @abc.abstractmethod async def send_help(self, mapping: "Mapping[str | None, list[commands.Command]]") -> None: @abc.abstractmethod async def send_cog_help(self, cog: "commands.Cog") -> None: @abc.abstractmethod async def send_command_help(self, command: "commands.Command") -> None: @abc.abstractmethod async def send_group_help(self, command: "commands.Group") -> None: @abc.abstractmethod async def command_not_found(self, command: str) -> None: async def on_error(self, ctx: "commands.Context", error: Exception) -> None: print(f"Ignoring exception in command {ctx.command}:", file=sys.stderr) traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr) cog_command_error = on_error class DefaultHelpCommand(HelpCommand): def __repr__(self) -> str: return "<default_help_command>" def _get_doc(self, command: Command) -> str: try: return command.help.splitlines()[0] except (IndexError, AttributeError): return "" async def send_help(self, mapping: "Mapping[str | None, list[commands.Command]]") -> None: message = ["/pre"] for cog_name, commands in mapping.items(): ( message.append(f"\n{cog_name}'s commands") if cog_name is not None else message.append("\nUn-categorized commands") ) for command in commands: message.append(f'{command.name}{f": {self._get_doc(command)}" if command.help else ""}') await self.context.send("\n".join(message)) async def send_cog_help(self, cog: "commands.Cog") -> None: message = [f"/pre {cog.qualified_name}'s commands"] for name in sorted(c.name for c in cog.commands): command = cog.__commands__[name] message.append(f'{name}{f": {self._get_doc(command)}" if command.help else ""}') await self.context.send("\n".join(message)) async def send_command_help(self, command: "commands.Command") -> None: await self.context.send(f"/pre Help with {command.name}:\n\n{command.help}") async def send_group_help(self, command: "commands.Group") -> None: msg = [f"/pre Help with {command.name}:\n\n{command.help}"] sub_commands = "\n".join(c.name for c in command.children) if sub_commands: msg.append(f"\nAnd its sub 
commands:\n{sub_commands}") await self.context.send("\n".join(msg)) async def command_not_found(self, command: str) -> None: await self.context.send(f"The command {command!r} was not found.")
true
true
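Since `command_callback` in the record above already handles all dispatching, a custom help command only needs to fill in the five abstract hooks. The subclass below is a minimal sketch of that idea; the import path is assumed and the class is illustrative, not part of the library.

# Minimal custom help command; only the abstract hooks are overridden, since
# command_callback() does the dispatching. Illustrative sketch only.
from steam.ext.commands import HelpCommand  # import path assumed

class PlainTextHelp(HelpCommand):
    async def send_help(self, mapping):
        names = sorted(c.name for cmds in mapping.values() for c in cmds)
        await self.context.send("Commands: " + ", ".join(names))

    async def send_cog_help(self, cog):
        await self.context.send(
            f"{cog.qualified_name}: " + ", ".join(c.name for c in cog.commands)
        )

    async def send_command_help(self, command):
        await self.context.send(command.help or "No help text available.")

    async def send_group_help(self, command):
        await self.send_command_help(command)

    async def command_not_found(self, command):
        await self.context.send(f"No command called {command!r}.")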
1c43517a6ac0c278538d2d7dacc69c184b3d16e4
1,340
py
Python
src/dominh/tool/dominh_get.py
gavanderhoorn/dominh
318ca25d2095bc86007e2f9e87bd8dd8b06aec43
[ "Apache-2.0" ]
25
2020-02-14T17:16:31.000Z
2022-02-25T14:58:42.000Z
src/dominh/tool/dominh_get.py
gavanderhoorn/dominh
318ca25d2095bc86007e2f9e87bd8dd8b06aec43
[ "Apache-2.0" ]
15
2020-02-17T12:13:30.000Z
2021-11-30T09:12:38.000Z
src/dominh/tool/dominh_get.py
gavanderhoorn/dominh
318ca25d2095bc86007e2f9e87bd8dd8b06aec43
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) 2020, G.A. vd. Hoorn # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # author: G.A. vd. Hoorn """ Retrieve value of a (system) variable. Usage: dominh [--no-upload] get [options] <host> <variable> Options: -h --help Show this screen. """ import sys from docopt import docopt from requests import exceptions import dominh def main(argv, skip_upload=False): args = docopt(__doc__, argv=argv) varname = args['<variable>'] try: c = dominh.connect(host=args['<host>'], skip_helper_upload=skip_upload) print(c.variable(varname).val) except (exceptions.ConnectionError, OSError) as e: sys.stderr.write(f"Error trying to connect to the controller: {e}\n") except dominh.DominhException as e: sys.stderr.write(f"Error during read: {e}\n")
26.27451
79
0.70597
import sys from docopt import docopt from requests import exceptions import dominh def main(argv, skip_upload=False): args = docopt(__doc__, argv=argv) varname = args['<variable>'] try: c = dominh.connect(host=args['<host>'], skip_helper_upload=skip_upload) print(c.variable(varname).val) except (exceptions.ConnectionError, OSError) as e: sys.stderr.write(f"Error trying to connect to the controller: {e}\n") except dominh.DominhException as e: sys.stderr.write(f"Error during read: {e}\n")
true
true
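The tool above leans on docopt's core trick: the module docstring is both the help text and the argument grammar, and `docopt(__doc__)` returns a dict keyed by `<host>` and `<variable>`. A self-contained toy CLI showing the same pattern; the tool name and behaviour are hypothetical:

"""Toy CLI built on the same docopt pattern as dominh_get (names hypothetical).

Usage:
  toy get <host> <variable>

Options:
  -h --help    Show this screen.
"""
import sys

from docopt import docopt


def main(argv=None):
    # docopt validates argv against the Usage section and returns a dict
    args = docopt(__doc__, argv=argv)
    print("would read {!r} from {!r}".format(args['<variable>'], args['<host>']))


if __name__ == '__main__':
    main(sys.argv[1:])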
1c4351de021e4418f19db6fd841a2bfb0803c8e3
5,517
py
Python
Python/AI-ToolBox/computer_vision/image_classification_keras/parameterAdjusting_practice/CNN_v3/CNN_v3.py
Lornatang/DayHR
02e81961e0a710f2c82db70c06b505e608db61cc
[ "Apache-2.0" ]
7
2019-07-12T07:37:50.000Z
2020-04-10T00:32:23.000Z
Python/AI-ToolBox/computer_vision/image_classification_keras/parameterAdjusting_practice/CNN_v3/CNN_v3.py
Lornatang/dayhr-transfer
02e81961e0a710f2c82db70c06b505e608db61cc
[ "Apache-2.0" ]
null
null
null
Python/AI-ToolBox/computer_vision/image_classification_keras/parameterAdjusting_practice/CNN_v3/CNN_v3.py
Lornatang/dayhr-transfer
02e81961e0a710f2c82db70c06b505e608db61cc
[ "Apache-2.0" ]
2
2019-08-02T21:29:52.000Z
2020-01-02T15:36:28.000Z
# -*- coding: utf-8 -*-
"""
Created on Thu Nov  1 12:02:27 2018

CNN parameter-tuning practice, version 3:
    uses a simple architecture similar to AlexNet and VGG;
    builds on CNN_v2 and adds data augmentation

@author: zyb_as
"""

# -----------------------------------------------------------
# Basic parameters
# -----------------------------------------------------------
trainSetRootPath = '../../../dataset/trainSetExample'   # training set root; it should contain one sub-folder of images per category
validSetRootPath = '../../../dataset/validSetExample/'  # validation set root; it should contain one sub-folder of images per category
targetSize = (224, 224, 3)  # resize target (every image in the dataset will be rescaled to this shape)
categoryNum = 3             # number of categories to recognise (you must set this manually)
batchSize = 32
epochNum = 100


#-----------------------------------------------------------------------------------------
# image data generator
# read the data with Keras' ImageDataGenerator while applying data augmentation
#-----------------------------------------------------------------------------------------
from keras.preprocessing.image import ImageDataGenerator

# **modify this section according to the augmentation methods you want to try**
train_datagen = ImageDataGenerator(rescale=1/255.,
        rotation_range = 10,      # random rotation angle during augmentation (integer)
        width_shift_range = 0.1,  # horizontal shift range (fraction of image width, float)
        height_shift_range = 0.1, # vertical shift range (fraction of image height, float)
        shear_range = 0.2,        # shear intensity (counter-clockwise shear angle, float)
        zoom_range = 0.2,         # random zoom range ([1 - zoom_range, 1 + zoom_range])
        horizontal_flip = True,   # apply random horizontal flips
        vertical_flip = False     # apply random vertical flips
        )
val_datagen = ImageDataGenerator(rescale=1/255.)

train_generator = train_datagen.flow_from_directory(
        trainSetRootPath,       # scans this directory; each sub-folder is treated as one class
        target_size=(targetSize[0], targetSize[1]),  # pixel size of the generated images
        batch_size=batchSize,   # number of images generated per batch
        class_mode='categorical')

validation_generator = val_datagen.flow_from_directory(
        validSetRootPath,
        target_size=(targetSize[0], targetSize[1]),
        batch_size=batchSize,
        class_mode='categorical')


#-----------------------------------------------------------------------------------------
# Set CallBack(loss history)
#-----------------------------------------------------------------------------------------
from keras.callbacks import Callback

# record the training process
# this Callback logs the loss and accuracy of every epoch
class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []
        self.acces = []
        self.val_losses = []
        self.val_acces = []

    def on_epoch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
        self.acces.append(logs.get('acc'))
        self.val_losses.append(logs.get('val_loss'))
        self.val_acces.append(logs.get('val_acc'))

history = LossHistory()


# -----------------------------------------------------------
# Build the network
# -----------------------------------------------------------
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
from keras.optimizers import SGD, Adam
from matplotlib import pyplot as plt

def buildCNN():
    """
    Build a CNN with the specified number of blocks.
    blockNum is at least 3 by default.
    One block consists of two convolutional layers plus one max-pooling layer.
    """
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape = targetSize))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(categoryNum, activation='softmax'))

    # choose an optimizer
    #sgd = SGD(lr=0.0000001, decay=1e-6, momentum=0.1, nesterov=False)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model


# -----------------------------------------------------------
# Build and train the CNN model
# -----------------------------------------------------------
print('---------------------')
print('start training model with the image enhancement strategy\n')

model = buildCNN()

# total number of training samples
train_sample_count = len(train_generator.filenames)
# total number of validation samples
val_sample_count = len(validation_generator.filenames)

model.fit_generator(
        train_generator,
        steps_per_epoch= int(train_sample_count/batchSize) + 1,  # steps_per_epoch defines how many batches make up one epoch
        epochs=epochNum,
        validation_data=validation_generator,
        validation_steps= int(val_sample_count/batchSize) + 1,
        # batch_size,
        callbacks=[history])

print('\ntraining finished')
print('---------------------\n')

# evaluate the model and visualise the results
print('best accuracy on training set:' + str(max(history.acces)))
print('best accuracy on validation set:' + str(max(history.val_acces)))
print('\nvalidation accuracy record on each epoches:')
print(history.val_acces)

plt.title('Result Analysis')
plt.plot([x for x in range(1, len(history.acces) + 1)], history.acces, color='green', label='training accuracy')
plt.plot([x for x in range(1, len(history.val_acces) + 1)], history.val_acces, color='skyblue', label='validation accuracy')
plt.legend()  # show the legend
plt.xlabel('epoches')
plt.ylabel('accuracy')
plt.show()
32.263158
124
0.589995
trainSetRootPath = '../../../dataset/trainSetExample' validSetRootPath = '../../../dataset/validSetExample/' targetSize = (224, 224, 3) categoryNum = 3 batchSize = 32 epochNum = 100 from keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale=1/255., rotation_range = 10, width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True, vertical_flip = False ) val_datagen = ImageDataGenerator(rescale=1/255.) train_generator = train_datagen.flow_from_directory( trainSetRootPath, target_size=(targetSize[0], targetSize[1]), batch_size=batchSize, class_mode='categorical') validation_generator = val_datagen.flow_from_directory( validSetRootPath, target_size=(targetSize[0], targetSize[1]), batch_size=batchSize, class_mode='categorical') from keras.callbacks import Callback class LossHistory(Callback): def on_train_begin(self, logs={}): self.losses = [] self.acces = [] self.val_losses = [] self.val_acces = [] def on_epoch_end(self, batch, logs={}): self.losses.append(logs.get('loss')) self.acces.append(logs.get('acc')) self.val_losses.append(logs.get('val_loss')) self.val_acces.append(logs.get('val_acc')) history = LossHistory() from keras.models import Sequential from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten from keras.optimizers import SGD, Adam from matplotlib import pyplot as plt def buildCNN(): model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape = targetSize)) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(256, (3, 3), activation='relu')) model.add(Conv2D(256, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dense(256, activation='relu')) model.add(Dense(categoryNum, activation='softmax')) adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy']) return model print('---------------------') print('start training model with the image enhancement strategy\n') model = buildCNN() train_sample_count = len(train_generator.filenames) val_sample_count = len(validation_generator.filenames) model.fit_generator( train_generator, steps_per_epoch= int(train_sample_count/batchSize) + 1, epochs=epochNum, validation_data=validation_generator, validation_steps= int(val_sample_count/batchSize) + 1, callbacks=[history]) print('\ntraining finished') print('---------------------\n') print('best accuracy on training set:' + str(max(history.acces))) print('best accuracy on validation set:' + str(max(history.val_acces))) print('\nvalidation accuracy record on each epoches:') print(history.val_acces) plt.title('Result Analysis') plt.plot([x for x in range(1, len(history.acces) + 1)], history.acces, color='green', label='training accuracy') plt.plot([x for x in range(1, len(history.val_acces) + 1)], history.val_acces, color='skyblue', label='validation accuracy') plt.legend() plt.xlabel('epoches') plt.ylabel('accuracy') plt.show()
true
true
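The augmentation settings in the CNN_v3 record above can be previewed before a long training run by pulling a few batches from a generator built with the same parameters. A short sketch using the same old-style Keras API as the record; 'example.jpg' is a hypothetical placeholder path:

# Preview a few augmented samples before committing to training.
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array

preview_datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1,
                                     height_shift_range=0.1, shear_range=0.2,
                                     zoom_range=0.2, horizontal_flip=True)

img = img_to_array(load_img('example.jpg', target_size=(224, 224)))  # hypothetical file
batch = np.expand_dims(img, axis=0)  # flow() expects a rank-4 array

# flow() yields batches indefinitely; zip with a range to stop after four
for i, augmented in zip(range(4), preview_datagen.flow(batch, batch_size=1)):
    print('augmented sample', i, augmented.shape)  # (1, 224, 224, 3)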
1c435271961185b573cfe80263e018907fa38fe0
4,404
py
Python
numba/tests/test_forloop.py
aseyboldt/numba
0c0a8aa7a7cad8b5f0a5383101f5cca5a4a89df3
[ "BSD-2-Clause" ]
null
null
null
numba/tests/test_forloop.py
aseyboldt/numba
0c0a8aa7a7cad8b5f0a5383101f5cca5a4a89df3
[ "BSD-2-Clause" ]
null
null
null
numba/tests/test_forloop.py
aseyboldt/numba
0c0a8aa7a7cad8b5f0a5383101f5cca5a4a89df3
[ "BSD-2-Clause" ]
null
null
null
#! /usr/bin/env python
# ______________________________________________________________________
'''test_forloop

Test the Numba compiler on a simple for loop over an iterable object.
'''
# ______________________________________________________________________

from numba.decorators import jit

import numpy

import unittest
import __builtin__

# ______________________________________________________________________

def for_loop_fn_0 (iterable):
    acc = 0.
    for value in iterable:
        acc += value
    return acc

# ______________________________________________________________________

def for_loop_fn_1 (start, stop, inc):
    acc = 0
    for value in range(start, stop, inc):
        acc += value
    return acc

# ______________________________________________________________________

def for_loop_fn_2 (stop):
    acc = 0
    for value_0 in range(stop):
        for value_1 in range(stop):
            acc += value_0 * value_1
    return acc

# ______________________________________________________________________

def for_loop_fn_3 (stop):
    acc = 0
    for i in range(stop):
        for j in range(stop):
            for k in range(stop):
                for l in range(stop):
                    acc += 1
    return acc

# ______________________________________________________________________

def for_loop_w_guard_0 (test_input):
    '''Test case based on issue #25.  See:
    https://github.com/numba/numba/issues/25'''
    acc = 0.0
    for i in range(5):
        if i == test_input:
            acc += 100.0
    return acc

# ______________________________________________________________________

def for_loop_w_guard_1 (test_input):
    '''Test case based on issue #25.  See:
    https://github.com/numba/numba/issues/25'''
    acc = 0.0
    for i in range(5):
        if i == test_input:
            acc += 100.0
        else:
            acc += i
    return acc

# ______________________________________________________________________

class TestForLoop(unittest.TestCase):
    @unittest.skipUnless(hasattr(__builtin__, '__noskip__'),
                         "Requires implementation of iteration "
                         "over arrays.")
    def test_compiled_for_loop_fn_0(self):
        test_data = numpy.array([1, 2, 3], dtype = 'l')
        compiled_for_loop_fn = jit(
            argtypes = [['l']],backend='bytecode')(for_loop_fn_0)
        result = compiled_for_loop_fn(test_data)
        self.assertEqual(result, 6)
        self.assertEqual(result, for_loop_fn_0(test_data))

    def test_compiled_for_loop_fn_1(self):
        compiled_for_loop_fn = jit(argtypes = ['i','i','i'],
                                   restype = 'i', backend='bytecode')(for_loop_fn_1)
        result = compiled_for_loop_fn(1, 4, 1)
        self.assertEqual(result, 6)
        self.assertEqual(result, for_loop_fn_1(1, 4, 1))

    def test_compiled_for_loop_fn_2(self):
        compiled_for_loop_fn = jit(argtypes = ['i'],
                                   restype = 'i', backend='bytecode')(for_loop_fn_2)
        result = compiled_for_loop_fn(4)
        self.assertEqual(result, 36)
        self.assertEqual(result, for_loop_fn_2(4))

    def test_compiled_for_loop_fn_3(self):
        compiled_for_loop_fn = jit(argtypes = ['i'],
                                   restype = 'i', backend='bytecode')(for_loop_fn_3)
        result = compiled_for_loop_fn(3)
        self.assertEqual(result, for_loop_fn_3(3))
        self.assertEqual(result, 81)

    def test_compiled_for_loop_w_guard_0(self):
        compiled_for_loop_w_guard = jit(backend='bytecode')(for_loop_w_guard_0)
        self.assertEqual(compiled_for_loop_w_guard(5.),
                         for_loop_w_guard_0(5.))
        self.assertEqual(compiled_for_loop_w_guard(4.),
                         for_loop_w_guard_0(4.))

    def test_compiled_for_loop_w_guard_1(self):
        compiled_for_loop_w_guard = jit(backend='bytecode')(for_loop_w_guard_1)
        self.assertEqual(compiled_for_loop_w_guard(5.),
                         for_loop_w_guard_1(5.))
        self.assertEqual(compiled_for_loop_w_guard(4.),
                         for_loop_w_guard_1(4.))

# ______________________________________________________________________

if __name__ == "__main__":
    unittest.main()

# ______________________________________________________________________
# End of test_forloop.py
33.363636
94
0.663942
from numba.decorators import jit

import numpy

import unittest
import __builtin__


def for_loop_fn_0 (iterable):
    acc = 0.
    for value in iterable:
        acc += value
    return acc


def for_loop_fn_1 (start, stop, inc):
    acc = 0
    for value in range(start, stop, inc):
        acc += value
    return acc


def for_loop_fn_2 (stop):
    acc = 0
    for value_0 in range(stop):
        for value_1 in range(stop):
            acc += value_0 * value_1
    return acc


def for_loop_fn_3 (stop):
    acc = 0
    for i in range(stop):
        for j in range(stop):
            for k in range(stop):
                for l in range(stop):
                    acc += 1
    return acc


def for_loop_w_guard_0 (test_input):
    acc = 0.0
    for i in range(5):
        if i == test_input:
            acc += 100.0
    return acc


def for_loop_w_guard_1 (test_input):
    acc = 0.0
    for i in range(5):
        if i == test_input:
            acc += 100.0
        else:
            acc += i
    return acc


class TestForLoop(unittest.TestCase):
    @unittest.skipUnless(hasattr(__builtin__, '__noskip__'),
                         "Requires implementation of iteration "
                         "over arrays.")
    def test_compiled_for_loop_fn_0(self):
        test_data = numpy.array([1, 2, 3], dtype = 'l')
        compiled_for_loop_fn = jit(
            argtypes = [['l']],backend='bytecode')(for_loop_fn_0)
        result = compiled_for_loop_fn(test_data)
        self.assertEqual(result, 6)
        self.assertEqual(result, for_loop_fn_0(test_data))

    def test_compiled_for_loop_fn_1(self):
        compiled_for_loop_fn = jit(argtypes = ['i','i','i'],
                                   restype = 'i', backend='bytecode')(for_loop_fn_1)
        result = compiled_for_loop_fn(1, 4, 1)
        self.assertEqual(result, 6)
        self.assertEqual(result, for_loop_fn_1(1, 4, 1))

    def test_compiled_for_loop_fn_2(self):
        compiled_for_loop_fn = jit(argtypes = ['i'],
                                   restype = 'i', backend='bytecode')(for_loop_fn_2)
        result = compiled_for_loop_fn(4)
        self.assertEqual(result, 36)
        self.assertEqual(result, for_loop_fn_2(4))

    def test_compiled_for_loop_fn_3(self):
        compiled_for_loop_fn = jit(argtypes = ['i'],
                                   restype = 'i', backend='bytecode')(for_loop_fn_3)
        result = compiled_for_loop_fn(3)
        self.assertEqual(result, for_loop_fn_3(3))
        self.assertEqual(result, 81)

    def test_compiled_for_loop_w_guard_0(self):
        compiled_for_loop_w_guard = jit(backend='bytecode')(for_loop_w_guard_0)
        self.assertEqual(compiled_for_loop_w_guard(5.),
                         for_loop_w_guard_0(5.))
        self.assertEqual(compiled_for_loop_w_guard(4.),
                         for_loop_w_guard_0(4.))

    def test_compiled_for_loop_w_guard_1(self):
        compiled_for_loop_w_guard = jit(backend='bytecode')(for_loop_w_guard_1)
        self.assertEqual(compiled_for_loop_w_guard(5.),
                         for_loop_w_guard_1(5.))
        self.assertEqual(compiled_for_loop_w_guard(4.),
                         for_loop_w_guard_1(4.))


if __name__ == "__main__":
    unittest.main()
true
true
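The tests above target a legacy `jit(argtypes=..., restype=..., backend='bytecode')` signature from early Numba. For comparison, the same accumulating loop compiles under today's lazy decorator without explicit type strings; this is a sketch for contrast, not a drop-in update of the test suite:

# The same accumulating loop under the modern Numba API.
from numba import njit

@njit
def sum_range(start, stop, inc):
    acc = 0
    for value in range(start, stop, inc):
        acc += value
    return acc

assert sum_range(1, 4, 1) == 6    # matches for_loop_fn_1(1, 4, 1)
assert sum_range(0, 10, 2) == 20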
1c4353518377089683fe37c0a25d8096adebb750
7,393
py
Python
xblock_jupyter_graded/nbgrader_utils.py
titlethanason/jupyter-edx-grader-xblock
f70326432f998b919dd30a27c41175ab1249bd4b
[ "BSD-3-Clause" ]
null
null
null
xblock_jupyter_graded/nbgrader_utils.py
titlethanason/jupyter-edx-grader-xblock
f70326432f998b919dd30a27c41175ab1249bd4b
[ "BSD-3-Clause" ]
null
null
null
xblock_jupyter_graded/nbgrader_utils.py
titlethanason/jupyter-edx-grader-xblock
f70326432f998b919dd30a27c41175ab1249bd4b
[ "BSD-3-Clause" ]
null
null
null
import json import logging import os import pkg_resources from subprocess import Popen, PIPE from .config import ( RELEASE, SUBMITTED, SOURCE, AUTOGRADED, FEEDBACK, EDX_ROOT, CONT_ROOT ) from . import file_manager as fm from . import container_manager as cm from .exceptions import DockerContainerError, ValidationError log = logging.getLogger(__name__) def normalize_course_id(course_id): """Make course_id directory naming name worthy convert: from: course-v1:course+name to: course_name """ return course_id.split(":")[1].replace("+", "_") def normalize_unit_id(unit_id): """Make unit_id directory name worthy convert: from: block-v1:course+type@vertical+block@digits to: vertical_block_digits """ return "_".join(unit_id.split("@")[1:]).replace("+", "_") def init_new_course(course_id): """Create edx directory structure for new course""" fm.create_course_dirs(course_id) def generate_student_nb(course_id, unit_id, f): """Runs nbgrader assign and returns max possible notebook score""" course = normalize_course_id(course_id) unit = normalize_unit_id(unit_id) # Create new course structure if necessary init_new_course(course) # Validate and Save instructor source notebook fm.validate_instructor_nb(f) fm.save_instructor_nb(course, unit, f) max_score = _run_assign_container(f.filename, course, unit) return max_score def autograde_notebook(username, course_id, unit_id, f, cell_timeout=15, allow_net=False): """Runs nbgrader autograde and returns student score Requires normalized course_id/unit_id """ course = normalize_course_id(course_id) unit = normalize_unit_id(unit_id) fm.save_student_nb(username, course, unit, f) score = _run_autograde_container(f.filename, course, unit, username, cell_timeout, allow_net) return score def _run_assign_container(nb_filename, course_id, unit_id): """Runs assign to generate student nb and returns max score Requires normalized course_id/unit_id """ build_container_if_not_exists(course_id) host_source_path = os.path.join(EDX_ROOT, course_id, SOURCE, unit_id) cont_source_path = os.path.join(CONT_ROOT, SOURCE, 'ps1') host_release_path = os.path.join(EDX_ROOT, course_id, RELEASE, unit_id) cont_release_path = os.path.join(CONT_ROOT, RELEASE, 'ps1') cmd = [ 'sudo', '-u', 'jupyter', 'docker', 'run', '-t', '-v', "{}:{}".format(host_source_path, cont_source_path), '-v', "{}:{}".format(host_release_path, cont_release_path), course_id.lower(), 'python', '/home/jupyter/run_grader.py', '--cmd', 'assign', '--nbname', nb_filename, ] p = Popen(cmd, stderr=PIPE, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise DockerContainerError(err.decode('utf-8')); nb_name = os.path.splitext(nb_filename)[0] result_fn = "{}_results.json".format(nb_name) with open(os.path.join(host_release_path, result_fn), 'r') as f: results = json.load(f) if not results['success']: raise DockerContainerError(results['err']) return results['max_score'] def _run_autograde_container(nb_filename, course_id, unit_id, username, cell_timeout, allow_net): """Runs autograde for notebook and student, returning student score Requires normalized course_id/unit_id """ # Create student based directories fm.create_autograded_dir(course_id, unit_id, username) fm.create_feedback_dir(course_id, unit_id, username) # Create host:container directory mappings host_source_path = os.path.join(EDX_ROOT, course_id, SOURCE, unit_id, nb_filename) cont_source_path = os.path.join(CONT_ROOT, SOURCE, 'ps1', nb_filename) host_submitted_path = os.path.join(EDX_ROOT, course_id, SUBMITTED, username, unit_id, nb_filename) 
    cont_submitted_path = os.path.join(CONT_ROOT, SUBMITTED, username, 'ps1', nb_filename)

    host_autograded_path = os.path.join(EDX_ROOT, course_id, AUTOGRADED, username, unit_id)
    cont_autograded_path = os.path.join("/{}".format(AUTOGRADED))

    host_fb_path = os.path.join(EDX_ROOT, course_id, FEEDBACK, username, unit_id)
    cont_fb_path = os.path.join("/{}".format(FEEDBACK))

    # Set cell timeout config option
    # NOTE: Could expand to set other nbgrader settings here
    config = ["ExecutePreprocessor.timeout = {}".format(cell_timeout)]

    # Create a temp config file and map it into the container
    # NOTE: Could allow for notebook specific settings instead of XBlock wide
    with fm.create_temp_config(config) as filename:
        host_config_path = filename
        cont_config_path = os.path.join("/etc", "jupyter", "nbgrader_config.py")

        cmd = [
            'sudo', '-u', 'jupyter',
            'docker', 'run', '-t',
            '-v', "{}:{}:ro".format(host_source_path, cont_source_path),
            '-v', "{}:{}:ro".format(host_submitted_path, cont_submitted_path),
            '-v', "{}:{}".format(host_autograded_path, cont_autograded_path),
            '-v', "{}:{}".format(host_fb_path, cont_fb_path),
            '-v', "{}:{}:ro".format(host_config_path, cont_config_path)
        ]

        # Optionally disable all network access
        if not allow_net:
            log.info("Disabling network access from docker container")
            cmd += ['--network', 'none']

        cmd += [
            course_id.lower(),
            'python', '/home/jupyter/run_grader.py',
            '--cmd', 'grade',
            '--nbname', nb_filename,
            '--username', username,
        ]

        p = Popen(cmd, stderr=PIPE, stdout=PIPE)
        out, err = p.communicate()

    if p.returncode != 0:
        raise DockerContainerError(err.decode('utf-8'))

    # Get and read results
    nb_name = os.path.splitext(nb_filename)[0]
    result_fn = "{}_results.json".format(nb_name)
    with open(os.path.join(host_autograded_path, result_fn), 'r') as f:
        results = json.load(f)

    if not results['success']:
        raise DockerContainerError(results['err'])

    return {
        'total': results['total_score'],
        'section_scores': results['section_scores'],
        'autograded_err': results['autograded_err']
    }


def update_requirements(course_id, f):
    """Updates the Requirements model file for this course_id"""
    course = normalize_course_id(course_id)

    try:
        # Read the uploaded requirements file as a list of lines
        packages = f.file.read().decode("utf-8").splitlines(True)
    except AttributeError:
        raise ValidationError("No File Attached")

    manager = cm.ContainerManager(course)
    manager.set_requirements(packages)
    manager.build_container()
    manager.cleanup()


def get_requirements(course_id):
    """Returns contents of current requirements.txt for `course`"""
    course = normalize_course_id(course_id)
    manager = cm.ContainerManager(course)
    return manager.get_package_list()


def build_container_if_not_exists(course_id):
    """Builds the docker container if it doesn't exist

    Requires normalized course_id
    """
    manager = cm.ContainerManager(course_id)

    if not manager.container_exists():
        log.info("Container: {} did not exist, building...".format(course_id))
        manager.build_container()
        manager.cleanup()
33.757991
102
0.67361
import json import logging import os import pkg_resources from subprocess import Popen, PIPE from .config import ( RELEASE, SUBMITTED, SOURCE, AUTOGRADED, FEEDBACK, EDX_ROOT, CONT_ROOT ) from . import file_manager as fm from . import container_manager as cm from .exceptions import DockerContainerError, ValidationError log = logging.getLogger(__name__) def normalize_course_id(course_id): return course_id.split(":")[1].replace("+", "_") def normalize_unit_id(unit_id): return "_".join(unit_id.split("@")[1:]).replace("+", "_") def init_new_course(course_id): fm.create_course_dirs(course_id) def generate_student_nb(course_id, unit_id, f): course = normalize_course_id(course_id) unit = normalize_unit_id(unit_id) init_new_course(course) fm.validate_instructor_nb(f) fm.save_instructor_nb(course, unit, f) max_score = _run_assign_container(f.filename, course, unit) return max_score def autograde_notebook(username, course_id, unit_id, f, cell_timeout=15, allow_net=False): course = normalize_course_id(course_id) unit = normalize_unit_id(unit_id) fm.save_student_nb(username, course, unit, f) score = _run_autograde_container(f.filename, course, unit, username, cell_timeout, allow_net) return score def _run_assign_container(nb_filename, course_id, unit_id): build_container_if_not_exists(course_id) host_source_path = os.path.join(EDX_ROOT, course_id, SOURCE, unit_id) cont_source_path = os.path.join(CONT_ROOT, SOURCE, 'ps1') host_release_path = os.path.join(EDX_ROOT, course_id, RELEASE, unit_id) cont_release_path = os.path.join(CONT_ROOT, RELEASE, 'ps1') cmd = [ 'sudo', '-u', 'jupyter', 'docker', 'run', '-t', '-v', "{}:{}".format(host_source_path, cont_source_path), '-v', "{}:{}".format(host_release_path, cont_release_path), course_id.lower(), 'python', '/home/jupyter/run_grader.py', '--cmd', 'assign', '--nbname', nb_filename, ] p = Popen(cmd, stderr=PIPE, stdout=PIPE) out, err = p.communicate() if p.returncode != 0: raise DockerContainerError(err.decode('utf-8')); nb_name = os.path.splitext(nb_filename)[0] result_fn = "{}_results.json".format(nb_name) with open(os.path.join(host_release_path, result_fn), 'r') as f: results = json.load(f) if not results['success']: raise DockerContainerError(results['err']) return results['max_score'] def _run_autograde_container(nb_filename, course_id, unit_id, username, cell_timeout, allow_net): fm.create_autograded_dir(course_id, unit_id, username) fm.create_feedback_dir(course_id, unit_id, username) host_source_path = os.path.join(EDX_ROOT, course_id, SOURCE, unit_id, nb_filename) cont_source_path = os.path.join(CONT_ROOT, SOURCE, 'ps1', nb_filename) host_submitted_path = os.path.join(EDX_ROOT, course_id, SUBMITTED, username, unit_id, nb_filename) cont_submitted_path = os.path.join(CONT_ROOT, SUBMITTED, username, 'ps1', nb_filename) host_autograded_path = os.path.join(EDX_ROOT, course_id, AUTOGRADED, username, unit_id) cont_autograded_path = os.path.join("/{}".format(AUTOGRADED)) host_fb_path = os.path.join(EDX_ROOT, course_id, FEEDBACK, username, unit_id) cont_fb_path = os.path.join("/{}".format(FEEDBACK)) config = ["ExecutePreprocessor.timeout = {}".format(cell_timeout)] with fm.create_temp_config(config) as filename: host_config_path = filename cont_config_path = os.path.join("/etc", "jupyter", "nbgrader_config.py") cmd = [ 'sudo', '-u', 'jupyter', 'docker', 'run', '-t', '-v', "{}:{}:ro".format(host_source_path, cont_source_path), '-v', "{}:{}:ro".format(host_submitted_path, cont_submitted_path), '-v', "{}:{}".format(host_autograded_path, cont_autograded_path), 
            '-v', "{}:{}".format(host_fb_path, cont_fb_path),
            '-v', "{}:{}:ro".format(host_config_path, cont_config_path)
        ]

        if not allow_net:
            log.info("Disabling network access from docker container")
            cmd += ['--network', 'none']

        cmd += [
            course_id.lower(),
            'python', '/home/jupyter/run_grader.py',
            '--cmd', 'grade',
            '--nbname', nb_filename,
            '--username', username,
        ]

        p = Popen(cmd, stderr=PIPE, stdout=PIPE)
        out, err = p.communicate()

    if p.returncode != 0:
        raise DockerContainerError(err.decode('utf-8'))

    nb_name = os.path.splitext(nb_filename)[0]
    result_fn = "{}_results.json".format(nb_name)
    with open(os.path.join(host_autograded_path, result_fn), 'r') as f:
        results = json.load(f)

    if not results['success']:
        raise DockerContainerError(results['err'])

    return {
        'total': results['total_score'],
        'section_scores': results['section_scores'],
        'autograded_err': results['autograded_err']
    }


def update_requirements(course_id, f):
    course = normalize_course_id(course_id)

    try:
        packages = f.file.read().decode("utf-8").splitlines(True)
    except AttributeError:
        raise ValidationError("No File Attached")

    manager = cm.ContainerManager(course)
    manager.set_requirements(packages)
    manager.build_container()
    manager.cleanup()


def get_requirements(course_id):
    course = normalize_course_id(course_id)
    manager = cm.ContainerManager(course)
    return manager.get_package_list()


def build_container_if_not_exists(course_id):
    manager = cm.ContainerManager(course_id)

    if not manager.container_exists():
        log.info("Container: {} did not exist, building...".format(course_id))
        manager.build_container()
        manager.cleanup()
true
true
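The two ID normalizers in the record above are pure string transforms whose expected inputs and outputs are spelled out in their docstrings. A standalone check that reproduces them and asserts the documented examples (the trailing digits in the unit ID are made up for the example):

# Standalone check of the ID normalizers documented above.
def normalize_course_id(course_id):
    # "course-v1:course+name" -> "course_name"
    return course_id.split(":")[1].replace("+", "_")

def normalize_unit_id(unit_id):
    # "block-v1:course+type@vertical+block@digits" -> "vertical_block_digits"
    return "_".join(unit_id.split("@")[1:]).replace("+", "_")

assert normalize_course_id("course-v1:course+name") == "course_name"
assert normalize_unit_id("block-v1:course+type@vertical+block@1234") == "vertical_block_1234"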
1c4353835cf00f4003b23a6e859d571efa53e899
1,129
py
Python
airflow/hooks/webhdfs_hook.py
ChaseKnowlden/airflow
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
[ "Apache-2.0" ]
15,947
2019-01-05T13:51:02.000Z
2022-03-31T23:33:16.000Z
airflow/hooks/webhdfs_hook.py
ChaseKnowlden/airflow
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
[ "Apache-2.0" ]
14,603
2019-01-05T09:43:19.000Z
2022-03-31T23:11:59.000Z
airflow/hooks/webhdfs_hook.py
ChaseKnowlden/airflow
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
[ "Apache-2.0" ]
8,429
2019-01-05T19:45:47.000Z
2022-03-31T22:13:01.000Z
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """This module is deprecated. Please use :mod:`airflow.providers.apache.hdfs.hooks.webhdfs`.""" import warnings from airflow.providers.apache.hdfs.hooks.webhdfs import WebHDFSHook # noqa warnings.warn( "This module is deprecated. Please use `airflow.providers.apache.hdfs.hooks.webhdfs`.", DeprecationWarning, stacklevel=2, )
38.931034
95
0.76705
import warnings from airflow.providers.apache.hdfs.hooks.webhdfs import WebHDFSHook warnings.warn( "This module is deprecated. Please use `airflow.providers.apache.hdfs.hooks.webhdfs`.", DeprecationWarning, stacklevel=2, )
true
true
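
The record above is only a deprecation shim; a minimal sketch of the import change it asks for is shown below. The replacement module path is taken from the warning text itself, but the hook's constructor arguments are not shown in the record, so none are assumed here beyond the defaults.

# Deprecated location (still importable, but triggers the DeprecationWarning above):
# from airflow.hooks.webhdfs_hook import WebHDFSHook

# Current location, as named in the warning message:
from airflow.providers.apache.hdfs.hooks.webhdfs import WebHDFSHook

# Constructor/connection details are outside the record; defaults assumed.
hook = WebHDFSHook()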
1c4353fef35e15660683e78f01919ecd4744808d
574
py
Python
flask_monitoringdashboard/test/core/profiler/util/test_stringhash.py
timgates42/Flask-MonitoringDashboard
0404b05b9a8f1917796e0f314a77a53a754a0b15
[ "MIT" ]
3
2020-07-17T05:37:41.000Z
2021-09-06T19:46:15.000Z
flask_monitoringdashboard/test/core/profiler/util/test_stringhash.py
timgates42/Flask-MonitoringDashboard
0404b05b9a8f1917796e0f314a77a53a754a0b15
[ "MIT" ]
null
null
null
flask_monitoringdashboard/test/core/profiler/util/test_stringhash.py
timgates42/Flask-MonitoringDashboard
0404b05b9a8f1917796e0f314a77a53a754a0b15
[ "MIT" ]
1
2020-11-21T01:25:51.000Z
2020-11-21T01:25:51.000Z
import unittest from flask_monitoringdashboard.core.profiler.util.stringHash import StringHash class TestStringHash(unittest.TestCase): def test_stringhash(self): string_hash = StringHash() self.assertEqual(string_hash.hash('abc'), 0) self.assertEqual(string_hash.hash('def'), 1) self.assertEqual(string_hash.hash('abc'), 0) def test_unhash(self): string_hash = StringHash() self.assertEqual(string_hash.unhash(string_hash.hash('abc')), 'abc') self.assertRaises(ValueError, string_hash.unhash, 'unknown')
31.888889
78
0.709059
import unittest from flask_monitoringdashboard.core.profiler.util.stringHash import StringHash class TestStringHash(unittest.TestCase): def test_stringhash(self): string_hash = StringHash() self.assertEqual(string_hash.hash('abc'), 0) self.assertEqual(string_hash.hash('def'), 1) self.assertEqual(string_hash.hash('abc'), 0) def test_unhash(self): string_hash = StringHash() self.assertEqual(string_hash.unhash(string_hash.hash('abc')), 'abc') self.assertRaises(ValueError, string_hash.unhash, 'unknown')
true
true
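
A minimal sketch, not the library's actual implementation, of a StringHash class that satisfies the contract exercised by the test record above: hash() hands out incrementing integer ids per unique string and is stable on repeat calls, and unhash() reverses the mapping, raising ValueError for unknown ids.

# Sketch only: derived from the behaviour the test asserts, not from the real module.
class StringHash:
    def __init__(self):
        self._string_to_id = {}

    def hash(self, string):
        # Reuse the existing id if the string was seen before; otherwise assign the next int.
        if string not in self._string_to_id:
            self._string_to_id[string] = len(self._string_to_id)
        return self._string_to_id[string]

    def unhash(self, hashed):
        # Reverse lookup; unknown ids raise ValueError, as the test expects.
        for string, value in self._string_to_id.items():
            if value == hashed:
                return string
        raise ValueError('Unknown hash: {}'.format(hashed))


if __name__ == '__main__':
    sh = StringHash()
    assert sh.hash('abc') == 0 and sh.hash('def') == 1 and sh.hash('abc') == 0
    assert sh.unhash(0) == 'abc'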
1c435454ba87053a25944354013eb38a8d2a0d71
1,220
py
Python
resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/matplotlib/numerix/_nc_imports.py
J-E-J-S/aaRS-Pipeline
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
[ "MIT" ]
1
2016-05-08T18:33:12.000Z
2016-05-08T18:33:12.000Z
resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/matplotlib/numerix/_nc_imports.py
J-E-J-S/aaRS-Pipeline
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
[ "MIT" ]
null
null
null
resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/matplotlib/numerix/_nc_imports.py
J-E-J-S/aaRS-Pipeline
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
[ "MIT" ]
null
null
null
from Numeric import array, ravel, reshape, shape, alltrue, sometrue from Numeric import Int8, UInt8, Int16, UInt16, Int32, UInt32, \ Float32, Float64, Complex32, Complex64, Float, Int, Complex from numpy import isnan as _isnan class _TypeNamespace: """Numeric compatible type aliases for use with extension functions.""" Int8 = Int8 UInt8 = UInt8 Int16 = Int16 UInt16 = UInt16 Int32 = Int32 UInt32 = UInt32 Float32 = Float32 Float64 = Float64 Complex32 = Complex32 Complex64 = Complex64 nx = _TypeNamespace() def isnan(a): """y = isnan(x) returns True where x is Not-A-Number""" return reshape(array([_isnan(i) for i in ravel(a)],'b'), shape(a)) def all(a, axis=None): '''Numpy-compatible version of all()''' if axis is None: return alltrue(ravel(a)) else: return alltrue(a, axis) def any(a, axis=None): if axis is None: return sometrue(ravel(a)) else: return sometrue(a, axis) # inf is useful for testing infinities in results of array divisions # (which don't raise exceptions) inf = infty = infinity = Infinity = (array([1])/0.0)[0]
28.372093
75
0.62377
from Numeric import array, ravel, reshape, shape, alltrue, sometrue from Numeric import Int8, UInt8, Int16, UInt16, Int32, UInt32, \ Float32, Float64, Complex32, Complex64, Float, Int, Complex from numpy import isnan as _isnan class _TypeNamespace: Int8 = Int8 UInt8 = UInt8 Int16 = Int16 UInt16 = UInt16 Int32 = Int32 UInt32 = UInt32 Float32 = Float32 Float64 = Float64 Complex32 = Complex32 Complex64 = Complex64 nx = _TypeNamespace() def isnan(a): return reshape(array([_isnan(i) for i in ravel(a)],'b'), shape(a)) def all(a, axis=None): if axis is None: return alltrue(ravel(a)) else: return alltrue(a, axis) def any(a, axis=None): if axis is None: return sometrue(ravel(a)) else: return sometrue(a, axis) inf = infty = infinity = Infinity = (array([1])/0.0)[0]
true
true
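
The record above is a compatibility layer over the legacy Numeric package, which is not installable on modern Python, so the sketch below uses numpy purely to illustrate the behaviour its isnan()/all()/any() wrappers were emulating; it is not the shim itself.

# Illustration of the semantics the wrappers above provide, using numpy instead of Numeric.
import numpy as np

a = np.array([[1.0, 2.0], [3.0, np.nan]])

print(np.isnan(a))                   # elementwise NaN mask, like the ravel/reshape wrapper
print(np.all(a > 0))                 # axis=None reduces over the flattened array
print(np.any(np.isnan(a), axis=0))   # per-axis reduction, like sometrue(a, axis)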
1c4354e052adc0aa774f9c2bc79318c236495b02
403
py
Python
TheHouseSteak/asgi.py
enessfk/TheHouseSteak
21ee01bb1e028dec7d840beb9255191f6b5f4a7e
[ "MIT" ]
1
2021-08-20T14:51:37.000Z
2021-08-20T14:51:37.000Z
TheHouseSteak/asgi.py
enessfk/TheHouseSteak
21ee01bb1e028dec7d840beb9255191f6b5f4a7e
[ "MIT" ]
2
2021-08-20T14:55:50.000Z
2021-08-23T22:09:45.000Z
TheHouseSteak/asgi.py
enessfk/TheHouseSteak
21ee01bb1e028dec7d840beb9255191f6b5f4a7e
[ "MIT" ]
null
null
null
""" ASGI config for TheHouseSteak project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TheHouseSteak.settings') application = get_asgi_application()
23.705882
78
0.791563
import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TheHouseSteak.settings') application = get_asgi_application()
true
true
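
The record above is standard Django ASGI boilerplate; one possible way to serve the exposed application is sketched below. uvicorn is not mentioned in the record and is only one of several ASGI servers that could be used; host and port values are placeholders.

# Hypothetical launcher for the ASGI application defined in the record above.
import uvicorn

if __name__ == "__main__":
    # The import string mirrors the project layout shown in the record.
    uvicorn.run("TheHouseSteak.asgi:application", host="127.0.0.1", port=8000)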
1c43558a0014473f068d0482a8a8fd2d833db805
11,648
py
Python
nere/torch_trainer.py
WangShengguang/NERE
4b8166aa348b9db207bb9a1e1da6eed5d567ae6f
[ "MIT" ]
null
null
null
nere/torch_trainer.py
WangShengguang/NERE
4b8166aa348b9db207bb9a1e1da6eed5d567ae6f
[ "MIT" ]
null
null
null
nere/torch_trainer.py
WangShengguang/NERE
4b8166aa348b9db207bb9a1e1da6eed5d567ae6f
[ "MIT" ]
1
2021-08-21T09:21:21.000Z
2021-08-21T09:21:21.000Z
import logging import os from pathlib import Path import torch import torch.nn as nn from torch.optim import Adam from torch.optim.lr_scheduler import LambdaLR from tqdm import trange from config import Config from nere.data_helper import DataHelper from nere.evaluator import Evaluator class BaseTrainer(object): def __init__(self): self.global_step = 0 def init_model(self, model): model.to(Config.device) # without this there is no error, but it runs in CPU (instead of GPU). if Config.gpu_nums > 1 and Config.multi_gpu: model = torch.nn.DataParallel(model) if Config.full_finetuning: pass # TODO 参考源代码含义 param_optimizer = list(model.named_parameters()) self.no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] self.exclude_params = ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in self.no_decay) and n not in self.exclude_params], 'weight_decay_rate': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in self.no_decay) and n not in self.exclude_params], 'weight_decay_rate': 0.0} ] self.optimizer = Adam(optimizer_grouped_parameters, lr=Config.learning_rate) self.scheduler = LambdaLR(self.optimizer, lr_lambda=lambda epoch: 1 / (1 + 0.05 * epoch)) # model, self.optimizer = amp.initialize(model, self.optimizer, opt_level="O1") # 这里是“欧一”,不是“零一” return model def backfoward(self, loss, model): if Config.gpu_nums > 1 and Config.multi_gpu: loss = loss.mean() # mean() to average on multi-gpu if Config.gradient_accumulation_steps > 1: loss = loss / Config.gradient_accumulation_steps # https://zhuanlan.zhihu.com/p/79887894 # with amp.scale_loss(loss, self.optimizer) as scaled_loss: # scaled_loss.backward() # compute gradients of all variables wrt loss loss.backward(retain_graph=True) if self.global_step % Config.gradient_accumulation_steps == 0: # gradient clipping nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=Config.clip_grad) # performs updates using calculated gradients self.optimizer.step() # clear previous gradients self.optimizer.zero_grad() return loss class Trainer(BaseTrainer): def __init__(self, model_name, task): super().__init__() self.model_name = model_name self.task = task self.model_dir = os.path.join(Config.torch_ckpt_dir, task) self.model_path = os.path.join(self.model_dir, model_name + ".bin") os.makedirs(self.model_dir, exist_ok=True) # evaluate self.evaluator = Evaluator(task, model_name, framework="torch", load_model=False) self.best_val_f1 = 0 self.best_loss = 100 self.patience_counter = 0 # self.data_helper = DataHelper() self.fixed_seq_len = None def get_re_model(self): from nere.re_models import BERTMultitask, BERTSoftmax, ATT_BiLSTM, BiLSTM_ATT, ACNN, BiLSTM vocab_size = len(self.data_helper.tokenizer.vocab) num_ent_tags = len(self.data_helper.ent_tag2id) num_rel_tags = len(self.data_helper.rel_label2id) if self.model_name == 'BERTSoftmax': model = BERTSoftmax.from_pretrained(Config.bert_pretrained_dir, num_labels=num_rel_tags) elif self.model_name == 'BERTMultitask': model = BERTMultitask.from_pretrained(Config.bert_pretrained_dir, num_labels=num_rel_tags) elif self.model_name == "BiLSTM_ATT": model = BiLSTM_ATT(vocab_size, num_ent_tags, num_rel_tags, Config.ent_emb_dim, Config.batch_size) elif 
self.model_name == "ATT_BiLSTM": self.fixed_seq_len = Config.max_sequence_len model = ATT_BiLSTM(vocab_size, num_ent_tags, num_rel_tags, Config.ent_emb_dim, Config.batch_size, self.fixed_seq_len) elif self.model_name == "BiLSTM": model = BiLSTM(vocab_size, num_ent_tags, num_rel_tags) elif self.model_name == "ACNN": model = ACNN(vocab_size, num_ent_tags, num_rel_tags, Config.ent_emb_dim, Config.max_sequence_len) else: raise ValueError("Unknown RE model {}".format(self.model_name)) return model def get_ner_model(self): from nere.ner_models import BERTCRF, BiLSTM, BERTSoftmax, BiLSTM_ATT num_ent_tags = len(self.data_helper.ent_tag2id) vocab_size = len(self.data_helper.tokenizer.vocab) if self.model_name == 'BERTCRF': model = BERTCRF.from_pretrained(Config.bert_pretrained_dir, num_labels=num_ent_tags) elif self.model_name == 'BERTSoftmax': model = BERTSoftmax.from_pretrained(Config.bert_pretrained_dir, num_labels=num_ent_tags) elif self.model_name == "BiLSTM": # self.fixed_seq_len = Config.max_sequence_len model = BiLSTM(vocab_size, num_ent_tags, Config.ent_emb_dim, Config.batch_size) elif self.model_name == "BiLSTM_ATT": self.fixed_seq_len = Config.max_sequence_len model = BiLSTM_ATT(vocab_size, num_ent_tags, Config.ent_emb_dim, Config.batch_size, self.fixed_seq_len) else: raise ValueError("Unknown NER model {}".format(self.model_name)) return model def get_model(self): if self.task == "ner": model = self.get_ner_model() elif self.task == "re": model = self.get_re_model() else: raise ValueError(self.task) if Config.load_pretrain and Path(self.model_path).is_file(): model.load_state_dict(torch.load(self.model_path)) # 断点续训 logging.info("* load model from {}".format(self.model_path)) model = self.init_model(model) return model def save_best_loss_model(self, loss, model): if loss <= self.best_loss: torch.save(model.state_dict(), self.model_path) self.best_loss = loss _log = "loss: {:.3f}, save to :{}".format(loss, self.model_path) logging.info(_log) def evaluate_save(self, model): # with torch.no_grad(): # 适用于测试阶段,不需要反向传播 self.evaluator.set_model(model=model, fixed_seq_len=self.fixed_seq_len) acc, precision, recall, f1 = self.evaluator.test(data_type="valid") # torch.save(model.state_dict(), self.model_path) if f1 >= self.best_val_f1: torch.save(model.state_dict(), self.model_path) logging.info("** - Found new best F1:{:.3f} ,save to model_path: {}".format(f1, self.model_path)) self.best_val_f1 = f1 self.patience_counter = 0 else: self.patience_counter += 1 return acc, precision, recall, f1 def train_step(self, batch_data, model): if self.task == " ner": pred, loss = model(input_ids=batch_data["sents"], attention_mask=batch_data["sents"].gt(0), labels=batch_data["ent_tags"]) elif self.task == "re": pred, loss = model(batch_data, batch_data["rel_labels"]) else: raise ValueError(self.task) return pred, loss def run(self, mode): """ https://www.pytorchtutorial.com/pytorch-note5-save-and-restore-models/#i-2 :param mode: :return: """ model = self.get_model() if mode != "train": self.evaluator.set_model(model=model, fixed_seq_len=self.fixed_seq_len) acc, precision, recall, f1 = self.evaluator.test(data_type=mode) _test_log = "* model: {} {}, test acc: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1: {:.3f}".format( self.task, self.model_name, acc, precision, recall, f1) logging.info(_test_log) print(_test_log) return # evaluate self.evaluator.set_model(model=model, fixed_seq_len=self.fixed_seq_len) acc, precision, recall, f1 = self.evaluator.test(data_type="valid") _log_str = ("acc: {:.3f}, precision: 
{:.3f}, recall: {:.3f}, f1: {:.3f}".format(acc, precision, recall, f1)) print(_log_str) logging.info(_log_str) self.best_val_f1 = f1 # init done logging.info("{}-{} start train , epoch_nums:{}...".format(self.task, self.model_name, Config.max_epoch_nums)) for epoch_num in trange(1, Config.max_epoch_nums + 1, desc="{} {} train epoch num".format(self.task, self.model_name)): model.train() for batch_data in self.data_helper.batch_iter(self.task, data_type="train", batch_size=Config.batch_size, re_type="torch", fixed_seq_len=self.fixed_seq_len): try: if self.task == "ner": pred, loss = model(input_ids=batch_data["sents"], attention_mask=batch_data["sents"].gt(0), labels=batch_data["ent_tags"]) acc, precision, recall, f1 = self.evaluator.evaluate_ner( batch_y_ent_ids=batch_data["ent_tags"].tolist(), batch_pred_ent_ids=pred.tolist()) else: # self.task == "re": pred, loss = model(batch_data, batch_data["rel_labels"]) acc, precision, recall, f1 = self.evaluator.get_re_metrics( y_true=batch_data["rel_labels"].tolist(), y_pred=pred.tolist()) except Exception as e: logging.error(e) continue self.backfoward(loss, model) self.global_step += 1 self.scheduler.step(epoch=epoch_num) # 更新学习率 # if self.global_step % Config.check_step == 0: # logging.info("train {} {} epoch_num: {}, global_step:{} loss: {:.3f}, " # "acc: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1: {:.3f}".format( # self.task, self.model_name, epoch_num, self.global_step, loss.item(), acc, precision, recall, f1)) # print("* global_step:{} loss: {:.3f}".format(self.global_step, loss.item())) # self.save_best_loss_model(loss) acc, precision, recall, f1 = self.evaluate_save(model) logging.info("valid {} {} epoch_num: {}, acc: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1: {:.3f}".format( self.task, self.model_name, epoch_num, acc, precision, recall, f1)) logging.info("epoch_num: {} end .\n".format(epoch_num)) # Early stopping and logging best f1 if self.patience_counter >= Config.patience_num and epoch_num > Config.min_epoch_nums: break logging.info("{}, Best val f1: {:.3f} best loss:{:.3f}".format( self.model_name, self.best_val_f1, self.best_loss))
49.777778
120
0.606542
import logging import os from pathlib import Path import torch import torch.nn as nn from torch.optim import Adam from torch.optim.lr_scheduler import LambdaLR from tqdm import trange from config import Config from nere.data_helper import DataHelper from nere.evaluator import Evaluator class BaseTrainer(object): def __init__(self): self.global_step = 0 def init_model(self, model): model.to(Config.device) if Config.gpu_nums > 1 and Config.multi_gpu: model = torch.nn.DataParallel(model) if Config.full_finetuning: pass param_optimizer = list(model.named_parameters()) self.no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] self.exclude_params = ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in self.no_decay) and n not in self.exclude_params], 'weight_decay_rate': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in self.no_decay) and n not in self.exclude_params], 'weight_decay_rate': 0.0} ] self.optimizer = Adam(optimizer_grouped_parameters, lr=Config.learning_rate) self.scheduler = LambdaLR(self.optimizer, lr_lambda=lambda epoch: 1 / (1 + 0.05 * epoch)) model def backfoward(self, loss, model): if Config.gpu_nums > 1 and Config.multi_gpu: loss = loss.mean() if Config.gradient_accumulation_steps > 1: loss = loss / Config.gradient_accumulation_steps loss.backward(retain_graph=True) if self.global_step % Config.gradient_accumulation_steps == 0: nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=Config.clip_grad) self.optimizer.step() self.optimizer.zero_grad() return loss class Trainer(BaseTrainer): def __init__(self, model_name, task): super().__init__() self.model_name = model_name self.task = task self.model_dir = os.path.join(Config.torch_ckpt_dir, task) self.model_path = os.path.join(self.model_dir, model_name + ".bin") os.makedirs(self.model_dir, exist_ok=True) self.evaluator = Evaluator(task, model_name, framework="torch", load_model=False) self.best_val_f1 = 0 self.best_loss = 100 self.patience_counter = 0 self.data_helper = DataHelper() self.fixed_seq_len = None def get_re_model(self): from nere.re_models import BERTMultitask, BERTSoftmax, ATT_BiLSTM, BiLSTM_ATT, ACNN, BiLSTM vocab_size = len(self.data_helper.tokenizer.vocab) num_ent_tags = len(self.data_helper.ent_tag2id) num_rel_tags = len(self.data_helper.rel_label2id) if self.model_name == 'BERTSoftmax': model = BERTSoftmax.from_pretrained(Config.bert_pretrained_dir, num_labels=num_rel_tags) elif self.model_name == 'BERTMultitask': model = BERTMultitask.from_pretrained(Config.bert_pretrained_dir, num_labels=num_rel_tags) elif self.model_name == "BiLSTM_ATT": model = BiLSTM_ATT(vocab_size, num_ent_tags, num_rel_tags, Config.ent_emb_dim, Config.batch_size) elif self.model_name == "ATT_BiLSTM": self.fixed_seq_len = Config.max_sequence_len model = ATT_BiLSTM(vocab_size, num_ent_tags, num_rel_tags, Config.ent_emb_dim, Config.batch_size, self.fixed_seq_len) elif self.model_name == "BiLSTM": model = BiLSTM(vocab_size, num_ent_tags, num_rel_tags) elif self.model_name == "ACNN": model = ACNN(vocab_size, num_ent_tags, num_rel_tags, Config.ent_emb_dim, Config.max_sequence_len) else: raise ValueError("Unknown RE model {}".format(self.model_name)) return model def 
get_ner_model(self): from nere.ner_models import BERTCRF, BiLSTM, BERTSoftmax, BiLSTM_ATT num_ent_tags = len(self.data_helper.ent_tag2id) vocab_size = len(self.data_helper.tokenizer.vocab) if self.model_name == 'BERTCRF': model = BERTCRF.from_pretrained(Config.bert_pretrained_dir, num_labels=num_ent_tags) elif self.model_name == 'BERTSoftmax': model = BERTSoftmax.from_pretrained(Config.bert_pretrained_dir, num_labels=num_ent_tags) elif self.model_name == "BiLSTM": model = BiLSTM(vocab_size, num_ent_tags, Config.ent_emb_dim, Config.batch_size) elif self.model_name == "BiLSTM_ATT": self.fixed_seq_len = Config.max_sequence_len model = BiLSTM_ATT(vocab_size, num_ent_tags, Config.ent_emb_dim, Config.batch_size, self.fixed_seq_len) else: raise ValueError("Unknown NER model {}".format(self.model_name)) return model def get_model(self): if self.task == "ner": model = self.get_ner_model() elif self.task == "re": model = self.get_re_model() else: raise ValueError(self.task) if Config.load_pretrain and Path(self.model_path).is_file(): model.load_state_dict(torch.load(self.model_path)) logging.info("* load model from {}".format(self.model_path)) model = self.init_model(model) return model def save_best_loss_model(self, loss, model): if loss <= self.best_loss: torch.save(model.state_dict(), self.model_path) self.best_loss = loss _log = "loss: {:.3f}, save to :{}".format(loss, self.model_path) logging.info(_log) def evaluate_save(self, model): luator.set_model(model=model, fixed_seq_len=self.fixed_seq_len) acc, precision, recall, f1 = self.evaluator.test(data_type="valid") if f1 >= self.best_val_f1: torch.save(model.state_dict(), self.model_path) logging.info("** - Found new best F1:{:.3f} ,save to model_path: {}".format(f1, self.model_path)) self.best_val_f1 = f1 self.patience_counter = 0 else: self.patience_counter += 1 return acc, precision, recall, f1 def train_step(self, batch_data, model): if self.task == " ner": pred, loss = model(input_ids=batch_data["sents"], attention_mask=batch_data["sents"].gt(0), labels=batch_data["ent_tags"]) elif self.task == "re": pred, loss = model(batch_data, batch_data["rel_labels"]) else: raise ValueError(self.task) return pred, loss def run(self, mode): model = self.get_model() if mode != "train": self.evaluator.set_model(model=model, fixed_seq_len=self.fixed_seq_len) acc, precision, recall, f1 = self.evaluator.test(data_type=mode) _test_log = "* model: {} {}, test acc: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1: {:.3f}".format( self.task, self.model_name, acc, precision, recall, f1) logging.info(_test_log) print(_test_log) return self.evaluator.set_model(model=model, fixed_seq_len=self.fixed_seq_len) acc, precision, recall, f1 = self.evaluator.test(data_type="valid") _log_str = ("acc: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1: {:.3f}".format(acc, precision, recall, f1)) print(_log_str) logging.info(_log_str) self.best_val_f1 = f1 logging.info("{}-{} start train , epoch_nums:{}...".format(self.task, self.model_name, Config.max_epoch_nums)) for epoch_num in trange(1, Config.max_epoch_nums + 1, desc="{} {} train epoch num".format(self.task, self.model_name)): model.train() for batch_data in self.data_helper.batch_iter(self.task, data_type="train", batch_size=Config.batch_size, re_type="torch", fixed_seq_len=self.fixed_seq_len): try: if self.task == "ner": pred, loss = model(input_ids=batch_data["sents"], attention_mask=batch_data["sents"].gt(0), labels=batch_data["ent_tags"]) acc, precision, recall, f1 = self.evaluator.evaluate_ner( 
batch_y_ent_ids=batch_data["ent_tags"].tolist(), batch_pred_ent_ids=pred.tolist()) else: pred, loss = model(batch_data, batch_data["rel_labels"]) acc, precision, recall, f1 = self.evaluator.get_re_metrics( y_true=batch_data["rel_labels"].tolist(), y_pred=pred.tolist()) except Exception as e: logging.error(e) continue self.backfoward(loss, model) self.global_step += 1 self.scheduler.step(epoch=epoch_num) acc, precision, recall, f1 = self.evaluate_save(model) logging.info("valid {} {} epoch_num: {}, acc: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1: {:.3f}".format( self.task, self.model_name, epoch_num, acc, precision, recall, f1)) logging.info("epoch_num: {} end .\n".format(epoch_num)) if self.patience_counter >= Config.patience_num and epoch_num > Config.min_epoch_nums: break logging.info("{}, Best val f1: {:.3f} best loss:{:.3f}".format( self.model_name, self.best_val_f1, self.best_loss))
true
true
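
A minimal sketch of driving the Trainer defined in the record above. It assumes the NERE project's Config, datasets, and pretrained BERT directory are already in place, which is outside the scope of the record; the model name and task values are ones the class itself dispatches on.

# Sketch only: exact invocation details (CLI, config files) are not shown in the record.
from nere.torch_trainer import Trainer

def main():
    # "ner" and "re" are the two tasks handled by get_model(); 'BERTCRF' is one of
    # the NER model names the class recognises.
    trainer = Trainer(model_name="BERTCRF", task="ner")
    trainer.run(mode="train")   # any other mode (e.g. "test") only evaluates a saved model

if __name__ == "__main__":
    main()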
1c435843fc9b1aa7fb6f57042c4a5dfe1206dd9c
9,063
py
Python
src/tests/core/constructs/test_workspace.py
cdev-framework/cdev-sdk
06cd7b40936ab063d1d8fd1a7d9f6882750e8a96
[ "BSD-3-Clause-Clear" ]
2
2022-02-28T02:51:59.000Z
2022-03-24T15:23:18.000Z
src/tests/core/constructs/test_workspace.py
cdev-framework/cdev-sdk
06cd7b40936ab063d1d8fd1a7d9f6882750e8a96
[ "BSD-3-Clause-Clear" ]
null
null
null
src/tests/core/constructs/test_workspace.py
cdev-framework/cdev-sdk
06cd7b40936ab063d1d8fd1a7d9f6882750e8a96
[ "BSD-3-Clause-Clear" ]
null
null
null
from typing import Dict from core.constructs.backend import Backend from core.constructs.cloud_output import cloud_output_model from core.constructs.workspace import Workspace, Workspace_State, Workspace_Info from core.constructs.resource import ResourceModel from core.constructs.components import ComponentModel from core.constructs.backend import Backend_Configuration from core.constructs.settings import Settings_Info from .. import sample_data def simple_initialize_workspace( workspace: Workspace, settings_info: Settings_Info, backend_info: Backend_Configuration, resource_state_uuid: str, configuration: Dict, ): workspace.set_state(Workspace_State.INITIALIZING) workspace.initialize_workspace( settings_info=settings_info, backend_info=backend_info, resource_state_uuid=resource_state_uuid, configuration=configuration, ) def simple_execute_frontend_workspace(workspace: Workspace, config: Workspace_Info): components = sample_data.simple_components() workspace.set_state(Workspace_State.INITIALIZING) for component in components: workspace.add_component(component) workspace.initialize_workspace(config) workspace.set_state(Workspace_State.INITIALIZED) workspace.set_state(Workspace_State.EXECUTING_FRONTEND) state = workspace.generate_current_state() assert len(state) == len(components) def simple_add_commands(workspace: Workspace, config: Workspace_Info): commands = sample_data.simple_commands() workspace.set_state(Workspace_State.INITIALIZING) for command in commands: workspace.add_command(command) workspace.initialize_workspace(config) workspace.set_state(Workspace_State.INITIALIZED) returned_commands = workspace.get_commands() assert len(commands) + 1 == len(returned_commands) def _get_fake_backend(): class FakeBackend(Backend): def __init__(self, **kwargs) -> None: pass def get_cloud_output_value_by_name( self, resource_state_uuid: str, component_name: str, resource_type: str, resource_name: str, key: str, ): id = ",".join( [resource_state_uuid, component_name, resource_type, resource_name, key] ) data = { ",".join(["1", "comp1", "r", "r1", "cloud_id"]): "val1", ",".join(["1", "comp1", "r", "r2", "cloud_id"]): "val2", ",".join(["1", "comp1", "r", "r3", "cloud_id"]): "val3", ",".join(["1", "comp1", "r", "r4", "cloud_id"]): "val4", ",".join(["1", "comp1", "r", "r5", "cloud_id"]): "val5", } if not id in data: raise Exception return data.get(id) def get_component( self, resource_state_uuid: str, component_name: str ) -> ComponentModel: if not resource_state_uuid == "1": raise Exception if not component_name == "comp1": raise Exception return ComponentModel( name="comp1", hash="0", previous_resolved_cloud_values={ "r;e1": { "r;r1;cloud_id": "val1", "r;r2;cloud_id": "val2", "r;r3;cloud_id": "val3", "r;r4;cloud_id": "val4", "r;r5;cloud_id": "val5", } }, ) return FakeBackend() def simple_evaluate_and_replace_cloud_output(workspace: Workspace): data = [ ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r1", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ( ResourceModel( **{"name": "e1", "ruuid": "r", "hash": "0", "val": "val1"} ), {"r;r1;cloud_id": "val1"}, ), ), ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r2", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ( ResourceModel( **{"name": "e1", "ruuid": "r", "hash": "0", "val": "val2"} ), {"r;r2;cloud_id": "val2"}, ), ), ( ResourceModel( **{ "name": "e1", 
"ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r3", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ( ResourceModel( **{"name": "e1", "ruuid": "r", "hash": "0", "val": "val3"} ), {"r;r3;cloud_id": "val3"}, ), ), ] for input, expected_result in data: rv = workspace.evaluate_and_replace_cloud_output("comp1", input) assert rv == expected_result def simple_evaluate_and_replace_previous_cloud_output(workspace: Workspace): data = [ ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r1", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ResourceModel(**{"name": "e1", "ruuid": "r", "hash": "0", "val": "val1"}), ), ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r2", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ResourceModel(**{"name": "e1", "ruuid": "r", "hash": "0", "val": "val2"}), ), ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r3", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ResourceModel(**{"name": "e1", "ruuid": "r", "hash": "0", "val": "val3"}), ), ] for input, expected_result in data: rv = workspace.evaluate_and_replace_previous_cloud_output("comp1", input) print(rv) assert rv == expected_result ####################### ##### Base Class Tests ####################### # The base class implements some of the generic functionality so test that here def test_evaluate_and_replace_cloud_output(): ws = Workspace() ws.get_backend = _get_fake_backend ws.get_state = lambda: Workspace_State.EXECUTING_BACKEND ws.get_resource_state_uuid = lambda: "1" simple_evaluate_and_replace_cloud_output(ws) def test_evaluate_and_replace_previous_cloud_output(): ws = Workspace() ws.get_backend = _get_fake_backend ws.get_state = lambda: Workspace_State.EXECUTING_BACKEND ws.get_resource_state_uuid = lambda: "1" simple_evaluate_and_replace_previous_cloud_output(ws)
30.618243
88
0.439038
from typing import Dict from core.constructs.backend import Backend from core.constructs.cloud_output import cloud_output_model from core.constructs.workspace import Workspace, Workspace_State, Workspace_Info from core.constructs.resource import ResourceModel from core.constructs.components import ComponentModel from core.constructs.backend import Backend_Configuration from core.constructs.settings import Settings_Info from .. import sample_data def simple_initialize_workspace( workspace: Workspace, settings_info: Settings_Info, backend_info: Backend_Configuration, resource_state_uuid: str, configuration: Dict, ): workspace.set_state(Workspace_State.INITIALIZING) workspace.initialize_workspace( settings_info=settings_info, backend_info=backend_info, resource_state_uuid=resource_state_uuid, configuration=configuration, ) def simple_execute_frontend_workspace(workspace: Workspace, config: Workspace_Info): components = sample_data.simple_components() workspace.set_state(Workspace_State.INITIALIZING) for component in components: workspace.add_component(component) workspace.initialize_workspace(config) workspace.set_state(Workspace_State.INITIALIZED) workspace.set_state(Workspace_State.EXECUTING_FRONTEND) state = workspace.generate_current_state() assert len(state) == len(components) def simple_add_commands(workspace: Workspace, config: Workspace_Info): commands = sample_data.simple_commands() workspace.set_state(Workspace_State.INITIALIZING) for command in commands: workspace.add_command(command) workspace.initialize_workspace(config) workspace.set_state(Workspace_State.INITIALIZED) returned_commands = workspace.get_commands() assert len(commands) + 1 == len(returned_commands) def _get_fake_backend(): class FakeBackend(Backend): def __init__(self, **kwargs) -> None: pass def get_cloud_output_value_by_name( self, resource_state_uuid: str, component_name: str, resource_type: str, resource_name: str, key: str, ): id = ",".join( [resource_state_uuid, component_name, resource_type, resource_name, key] ) data = { ",".join(["1", "comp1", "r", "r1", "cloud_id"]): "val1", ",".join(["1", "comp1", "r", "r2", "cloud_id"]): "val2", ",".join(["1", "comp1", "r", "r3", "cloud_id"]): "val3", ",".join(["1", "comp1", "r", "r4", "cloud_id"]): "val4", ",".join(["1", "comp1", "r", "r5", "cloud_id"]): "val5", } if not id in data: raise Exception return data.get(id) def get_component( self, resource_state_uuid: str, component_name: str ) -> ComponentModel: if not resource_state_uuid == "1": raise Exception if not component_name == "comp1": raise Exception return ComponentModel( name="comp1", hash="0", previous_resolved_cloud_values={ "r;e1": { "r;r1;cloud_id": "val1", "r;r2;cloud_id": "val2", "r;r3;cloud_id": "val3", "r;r4;cloud_id": "val4", "r;r5;cloud_id": "val5", } }, ) return FakeBackend() def simple_evaluate_and_replace_cloud_output(workspace: Workspace): data = [ ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r1", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ( ResourceModel( **{"name": "e1", "ruuid": "r", "hash": "0", "val": "val1"} ), {"r;r1;cloud_id": "val1"}, ), ), ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r2", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ( ResourceModel( **{"name": "e1", "ruuid": "r", "hash": "0", "val": "val2"} ), {"r;r2;cloud_id": "val2"}, ), ), ( ResourceModel( **{ "name": "e1", 
"ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r3", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ( ResourceModel( **{"name": "e1", "ruuid": "r", "hash": "0", "val": "val3"} ), {"r;r3;cloud_id": "val3"}, ), ), ] for input, expected_result in data: rv = workspace.evaluate_and_replace_cloud_output("comp1", input) assert rv == expected_result def simple_evaluate_and_replace_previous_cloud_output(workspace: Workspace): data = [ ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r1", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ResourceModel(**{"name": "e1", "ruuid": "r", "hash": "0", "val": "val1"}), ), ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r2", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ResourceModel(**{"name": "e1", "ruuid": "r", "hash": "0", "val": "val2"}), ), ( ResourceModel( **{ "name": "e1", "ruuid": "r", "hash": "0", "val": cloud_output_model( **{ "name": "r3", "ruuid": "r", "key": "cloud_id", "type": "resource", "id": "cdev_cloud_output", } ), } ), ResourceModel(**{"name": "e1", "ruuid": "r", "hash": "0", "val": "val3"}), ), ] for input, expected_result in data: rv = workspace.evaluate_and_replace_previous_cloud_output("comp1", input) print(rv) assert rv == expected_result
true
true
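
The tests at the end of the record above exercise Workspace.evaluate_and_replace_cloud_output by overwriting its collaborator getters (get_backend, get_state, get_resource_state_uuid) with lambdas and a hand-rolled FakeBackend. Below is a self-contained sketch of that test-double pattern; the classes here are stand-ins, not the cdev framework's own.

# Stand-in classes illustrating the "patch the collaborator getter" pattern used above.
class Backend:
    def get_cloud_output_value_by_name(self, name, key):
        raise NotImplementedError

class Workspace:
    def resolve(self, name, key):
        # Delegates to whatever backend the getter returns at call time.
        return self.get_backend().get_cloud_output_value_by_name(name, key)

def test_resolve_with_fake_backend():
    class FakeBackend(Backend):
        def get_cloud_output_value_by_name(self, name, key):
            return {"r1.cloud_id": "val1"}[f"{name}.{key}"]

    ws = Workspace()
    ws.get_backend = lambda: FakeBackend()   # patch the collaborator getter on the instance
    assert ws.resolve("r1", "cloud_id") == "val1"

test_resolve_with_fake_backend()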
1c43596eac0d0b0ab25adde8d4a9d380a9e9bd82
55,039
py
Python
kraken/ketos.py
jpmjpmjpm/kraken
8ceae75230f5e47bc5d75f8d68fb41b0532cd0ca
[ "Apache-2.0" ]
null
null
null
kraken/ketos.py
jpmjpmjpm/kraken
8ceae75230f5e47bc5d75f8d68fb41b0532cd0ca
[ "Apache-2.0" ]
null
null
null
kraken/ketos.py
jpmjpmjpm/kraken
8ceae75230f5e47bc5d75f8d68fb41b0532cd0ca
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright 2015 Benjamin Kiessling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. import os import time import json import glob import uuid import click import logging import unicodedata from click import open_file from bidi.algorithm import get_display from typing import cast, Set, List, IO, Any from collections import defaultdict from kraken.lib import log from kraken.lib.exceptions import KrakenCairoSurfaceException from kraken.lib.exceptions import KrakenEncodeException from kraken.lib.exceptions import KrakenInputException from kraken.lib.default_specs import (SEGMENTATION_HYPER_PARAMS, RECOGNITION_HYPER_PARAMS, SEGMENTATION_SPEC, RECOGNITION_SPEC) APP_NAME = 'kraken' logging.captureWarnings(True) logger = logging.getLogger('kraken') def message(msg, **styles): if logger.getEffectiveLevel() >= 30: click.secho(msg, **styles) @click.group() @click.version_option() @click.option('-v', '--verbose', default=0, count=True) @click.option('-s', '--seed', default=None, type=click.INT, help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to ' 'ensure reproducable random splits of data') def cli(verbose, seed): if seed: import numpy.random numpy.random.seed(seed) from torch import manual_seed manual_seed(seed) log.set_logger(logger, level=30-min(10*verbose, 20)) def _validate_manifests(ctx, param, value): images = [] for manifest in value: for entry in manifest.readlines(): im_p = entry.rstrip('\r\n') if os.path.isfile(im_p): images.append(im_p) else: logger.warning('Invalid entry "{}" in {}'.format(im_p, manifest.name)) return images def _expand_gt(ctx, param, value): images = [] for expression in value: images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)]) return images def _validate_merging(ctx, param, value): """ Maps baseline/region merging to a dict of merge structures. """ if not value: return None merge_dict = {} # type: Dict[str, str] try: for m in value: k, v = m.split(':') merge_dict[v] = k # type: ignore except Exception: raise click.BadParameter('Mappings must be in format target:src') return merge_dict @cli.command('segtrain') @click.pass_context @click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file') @click.option('-s', '--spec', show_default=True, default=SEGMENTATION_SPEC, help='VGSL spec of the baseline labeling network') @click.option('--line-width', show_default=True, default=SEGMENTATION_HYPER_PARAMS['line_width'], help='The height of each baseline in the target after scaling') @click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training') @click.option('-F', '--freq', show_default=True, default=SEGMENTATION_HYPER_PARAMS['freq'], type=click.FLOAT, help='Model saving and report generation frequency in epochs during training') @click.option('-q', '--quit', show_default=True, default=SEGMENTATION_HYPER_PARAMS['quit'], type=click.Choice(['early', 'dumb']), help='Stop condition for training. 
Set to `early` for early stopping or `dumb` for fixed number of epochs') @click.option('-N', '--epochs', show_default=True, default=SEGMENTATION_HYPER_PARAMS['epochs'], help='Number of epochs to train for') @click.option('--lag', show_default=True, default=SEGMENTATION_HYPER_PARAMS['lag'], help='Number of evaluations (--report frequence) to wait before stopping training without improvement') @click.option('--min-delta', show_default=True, default=SEGMENTATION_HYPER_PARAMS['min_delta'], type=click.FLOAT, help='Minimum improvement between epochs to reset early stopping. By default it scales the delta by the best loss') @click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)') @click.option('--optimizer', show_default=True, default=SEGMENTATION_HYPER_PARAMS['optimizer'], type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer') @click.option('-r', '--lrate', show_default=True, default=SEGMENTATION_HYPER_PARAMS['lrate'], help='Learning rate') @click.option('-m', '--momentum', show_default=True, default=SEGMENTATION_HYPER_PARAMS['momentum'], help='Momentum') @click.option('-w', '--weight-decay', show_default=True, default=SEGMENTATION_HYPER_PARAMS['weight_decay'], help='Weight decay') @click.option('--schedule', show_default=True, type=click.Choice(['constant', '1cycle', 'exponential', 'cosine', 'step', 'reduceonplateau']), default=RECOGNITION_HYPER_PARAMS['schedule'], help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--step-size` option.') @click.option('-g', '--gamma', show_default=True, default=RECOGNITION_HYPER_PARAMS['gamma'], help='Decay factor for exponential, step, and reduceonplateau learning rate schedules') @click.option('-ss', '--step-size', show_default=True, default=RECOGNITION_HYPER_PARAMS['step_size'], help='Number of validation runs between learning rate decay for exponential and step LR schedules') @click.option('--sched-patience', show_default=True, default=RECOGNITION_HYPER_PARAMS['rop_patience'], help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.') @click.option('--cos-max', show_default=True, default=RECOGNITION_HYPER_PARAMS['cos_t_max'], help='Epoch of minimal learning rate for cosine LR scheduler.') @click.option('-p', '--partition', show_default=True, default=0.9, help='Ground truth data partition ratio between train/validation set') @click.option('-t', '--training-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with additional paths to training data') @click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with paths to evaluation data. Overrides the `-p` parameter') @click.option('--threads', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.') @click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False, help='When loading an existing model, retrieve hyper-parameters from the model') @click.option('--force-binarization/--no-binarization', show_default=True, default=False, help='Forces input images to be binary, otherwise ' 'the appropriate color format will be auto-determined through the ' 'network specification. 
Will be ignored in `path` mode.') @click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='xml', help='Sets the training data format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both baselines and a ' 'link to source images. In `path` mode arguments are image files ' 'sharing a prefix up to the last extension with JSON `.path` files ' 'containing the baseline information.') @click.option('--suppress-regions/--no-suppress-regions', show_default=True, default=False, help='Disables region segmentation training.') @click.option('--suppress-baselines/--no-suppress-baselines', show_default=True, default=False, help='Disables baseline segmentation training.') @click.option('-vr', '--valid-regions', show_default=True, default=None, multiple=True, help='Valid region types in training data. May be used multiple times.') @click.option('-vb', '--valid-baselines', show_default=True, default=None, multiple=True, help='Valid baseline types in training data. May be used multiple times.') @click.option('-mr', '--merge-regions', show_default=True, default=None, help='Region merge mapping. One or more mappings of the form `$target:$src` where $src is merged into $target.', multiple=True, callback=_validate_merging) @click.option('-mb', '--merge-baselines', show_default=True, default=None, help='Baseline type merge mapping. Same syntax as `--merge-regions`', multiple=True, callback=_validate_merging) @click.option('-br', '--bounding-regions', show_default=True, default=None, multiple=True, help='Regions treated as boundaries for polygonization purposes. May be used multiple times.') @click.option('--augment/--no-augment', show_default=True, default=SEGMENTATION_HYPER_PARAMS['augment'], help='Enable image augmentation') @click.option('--resize', show_default=True, default='fail', type=click.Choice(['add', 'both', 'fail']), help='Output layer resizing option. If set to `add` new classes will be ' 'added, `both` will set the layer to match exactly ' 'the training data classes, `fail` will abort if training data and model ' 'classes do not match.') @click.option('-tl', '--topline', 'topline', show_default=True, flag_value='topline', help='Switch for the baseline location in the scripts. ' 'Set to topline if the data is annotated with a hanging baseline, as is ' 'common with Hebrew, Bengali, Devanagari, etc. 
Set to ' ' centerline for scripts annotated with a central line.') @click.option('-cl', '--centerline', 'topline', flag_value='centerline') @click.option('-bl', '--baseline', 'topline', flag_value='baseline', default='baseline') @click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False)) def segtrain(ctx, output, spec, line_width, load, freq, quit, epochs, lag, min_delta, device, optimizer, lrate, momentum, weight_decay, schedule, gamma, step_size, sched_patience, cos_max, partition, training_files, evaluation_files, threads, load_hyper_parameters, force_binarization, format_type, suppress_regions, suppress_baselines, valid_regions, valid_baselines, merge_regions, merge_baselines, bounding_regions, augment, resize, topline, ground_truth): """ Trains a baseline labeling model for layout analysis """ import re import torch import shutil import numpy as np from kraken.lib.train import KrakenTrainer if resize != 'fail' and not load: raise click.BadOptionUsage('resize', 'resize option requires loading an existing model') logger.info('Building ground truth set from {} document images'.format(len(ground_truth) + len(training_files))) # load model if given. if a new model has to be created we need to do that # after data set initialization, otherwise to output size is still unknown. nn = None # populate hyperparameters from command line args hyper_params = SEGMENTATION_HYPER_PARAMS.copy() hyper_params.update({'line_width': line_width, 'freq': freq, 'quit': quit, 'epochs': epochs, 'lag': lag, 'min_delta': min_delta, 'optimizer': optimizer, 'lrate': lrate, 'momentum': momentum, 'weight_decay': weight_decay, 'schedule': schedule, 'augment': augment, 'gamma': gamma, 'step_size': step_size, 'rop_patience': sched_patience, 'cos_t_max': cos_max }) # disable automatic partition when given evaluation set explicitly if evaluation_files: partition = 1 ground_truth = list(ground_truth) # merge training_files into ground_truth list if training_files: ground_truth.extend(training_files) if len(ground_truth) == 0: raise click.UsageError('No training data was provided to the train command. 
Use `-t` or the `ground_truth` argument.') np.random.shuffle(ground_truth) training_files = ground_truth[:int(len(ground_truth) * partition)] if evaluation_files: logger.debug(f'Using {len(evaluation_files)} lines/files from explicit eval set') else: evaluation_files = ground_truth[int(len(ground_truth) * partition):] logger.debug(f'Taking {len(evaluation_files)} lines/files from training set for evaluation') def _init_progressbar(label, length): if 'bar' in ctx.meta: ctx.meta['bar'].__exit__(None, None, None) ctx.meta['bar'] = log.progressbar(label=label, length=length, show_pos=True) ctx.meta['bar'].__enter__() return lambda: ctx.meta['bar'].update(1) topline = {'topline': True, 'baseline': False, 'centerline': None}[topline] trainer = KrakenTrainer.segmentation_train_gen(hyper_params, message=message, progress_callback=_init_progressbar, output=output, spec=spec, load=load, device=device, training_data=training_files, evaluation_data=evaluation_files, threads=threads, load_hyper_parameters=load_hyper_parameters, force_binarization=force_binarization, format_type=format_type, suppress_regions=suppress_regions, suppress_baselines=suppress_baselines, valid_regions=valid_regions, valid_baselines=valid_baselines, merge_regions=merge_regions, merge_baselines=merge_baselines, bounding_regions=bounding_regions, augment=augment, resize=resize, topline=topline) with log.progressbar(label='stage {}/{}'.format(1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞'), length=trainer.event_it, show_pos=True) as bar: def _draw_progressbar(): bar.update(1) def _print_eval(epoch, accuracy, mean_acc, mean_iu, freq_iu, **kwargs): message('Accuracy report ({}) mean_iu: {:0.4f} freq_iu: {:0.4f} mean_acc: {:0.4f} accuracy: {:0.4f}'.format(epoch, mean_iu, freq_iu, mean_acc, accuracy)) # reset progress bar bar.label = 'stage {}/{}'.format(epoch+1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞') bar.pos = 0 bar.finished = False bar.start = bar.last_eta = time.time() trainer.run(_print_eval, _draw_progressbar) if quit == 'early': message('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss)) logger.info('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss)) shutil.copy(f'{output}_{trainer.stopper.best_epoch}.mlmodel', f'{output}_best.mlmodel') @cli.command('train') @click.pass_context @click.option('-B', '--batch-size', show_default=True, type=click.INT, default=RECOGNITION_HYPER_PARAMS['batch_size'], help='batch sample size') @click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right ' 'padding around lines') @click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file') @click.option('-s', '--spec', show_default=True, default=RECOGNITION_SPEC, help='VGSL spec of the network to train. CTC layer will be added automatically.') @click.option('-a', '--append', show_default=True, default=None, type=click.INT, help='Removes layers before argument and then appends spec. 
Only works when loading an existing model') @click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training') @click.option('-F', '--freq', show_default=True, default=RECOGNITION_HYPER_PARAMS['freq'], type=click.FLOAT, help='Model saving and report generation frequency in epochs during training') @click.option('-q', '--quit', show_default=True, default=RECOGNITION_HYPER_PARAMS['quit'], type=click.Choice(['early', 'dumb']), help='Stop condition for training. Set to `early` for early stooping or `dumb` for fixed number of epochs') @click.option('-N', '--epochs', show_default=True, default=RECOGNITION_HYPER_PARAMS['epochs'], help='Number of epochs to train for') @click.option('--lag', show_default=True, default=RECOGNITION_HYPER_PARAMS['lag'], help='Number of evaluations (--report frequence) to wait before stopping training without improvement') @click.option('--min-delta', show_default=True, default=RECOGNITION_HYPER_PARAMS['min_delta'], type=click.FLOAT, help='Minimum improvement between epochs to reset early stopping. Default is scales the delta by the best loss') @click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)') @click.option('--optimizer', show_default=True, default=RECOGNITION_HYPER_PARAMS['optimizer'], type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer') @click.option('-r', '--lrate', show_default=True, default=RECOGNITION_HYPER_PARAMS['lrate'], help='Learning rate') @click.option('-m', '--momentum', show_default=True, default=RECOGNITION_HYPER_PARAMS['momentum'], help='Momentum') @click.option('-w', '--weight-decay', show_default=True, default=RECOGNITION_HYPER_PARAMS['weight_decay'], help='Weight decay') @click.option('--schedule', show_default=True, type=click.Choice(['constant', '1cycle', 'exponential', 'cosine', 'step', 'reduceonplateau']), default=RECOGNITION_HYPER_PARAMS['schedule'], help='Set learning rate scheduler. 
For 1cycle, cycle length is determined by the `--epoch` option.') @click.option('-g', '--gamma', show_default=True, default=RECOGNITION_HYPER_PARAMS['gamma'], help='Decay factor for exponential, step, and reduceonplateau learning rate schedules') @click.option('-ss', '--step-size', show_default=True, default=RECOGNITION_HYPER_PARAMS['step_size'], help='Number of validation runs between learning rate decay for exponential and step LR schedules') @click.option('--sched-patience', show_default=True, default=RECOGNITION_HYPER_PARAMS['rop_patience'], help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.') @click.option('--cos-max', show_default=True, default=RECOGNITION_HYPER_PARAMS['cos_t_max'], help='Epoch of minimal learning rate for cosine LR scheduler.') @click.option('-p', '--partition', show_default=True, default=0.9, help='Ground truth data partition ratio between train/validation set') @click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=RECOGNITION_HYPER_PARAMS['normalization'], help='Ground truth normalization') @click.option('-n', '--normalize-whitespace/--no-normalize-whitespace', show_default=True, default=RECOGNITION_HYPER_PARAMS['normalize_whitespace'], help='Normalizes unicode whitespace') @click.option('-c', '--codec', show_default=True, default=None, type=click.File(mode='r', lazy=True), help='Load a codec JSON definition (invalid if loading existing model)') @click.option('--resize', show_default=True, default='fail', type=click.Choice(['add', 'both', 'fail']), help='Codec/output layer resizing option. If set to `add` code ' 'points will be added, `both` will set the layer to match exactly ' 'the training data, `fail` will abort if training data and model ' 'codec do not match.') @click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order') @click.option('--base-dir', show_default=True, default='auto', type=click.Choice(['L', 'R', 'auto']), help='Set base text ' 'direction. This should be set to the direction used during the ' 'creation of the training data. If set to `auto` it will be ' 'overridden by any explicit value given in the input files.') @click.option('-t', '--training-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with additional paths to training data') @click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with paths to evaluation data. Overrides the `-p` parameter') @click.option('--preload/--no-preload', show_default=True, default=None, help='Hard enable/disable for training data preloading') @click.option('--threads', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.') @click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False, help='When loading an existing model, retrieve hyperparameters from the model') @click.option('--repolygonize/--no-repolygonize', show_default=True, default=False, help='Repolygonizes line data in ALTO/PageXML ' 'files. This ensures that the trained model is compatible with the ' 'segmenter in kraken even if the original image files either do ' 'not contain anything but transcriptions and baseline information ' 'or the polygon data was created using a different method. 
Will ' 'be ignored in `path` mode. Note that this option will be slow ' 'and will not scale input images to the same size as the segmenter ' 'does.') @click.option('--force-binarization/--no-binarization', show_default=True, default=False, help='Forces input images to be binary, otherwise ' 'the appropriate color format will be auto-determined through the ' 'network specification. Will be ignored in `path` mode.') @click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='path', help='Sets the training data format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both line definitions and a ' 'link to source images. In `path` mode arguments are image files ' 'sharing a prefix up to the last extension with `.gt.txt` text files ' 'containing the transcription.') @click.option('--augment/--no-augment', show_default=True, default=RECOGNITION_HYPER_PARAMS['augment'], help='Enable image augmentation') @click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False)) def train(ctx, batch_size, pad, output, spec, append, load, freq, quit, epochs, lag, min_delta, device, optimizer, lrate, momentum, weight_decay, schedule, gamma, step_size, sched_patience, cos_max, partition, normalization, normalize_whitespace, codec, resize, reorder, base_dir, training_files, evaluation_files, preload, threads, load_hyper_parameters, repolygonize, force_binarization, format_type, augment, ground_truth): """ Trains a model from image-text pairs. """ if not load and append: raise click.BadOptionUsage('append', 'append option requires loading an existing model') if resize != 'fail' and not load: raise click.BadOptionUsage('resize', 'resize option requires loading an existing model') import shutil import numpy as np from kraken.lib.train import KrakenTrainer hyper_params = RECOGNITION_HYPER_PARAMS.copy() hyper_params.update({'freq': freq, 'pad': pad, 'batch_size': batch_size, 'quit': quit, 'epochs': epochs, 'lag': lag, 'min_delta': min_delta, 'optimizer': optimizer, 'lrate': lrate, 'momentum': momentum, 'weight_decay': weight_decay, 'schedule': schedule, 'gamma': gamma, 'step_size': step_size, 'rop_patience': sched_patience, 'cos_t_max': cos_max, 'normalization': normalization, 'normalize_whitespace': normalize_whitespace, 'augment': augment }) # disable automatic partition when given evaluation set explicitly if evaluation_files: partition = 1 ground_truth = list(ground_truth) # merge training_files into ground_truth list if training_files: ground_truth.extend(training_files) if len(ground_truth) == 0: raise click.UsageError('No training data was provided to the train command. 
Use `-t` or the `ground_truth` argument.') if reorder and base_dir != 'auto': reorder = base_dir np.random.shuffle(ground_truth) training_files = ground_truth[:int(len(ground_truth) * partition)] if evaluation_files: logger.debug(f'Using {len(evaluation_files)} lines/files from explicit eval set') else: evaluation_files = ground_truth[int(len(ground_truth) * partition):] logger.debug(f'Taking {len(evaluation_files)} lines/files from training set for evaluation') def _init_progressbar(label, length): if 'bar' in ctx.meta: ctx.meta['bar'].__exit__(None, None, None) ctx.meta['bar'] = log.progressbar(label=label, length=length, show_pos=True) ctx.meta['bar'].__enter__() return lambda: ctx.meta['bar'].update(1) trainer = KrakenTrainer.recognition_train_gen(hyper_params, message=message, progress_callback=_init_progressbar, output=output, spec=spec, append=append, load=load, device=device, reorder=reorder, training_data=training_files, evaluation_data=evaluation_files, preload=preload, threads=threads, load_hyper_parameters=load_hyper_parameters, repolygonize=repolygonize, force_binarization=force_binarization, format_type=format_type, codec=codec, resize=resize, augment=augment) with log.progressbar(label='stage {}/{}'.format(1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞'), length=trainer.event_it, show_pos=True) as bar: def _draw_progressbar(): bar.update(1) def _print_eval(epoch, accuracy, chars, error, **kwargs): message('Accuracy report ({}) {:0.4f} {} {}'.format(epoch, accuracy, chars, error)) # reset progress bar bar.label = 'stage {}/{}'.format(epoch+1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞') bar.pos = 0 bar.finished = False bar.start = bar.last_eta = time.time() trainer.run(_print_eval, _draw_progressbar) if quit == 'early': message('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss)) logger.info('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss)) shutil.copy(f'{output}_{trainer.stopper.best_epoch}.mlmodel', f'{output}_best.mlmodel') @cli.command('test') @click.pass_context @click.option('-B', '--batch-size', show_default=True, type=click.INT, default=RECOGNITION_HYPER_PARAMS['batch_size'], help='Batch sample size') @click.option('-m', '--model', show_default=True, type=click.Path(exists=True, readable=True), multiple=True, help='Model(s) to evaluate') @click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True, callback=_validate_manifests, type=click.File(mode='r', lazy=True), help='File(s) with paths to evaluation data.') @click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)') @click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right ' 'padding around lines') @click.option('--threads', show_default=True, default=1, help='Number of OpenMP threads when running on CPU.') @click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order') @click.option('--base-dir', show_default=True, default='auto', type=click.Choice(['L', 'R', 'auto']), help='Set base text ' 'direction. This should be set to the direction used during the ' 'creation of the training data. 
If set to `auto` it will be ' 'overridden by any explicit value given in the input files.') @click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None, help='Ground truth normalization') @click.option('-n', '--normalize-whitespace/--no-normalize-whitespace', show_default=True, default=True, help='Normalizes unicode whitespace') @click.option('--repolygonize/--no-repolygonize', show_default=True, default=False, help='Repolygonizes line data in ALTO/PageXML ' 'files. This ensures that the trained model is compatible with the ' 'segmenter in kraken even if the original image files either do ' 'not contain anything but transcriptions and baseline information ' 'or the polygon data was created using a different method. Will ' 'be ignored in `path` mode. Note, that this option will be slow ' 'and will not scale input images to the same size as the segmenter ' 'does.') @click.option('--force-binarization/--no-binarization', show_default=True, default=False, help='Forces input images to be binary, otherwise ' 'the appropriate color format will be auto-determined through the ' 'network specification. Will be ignored in `path` mode.') @click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='path', help='Sets the training data format. In ALTO and PageXML mode all ' 'data is extracted from xml files containing both baselines and a ' 'link to source images. In `path` mode arguments are image files ' 'sharing a prefix up to the last extension with JSON `.path` files ' 'containing the baseline information.') @click.argument('test_set', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False)) def test(ctx, batch_size, model, evaluation_files, device, pad, threads, reorder, base_dir, normalization, normalize_whitespace, repolygonize, force_binarization, format_type, test_set): """ Evaluate on a test set. """ if not model: raise click.UsageError('No model to evaluate given.') import regex import unicodedata import numpy as np from PIL import Image from torch.utils.data import DataLoader from kraken.serialization import render_report from kraken.lib import models from kraken.lib.dataset import global_align, compute_confusions, preparse_xml_data, PolygonGTDataset, GroundTruthDataset, generate_input_transforms, collate_sequences logger.info('Building test set from {} line images'.format(len(test_set) + len(evaluation_files))) nn = {} for p in model: message('Loading model {}\t'.format(p), nl=False) nn[p] = models.load_any(p) message('\u2713', fg='green') test_set = list(test_set) # set number of OpenMP threads next(iter(nn.values())).nn.set_num_threads(1) if evaluation_files: test_set.extend(evaluation_files) if len(test_set) == 0: raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.') if format_type != 'path': if repolygonize: message('Repolygonizing data') test_set = preparse_xml_data(test_set, format_type, repolygonize) valid_norm = False DatasetClass = PolygonGTDataset else: DatasetClass = GroundTruthDataset t = [] if force_binarization: logger.warning('Forced binarization enabled in `path` mode. Will be ignored.') force_binarization = False if repolygonize: logger.warning('Repolygonization enabled in `path` mode. Will be ignored.') test_set = [{'image': img} for img in test_set] valid_norm = True if len(test_set) == 0: raise click.UsageError('No evaluation data was provided to the test command. 
Use `-e` or the `test_set` argument.') if reorder and base_dir != 'auto': reorder = base_dir acc_list = [] for p, net in nn.items(): algn_gt: List[str] = [] algn_pred: List[str] = [] chars = 0 error = 0 message('Evaluating {}'.format(p)) logger.info('Evaluating {}'.format(p)) batch, channels, height, width = net.nn.input ts = generate_input_transforms(batch, height, width, channels, pad, valid_norm, force_binarization) ds = DatasetClass(normalization=normalization, whitespace_normalization=normalize_whitespace, reorder=reorder, im_transforms=ts, preload=False) for line in test_set: try: ds.add(**line) except KrakenInputException as e: logger.info(e) # don't encode validation set as the alphabets may not match causing encoding failures ds.no_encode() ds_loader = DataLoader(ds, batch_size=batch_size, num_workers=threads, pin_memory=True, collate_fn=collate_sequences) with log.progressbar(ds_loader, label='Evaluating') as bar: for batch in bar: im = batch['image'] text = batch['target'] lens = batch['seq_lens'] try: pred = net.predict_string(im, lens) for x, y in zip(pred, text): chars += len(y) c, algn1, algn2 = global_align(y, x) algn_gt.extend(algn1) algn_pred.extend(algn2) error += c except FileNotFoundError as e: logger.warning('{} {}. Skipping.'.format(e.strerror, e.filename)) except KrakenInputException as e: logger.warning(str(e)) acc_list.append((chars-error)/chars) confusions, scripts, ins, dels, subs = compute_confusions(algn_gt, algn_pred) rep = render_report(p, chars, error, confusions, scripts, ins, dels, subs) logger.info(rep) message(rep) logger.info('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100)) message('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100)) @cli.command('extract') @click.pass_context @click.option('-b', '--binarize/--no-binarize', show_default=True, default=True, help='Binarize color/grayscale images') @click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None, help='Normalize ground truth') @click.option('-s', '--normalize-whitespace/--no-normalize-whitespace', show_default=True, default=True, help='Normalizes unicode whitespace') @click.option('-n', '--reorder/--no-reorder', default=False, show_default=True, help='Reorder transcribed lines to display order') @click.option('-r', '--rotate/--no-rotate', default=True, show_default=True, help='Skip rotation of vertical lines') @click.option('-o', '--output', type=click.Path(), default='training', show_default=True, help='Output directory') @click.option('--format', default='{idx:06d}', show_default=True, help='Format for extractor output. valid fields are `src` (source file), `idx` (line number), and `uuid` (v4 uuid)') @click.argument('transcriptions', nargs=-1, type=click.File(lazy=True)) def extract(ctx, binarize, normalization, normalize_whitespace, reorder, rotate, output, format, transcriptions): """ Extracts image-text pairs from a transcription environment created using ``ketos transcribe``. 
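
    A typical invocation looks like the following (file and directory names
    here are purely illustrative):

        ketos extract -o training -u NFD transcription.html

    which writes numbered ``.png``/``.gt.txt`` line pairs plus a
    ``manifest.txt`` listing into the output directory.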
""" import regex import base64 from io import BytesIO from PIL import Image from lxml import html, etree from kraken import binarization try: os.mkdir(output) except Exception: pass text_transforms = [] if normalization: text_transforms.append(lambda x: unicodedata.normalize(normalization, x)) if normalize_whitespace: text_transforms.append(lambda x: regex.sub(r'\s', ' ', x)) if reorder: text_transforms.append(get_display) idx = 0 manifest = [] with log.progressbar(transcriptions, label='Reading transcriptions') as bar: for fp in bar: logger.info('Reading {}'.format(fp.name)) doc = html.parse(fp) etree.strip_tags(doc, etree.Comment) td = doc.find(".//meta[@itemprop='text_direction']") if td is None: td = 'horizontal-lr' else: td = td.attrib['content'] im = None dest_dict = {'output': output, 'idx': 0, 'src': fp.name, 'uuid': str(uuid.uuid4())} for section in doc.xpath('//section'): img = section.xpath('.//img')[0].get('src') fd = BytesIO(base64.b64decode(img.split(',')[1])) im = Image.open(fd) if not im: logger.info('Skipping {} because image not found'.format(fp.name)) break if binarize: im = binarization.nlbin(im) for line in section.iter('li'): if line.get('contenteditable') and (not u''.join(line.itertext()).isspace() and u''.join(line.itertext())): dest_dict['idx'] = idx dest_dict['uuid'] = str(uuid.uuid4()) logger.debug('Writing line {:06d}'.format(idx)) l_img = im.crop([int(x) for x in line.get('data-bbox').split(',')]) if rotate and td.startswith('vertical'): im.rotate(90, expand=True) l_img.save(('{output}/' + format + '.png').format(**dest_dict)) manifest.append((format + '.png').format(**dest_dict)) text = u''.join(line.itertext()).strip() for func in text_transforms: text = func(text) with open(('{output}/' + format + '.gt.txt').format(**dest_dict), 'wb') as t: t.write(text.encode('utf-8')) idx += 1 logger.info('Extracted {} lines'.format(idx)) with open('{}/manifest.txt'.format(output), 'w') as fp: fp.write('\n'.join(manifest)) @cli.command('transcribe') @click.pass_context @click.option('-d', '--text-direction', default='horizontal-lr', type=click.Choice(['horizontal-lr', 'horizontal-rl', 'vertical-lr', 'vertical-rl']), help='Sets principal text direction', show_default=True) @click.option('--scale', default=None, type=click.FLOAT) @click.option('--bw/--orig', default=True, show_default=True, help="Put nonbinarized images in output") @click.option('-m', '--maxcolseps', default=2, type=click.INT, show_default=True) @click.option('-b/-w', '--black_colseps/--white_colseps', default=False, show_default=True) @click.option('-f', '--font', default='', help='Font family to use') @click.option('-fs', '--font-style', default=None, help='Font style to use') @click.option('-p', '--prefill', default=None, help='Use given model for prefill mode.') @click.option('--pad', show_default=True, type=(int, int), default=(0, 0), help='Left and right padding around lines') @click.option('-l', '--lines', type=click.Path(exists=True), show_default=True, help='JSON file containing line coordinates') @click.option('-o', '--output', type=click.File(mode='wb'), default='transcription.html', help='Output file', show_default=True) @click.argument('images', nargs=-1, type=click.File(mode='rb', lazy=True)) def transcription(ctx, text_direction, scale, bw, maxcolseps, black_colseps, font, font_style, prefill, pad, lines, output, images): """ Creates transcription environments for ground truth generation. 
""" from PIL import Image from kraken import rpred from kraken import pageseg from kraken import transcribe from kraken import binarization from kraken.lib import models ti = transcribe.TranscriptionInterface(font, font_style) if len(images) > 1 and lines: raise click.UsageError('--lines option is incompatible with multiple image files') if prefill: logger.info('Loading model {}'.format(prefill)) message('Loading ANN', nl=False) prefill = models.load_any(prefill) message('\u2713', fg='green') with log.progressbar(images, label='Reading images') as bar: for fp in bar: logger.info('Reading {}'.format(fp.name)) im = Image.open(fp) if im.mode not in ['1', 'L', 'P', 'RGB']: logger.warning('Input {} is in {} color mode. Converting to RGB'.format(fp.name, im.mode)) im = im.convert('RGB') logger.info('Binarizing page') im_bin = binarization.nlbin(im) im_bin = im_bin.convert('1') logger.info('Segmenting page') if not lines: res = pageseg.segment(im_bin, text_direction, scale, maxcolseps, black_colseps, pad=pad) else: with open_file(lines, 'r') as fp: try: fp = cast(IO[Any], fp) res = json.load(fp) except ValueError as e: raise click.UsageError('{} invalid segmentation: {}'.format(lines, str(e))) if prefill: it = rpred.rpred(prefill, im_bin, res.copy()) preds = [] logger.info('Recognizing') for pred in it: logger.debug('{}'.format(pred.prediction)) preds.append(pred) ti.add_page(im, res, records=preds) else: ti.add_page(im, res) fp.close() logger.info('Writing transcription to {}'.format(output.name)) message('Writing output', nl=False) ti.write(output) message('\u2713', fg='green') @cli.command('linegen') @click.pass_context @click.option('-f', '--font', default='sans', help='Font family to render texts in.') @click.option('-n', '--maxlines', type=click.INT, default=0, help='Maximum number of lines to generate') @click.option('-e', '--encoding', default='utf-8', help='Decode text files with given codec.') @click.option('-u', '--normalization', type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None, help='Normalize ground truth') @click.option('-ur', '--renormalize', type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None, help='Renormalize text for rendering purposes.') @click.option('--reorder/--no-reorder', default=False, help='Reorder code points to display order') @click.option('-fs', '--font-size', type=click.INT, default=32, help='Font size to render texts in.') @click.option('-fw', '--font-weight', type=click.INT, default=400, help='Font weight to render texts in.') @click.option('-l', '--language', help='RFC-3066 language tag for language-dependent font shaping') @click.option('-ll', '--max-length', type=click.INT, default=None, help="Discard lines above length (in Unicode codepoints).") @click.option('--strip/--no-strip', help="Remove whitespace from start and end " "of lines.") @click.option('-d', '--disable-degradation', is_flag=True, help='Dont degrade ' 'output lines.') @click.option('-a', '--alpha', type=click.FLOAT, default=1.5, help="Mean of folded normal distribution for sampling foreground pixel flip probability") @click.option('-b', '--beta', type=click.FLOAT, default=1.5, help="Mean of folded normal distribution for sampling background pixel flip probability") @click.option('-d', '--distort', type=click.FLOAT, default=1.0, help='Mean of folded normal distribution to take distortion values from') @click.option('-ds', '--distortion-sigma', type=click.FLOAT, default=20.0, help='Mean of folded normal distribution to take standard deviations for the ' 'Gaussian 
kernel from') @click.option('--legacy/--no-legacy', default=False, help='Use ocropy-style degradations') @click.option('-o', '--output', type=click.Path(), default='training_data', help='Output directory') @click.argument('text', nargs=-1, type=click.Path(exists=True)) def line_generator(ctx, font, maxlines, encoding, normalization, renormalize, reorder, font_size, font_weight, language, max_length, strip, disable_degradation, alpha, beta, distort, distortion_sigma, legacy, output, text): """ Generates artificial text line training data. """ import errno import numpy as np from kraken import linegen from kraken.lib.util import make_printable lines: Set[str] = set() if not text: return with log.progressbar(text, label='Reading texts') as bar: for t in text: with click.open_file(t, encoding=encoding) as fp: logger.info('Reading {}'.format(t)) for l in fp: lines.add(l.rstrip('\r\n')) if normalization: lines = set([unicodedata.normalize(normalization, line) for line in lines]) if strip: lines = set([line.strip() for line in lines]) if max_length: lines = set([line for line in lines if len(line) < max_length]) logger.info('Read {} lines'.format(len(lines))) message('Read {} unique lines'.format(len(lines))) if maxlines and maxlines < len(lines): message('Sampling {} lines\t'.format(maxlines), nl=False) llist = list(lines) lines = set(llist[idx] for idx in np.random.randint(0, len(llist), maxlines)) message('\u2713', fg='green') try: os.makedirs(output) except OSError as e: if e.errno != errno.EEXIST: raise # calculate the alphabet and print it for verification purposes alphabet: Set[str] = set() for line in lines: alphabet.update(line) chars = [] combining = [] for char in sorted(alphabet): k = make_printable(char) if k != char: combining.append(k) else: chars.append(k) message('Σ (len: {})'.format(len(alphabet))) message('Symbols: {}'.format(''.join(chars))) if combining: message('Combining Characters: {}'.format(', '.join(combining))) lg = linegen.LineGenerator(font, font_size, font_weight, language) with log.progressbar(lines, label='Writing images') as bar: for idx, line in enumerate(bar): logger.info(line) try: if renormalize: im = lg.render_line(unicodedata.normalize(renormalize, line)) else: im = lg.render_line(line) except KrakenCairoSurfaceException as e: logger.info('{}: {} {}'.format(e.message, e.width, e.height)) continue if not disable_degradation and not legacy: im = linegen.degrade_line(im, alpha=alpha, beta=beta) im = linegen.distort_line(im, abs(np.random.normal(distort)), abs(np.random.normal(distortion_sigma))) elif legacy: im = linegen.ocropy_degrade(im) im.save('{}/{:06d}.png'.format(output, idx)) with open('{}/{:06d}.gt.txt'.format(output, idx), 'wb') as fp: if reorder: fp.write(get_display(line).encode('utf-8')) else: fp.write(line.encode('utf-8')) @cli.command('publish') @click.pass_context @click.option('-i', '--metadata', show_default=True, type=click.File(mode='r', lazy=True), help='Metadata for the ' 'model. Will be prompted from the user if not given') @click.option('-a', '--access-token', prompt=True, help='Zenodo access token') @click.argument('model', nargs=1, type=click.Path(exists=False, readable=True, dir_okay=False)) def publish(ctx, metadata, access_token, model): """ Publishes a model on the zenodo model repository. 
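
    The Zenodo access token is prompted for if not given on the command line.
    A hedged example call (token and file names are placeholders):

        ketos publish -a $ZENODO_TOKEN -i metadata.json model_best.mlmodel

    Metadata is read from the JSON file given with ``-i`` or, if omitted,
    collected interactively and validated against the bundled schema.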
""" import json import pkg_resources from functools import partial from jsonschema import validate from jsonschema.exceptions import ValidationError from kraken import repo from kraken.lib import models with pkg_resources.resource_stream(__name__, 'metadata.schema.json') as fp: schema = json.load(fp) nn = models.load_any(model) if not metadata: author = click.prompt('author') affiliation = click.prompt('affiliation') summary = click.prompt('summary') description = click.edit('Write long form description (training data, transcription standards) of the model here') accuracy_default = None # take last accuracy measurement in model metadata if 'accuracy' in nn.nn.user_metadata and nn.nn.user_metadata['accuracy']: accuracy_default = nn.nn.user_metadata['accuracy'][-1][1] * 100 accuracy = click.prompt('accuracy on test set', type=float, default=accuracy_default) script = [click.prompt('script', type=click.Choice(sorted(schema['properties']['script']['items']['enum'])), show_choices=True)] license = click.prompt('license', type=click.Choice(sorted(schema['properties']['license']['enum'])), show_choices=True) metadata = { 'authors': [{'name': author, 'affiliation': affiliation}], 'summary': summary, 'description': description, 'accuracy': accuracy, 'license': license, 'script': script, 'name': os.path.basename(model), 'graphemes': ['a'] } while True: try: validate(metadata, schema) except ValidationError as e: message(e.message) metadata[e.path[-1]] = click.prompt(e.path[-1], type=float if e.schema['type'] == 'number' else str) continue break else: metadata = json.load(metadata) validate(metadata, schema) metadata['graphemes'] = [char for char in ''.join(nn.codec.c2l.keys())] oid = repo.publish_model(model, metadata, access_token, partial(message, '.', nl=False)) message('\nmodel PID: {}'.format(oid)) if __name__ == '__main__': cli()
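

# ---------------------------------------------------------------------------
# Illustrative usage (a hedged sketch, not part of the upstream module): the
# subcommands defined above are normally reached through the `ketos` console
# entry point. Paths, model names, and devices below are placeholders.
#
#   ketos train -f xml -o my_model -d cuda:0 train/*.xml
#   ketos test -m my_model_best.mlmodel -f xml test/*.xml
#   ketos transcribe -o transcription.html -p my_model_best.mlmodel page_*.png
#
# The same command group can also be exercised programmatically with Click's
# test runner, which is handy for smoke tests:
#
#   from click.testing import CliRunner
#   runner = CliRunner()
#   result = runner.invoke(cli, ['train', '--help'])
#   print(result.exit_code, result.output)
# ---------------------------------------------------------------------------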
Will be prompted from the user if not given') @click.option('-a', '--access-token', prompt=True, help='Zenodo access token') @click.argument('model', nargs=1, type=click.Path(exists=False, readable=True, dir_okay=False)) def publish(ctx, metadata, access_token, model): import json import pkg_resources from functools import partial from jsonschema import validate from jsonschema.exceptions import ValidationError from kraken import repo from kraken.lib import models with pkg_resources.resource_stream(__name__, 'metadata.schema.json') as fp: schema = json.load(fp) nn = models.load_any(model) if not metadata: author = click.prompt('author') affiliation = click.prompt('affiliation') summary = click.prompt('summary') description = click.edit('Write long form description (training data, transcription standards) of the model here') accuracy_default = None # take last accuracy measurement in model metadata if 'accuracy' in nn.nn.user_metadata and nn.nn.user_metadata['accuracy']: accuracy_default = nn.nn.user_metadata['accuracy'][-1][1] * 100 accuracy = click.prompt('accuracy on test set', type=float, default=accuracy_default) script = [click.prompt('script', type=click.Choice(sorted(schema['properties']['script']['items']['enum'])), show_choices=True)] license = click.prompt('license', type=click.Choice(sorted(schema['properties']['license']['enum'])), show_choices=True) metadata = { 'authors': [{'name': author, 'affiliation': affiliation}], 'summary': summary, 'description': description, 'accuracy': accuracy, 'license': license, 'script': script, 'name': os.path.basename(model), 'graphemes': ['a'] } while True: try: validate(metadata, schema) except ValidationError as e: message(e.message) metadata[e.path[-1]] = click.prompt(e.path[-1], type=float if e.schema['type'] == 'number' else str) continue break else: metadata = json.load(metadata) validate(metadata, schema) metadata['graphemes'] = [char for char in ''.join(nn.codec.c2l.keys())] oid = repo.publish_model(model, metadata, access_token, partial(message, '.', nl=False)) message('\nmodel PID: {}'.format(oid)) if __name__ == '__main__': cli()
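A stand-alone sketch (not part of the record above): the evaluation loop reports accuracy as (chars - errors) / chars over aligned ground truth and predictions. `global_align` and `compute_confusions` are kraken internals; the sketch below reproduces only the metric itself with a plain Levenshtein edit distance, and every name in it is illustrative rather than part of kraken.

def levenshtein(a: str, b: str) -> int:
    # classic dynamic-programming edit distance over two strings
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

def char_accuracy(ground_truth: str, prediction: str) -> float:
    # the metric used by the evaluation loop: (chars - errors) / chars
    chars = len(ground_truth)
    if chars == 0:
        return 1.0
    return (chars - levenshtein(ground_truth, prediction)) / chars

# char_accuracy('kraken', 'krahen') -> 0.8333... (one substitution over six chars)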
true
true
1c435aaf0e77dfe1367116e247fb907561430e8a
3,130
py
Python
app/run.py
adarbha/disaster_response
34e1e3586caffb18e6d7f57a2c121426c02f1886
[ "MIT" ]
null
null
null
app/run.py
adarbha/disaster_response
34e1e3586caffb18e6d7f57a2c121426c02f1886
[ "MIT" ]
22
2020-09-26T00:34:37.000Z
2022-03-29T22:28:45.000Z
app/run.py
adarbha/disaster_response
34e1e3586caffb18e6d7f57a2c121426c02f1886
[ "MIT" ]
null
null
null
import json
import plotly
import pandas as pd

from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine

from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer


app = Flask(__name__)

def tokenize(text):
    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()

    clean_tokens = []
    for tok in tokens:
        clean_tok = lemmatizer.lemmatize(tok).lower().strip()
        clean_tokens.append(clean_tok)

    return clean_tokens

# load data
engine = create_engine('sqlite:////home/workspace/data/DisasterResponse.db')
df = pd.read_sql_table('msg_cat', engine)
# Need to remove child_alone column
df = df.drop(columns = ['original','child_alone'])

# load model
model = joblib.load("/home/workspace/models/test_0.pkl")


# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():

    # extract data needed for visuals
    # TODO: Below is an example - modify to extract data for your own visuals
    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)

    # create visuals
    # TODO: Below is an example - modify to create your own visuals
    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],

            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        }
    ]

    # encode plotly graphs in JSON
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)

    # render web page with plotly graphs
    return render_template('master.html', ids=ids, graphJSON=graphJSON)


# web page that handles user query and displays model results
@app.route('/go')
def go():
    # save user input in query
    query = request.args.get('query', '')

    # use model to predict classification for query
    print(query)
    classification_labels = model.predict([query])[0]
    classification_probas = model.predict_proba([query])
    classification_probas = [i.tolist()[0][1] for i in classification_probas]
    print(classification_probas)
    classification_results = dict(zip(df.columns[3:], classification_labels))
    classification_results_ = dict(zip(classification_results.keys(), list(zip(classification_labels.tolist(), classification_probas))))

    # This will render the go.html. Please see that file.
    return render_template(
        'go.html',
        query=query,
        classification_result=classification_results_
    )


def main():
    app.run(host='0.0.0.0', port=3001, debug=True)


if __name__ == '__main__':
    main()
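A stand-alone sketch (not part of the record above) of the pairing done at the end of go(): each category name is zipped with a (label, probability) tuple before rendering. All values below are invented for illustration.

labels = [1, 0]                        # model.predict(...)[0] for one query
probas = [0.92, 0.07]                  # positive-class probabilities
categories = ['related', 'request']    # stands in for df.columns[3:]

result = dict(zip(categories, zip(labels, probas)))
# {'related': (1, 0.92), 'request': (0, 0.07)}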
27.946429
136
0.64984
import json
import plotly
import pandas as pd

from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine

from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer


app = Flask(__name__)

def tokenize(text):
    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()

    clean_tokens = []
    for tok in tokens:
        clean_tok = lemmatizer.lemmatize(tok).lower().strip()
        clean_tokens.append(clean_tok)

    return clean_tokens

engine = create_engine('sqlite:////home/workspace/data/DisasterResponse.db')
df = pd.read_sql_table('msg_cat', engine)
df = df.drop(columns = ['original','child_alone'])

model = joblib.load("/home/workspace/models/test_0.pkl")


@app.route('/')
@app.route('/index')
def index():

    genre_counts = df.groupby('genre').count()['message']
    genre_names = list(genre_counts.index)

    graphs = [
        {
            'data': [
                Bar(
                    x=genre_names,
                    y=genre_counts
                )
            ],

            'layout': {
                'title': 'Distribution of Message Genres',
                'yaxis': {
                    'title': "Count"
                },
                'xaxis': {
                    'title': "Genre"
                }
            }
        }
    ]

    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)

    return render_template('master.html', ids=ids, graphJSON=graphJSON)


@app.route('/go')
def go():
    query = request.args.get('query', '')

    print(query)
    classification_labels = model.predict([query])[0]
    classification_probas = model.predict_proba([query])
    classification_probas = [i.tolist()[0][1] for i in classification_probas]
    print(classification_probas)
    classification_results = dict(zip(df.columns[3:], classification_labels))
    classification_results_ = dict(zip(classification_results.keys(), list(zip(classification_labels.tolist(), classification_probas))))

    return render_template(
        'go.html',
        query=query,
        classification_result=classification_results_
    )


def main():
    app.run(host='0.0.0.0', port=3001, debug=True)


if __name__ == '__main__':
    main()
true
true
1c435b85f4b9d6a628a79791664b31d42e045c1c
751
py
Python
backend/bikestore/bikestore/urls.py
AlexandreInsua/HacktoberFest-2k9
a929bd8edd90395d2e0d0faac4b73ebfd47ed43a
[ "MIT" ]
3
2019-10-09T08:33:41.000Z
2020-02-23T14:20:53.000Z
backend/bikestore/bikestore/urls.py
AlexandreInsua/HacktoberFest-2k9
a929bd8edd90395d2e0d0faac4b73ebfd47ed43a
[ "MIT" ]
12
2019-10-05T17:27:58.000Z
2019-10-19T15:08:56.000Z
backend/bikestore/bikestore/urls.py
AlexandreInsua/HacktoberFest-2k9
a929bd8edd90395d2e0d0faac4b73ebfd47ed43a
[ "MIT" ]
10
2019-10-05T17:01:37.000Z
2019-10-20T16:24:20.000Z
"""bikestore URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path urlpatterns = [ path('admin/', admin.site.urls), ]
34.136364
77
0.70972
from django.contrib import admin
from django.urls import path

urlpatterns = [
    path('admin/', admin.site.urls),
]
true
true
1c435c117bd8020ae31707eefb8deb065c4d65d2
1,699
py
Python
backend/debtors/migrations/0001_initial.py
yaseralnajjar/Debtor-Administrator
b75172cf8f833781277b5af4e7fd996c3542dd87
[ "MIT" ]
17
2019-09-11T20:02:09.000Z
2020-04-19T18:20:46.000Z
backend/debtors/migrations/0001_initial.py
yaseralnajjar/Debtor-Administrator
b75172cf8f833781277b5af4e7fd996c3542dd87
[ "MIT" ]
11
2020-02-12T01:17:28.000Z
2022-02-10T18:51:55.000Z
backend/debtors/migrations/0001_initial.py
yaseralnajjar/Debtor-Administrator
b75172cf8f833781277b5af4e7fd996c3542dd87
[ "MIT" ]
5
2019-09-11T19:09:20.000Z
2020-03-09T11:10:19.000Z
# Generated by Django 2.1.3 on 2019-08-10 09:52

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import localflavor.generic.models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Debtor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('email', models.EmailField(max_length=254)),
                ('iban', localflavor.generic.models.IBANField(include_countries=None, max_length=34, use_nordea_extensions=False)),
                ('admin_creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_debtors', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Invoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(choices=[('0', 'open'), ('1', 'overdue'), ('2', 'paid')], default='0', max_length=2)),
                ('amount', models.DecimalField(decimal_places=2, max_digits=10)),
                ('due_date', models.DateField()),
                ('debtor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='debtors.Debtor')),
            ],
        ),
    ]
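A stand-alone sketch (not part of the record above) of querying the models this migration creates, assuming matching Debtor/Invoice model classes exist in debtors.models; note that the status field stores the choice key, not its label.

from debtors.models import Invoice   # assumed model module

open_invoices = Invoice.objects.filter(status='0')   # '0' maps to 'open'
overdue = Invoice.objects.filter(status='1')         # '1' maps to 'overdue'
for invoice in overdue.select_related('debtor'):
    # related_name='invoices' also allows debtor.invoices.all() in reverse
    print(invoice.debtor.email, invoice.amount, invoice.due_date)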
42.475
159
0.621542
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import localflavor.generic.models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Debtor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('email', models.EmailField(max_length=254)),
                ('iban', localflavor.generic.models.IBANField(include_countries=None, max_length=34, use_nordea_extensions=False)),
                ('admin_creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_debtors', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Invoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(choices=[('0', 'open'), ('1', 'overdue'), ('2', 'paid')], default='0', max_length=2)),
                ('amount', models.DecimalField(decimal_places=2, max_digits=10)),
                ('due_date', models.DateField()),
                ('debtor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='debtors.Debtor')),
            ],
        ),
    ]
true
true
1c435c7fe5171f2e64440da4f2e365288bf81aad
4,934
py
Python
schema.py
cojo24/eia
b2115a4cbc2e30c23abcbf4060deef962d3f82f9
[ "MIT" ]
1
2022-03-07T20:46:09.000Z
2022-03-07T20:46:09.000Z
schema.py
cojo24/eia
b2115a4cbc2e30c23abcbf4060deef962d3f82f9
[ "MIT" ]
null
null
null
schema.py
cojo24/eia
b2115a4cbc2e30c23abcbf4060deef962d3f82f9
[ "MIT" ]
1
2022-03-18T01:07:23.000Z
2022-03-18T01:07:23.000Z
import datetime as dt

import pandas as pd

from .client import EIA
from collections import namedtuple
from typing import Optional, Union


CategoryCollection = namedtuple('CategoryCollection', 'items')
SeriesCollection = namedtuple('SeriesCollection', 'items')


class Category:

    def __init__(
        self,
        category_id: Union[int, str],
        name: str,
        notes: str,
        parent_category_id: Union[int, str],
        childcategories: Optional[CategoryCollection] = None,
        childseries: Optional[SeriesCollection] = None,
    ):
        self.category_id = category_id
        self.name = name
        self.notes = notes
        self.parent_category_id = parent_category_id
        self.childcategories = CategoryCollection([]) if childcategories is None else childcategories
        self.childseries = SeriesCollection([]) if childseries is None else childseries

    def __repr__(self):
        cat = '[ Category: {}'.format(self.name)
        attrs = [
            '\n\t{} = {}'.format(a, self.__getattribute__(a))
            for a in self.__dir__()
            if not a.startswith('__') and a not in ['childcategories', 'childseries']
            if a in [
                'category_id',
                'name',
                'notes',
                'parent_category_id',
            ]
        ]
        ccats = self.__getattribute__('childcategories')
        cseries = self.__getattribute__('childseries')
        attrs.append('\n\tchildcategories = [{} Category Objects]'.format(len(ccats.items)))
        attrs.append('\n\tchildseries = [{} Series Objects]'.format(len(cseries.items)))
        attrs = ''.join(attrs)
        cat = cat + attrs + ' ]'
        return cat

    @classmethod
    def from_category_id(cls, category_id: Union[int, str], eia_client: EIA, load_series=False):
        json = eia_client.get_category(category_id)
        json = json['category']

        category = cls(
            category_id=json['category_id'],
            name=json['name'],
            notes=json['notes'],
            parent_category_id=json['parent_category_id'],
        )

        # For each child category/series:
        # Ping EIA api and instantiate classes
        childcategories = CategoryCollection([
            # recursive call
            cls.from_category_id(c['category_id'], eia_client, load_series=load_series)
            for c in json['childcategories']
        ])
        category.childcategories = childcategories

        if load_series is True:
            childseries = SeriesCollection([
                Series.from_series_id(s['series_id'], eia_client)
                for s in json['childseries']
            ])
            for s in childseries.items:
                s.category_id = category.category_id
            category.childseries = childseries

        return category


class Series:

    def __init__(
        self,
        series_id: str,
        name: str,
        units: str,
        freq: str,
        desc: str,
        start: dt.datetime,
        end: dt.datetime,
        updated: dt.datetime,
        data: pd.DataFrame,
        category_id: Optional[Union[int, str]] = None,
    ):
        self.series_id = series_id
        self.name = name
        self.units = units
        self.freq = freq
        self.desc = desc
        self.start = start
        self.end = end
        self.updated = updated
        self.data = data
        self.category_id = category_id

    def __repr__(self):
        s = '[ Series: {}'.format(self.name)
        attrs = ''.join([
            '\n\t{} = {}'.format(a, self.__getattribute__(a))
            for a in self.__dir__()
            if a in [
                'series_id',
                'name',
                'units',
                'freq',
                'desc',
                'start',
                'end',
                'updated',
                'category_id',
            ]
        ])
        s = s + attrs + ' ]'
        return s

    @classmethod
    def from_series_id(cls, series_id: Union[int, str], eia_client: EIA, dt_format=None):
        json = eia_client.get_series(series_id)
        json = json['series'][0]

        data = pd.DataFrame(json['data'], columns=['Period', json['series_id']])
        data['Period'] = pd.to_datetime(data['Period'], format=dt_format)
        data = data.set_index('Period')

        start = pd.to_datetime(json['start'], format=dt_format)
        end = pd.to_datetime(json['end'], format=dt_format)
        updated = pd.to_datetime(json['updated'])

        return cls(
            series_id=json['series_id'],
            name=json.get('name', ''),
            units=json['units'],
            freq=json['f'],
            desc=json.get('description', ''),
            start=start,
            end=end,
            updated=updated,
            data=data,
        )
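A stand-alone sketch (not part of the record above): the Series class defined above can be built by hand without an EIA API call, which is handy for testing __repr__ and downstream code. All identifiers and values below are invented.

import pandas as pd

data = pd.DataFrame(
    {'Period': pd.to_datetime(['2020-01-01', '2020-02-01']),
     'DEMO.SERIES.M': [1.0, 2.0]}
).set_index('Period')

s = Series(
    series_id='DEMO.SERIES.M', name='Demo series', units='MWh', freq='M',
    desc='', start=data.index[0], end=data.index[-1],
    updated=pd.Timestamp.now(), data=data,
)
print(s)   # exercises the __repr__ defined above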
31.031447
101
0.542967
import datetime as dt

import pandas as pd

from .client import EIA
from collections import namedtuple
from typing import Optional, Union


CategoryCollection = namedtuple('CategoryCollection', 'items')
SeriesCollection = namedtuple('SeriesCollection', 'items')


class Category:

    def __init__(
        self,
        category_id: Union[int, str],
        name: str,
        notes: str,
        parent_category_id: Union[int, str],
        childcategories: Optional[CategoryCollection] = None,
        childseries: Optional[SeriesCollection] = None,
    ):
        self.category_id = category_id
        self.name = name
        self.notes = notes
        self.parent_category_id = parent_category_id
        self.childcategories = CategoryCollection([]) if childcategories is None else childcategories
        self.childseries = SeriesCollection([]) if childseries is None else childseries

    def __repr__(self):
        cat = '[ Category: {}'.format(self.name)
        attrs = [
            '\n\t{} = {}'.format(a, self.__getattribute__(a))
            for a in self.__dir__()
            if not a.startswith('__') and a not in ['childcategories', 'childseries']
            if a in [
                'category_id',
                'name',
                'notes',
                'parent_category_id',
            ]
        ]
        ccats = self.__getattribute__('childcategories')
        cseries = self.__getattribute__('childseries')
        attrs.append('\n\tchildcategories = [{} Category Objects]'.format(len(ccats.items)))
        attrs.append('\n\tchildseries = [{} Series Objects]'.format(len(cseries.items)))
        attrs = ''.join(attrs)
        cat = cat + attrs + ' ]'
        return cat

    @classmethod
    def from_category_id(cls, category_id: Union[int, str], eia_client: EIA, load_series=False):
        json = eia_client.get_category(category_id)
        json = json['category']

        category = cls(
            category_id=json['category_id'],
            name=json['name'],
            notes=json['notes'],
            parent_category_id=json['parent_category_id'],
        )

        childcategories = CategoryCollection([
            cls.from_category_id(c['category_id'], eia_client, load_series=load_series)
            for c in json['childcategories']
        ])
        category.childcategories = childcategories

        if load_series is True:
            childseries = SeriesCollection([
                Series.from_series_id(s['series_id'], eia_client)
                for s in json['childseries']
            ])
            for s in childseries.items:
                s.category_id = category.category_id
            category.childseries = childseries

        return category


class Series:

    def __init__(
        self,
        series_id: str,
        name: str,
        units: str,
        freq: str,
        desc: str,
        start: dt.datetime,
        end: dt.datetime,
        updated: dt.datetime,
        data: pd.DataFrame,
        category_id: Optional[Union[int, str]] = None,
    ):
        self.series_id = series_id
        self.name = name
        self.units = units
        self.freq = freq
        self.desc = desc
        self.start = start
        self.end = end
        self.updated = updated
        self.data = data
        self.category_id = category_id

    def __repr__(self):
        s = '[ Series: {}'.format(self.name)
        attrs = ''.join([
            '\n\t{} = {}'.format(a, self.__getattribute__(a))
            for a in self.__dir__()
            if a in [
                'series_id',
                'name',
                'units',
                'freq',
                'desc',
                'start',
                'end',
                'updated',
                'category_id',
            ]
        ])
        s = s + attrs + ' ]'
        return s

    @classmethod
    def from_series_id(cls, series_id: Union[int, str], eia_client: EIA, dt_format=None):
        json = eia_client.get_series(series_id)
        json = json['series'][0]

        data = pd.DataFrame(json['data'], columns=['Period', json['series_id']])
        data['Period'] = pd.to_datetime(data['Period'], format=dt_format)
        data = data.set_index('Period')

        start = pd.to_datetime(json['start'], format=dt_format)
        end = pd.to_datetime(json['end'], format=dt_format)
        updated = pd.to_datetime(json['updated'])

        return cls(
            series_id=json['series_id'],
            name=json.get('name', ''),
            units=json['units'],
            freq=json['f'],
            desc=json.get('description', ''),
            start=start,
            end=end,
            updated=updated,
            data=data,
        )
true
true
1c435cc608779fbc867cb796fbc56b574704d723
6,618
py
Python
src/Evaluation/evaluation.py
kuefmz/software_classification
0dee3a046e59052ab272e4029195fb21f3d58c04
[ "Apache-2.0" ]
null
null
null
src/Evaluation/evaluation.py
kuefmz/software_classification
0dee3a046e59052ab272e4029195fb21f3d58c04
[ "Apache-2.0" ]
null
null
null
src/Evaluation/evaluation.py
kuefmz/software_classification
0dee3a046e59052ab272e4029195fb21f3d58c04
[ "Apache-2.0" ]
null
null
null
import argparse
import csv
import json
import os
import sys
from typing import Any, Callable, Dict, Iterable, Set

import logthis

sys.path.append(os.path.abspath(os.getcwd()) + '/src')
from util.utils import getCategories, BASE_CATEGORIES


def lower_transform(predictions: Iterable[str]) -> Set[str]:
    """
    Transforms the items of the given Iterable with str.lower() function.\n
    Used as a default transform function.

    Params
    ---------
    predictions: (Iterable[str]) Collection of texts to transform.

    Return
    ---------
    (Set[str]): Set of transformed items.
    """
    return {pred.lower() for pred in predictions}


def csoc_transform_predictions(predictions: Iterable[str], transform_dict: Dict[str, str]) -> Set[str]:
    """
    Transforms the items of the given Iterable with str.lower() function and then applies the given mapping.\n
    Used to transform predictions of CSOC.

    Params
    ---------
    predictions: (Iterable[str]) Collection of texts to transform.
    transform_dict: (Dict[str, str]) Key-value pairs used to map CSOC predictions into known categories.

    Return
    ---------
    (Set[str]): Set of transformed items.
    """
    ret = set()
    logthis.say('original: ', predictions)
    for prediction in predictions:
        pred = prediction.lower()
        if pred in transform_dict:
            ret.add(transform_dict[pred])
        else:
            ret.add(pred)
    logthis.say('mapped: ', ret)
    return ret


class Evaluator:
    """
    This class is used to evaluate the predictions and save the collected statistics.

    Methods
    ----------
    resetStats: Initializes or resets the collected stats.
    evaluate: Runs the evaluation and collects stats.
    dumpStats: Dumps the collected stats in JSON format.

    Attributes
    ---------
    inputfile: (str) Used to store the input file with predictions.
    categories: (Iterable[str]) Collection of categories to consider when collecting stats.
    transformer: (Callable[[Iterable[str]], Set[str]]) Used to store the transform function to transform predictions into categories.
    stat_fields: (List[str]) Used to store the fields used in statistics.
    prediction_fieldname: (str) Name of the column where the prediction values are stored in the csv file.
    """

    def __init__(self, inputfile: str, categories: Iterable[str] = None,
                 transformer: Callable[[Iterable[str]], Set[str]] = lower_transform,
                 prediction_fieldname: str = 'Predictions'):
        """
        Params
        ---------
        inputfile: (str) Used to store the input file with predictions.
        categories: (Iterable[str]) Collection of categories to consider when collecting stats. Defaults to {'natural language processing', 'general', 'sequential', 'computer vision', 'reinforcement learning', 'graphs', 'audio'}.
        transformer: (Callable[[Iterable[str]], Set[str]]) Used to store the transform function to transform predictions into categories. Defaults to lower_transform, which performs str.lower() on predictions.
        prediction_fieldname: (str) Name of the column where the prediction values are stored in the csv file. Defaults to 'Predictions'.
        """
        self.inputfile = inputfile
        self.categories = set([cat.lower() for cat in categories]) if categories is not None else {'natural language processing', 'general', 'sequential', 'computer vision', 'reinforcement learning', 'graphs', 'audio'}
        self.transformer = transformer
        self.stat_fields = ['tp', 'tn', 'fp', 'fn', 'support']
        self.resetStats()
        self.prediction_fieldname = prediction_fieldname

    def resetStats(self) -> None:
        """
        Initializes or resets the collected stats.
        """
        self.stats = {
            'overall' : {key: 0 for key in self.stat_fields},
            'overall_presentonly' : {key: 0 for key in self.stat_fields},
        }
        for category in self.categories:
            self.stats[category] = {key: 0 for key in self.stat_fields}

    def evaluate(self) -> Dict[str, Dict[str, Any]]:
        """
        Runs the evaluation and collects stats.

        Return
        --------
        (Dict[str, Dict[str, Any]]) Returns the collected statistics.
        """
        with open(self.inputfile) as f:
            self.reader = csv.DictReader(f, delimiter=';')
            for row in self.reader:
                self.stats['overall']['support'] += 1
                predictions = self.transformer(row[self.prediction_fieldname].split(','))
                labels = lower_transform(row['Labels'].split(','))
                for category in self.categories:
                    self.stats[category]['support'] += 1
                    if category in predictions:
                        if category in labels:
                            self.stats['overall']['tp'] += 1
                            self.stats[category]['tp'] += 1
                        else:
                            self.stats['overall']['fp'] += 1
                            self.stats[category]['fp'] += 1
                    else:
                        if category in labels:
                            self.stats['overall']['fn'] += 1
                            self.stats[category]['fn'] += 1
                        else:
                            self.stats['overall']['tn'] += 1
                            self.stats[category]['tn'] += 1
        for category in self.categories:
            if self.stats[category]['tp'] != 0 or self.stats[category]['fp'] != 0:
                for key in self.stat_fields:
                    self.stats['overall_presentonly'][key] += self.stats[category][key]
        self.stats['overall_presentonly']['support'] += self.stats['overall']['support']
        for key, val in self.stats.items():
            self.stats[key]['precision'] = f"{val['tp']/(1 + val['tp'] + val['fp']):.2f}"
            self.stats[key]['recall'] = f"{val['tp']/(1 + val['tp'] + val['fn']):.2f}"
            self.stats[key]['sample_num'] = val['tp'] + val['fn']
        return self.stats.copy()

    def dumpStats(self, outfile: str):
        """
        Dumps the collected stats in JSON format.

        Params
        ---------
        outfile: (str) Path to the output file.
        """
        with open(outfile, 'w') as f:
            json.dump(self.stats, f, indent=4)


if __name__ == '__main__':
    parser_evaluate = argparse.ArgumentParser('python src/Evaluation/evaluate.py', description='Evaluate the predictions.')
    parser_evaluate.add_argument('--inputfile', required=True, help='Path of the csv file with the predictions.')
    parser_evaluate.add_argument('--outfile', required=True, help='Path of the json file to write scores.')
    parser_evaluate_categories = parser_evaluate.add_mutually_exclusive_group(required=False)
    parser_evaluate_categories.add_argument('--all_categories', nargs="+", help=f'List of all categories used. Use only if you want not the basic categories. {BASE_CATEGORIES=}')
    parser_evaluate_categories.add_argument('--additional_categories', nargs="+", help=f'List of categories adding to basic categories. {BASE_CATEGORIES=}')
    args = parser_evaluate.parse_args()

    categories = getCategories(BASE_CATEGORIES, args.all_categories, args.additional_categories)
    logthis.say(f"{categories=}")

    evaluator = Evaluator(args.inputfile, set(categories))
    evaluator.evaluate()
    evaluator.dumpStats(args.outfile)
37.602273
212
0.699456
import argparse
import csv
import json
import os
import sys
from typing import Any, Callable, Dict, Iterable, Set

import logthis

sys.path.append(os.path.abspath(os.getcwd()) + '/src')
from util.utils import getCategories, BASE_CATEGORIES


def lower_transform(predictions: Iterable[str]) -> Set[str]:
    return {pred.lower() for pred in predictions}


def csoc_transform_predictions(predictions: Iterable[str], transform_dict: Dict[str, str]) -> Set[str]:
    ret = set()
    logthis.say('original: ', predictions)
    for prediction in predictions:
        pred = prediction.lower()
        if pred in transform_dict:
            ret.add(transform_dict[pred])
        else:
            ret.add(pred)
    logthis.say('mapped: ', ret)
    return ret


class Evaluator:

    def __init__(self, inputfile: str, categories: Iterable[str] = None,
                 transformer: Callable[[Iterable[str]], Set[str]] = lower_transform,
                 prediction_fieldname: str = 'Predictions'):
        self.inputfile = inputfile
        self.categories = set([cat.lower() for cat in categories]) if categories is not None else {'natural language processing', 'general', 'sequential', 'computer vision', 'reinforcement learning', 'graphs', 'audio'}
        self.transformer = transformer
        self.stat_fields = ['tp', 'tn', 'fp', 'fn', 'support']
        self.resetStats()
        self.prediction_fieldname = prediction_fieldname

    def resetStats(self) -> None:
        self.stats = {
            'overall' : {key: 0 for key in self.stat_fields},
            'overall_presentonly' : {key: 0 for key in self.stat_fields},
        }
        for category in self.categories:
            self.stats[category] = {key: 0 for key in self.stat_fields}

    def evaluate(self) -> Dict[str, Dict[str, Any]]:
        with open(self.inputfile) as f:
            self.reader = csv.DictReader(f, delimiter=';')
            for row in self.reader:
                self.stats['overall']['support'] += 1
                predictions = self.transformer(row[self.prediction_fieldname].split(','))
                labels = lower_transform(row['Labels'].split(','))
                for category in self.categories:
                    self.stats[category]['support'] += 1
                    if category in predictions:
                        if category in labels:
                            self.stats['overall']['tp'] += 1
                            self.stats[category]['tp'] += 1
                        else:
                            self.stats['overall']['fp'] += 1
                            self.stats[category]['fp'] += 1
                    else:
                        if category in labels:
                            self.stats['overall']['fn'] += 1
                            self.stats[category]['fn'] += 1
                        else:
                            self.stats['overall']['tn'] += 1
                            self.stats[category]['tn'] += 1
        for category in self.categories:
            if self.stats[category]['tp'] != 0 or self.stats[category]['fp'] != 0:
                for key in self.stat_fields:
                    self.stats['overall_presentonly'][key] += self.stats[category][key]
        self.stats['overall_presentonly']['support'] += self.stats['overall']['support']
        for key, val in self.stats.items():
            self.stats[key]['precision'] = f"{val['tp']/(1 + val['tp'] + val['fp']):.2f}"
            self.stats[key]['recall'] = f"{val['tp']/(1 + val['tp'] + val['fn']):.2f}"
            self.stats[key]['sample_num'] = val['tp'] + val['fn']
        return self.stats.copy()

    def dumpStats(self, outfile: str):
        with open(outfile, 'w') as f:
            json.dump(self.stats, f, indent=4)


if __name__ == '__main__':
    parser_evaluate = argparse.ArgumentParser('python src/Evaluation/evaluate.py', description='Evaluate the predictions.')
    parser_evaluate.add_argument('--inputfile', required=True, help='Path of the csv file with the predictions.')
    parser_evaluate.add_argument('--outfile', required=True, help='Path of the json file to write scores.')
    parser_evaluate_categories = parser_evaluate.add_mutually_exclusive_group(required=False)
    parser_evaluate_categories.add_argument('--all_categories', nargs="+", help=f'List of all categories used. Use only if you want not the basic categories. {BASE_CATEGORIES=}')
    parser_evaluate_categories.add_argument('--additional_categories', nargs="+", help=f'List of categories adding to basic categories. {BASE_CATEGORIES=}')
    args = parser_evaluate.parse_args()

    categories = getCategories(BASE_CATEGORIES, args.all_categories, args.additional_categories)
    logthis.say(f"{categories=}")

    evaluator = Evaluator(args.inputfile, set(categories))
    evaluator.evaluate()
    evaluator.dumpStats(args.outfile)
true
true
1c435dece6ce248772afe2c144485740d027f7cb
27,181
py
Python
rpyc/core/protocol.py
ramezsaeed/rpyc
5909b7f984969ca8ea453154593d28159cd1c1b2
[ "MIT" ]
null
null
null
rpyc/core/protocol.py
ramezsaeed/rpyc
5909b7f984969ca8ea453154593d28159cd1c1b2
[ "MIT" ]
null
null
null
rpyc/core/protocol.py
ramezsaeed/rpyc
5909b7f984969ca8ea453154593d28159cd1c1b2
[ "MIT" ]
1
2020-09-09T16:27:51.000Z
2020-09-09T16:27:51.000Z
""" The RPyC protocol """ import sys import itertools import socket import time import gc from threading import Lock, Condition from rpyc.lib import spawn, Timeout from rpyc.lib.compat import (pickle, next, is_py3k, maxint, select_error, acquire_lock) from rpyc.lib.colls import WeakValueDict, RefCountingColl from rpyc.core import consts, brine, vinegar, netref from rpyc.core.async_ import AsyncResult class PingError(Exception): """The exception raised should :func:`Connection.ping` fail""" pass DEFAULT_CONFIG = dict( # ATTRIBUTES allow_safe_attrs = True, allow_exposed_attrs = True, allow_public_attrs = False, allow_all_attrs = False, safe_attrs = set(['__abs__', '__add__', '__and__', '__bool__', '__cmp__', '__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__', '__doc__', '__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__', '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__', '__index__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__', '__nonzero__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__repr__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__str__', '__sub__', '__truediv__', '__xor__', 'next', '__length_hint__', '__enter__', '__exit__', '__next__',]), exposed_prefix = "exposed_", allow_getattr = True, allow_setattr = False, allow_delattr = False, # EXCEPTIONS include_local_traceback = True, instantiate_custom_exceptions = False, import_custom_exceptions = False, instantiate_oldstyle_exceptions = False, # which don't derive from Exception propagate_SystemExit_locally = False, # whether to propagate SystemExit locally or to the other party propagate_KeyboardInterrupt_locally = True, # whether to propagate KeyboardInterrupt locally or to the other party log_exceptions = True, # MISC allow_pickle = False, connid = None, credentials = None, endpoints = None, logger = None, sync_request_timeout = 30, ) """ The default configuration dictionary of the protocol. You can override these parameters by passing a different configuration dict to the :class:`Connection` class. .. note:: You only need to override the parameters you want to change. There's no need to repeat parameters whose values remain unchanged. 
======================================= ================ ===================================================== Parameter Default value Description ======================================= ================ ===================================================== ``allow_safe_attrs`` ``True`` Whether to allow the use of *safe* attributes (only those listed as ``safe_attrs``) ``allow_exposed_attrs`` ``True`` Whether to allow exposed attributes (attributes that start with the ``exposed_prefix``) ``allow_public_attrs`` ``False`` Whether to allow public attributes (attributes that don't start with ``_``) ``allow_all_attrs`` ``False`` Whether to allow all attributes (including private) ``safe_attrs`` ``set([...])`` The set of attributes considered safe ``exposed_prefix`` ``"exposed_"`` The prefix of exposed attributes ``allow_getattr`` ``True`` Whether to allow getting of attributes (``getattr``) ``allow_setattr`` ``False`` Whether to allow setting of attributes (``setattr``) ``allow_delattr`` ``False`` Whether to allow deletion of attributes (``delattr``) ``allow_pickle`` ``False`` Whether to allow the use of ``pickle`` ``include_local_traceback`` ``True`` Whether to include the local traceback in the remote exception ``instantiate_custom_exceptions`` ``False`` Whether to allow instantiation of custom exceptions (not the built in ones) ``import_custom_exceptions`` ``False`` Whether to allow importing of exceptions from not-yet-imported modules ``instantiate_oldstyle_exceptions`` ``False`` Whether to allow instantiation of exceptions which don't derive from ``Exception``. This is not applicable for Python 3 and later. ``propagate_SystemExit_locally`` ``False`` Whether to propagate ``SystemExit`` locally (kill the server) or to the other party (kill the client) ``propagate_KeyboardInterrupt_locally`` ``False`` Whether to propagate ``KeyboardInterrupt`` locally (kill the server) or to the other party (kill the client) ``logger`` ``None`` The logger instance to use to log exceptions (before they are sent to the other party) and other events. If ``None``, no logging takes place. ``connid`` ``None`` **Runtime**: the RPyC connection ID (used mainly for debugging purposes) ``credentials`` ``None`` **Runtime**: the credentails object that was returned by the server's :ref:`authenticator <api-authenticators>` or ``None`` ``endpoints`` ``None`` **Runtime**: The connection's endpoints. This is a tuple made of the local socket endpoint (``getsockname``) and the remote one (``getpeername``). This is set by the server upon accepting a connection; client side connections do no have this configuration option set. ``sync_request_timeout`` ``30`` Default timeout for waiting results ======================================= ================ ===================================================== """ _connection_id_generator = itertools.count(1) class Connection(object): """The RPyC *connection* (AKA *protocol*). 
:param root: the :class:`~rpyc.core.service.Service` object to expose :param channel: the :class:`~rpyc.core.channel.Channel` over which messages are passed :param config: the connection's configuration dict (overriding parameters from the :data:`default configuration <DEFAULT_CONFIG>`) """ def __init__(self, root, channel, config={}): self._closed = True self._config = DEFAULT_CONFIG.copy() self._config.update(config) if self._config["connid"] is None: self._config["connid"] = "conn%d" % (next(_connection_id_generator),) self._HANDLERS = self._request_handlers() self._channel = channel self._seqcounter = itertools.count() self._recvlock = Lock() self._sendlock = Lock() self._recv_event = Condition() self._request_callbacks = {} self._local_objects = RefCountingColl() self._last_traceback = None self._proxy_cache = WeakValueDict() self._netref_classes_cache = {} self._remote_root = None self._send_queue = [] self._local_root = root self._closed = False def __del__(self): self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() def __repr__(self): a, b = object.__repr__(self).split(" object ") return "%s %r object %s" % (a, self._config["connid"], b) # # IO # def _cleanup(self, _anyway = True): if self._closed and not _anyway: return self._closed = True self._channel.close() self._local_root.on_disconnect(self) self._request_callbacks.clear() self._local_objects.clear() self._proxy_cache.clear() self._netref_classes_cache.clear() self._last_traceback = None self._remote_root = None self._local_root = None #self._seqcounter = None #self._config.clear() del self._HANDLERS def close(self, _catchall = True): """closes the connection, releasing all held resources""" if self._closed: return self._closed = True try: self._async_request(consts.HANDLE_CLOSE) except EOFError: pass except Exception: if not _catchall: raise finally: self._cleanup(_anyway = True) @property def closed(self): """Indicates whether the connection has been closed or not""" return self._closed def fileno(self): """Returns the connectin's underlying file descriptor""" return self._channel.fileno() def ping(self, data = None, timeout = 3): """ Asserts that the other party is functioning properly, by making sure the *data* is echoed back before the *timeout* expires :param data: the data to send (leave ``None`` for the default buffer) :param timeout: the maximal time to wait for echo :raises: :class:`PingError` if the echoed data does not match :raises: :class:`EOFError` if the remote host closes the connection """ if data is None: data = "abcdefghijklmnopqrstuvwxyz" * 20 res = self.async_request(consts.HANDLE_PING, data, timeout = timeout) if res.value != data: raise PingError("echo mismatches sent data") def _get_seq_id(self): return next(self._seqcounter) def _send(self, msg, seq, args): data = brine.dump((msg, seq, args)) # GC might run while sending data # if so, a BaseNetref.__del__ might be called # BaseNetref.__del__ must call asyncreq, # which will cause a deadlock # Solution: # Add the current request to a queue and let the thread that currently # holds the sendlock send it when it's done with its current job. # NOTE: Atomic list operations should be thread safe, # please call me out if they are not on all implementations! self._send_queue.append(data) # It is crucial to check the queue each time AFTER releasing the lock: while self._send_queue: if not self._sendlock.acquire(False): # Another thread holds the lock. It will send the data after # it's done with its current job. 
We can safely return. return try: # Can happen if another consumer was scheduled in between # `while` and `acquire`: if not self._send_queue: # Must `continue` to ensure that `send_queue` is checked # after releasing the lock! (in case another producer is # scheduled before `release`) continue data = self._send_queue.pop(0) self._channel.send(data) finally: self._sendlock.release() # # boxing # def _box(self, obj): """store a local object in such a way that it could be recreated on the remote party either by-value or by-reference""" if brine.dumpable(obj): return consts.LABEL_VALUE, obj if type(obj) is tuple: return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj) elif isinstance(obj, netref.BaseNetref) and obj.____conn__ is self: return consts.LABEL_LOCAL_REF, obj.____oid__ else: self._local_objects.add(obj) try: cls = obj.__class__ except Exception: # see issue #16 cls = type(obj) if not isinstance(cls, type): cls = type(obj) return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__) def _unbox(self, package): """recreate a local object representation of the remote object: if the object is passed by value, just return it; if the object is passed by reference, create a netref to it""" label, value = package if label == consts.LABEL_VALUE: return value if label == consts.LABEL_TUPLE: return tuple(self._unbox(item) for item in value) if label == consts.LABEL_LOCAL_REF: return self._local_objects[value] if label == consts.LABEL_REMOTE_REF: oid, clsname, modname = value if oid in self._proxy_cache: proxy = self._proxy_cache[oid] proxy.____refcount__ += 1 # other side increased refcount on boxing, # if I'm returning from cache instead of new object, # must increase refcount to match return proxy proxy = self._netref_factory(oid, clsname, modname) self._proxy_cache[oid] = proxy return proxy raise ValueError("invalid label %r" % (label,)) def _netref_factory(self, oid, clsname, modname): typeinfo = (clsname, modname) if typeinfo in self._netref_classes_cache: cls = self._netref_classes_cache[typeinfo] elif typeinfo in netref.builtin_classes_cache: cls = netref.builtin_classes_cache[typeinfo] else: info = self.sync_request(consts.HANDLE_INSPECT, oid) cls = netref.class_factory(clsname, modname, info) self._netref_classes_cache[typeinfo] = cls return cls(self, oid) # # dispatching # def _dispatch_request(self, seq, raw_args): try: handler, args = raw_args args = self._unbox(args) res = self._HANDLERS[handler](self, *args) except: # need to catch old style exceptions too t, v, tb = sys.exc_info() self._last_traceback = tb logger = self._config["logger"] if logger and t is not StopIteration: logger.debug("Exception caught", exc_info=True) if t is SystemExit and self._config["propagate_SystemExit_locally"]: raise if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]: raise self._send(consts.MSG_EXCEPTION, seq, self._box_exc(t, v, tb)) else: self._send(consts.MSG_REPLY, seq, self._box(res)) def _box_exc(self, typ, val, tb): return vinegar.dump(typ, val, tb, include_local_traceback= self._config["include_local_traceback"]) def _unbox_exc(self, raw): return vinegar.load(raw, import_custom_exceptions = self._config["import_custom_exceptions"], instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"], instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"]) # # serving # def _dispatch(self, data): msg, seq, args = brine.load(data) if msg == consts.MSG_REQUEST: self._dispatch_request(seq, args) elif msg == 
consts.MSG_REPLY: obj = self._unbox(args) self._request_callbacks.pop(seq)(False, obj) elif msg == consts.MSG_EXCEPTION: obj = self._unbox_exc(args) self._request_callbacks.pop(seq)(True, obj) else: raise ValueError("invalid message type: %r" % (msg,)) def serve(self, timeout=1, wait_for_lock=True): """Serves a single request or reply that arrives within the given time frame (default is 1 sec). Note that the dispatching of a request might trigger multiple (nested) requests, thus this function may be reentrant. :returns: ``True`` if a request or reply were received, ``False`` otherwise. """ timeout = Timeout(timeout) with self._recv_event: if not self._recvlock.acquire(False): return (wait_for_lock and self._recv_event.wait(timeout.timeleft())) try: data = self._channel.poll(timeout) and self._channel.recv() if not data: return False except EOFError: self.close() raise finally: self._recvlock.release() with self._recv_event: self._recv_event.notify_all() self._dispatch(data) return True def poll(self, timeout = 0): """Serves a single transaction, should one arrives in the given interval. Note that handling a request/reply may trigger nested requests, which are all part of a single transaction. :returns: ``True`` if a transaction was served, ``False`` otherwise""" return self.serve(timeout, False) def serve_all(self): """Serves all requests and replies for as long as the connection is alive.""" try: while True: self.serve(None) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass finally: self.close() def serve_threaded(self, thread_count=10): """Serves all requests and replies for as long as the connection is alive.""" def _thread_target(): try: while True: self.serve(None) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass try: threads = [spawn(_thread_target) for _ in range(thread_count)] for thread in threads: thread.join() finally: self.close() def poll_all(self, timeout=0): """Serves all requests and replies that arrive within the given interval. 
:returns: ``True`` if at least a single transaction was served, ``False`` otherwise """ at_least_once = False timeout = Timeout(timeout) try: while True: if self.poll(timeout): at_least_once = True if timeout.expired(): break except EOFError: pass return at_least_once # # requests # def sync_request(self, handler, *args): """Sends a synchronous request (waits for the reply to arrive) :raises: any exception that the requets may be generated :returns: the result of the request """ timeout = self._config["sync_request_timeout"] return self.async_request(handler, *args, timeout=timeout).value def _async_request(self, handler, args = (), callback = (lambda a, b: None)): seq = self._get_seq_id() self._request_callbacks[seq] = callback try: self._send(consts.MSG_REQUEST, seq, (handler, self._box(args))) except: self._request_callbacks.pop(seq, None) raise def async_request(self, handler, *args, **kwargs): """Send an asynchronous request (does not wait for it to finish) :returns: an :class:`rpyc.core.async_.AsyncResult` object, which will eventually hold the result (or exception) """ timeout = kwargs.pop("timeout", None) if kwargs: raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),)) res = AsyncResult(self) self._async_request(handler, args, res) if timeout is not None: res.set_expiry(timeout) return res @property def root(self): """Fetches the root object (service) of the other party""" if self._remote_root is None: self._remote_root = self.sync_request(consts.HANDLE_GETROOT) return self._remote_root # # attribute access # def _check_attr(self, obj, name, perm): config = self._config if not config[perm]: raise AttributeError("cannot access %r" % (name,)) prefix = config["allow_exposed_attrs"] and config["exposed_prefix"] plain = (config["allow_all_attrs"] or config["allow_exposed_attrs"] and name.startswith(prefix) or config["allow_safe_attrs"] and name in config["safe_attrs"] or config["allow_public_attrs"] and not name.startswith("_")) has_exposed = prefix and hasattr(obj, prefix+name) if plain and (not has_exposed or hasattr(obj, name)): return name if has_exposed: return prefix+name if plain: return name # chance for better traceback raise AttributeError("cannot access %r" % (name,)) def _access_attr(self, obj, name, args, overrider, param, default): if is_py3k: if type(name) is bytes: name = str(name, "utf8") elif type(name) is not str: raise TypeError("name must be a string") else: if type(name) not in (str, unicode): raise TypeError("name must be a string") name = str(name) # IronPython issue #10 + py3k issue accessor = getattr(type(obj), overrider, None) if accessor is None: accessor = default name = self._check_attr(obj, name, param) return accessor(obj, name, *args) # # request handlers # @classmethod def _request_handlers(cls): return { consts.HANDLE_PING: cls._handle_ping, consts.HANDLE_CLOSE: cls._handle_close, consts.HANDLE_GETROOT: cls._handle_getroot, consts.HANDLE_GETATTR: cls._handle_getattr, consts.HANDLE_DELATTR: cls._handle_delattr, consts.HANDLE_SETATTR: cls._handle_setattr, consts.HANDLE_CALL: cls._handle_call, consts.HANDLE_CALLATTR: cls._handle_callattr, consts.HANDLE_REPR: cls._handle_repr, consts.HANDLE_STR: cls._handle_str, consts.HANDLE_CMP: cls._handle_cmp, consts.HANDLE_HASH: cls._handle_hash, consts.HANDLE_DIR: cls._handle_dir, consts.HANDLE_PICKLE: cls._handle_pickle, consts.HANDLE_DEL: cls._handle_del, consts.HANDLE_INSPECT: cls._handle_inspect, consts.HANDLE_BUFFITER: cls._handle_buffiter, consts.HANDLE_OLDSLICING: 
cls._handle_oldslicing, consts.HANDLE_CTXEXIT: cls._handle_ctxexit, } def _handle_ping(self, data): return data def _handle_close(self): self._cleanup() def _handle_getroot(self): return self._local_root def _handle_del(self, obj, count=1): self._local_objects.decref(id(obj), count) def _handle_repr(self, obj): return repr(obj) def _handle_str(self, obj): return str(obj) def _handle_cmp(self, obj, other): # cmp() might enter recursive resonance... yet another workaround #return cmp(obj, other) try: return type(obj).__cmp__(obj, other) except (AttributeError, TypeError): return NotImplemented def _handle_hash(self, obj): return hash(obj) def _handle_call(self, obj, args, kwargs=()): return obj(*args, **dict(kwargs)) def _handle_dir(self, obj): return tuple(dir(obj)) def _handle_inspect(self, oid): return tuple(netref.inspect_methods(self._local_objects[oid])) def _handle_getattr(self, obj, name): return self._access_attr(obj, name, (), "_rpyc_getattr", "allow_getattr", getattr) def _handle_delattr(self, obj, name): return self._access_attr(obj, name, (), "_rpyc_delattr", "allow_delattr", delattr) def _handle_setattr(self, obj, name, value): return self._access_attr(obj, name, (value,), "_rpyc_setattr", "allow_setattr", setattr) def _handle_callattr(self, obj, name, args, kwargs=()): obj = self._handle_getattr(obj, name) if not args: # issue #293, HACK args = (name,) return self._handle_call(obj, args, kwargs) def _handle_ctxexit(self, obj, exc): if exc: try: raise exc except: exc, typ, tb = sys.exc_info() else: typ = tb = None return self._handle_getattr(obj, "__exit__")(exc, typ, tb) def _handle_pickle(self, obj, proto): if not self._config["allow_pickle"]: raise ValueError("pickling is disabled") return bytes(pickle.dumps(obj, proto)) def _handle_buffiter(self, obj, count): return tuple(itertools.islice(obj, count)) def _handle_oldslicing(self, obj, attempt, fallback, start, stop, args): try: # first try __xxxitem__ getitem = self._handle_getattr(obj, attempt) return getitem(slice(start, stop), *args) except Exception: # fallback to __xxxslice__. see issue #41 if stop is None: stop = maxint getslice = self._handle_getattr(obj, fallback) return getslice(start, stop, *args)
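A stand-alone usage sketch (not part of the record above): a Connection is normally obtained through rpyc's factory helpers rather than constructed directly. The host, port, and service method below are hypothetical.

import rpyc

conn = rpyc.connect('localhost', 18812)   # returns a Connection instance
conn.ping()                               # HANDLE_PING round-trip; raises PingError on mismatch
root = conn.root                          # HANDLE_GETROOT: proxy for the remote service
# exposed_* methods of the service appear as plain attributes on the proxy,
# e.g. root.some_method() for a hypothetical exposed_some_method
conn.close()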
43.007911
119
0.567124
import sys import itertools import socket import time import gc from threading import Lock, Condition from rpyc.lib import spawn, Timeout from rpyc.lib.compat import (pickle, next, is_py3k, maxint, select_error, acquire_lock) from rpyc.lib.colls import WeakValueDict, RefCountingColl from rpyc.core import consts, brine, vinegar, netref from rpyc.core.async_ import AsyncResult class PingError(Exception): pass DEFAULT_CONFIG = dict( allow_safe_attrs = True, allow_exposed_attrs = True, allow_public_attrs = False, allow_all_attrs = False, safe_attrs = set(['__abs__', '__add__', '__and__', '__bool__', '__cmp__', '__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__', '__doc__', '__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__', '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__', '__index__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__', '__nonzero__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__repr__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__str__', '__sub__', '__truediv__', '__xor__', 'next', '__length_hint__', '__enter__', '__exit__', '__next__',]), exposed_prefix = "exposed_", allow_getattr = True, allow_setattr = False, allow_delattr = False, include_local_traceback = True, instantiate_custom_exceptions = False, import_custom_exceptions = False, instantiate_oldstyle_exceptions = False, propagate_SystemExit_locally = False, # whether to propagate SystemExit locally or to the other party propagate_KeyboardInterrupt_locally = True, # whether to propagate KeyboardInterrupt locally or to the other party log_exceptions = True, # MISC allow_pickle = False, connid = None, credentials = None, endpoints = None, logger = None, sync_request_timeout = 30, ) _connection_id_generator = itertools.count(1) class Connection(object): def __init__(self, root, channel, config={}): self._closed = True self._config = DEFAULT_CONFIG.copy() self._config.update(config) if self._config["connid"] is None: self._config["connid"] = "conn%d" % (next(_connection_id_generator),) self._HANDLERS = self._request_handlers() self._channel = channel self._seqcounter = itertools.count() self._recvlock = Lock() self._sendlock = Lock() self._recv_event = Condition() self._request_callbacks = {} self._local_objects = RefCountingColl() self._last_traceback = None self._proxy_cache = WeakValueDict() self._netref_classes_cache = {} self._remote_root = None self._send_queue = [] self._local_root = root self._closed = False def __del__(self): self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() def __repr__(self): a, b = object.__repr__(self).split(" object ") return "%s %r object %s" % (a, self._config["connid"], b) # # IO # def _cleanup(self, _anyway = True): if self._closed and not _anyway: return self._closed = True self._channel.close() self._local_root.on_disconnect(self) self._request_callbacks.clear() self._local_objects.clear() self._proxy_cache.clear() self._netref_classes_cache.clear() self._last_traceback = None self._remote_root = None self._local_root = None #self._seqcounter = None #self._config.clear() 
del self._HANDLERS def close(self, _catchall = True): if self._closed: return self._closed = True try: self._async_request(consts.HANDLE_CLOSE) except EOFError: pass except Exception: if not _catchall: raise finally: self._cleanup(_anyway = True) @property def closed(self): return self._closed def fileno(self): return self._channel.fileno() def ping(self, data = None, timeout = 3): if data is None: data = "abcdefghijklmnopqrstuvwxyz" * 20 res = self.async_request(consts.HANDLE_PING, data, timeout = timeout) if res.value != data: raise PingError("echo mismatches sent data") def _get_seq_id(self): return next(self._seqcounter) def _send(self, msg, seq, args): data = brine.dump((msg, seq, args)) # GC might run while sending data # if so, a BaseNetref.__del__ might be called # BaseNetref.__del__ must call asyncreq, # which will cause a deadlock # Solution: # Add the current request to a queue and let the thread that currently # holds the sendlock send it when it's done with its current job. self._send_queue.append(data) while self._send_queue: if not self._sendlock.acquire(False): return try: # Can happen if another consumer was scheduled in between # `while` and `acquire`: if not self._send_queue: # Must `continue` to ensure that `send_queue` is checked # after releasing the lock! (in case another producer is # scheduled before `release`) continue data = self._send_queue.pop(0) self._channel.send(data) finally: self._sendlock.release() # # boxing # def _box(self, obj): if brine.dumpable(obj): return consts.LABEL_VALUE, obj if type(obj) is tuple: return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj) elif isinstance(obj, netref.BaseNetref) and obj.____conn__ is self: return consts.LABEL_LOCAL_REF, obj.____oid__ else: self._local_objects.add(obj) try: cls = obj.__class__ except Exception: # see issue #16 cls = type(obj) if not isinstance(cls, type): cls = type(obj) return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__) def _unbox(self, package): label, value = package if label == consts.LABEL_VALUE: return value if label == consts.LABEL_TUPLE: return tuple(self._unbox(item) for item in value) if label == consts.LABEL_LOCAL_REF: return self._local_objects[value] if label == consts.LABEL_REMOTE_REF: oid, clsname, modname = value if oid in self._proxy_cache: proxy = self._proxy_cache[oid] proxy.____refcount__ += 1 # other side increased refcount on boxing, # if I'm returning from cache instead of new object, return proxy proxy = self._netref_factory(oid, clsname, modname) self._proxy_cache[oid] = proxy return proxy raise ValueError("invalid label %r" % (label,)) def _netref_factory(self, oid, clsname, modname): typeinfo = (clsname, modname) if typeinfo in self._netref_classes_cache: cls = self._netref_classes_cache[typeinfo] elif typeinfo in netref.builtin_classes_cache: cls = netref.builtin_classes_cache[typeinfo] else: info = self.sync_request(consts.HANDLE_INSPECT, oid) cls = netref.class_factory(clsname, modname, info) self._netref_classes_cache[typeinfo] = cls return cls(self, oid) def _dispatch_request(self, seq, raw_args): try: handler, args = raw_args args = self._unbox(args) res = self._HANDLERS[handler](self, *args) except: t, v, tb = sys.exc_info() self._last_traceback = tb logger = self._config["logger"] if logger and t is not StopIteration: logger.debug("Exception caught", exc_info=True) if t is SystemExit and self._config["propagate_SystemExit_locally"]: raise if t is KeyboardInterrupt and self._config["propagate_KeyboardInterrupt_locally"]: raise 
self._send(consts.MSG_EXCEPTION, seq, self._box_exc(t, v, tb)) else: self._send(consts.MSG_REPLY, seq, self._box(res)) def _box_exc(self, typ, val, tb): return vinegar.dump(typ, val, tb, include_local_traceback= self._config["include_local_traceback"]) def _unbox_exc(self, raw): return vinegar.load(raw, import_custom_exceptions = self._config["import_custom_exceptions"], instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"], instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"]) def _dispatch(self, data): msg, seq, args = brine.load(data) if msg == consts.MSG_REQUEST: self._dispatch_request(seq, args) elif msg == consts.MSG_REPLY: obj = self._unbox(args) self._request_callbacks.pop(seq)(False, obj) elif msg == consts.MSG_EXCEPTION: obj = self._unbox_exc(args) self._request_callbacks.pop(seq)(True, obj) else: raise ValueError("invalid message type: %r" % (msg,)) def serve(self, timeout=1, wait_for_lock=True): timeout = Timeout(timeout) with self._recv_event: if not self._recvlock.acquire(False): return (wait_for_lock and self._recv_event.wait(timeout.timeleft())) try: data = self._channel.poll(timeout) and self._channel.recv() if not data: return False except EOFError: self.close() raise finally: self._recvlock.release() with self._recv_event: self._recv_event.notify_all() self._dispatch(data) return True def poll(self, timeout = 0): return self.serve(timeout, False) def serve_all(self): try: while True: self.serve(None) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass finally: self.close() def serve_threaded(self, thread_count=10): def _thread_target(): try: while True: self.serve(None) except (socket.error, select_error, IOError): if not self.closed: raise except EOFError: pass try: threads = [spawn(_thread_target) for _ in range(thread_count)] for thread in threads: thread.join() finally: self.close() def poll_all(self, timeout=0): at_least_once = False timeout = Timeout(timeout) try: while True: if self.poll(timeout): at_least_once = True if timeout.expired(): break except EOFError: pass return at_least_once def sync_request(self, handler, *args): timeout = self._config["sync_request_timeout"] return self.async_request(handler, *args, timeout=timeout).value def _async_request(self, handler, args = (), callback = (lambda a, b: None)): seq = self._get_seq_id() self._request_callbacks[seq] = callback try: self._send(consts.MSG_REQUEST, seq, (handler, self._box(args))) except: self._request_callbacks.pop(seq, None) raise def async_request(self, handler, *args, **kwargs): timeout = kwargs.pop("timeout", None) if kwargs: raise TypeError("got unexpected keyword argument(s) %s" % (list(kwargs.keys()),)) res = AsyncResult(self) self._async_request(handler, args, res) if timeout is not None: res.set_expiry(timeout) return res @property def root(self): if self._remote_root is None: self._remote_root = self.sync_request(consts.HANDLE_GETROOT) return self._remote_root def _check_attr(self, obj, name, perm): config = self._config if not config[perm]: raise AttributeError("cannot access %r" % (name,)) prefix = config["allow_exposed_attrs"] and config["exposed_prefix"] plain = (config["allow_all_attrs"] or config["allow_exposed_attrs"] and name.startswith(prefix) or config["allow_safe_attrs"] and name in config["safe_attrs"] or config["allow_public_attrs"] and not name.startswith("_")) has_exposed = prefix and hasattr(obj, prefix+name) if plain and (not has_exposed or hasattr(obj, name)): return name if 
has_exposed: return prefix+name if plain: return name raise AttributeError("cannot access %r" % (name,)) def _access_attr(self, obj, name, args, overrider, param, default): if is_py3k: if type(name) is bytes: name = str(name, "utf8") elif type(name) is not str: raise TypeError("name must be a string") else: if type(name) not in (str, unicode): raise TypeError("name must be a string") name = str(name) accessor = getattr(type(obj), overrider, None) if accessor is None: accessor = default name = self._check_attr(obj, name, param) return accessor(obj, name, *args) @classmethod def _request_handlers(cls): return { consts.HANDLE_PING: cls._handle_ping, consts.HANDLE_CLOSE: cls._handle_close, consts.HANDLE_GETROOT: cls._handle_getroot, consts.HANDLE_GETATTR: cls._handle_getattr, consts.HANDLE_DELATTR: cls._handle_delattr, consts.HANDLE_SETATTR: cls._handle_setattr, consts.HANDLE_CALL: cls._handle_call, consts.HANDLE_CALLATTR: cls._handle_callattr, consts.HANDLE_REPR: cls._handle_repr, consts.HANDLE_STR: cls._handle_str, consts.HANDLE_CMP: cls._handle_cmp, consts.HANDLE_HASH: cls._handle_hash, consts.HANDLE_DIR: cls._handle_dir, consts.HANDLE_PICKLE: cls._handle_pickle, consts.HANDLE_DEL: cls._handle_del, consts.HANDLE_INSPECT: cls._handle_inspect, consts.HANDLE_BUFFITER: cls._handle_buffiter, consts.HANDLE_OLDSLICING: cls._handle_oldslicing, consts.HANDLE_CTXEXIT: cls._handle_ctxexit, } def _handle_ping(self, data): return data def _handle_close(self): self._cleanup() def _handle_getroot(self): return self._local_root def _handle_del(self, obj, count=1): self._local_objects.decref(id(obj), count) def _handle_repr(self, obj): return repr(obj) def _handle_str(self, obj): return str(obj) def _handle_cmp(self, obj, other): try: return type(obj).__cmp__(obj, other) except (AttributeError, TypeError): return NotImplemented def _handle_hash(self, obj): return hash(obj) def _handle_call(self, obj, args, kwargs=()): return obj(*args, **dict(kwargs)) def _handle_dir(self, obj): return tuple(dir(obj)) def _handle_inspect(self, oid): return tuple(netref.inspect_methods(self._local_objects[oid])) def _handle_getattr(self, obj, name): return self._access_attr(obj, name, (), "_rpyc_getattr", "allow_getattr", getattr) def _handle_delattr(self, obj, name): return self._access_attr(obj, name, (), "_rpyc_delattr", "allow_delattr", delattr) def _handle_setattr(self, obj, name, value): return self._access_attr(obj, name, (value,), "_rpyc_setattr", "allow_setattr", setattr) def _handle_callattr(self, obj, name, args, kwargs=()): obj = self._handle_getattr(obj, name) if not args: args = (name,) return self._handle_call(obj, args, kwargs) def _handle_ctxexit(self, obj, exc): if exc: try: raise exc except: exc, typ, tb = sys.exc_info() else: typ = tb = None return self._handle_getattr(obj, "__exit__")(exc, typ, tb) def _handle_pickle(self, obj, proto): if not self._config["allow_pickle"]: raise ValueError("pickling is disabled") return bytes(pickle.dumps(obj, proto)) def _handle_buffiter(self, obj, count): return tuple(itertools.islice(obj, count)) def _handle_oldslicing(self, obj, attempt, fallback, start, stop, args): try: getitem = self._handle_getattr(obj, attempt) return getitem(slice(start, stop), *args) except Exception: if stop is None: stop = maxint getslice = self._handle_getattr(obj, fallback) return getslice(start, stop, *args)
true
true
1c435df40ccfb869ca74b75601e2b8f81af69a5e
630
py
Python
flask/migrations/versions/6a6005dc2d4c_.py
schinke/solid-fortnight-ba
221ad998e51e9b2634d1cfb17c84ad28dbc9a7d5
[ "MIT" ]
1
2016-08-19T09:33:09.000Z
2016-08-19T09:33:09.000Z
flask/migrations/versions/6a6005dc2d4c_.py
schinke/solid-fortnight-ba
221ad998e51e9b2634d1cfb17c84ad28dbc9a7d5
[ "MIT" ]
null
null
null
flask/migrations/versions/6a6005dc2d4c_.py
schinke/solid-fortnight-ba
221ad998e51e9b2634d1cfb17c84ad28dbc9a7d5
[ "MIT" ]
null
null
null
"""empty message Revision ID: 6a6005dc2d4c Revises: 574cfcd5eb17 Create Date: 2016-09-22 00:24:15.999102 """ # revision identifiers, used by Alembic. revision = '6a6005dc2d4c' down_revision = '574cfcd5eb17' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('scivalue', 'amount') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('scivalue', sa.Column('amount', sa.INTEGER(), autoincrement=False, nullable=True)) ### end Alembic commands ###
23.333333
100
0.7
revision = '6a6005dc2d4c' down_revision = '574cfcd5eb17' from alembic import op import sqlalchemy as sa def upgrade(): op.drop_column('scivalue', 'amount') def downgrade(): op.add_column('scivalue', sa.Column('amount', sa.INTEGER(), autoincrement=False, nullable=True))
true
true
1c435e59e7f071e12abce0d551669c93ab68ebbc
2,289
bzl
Python
envoy_build_config/extensions_build_config.bzl
Yannic/envoy-mobile
27fd74c88d71b2c91f484e3660c936948b2eb481
[ "Apache-2.0" ]
1
2021-06-24T15:10:49.000Z
2021-06-24T15:10:49.000Z
envoy_build_config/extensions_build_config.bzl
Yannic/envoy-mobile
27fd74c88d71b2c91f484e3660c936948b2eb481
[ "Apache-2.0" ]
null
null
null
envoy_build_config/extensions_build_config.bzl
Yannic/envoy-mobile
27fd74c88d71b2c91f484e3660c936948b2eb481
[ "Apache-2.0" ]
null
null
null
EXTENSION_CONFIG_VISIBILITY = ["//visibility:public"] EXTENSION_PACKAGE_VISIBILITY = ["//visibility:public"] EXTENSIONS = { "envoy.clusters.dynamic_forward_proxy": "//source/extensions/clusters/dynamic_forward_proxy:cluster", "envoy.filters.connection_pools.http.generic": "//source/extensions/upstreams/http/generic:config", "envoy.filters.http.assertion": "@envoy_mobile//library/common/extensions/filters/http/assertion:config", "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", "envoy.filters.http.dynamic_forward_proxy": "//source/extensions/filters/http/dynamic_forward_proxy:config", "envoy.filters.http.local_error": "@envoy_mobile//library/common/extensions/filters/http/local_error:config", "envoy.filters.http.platform_bridge": "@envoy_mobile//library/common/extensions/filters/http/platform_bridge:config", "envoy.filters.http.route_cache_reset": "@envoy_mobile//library/common/extensions/filters/http/route_cache_reset:config", "envoy.filters.http.router": "//source/extensions/filters/http/router:config", "envoy.filters.http.test_accessor": "@envoy_mobile//library/common/extensions/filters/http/test_accessor:config", "envoy.filters.http.test_event_tracker": "@envoy_mobile//library/common/extensions/filters/http/test_event_tracker:config", "envoy.filters.network.http_connection_manager": "//source/extensions/filters/network/http_connection_manager:config", "envoy.http.original_ip_detection.xff": "//source/extensions/http/original_ip_detection/xff:config", "envoy.stat_sinks.metrics_service": "//source/extensions/stat_sinks/metrics_service:config", "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", "envoy.transport_sockets.tls": "//source/extensions/transport_sockets/tls:config", "envoy.http.stateful_header_formatters.preserve_case": "//source/extensions/http/header_formatters/preserve_case:preserve_case_formatter", } WINDOWS_EXTENSIONS = {}
99.521739
142
0.698558
EXTENSION_CONFIG_VISIBILITY = ["//visibility:public"] EXTENSION_PACKAGE_VISIBILITY = ["//visibility:public"] EXTENSIONS = { "envoy.clusters.dynamic_forward_proxy": "//source/extensions/clusters/dynamic_forward_proxy:cluster", "envoy.filters.connection_pools.http.generic": "//source/extensions/upstreams/http/generic:config", "envoy.filters.http.assertion": "@envoy_mobile//library/common/extensions/filters/http/assertion:config", "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", "envoy.filters.http.dynamic_forward_proxy": "//source/extensions/filters/http/dynamic_forward_proxy:config", "envoy.filters.http.local_error": "@envoy_mobile//library/common/extensions/filters/http/local_error:config", "envoy.filters.http.platform_bridge": "@envoy_mobile//library/common/extensions/filters/http/platform_bridge:config", "envoy.filters.http.route_cache_reset": "@envoy_mobile//library/common/extensions/filters/http/route_cache_reset:config", "envoy.filters.http.router": "//source/extensions/filters/http/router:config", "envoy.filters.http.test_accessor": "@envoy_mobile//library/common/extensions/filters/http/test_accessor:config", "envoy.filters.http.test_event_tracker": "@envoy_mobile//library/common/extensions/filters/http/test_event_tracker:config", "envoy.filters.network.http_connection_manager": "//source/extensions/filters/network/http_connection_manager:config", "envoy.http.original_ip_detection.xff": "//source/extensions/http/original_ip_detection/xff:config", "envoy.stat_sinks.metrics_service": "//source/extensions/stat_sinks/metrics_service:config", "envoy.transport_sockets.raw_buffer": "//source/extensions/transport_sockets/raw_buffer:config", "envoy.transport_sockets.tls": "//source/extensions/transport_sockets/tls:config", "envoy.http.stateful_header_formatters.preserve_case": "//source/extensions/http/header_formatters/preserve_case:preserve_case_formatter", } WINDOWS_EXTENSIONS = {}
true
true
1c436011cf53472714dfe02473381d8d6a4e7c58
2,560
py
Python
ariadne_v2/data_chunk.py
t3hseus/ariadne
b4471a37741000e22281c4d6ff647d65ab9e1914
[ "MIT" ]
6
2020-08-28T22:44:07.000Z
2022-01-24T20:53:00.000Z
ariadne_v2/data_chunk.py
t3hseus/ariadne
b4471a37741000e22281c4d6ff647d65ab9e1914
[ "MIT" ]
1
2021-02-20T09:38:46.000Z
2021-02-20T09:38:46.000Z
ariadne_v2/data_chunk.py
t3hseus/ariadne
b4471a37741000e22281c4d6ff647d65ab9e1914
[ "MIT" ]
2
2021-10-04T09:25:06.000Z
2022-02-09T09:09:09.000Z
import multiprocessing from abc import abstractmethod, ABCMeta from contextlib import contextmanager from typing import List, Union, Callable, Any, Dict import numpy as np import pandas as pd class HDF5Serializable: @abstractmethod def to_hdf5(self, db, hash, path): pass @staticmethod def from_hdf5(db, hash, path): pass class DataChunk(HDF5Serializable): def __init__(self, np_ndarr: np.ndarray, source=None): self.np_arr = np_ndarr self.__source = source def jit_hash(self): assert self.__source is not None return self.__source def cachable(self): return self.__source is not None class DFDataChunk(DataChunk): def __init__(self, np_index: np.ndarray, np_chunk_data: np.ndarray, columns: List[str], dtypes: List, source=None): super(DFDataChunk, self).__init__(np_ndarr=np_chunk_data, source=source) self.index = np_index self.columns = columns self.dtypes = dtypes @staticmethod def from_df(df: pd.DataFrame, hash_source=None): return DFDataChunk(np_chunk_data=df.values, np_index=df.index.values, columns=list(df.columns), dtypes=[dt.str for dt in df.dtypes], source=hash_source) def as_df(self): return pd.DataFrame({ column: self.np_arr[:, idx].astype(self.dtypes[idx] if self.dtypes[idx] != '|O' else 'str') for idx, column in enumerate(self.columns)}, index=self.index) def to_hdf5(self, db, hash, path): columns = list(self.columns) ndarr = self.np_arr idx = self.index if f"{path}" in db: del db[f"{path}"] db.create_dataset(f"{path}/idx", data=idx, compression='gzip') db[f"{path}"].attrs["col"] = columns db[f"{path}"].attrs["dtype"] = self.dtypes for idx, col in enumerate(columns): tgt = ndarr[:, idx].astype(self.dtypes[idx]) db.create_dataset(f"{path}/objs/{idx}/{columns[idx]}", data=tgt, shape=tgt.shape, compression="gzip") @staticmethod def from_hdf5(db, hash, path): index = db[f"{path}/idx"][()] vals = {} for idx, col in enumerate(db[path].attrs['col']): vals[col] = db[f"{path}/objs/{idx}/{col}"][()] return DFDataChunk(index, pd.DataFrame.from_dict(vals).values, db[path].attrs['col'], db[path].attrs['dtype'], source=hash)
32.820513
131
0.596875
import multiprocessing from abc import abstractmethod, ABCMeta from contextlib import contextmanager from typing import List, Union, Callable, Any, Dict import numpy as np import pandas as pd class HDF5Serializable: @abstractmethod def to_hdf5(self, db, hash, path): pass @staticmethod def from_hdf5(db, hash, path): pass class DataChunk(HDF5Serializable): def __init__(self, np_ndarr: np.ndarray, source=None): self.np_arr = np_ndarr self.__source = source def jit_hash(self): assert self.__source is not None return self.__source def cachable(self): return self.__source is not None class DFDataChunk(DataChunk): def __init__(self, np_index: np.ndarray, np_chunk_data: np.ndarray, columns: List[str], dtypes: List, source=None): super(DFDataChunk, self).__init__(np_ndarr=np_chunk_data, source=source) self.index = np_index self.columns = columns self.dtypes = dtypes @staticmethod def from_df(df: pd.DataFrame, hash_source=None): return DFDataChunk(np_chunk_data=df.values, np_index=df.index.values, columns=list(df.columns), dtypes=[dt.str for dt in df.dtypes], source=hash_source) def as_df(self): return pd.DataFrame({ column: self.np_arr[:, idx].astype(self.dtypes[idx] if self.dtypes[idx] != '|O' else 'str') for idx, column in enumerate(self.columns)}, index=self.index) def to_hdf5(self, db, hash, path): columns = list(self.columns) ndarr = self.np_arr idx = self.index if f"{path}" in db: del db[f"{path}"] db.create_dataset(f"{path}/idx", data=idx, compression='gzip') db[f"{path}"].attrs["col"] = columns db[f"{path}"].attrs["dtype"] = self.dtypes for idx, col in enumerate(columns): tgt = ndarr[:, idx].astype(self.dtypes[idx]) db.create_dataset(f"{path}/objs/{idx}/{columns[idx]}", data=tgt, shape=tgt.shape, compression="gzip") @staticmethod def from_hdf5(db, hash, path): index = db[f"{path}/idx"][()] vals = {} for idx, col in enumerate(db[path].attrs['col']): vals[col] = db[f"{path}/objs/{idx}/{col}"][()] return DFDataChunk(index, pd.DataFrame.from_dict(vals).values, db[path].attrs['col'], db[path].attrs['dtype'], source=hash)
true
true
1c436014310567c0ee3904476243c19fb93b383e
532
py
Python
laterpay/constants.py
laterpay/laterpay-client-python
c75e13408f6900202108f52e386e1c8c719e5377
[ "MIT" ]
3
2015-09-16T13:32:41.000Z
2015-09-16T15:10:20.000Z
laterpay/constants.py
laterpay/laterpay-client-python
c75e13408f6900202108f52e386e1c8c719e5377
[ "MIT" ]
81
2015-02-05T07:05:50.000Z
2020-06-02T11:27:24.000Z
laterpay/constants.py
laterpay/laterpay-client-python
c75e13408f6900202108f52e386e1c8c719e5377
[ "MIT" ]
1
2016-12-14T12:26:38.000Z
2016-12-14T12:26:38.000Z
EXPIRY_SECONDS_FOR_HOUR = 60 * 60 EXPIRY_SECONDS_FOR_DAY = 60 * 60 * 24 EXPIRY_SECONDS_FOR_WEEK = 60 * 60 * 24 * 7 EXPIRY_SECONDS_FOR_MONTH = 60 * 60 * 24 * 31 EXPIRY_SECONDS_FOR_YEAR = 60 * 60 * 24 * 365 EXPIRY_SECONDS_FOR_TIME_UNITS = { 'h': EXPIRY_SECONDS_FOR_HOUR, 'd': EXPIRY_SECONDS_FOR_DAY, 'w': EXPIRY_SECONDS_FOR_WEEK, 'm': EXPIRY_SECONDS_FOR_MONTH, 'y': EXPIRY_SECONDS_FOR_YEAR, } ITEM_TYPE_CONTRIBUTION = 'contribution' ITEM_TYPE_DONATION = 'donation' ITEM_TYPE_POLITICAL_CONTRIBUTION = 'political'
29.555556
46
0.75
EXPIRY_SECONDS_FOR_HOUR = 60 * 60 EXPIRY_SECONDS_FOR_DAY = 60 * 60 * 24 EXPIRY_SECONDS_FOR_WEEK = 60 * 60 * 24 * 7 EXPIRY_SECONDS_FOR_MONTH = 60 * 60 * 24 * 31 EXPIRY_SECONDS_FOR_YEAR = 60 * 60 * 24 * 365 EXPIRY_SECONDS_FOR_TIME_UNITS = { 'h': EXPIRY_SECONDS_FOR_HOUR, 'd': EXPIRY_SECONDS_FOR_DAY, 'w': EXPIRY_SECONDS_FOR_WEEK, 'm': EXPIRY_SECONDS_FOR_MONTH, 'y': EXPIRY_SECONDS_FOR_YEAR, } ITEM_TYPE_CONTRIBUTION = 'contribution' ITEM_TYPE_DONATION = 'donation' ITEM_TYPE_POLITICAL_CONTRIBUTION = 'political'
true
true
1c43614701fd1d9f4f8204066b3f979895e61bb2
140
py
Python
mcc_f1/__init__.py
arthurcgusmao/py-mcc-f1
d1b7cb856fbf03faad6a9eeeaea08da049c603c0
[ "MIT" ]
7
2020-10-26T21:33:40.000Z
2022-02-14T10:56:06.000Z
mcc_f1/__init__.py
arthurcgusmao/py-mcc-f1
d1b7cb856fbf03faad6a9eeeaea08da049c603c0
[ "MIT" ]
1
2022-02-13T19:17:15.000Z
2022-02-13T19:17:15.000Z
mcc_f1/__init__.py
arthurcgusmao/py-mcc-f1
d1b7cb856fbf03faad6a9eeeaea08da049c603c0
[ "MIT" ]
1
2022-02-14T10:56:08.000Z
2022-02-14T10:56:08.000Z
from .mcc_f1_curve import mcc_f1_curve from ._plot.mcc_f1_curve import plot_mcc_f1_curve from ._plot.mcc_f1_curve import MCCF1CurveDisplay
28
49
0.871429
from .mcc_f1_curve import mcc_f1_curve from ._plot.mcc_f1_curve import plot_mcc_f1_curve from ._plot.mcc_f1_curve import MCCF1CurveDisplay
true
true
1c43626b704050331b0176d0ad3a8b95c275535f
7,675
py
Python
solutions/projectstaffing/deployment/arm/scripts/post_deploy/db.py
dgeorge-blueprint/dataconnect-solutions
40f54abb98af6a3cdb1389086f7d7270d40fa9b6
[ "MIT" ]
10
2021-06-18T16:32:07.000Z
2022-02-11T06:25:03.000Z
solutions/projectstaffing/deployment/arm/scripts/post_deploy/db.py
dgeorge-blueprint/dataconnect-solutions
40f54abb98af6a3cdb1389086f7d7270d40fa9b6
[ "MIT" ]
13
2021-05-27T00:48:15.000Z
2022-01-08T16:13:59.000Z
solutions/projectstaffing/deployment/arm/scripts/post_deploy/db.py
dgeorge-blueprint/dataconnect-solutions
40f54abb98af6a3cdb1389086f7d7270d40fa9b6
[ "MIT" ]
18
2021-06-03T13:43:43.000Z
2022-03-25T11:04:47.000Z
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT license. See LICENSE file in the project root for full license information. import datetime import os import re from os.path import join, dirname from skills_finder_utils import ad_ops, blob_ops from skills_finder_utils import arm_ops from config import InstallConfiguration def create_gdc_deployer_identity(install_config: InstallConfiguration, resource_group: str): admin_ad_group_name = install_config.gdc_admin_ad_group['ad_group_name'] gdc_deployer = ad_ops.add_gdc_deployer_identity(resource_group=resource_group, gdc_admin_ad_group=admin_ad_group_name, identity_name="gdc-deployer") install_config.gdc_deployer_identity = { "principalId": gdc_deployer["principalId"], "id": gdc_deployer["id"], "name": gdc_deployer["name"] } def initialize_db_schema_via_arm(install_config: InstallConfiguration, template_base_uri: str, storage_sas: str, rs_group_name: str, custom_init_file: str): if not install_config.gdc_deployer_identity: create_gdc_deployer_identity(install_config=install_config, resource_group=rs_group_name) if custom_init_file: matcher = re.search("https:\/\/(\w+)\..*", template_base_uri, re.IGNORECASE) deployment_storage = matcher.group(1) blob_ops.copy_file(source_path=custom_init_file, resource_group=rs_group_name, runtime_storage=deployment_storage, dest_container_name="gdc-artifacts", dest_path="sql-server/custom-init.sql") else: print("No custom-init.sql provided") template_uri = template_base_uri + "sql-server/init_sql_schema.json" + "?" + storage_sas json_params = arm_ops.create_sql_init_arm_parameters(install_config=install_config, base_uri=template_base_uri, sas_token=storage_sas) arm_params_json_file = os.path.join(install_config.get_gdc_dir(), "gdc_sql_arm_params.json") with open(arm_params_json_file, "w") as param_file: param_file.write(json_params) print("Validating SQL schema ARM configuration ...") arm_ops.validate_templates(template_uri=template_uri, param_file_path=arm_params_json_file, resource_group=rs_group_name) print("Deploying SQL schema using managed identity %s " % str(install_config.gdc_deployer_identity.get('id'))) arm_ops.deploy_arm_template(template_uri=template_uri, param_file_path=arm_params_json_file, resource_group=rs_group_name) def initialize_db_schema_manually(install_config: InstallConfiguration): return True def resolve_custom_sql_runtime_path(): project_root = dirname(dirname(dirname(__file__))) sql_server_tmpl_dir = join(project_root, "sql-server") custom_init_file = os.path.join(sql_server_tmpl_dir, "custom-init.sql") return custom_init_file def generate_custom_init_sql(install_config): # In AzureSql the DB user names can contain any symbols, including ' and " and [ and ] # In AzureSql identifiers can be quoted with [] or "" # In identifiers quoted with [], the [ character does not require escaping, however, ] must be escaped by doubling it # Only users that are mapped to Windows principals can contain the backslash character (\). 
# In AzureSql strings are quoted with ', can contain any special character and escaping is only needed for ' via '' # Therefore, below we shall handle such special characters in user names and passwords, depending on quotation gdc_service_user = install_config.gdc_service_principal['name'] jgraph_user = install_config.appservice_name gdc_data_ingestion_mode = install_config.gdc_data_ingestion_mode if install_config.sql_auth: gdc_service_password = install_config.gdc_service_db_user_password jgraph_user_password = install_config.jgraph_db_user_password return """ USE [gdc_database] GO IF NOT EXISTS(SELECT uid FROM DBO.SYSUSERS Where Name = '{gdc_service_user_string_literal}') BEGIN CREATE USER [{gdc_service_user_identifier}] WITH PASSWORD = '{gdc_service_password_string_literal}'; ALTER ROLE db_datareader ADD MEMBER [{gdc_service_user_identifier}]; ALTER ROLE db_datawriter ADD MEMBER [{gdc_service_user_identifier}]; ALTER ROLE db_ddladmin ADD MEMBER [{gdc_service_user_identifier}]; END GO IF NOT EXISTS(SELECT uid FROM DBO.SYSUSERS Where Name = '{jgraph_user_string_literal}') BEGIN CREATE USER [{jgraph_user_identifier}] WITH PASSWORD = '{jgraph_user_password_escaped}'; ALTER ROLE db_datareader ADD MEMBER [{jgraph_user_identifier}]; ALTER ROLE db_datawriter ADD MEMBER [{jgraph_user_identifier}]; END GO GRANT EXECUTE ON OBJECT::[dbo].[find_recommended_employees] TO [{jgraph_user_identifier}]; GO INSERT INTO ingestion_mode_switch_state(ingestion_mode, phase, paused) VALUES ('{gdc_data_ingestion_mode_string_literal}', 'completed', 0); GO """.format(gdc_service_user_identifier=gdc_service_user.replace("]", "]]"), gdc_service_user_string_literal=gdc_service_user.replace("'", "''"), gdc_service_password_string_literal=gdc_service_password.replace("'", "''"), jgraph_user_identifier=jgraph_user.replace("]", "]]"), jgraph_user_string_literal=jgraph_user.replace("'", "''"), jgraph_user_password_escaped=jgraph_user_password.replace("'", "''"), gdc_data_ingestion_mode_string_literal=gdc_data_ingestion_mode) else: return """ IF NOT EXISTS(SELECT uid FROM DBO.SYSUSERS Where Name = '{gdc_service_user_string_literal}') BEGIN CREATE USER [{gdc_service_user_identifier}] FROM EXTERNAL PROVIDER; ALTER ROLE db_datareader ADD MEMBER [{gdc_service_user_identifier}]; ALTER ROLE db_datawriter ADD MEMBER [{gdc_service_user_identifier}]; ALTER ROLE db_ddladmin ADD MEMBER [{gdc_service_user_identifier}]; END GO IF NOT EXISTS(SELECT uid FROM DBO.SYSUSERS Where Name = '{jgraph_user_string_literal}') BEGIN CREATE USER [{jgraph_user_identifier}] FROM EXTERNAL PROVIDER; ALTER ROLE db_datareader ADD MEMBER [{jgraph_user_identifier}]; ALTER ROLE db_datawriter ADD MEMBER [{jgraph_user_identifier}]; END GO GRANT EXECUTE ON OBJECT::[dbo].[find_recommended_employees] TO [{jgraph_user_identifier}]; GO INSERT INTO ingestion_mode_switch_state(ingestion_mode, phase, paused) VALUES ('{gdc_data_ingestion_mode_string_literal}', 'completed', 0); GO """.format(gdc_service_user_identifier=gdc_service_user.replace("]", "]]"), gdc_service_user_string_literal=gdc_service_user.replace("'", "''"), jgraph_user_identifier=jgraph_user.replace("]", "]]"), jgraph_user_string_literal=jgraph_user.replace("'", "''"), gdc_data_ingestion_mode_string_literal=gdc_data_ingestion_mode)
54.821429
121
0.673746
import datetime import os import re from os.path import join, dirname from skills_finder_utils import ad_ops, blob_ops from skills_finder_utils import arm_ops from config import InstallConfiguration def create_gdc_deployer_identity(install_config: InstallConfiguration, resource_group: str): admin_ad_group_name = install_config.gdc_admin_ad_group['ad_group_name'] gdc_deployer = ad_ops.add_gdc_deployer_identity(resource_group=resource_group, gdc_admin_ad_group=admin_ad_group_name, identity_name="gdc-deployer") install_config.gdc_deployer_identity = { "principalId": gdc_deployer["principalId"], "id": gdc_deployer["id"], "name": gdc_deployer["name"] } def initialize_db_schema_via_arm(install_config: InstallConfiguration, template_base_uri: str, storage_sas: str, rs_group_name: str, custom_init_file: str): if not install_config.gdc_deployer_identity: create_gdc_deployer_identity(install_config=install_config, resource_group=rs_group_name) if custom_init_file: matcher = re.search("https:\/\/(\w+)\..*", template_base_uri, re.IGNORECASE) deployment_storage = matcher.group(1) blob_ops.copy_file(source_path=custom_init_file, resource_group=rs_group_name, runtime_storage=deployment_storage, dest_container_name="gdc-artifacts", dest_path="sql-server/custom-init.sql") else: print("No custom-init.sql provided") template_uri = template_base_uri + "sql-server/init_sql_schema.json" + "?" + storage_sas json_params = arm_ops.create_sql_init_arm_parameters(install_config=install_config, base_uri=template_base_uri, sas_token=storage_sas) arm_params_json_file = os.path.join(install_config.get_gdc_dir(), "gdc_sql_arm_params.json") with open(arm_params_json_file, "w") as param_file: param_file.write(json_params) print("Validating SQL schema ARM configuration ...") arm_ops.validate_templates(template_uri=template_uri, param_file_path=arm_params_json_file, resource_group=rs_group_name) print("Deploying SQL schema using managed identity %s " % str(install_config.gdc_deployer_identity.get('id'))) arm_ops.deploy_arm_template(template_uri=template_uri, param_file_path=arm_params_json_file, resource_group=rs_group_name) def initialize_db_schema_manually(install_config: InstallConfiguration): return True def resolve_custom_sql_runtime_path(): project_root = dirname(dirname(dirname(__file__))) sql_server_tmpl_dir = join(project_root, "sql-server") custom_init_file = os.path.join(sql_server_tmpl_dir, "custom-init.sql") return custom_init_file def generate_custom_init_sql(install_config): # In AzureSql identifiers can be quoted with [] or "" # In identifiers quoted with [], the [ character does not require escaping, however, ] must be escaped by doubling it # Only users that are mapped to Windows principals can contain the backslash character (\). 
# In AzureSql strings are quoted with ', can contain any special character and escaping is only needed for ' via '' # Therefore, below we shall handle such special characters in user names and passwords, depending on quotation gdc_service_user = install_config.gdc_service_principal['name'] jgraph_user = install_config.appservice_name gdc_data_ingestion_mode = install_config.gdc_data_ingestion_mode if install_config.sql_auth: gdc_service_password = install_config.gdc_service_db_user_password jgraph_user_password = install_config.jgraph_db_user_password return """ USE [gdc_database] GO IF NOT EXISTS(SELECT uid FROM DBO.SYSUSERS Where Name = '{gdc_service_user_string_literal}') BEGIN CREATE USER [{gdc_service_user_identifier}] WITH PASSWORD = '{gdc_service_password_string_literal}'; ALTER ROLE db_datareader ADD MEMBER [{gdc_service_user_identifier}]; ALTER ROLE db_datawriter ADD MEMBER [{gdc_service_user_identifier}]; ALTER ROLE db_ddladmin ADD MEMBER [{gdc_service_user_identifier}]; END GO IF NOT EXISTS(SELECT uid FROM DBO.SYSUSERS Where Name = '{jgraph_user_string_literal}') BEGIN CREATE USER [{jgraph_user_identifier}] WITH PASSWORD = '{jgraph_user_password_escaped}'; ALTER ROLE db_datareader ADD MEMBER [{jgraph_user_identifier}]; ALTER ROLE db_datawriter ADD MEMBER [{jgraph_user_identifier}]; END GO GRANT EXECUTE ON OBJECT::[dbo].[find_recommended_employees] TO [{jgraph_user_identifier}]; GO INSERT INTO ingestion_mode_switch_state(ingestion_mode, phase, paused) VALUES ('{gdc_data_ingestion_mode_string_literal}', 'completed', 0); GO """.format(gdc_service_user_identifier=gdc_service_user.replace("]", "]]"), gdc_service_user_string_literal=gdc_service_user.replace("'", "''"), gdc_service_password_string_literal=gdc_service_password.replace("'", "''"), jgraph_user_identifier=jgraph_user.replace("]", "]]"), jgraph_user_string_literal=jgraph_user.replace("'", "''"), jgraph_user_password_escaped=jgraph_user_password.replace("'", "''"), gdc_data_ingestion_mode_string_literal=gdc_data_ingestion_mode) else: return """ IF NOT EXISTS(SELECT uid FROM DBO.SYSUSERS Where Name = '{gdc_service_user_string_literal}') BEGIN CREATE USER [{gdc_service_user_identifier}] FROM EXTERNAL PROVIDER; ALTER ROLE db_datareader ADD MEMBER [{gdc_service_user_identifier}]; ALTER ROLE db_datawriter ADD MEMBER [{gdc_service_user_identifier}]; ALTER ROLE db_ddladmin ADD MEMBER [{gdc_service_user_identifier}]; END GO IF NOT EXISTS(SELECT uid FROM DBO.SYSUSERS Where Name = '{jgraph_user_string_literal}') BEGIN CREATE USER [{jgraph_user_identifier}] FROM EXTERNAL PROVIDER; ALTER ROLE db_datareader ADD MEMBER [{jgraph_user_identifier}]; ALTER ROLE db_datawriter ADD MEMBER [{jgraph_user_identifier}]; END GO GRANT EXECUTE ON OBJECT::[dbo].[find_recommended_employees] TO [{jgraph_user_identifier}]; GO INSERT INTO ingestion_mode_switch_state(ingestion_mode, phase, paused) VALUES ('{gdc_data_ingestion_mode_string_literal}', 'completed', 0); GO """.format(gdc_service_user_identifier=gdc_service_user.replace("]", "]]"), gdc_service_user_string_literal=gdc_service_user.replace("'", "''"), jgraph_user_identifier=jgraph_user.replace("]", "]]"), jgraph_user_string_literal=jgraph_user.replace("'", "''"), gdc_data_ingestion_mode_string_literal=gdc_data_ingestion_mode)
true
true
1c4362b3e15b08f17949d9f9d632bb7a3c1b0d2e
681
py
Python
day01/p1.py
pwicks86/adventofcode2016
331523e45095962f5e4614bdca15cc35e3243fdb
[ "MIT" ]
null
null
null
day01/p1.py
pwicks86/adventofcode2016
331523e45095962f5e4614bdca15cc35e3243fdb
[ "MIT" ]
null
null
null
day01/p1.py
pwicks86/adventofcode2016
331523e45095962f5e4614bdca15cc35e3243fdb
[ "MIT" ]
null
null
null
f = open('input.txt') steps = f.read().strip().split(",") steps = [s.strip() for s in steps] cur_x = 0 cur_y = 0 facing = "N" for step in steps: blocks = int(step[1:]) lr = step[0] if facing == "N": facing = "W" if lr == "L" else "E" elif facing == "S": facing = "E" if lr == "L" else "W" elif facing == "E": facing = "N" if lr == "L" else "S" elif facing == "W": facing = "S" if lr == "L" else "N" if facing == "N": cur_y += blocks elif facing == "S": cur_y -= blocks elif facing == "E": cur_x += blocks elif facing == "W": cur_x -= blocks print(abs(cur_x) + abs(cur_y))
21.28125
42
0.478708
f = open('input.txt') steps = f.read().strip().split(",") steps = [s.strip() for s in steps] cur_x = 0 cur_y = 0 facing = "N" for step in steps: blocks = int(step[1:]) lr = step[0] if facing == "N": facing = "W" if lr == "L" else "E" elif facing == "S": facing = "E" if lr == "L" else "W" elif facing == "E": facing = "N" if lr == "L" else "S" elif facing == "W": facing = "S" if lr == "L" else "N" if facing == "N": cur_y += blocks elif facing == "S": cur_y -= blocks elif facing == "E": cur_x += blocks elif facing == "W": cur_x -= blocks print(abs(cur_x) + abs(cur_y))
true
true
1c436417afdedf6574bf5c1a0a23938183ec0be9
3,110
py
Python
dataworkspace/dataworkspace/apps/eventlog/admin.py
uktrade/jupyterhub-data-auth-admin
91544f376209a201531f4dbfb8faad1b8ada18c9
[ "MIT" ]
1
2019-06-10T08:22:56.000Z
2019-06-10T08:22:56.000Z
dataworkspace/dataworkspace/apps/eventlog/admin.py
uktrade/jupyterhub-data-auth-admin
91544f376209a201531f4dbfb8faad1b8ada18c9
[ "MIT" ]
2
2019-05-17T13:10:42.000Z
2019-06-17T10:48:46.000Z
dataworkspace/dataworkspace/apps/eventlog/admin.py
uktrade/jupyterhub-data-auth-admin
91544f376209a201531f4dbfb8faad1b8ada18c9
[ "MIT" ]
null
null
null
import csv import json from datetime import datetime from django.contrib import admin from django.contrib.admin.templatetags.admin_urls import admin_urlname from django.http import HttpResponse from django.urls import reverse, NoReverseMatch from django.utils.html import format_html from dataworkspace.apps.eventlog.models import EventLog @admin.register(EventLog) class EventLogAdmin(admin.ModelAdmin): list_display = ("timestamp", "user_link", "event_type", "related_object_link") list_filter = ("event_type",) list_display_links = ["timestamp"] fields = ( "timestamp", "user_link", "event_type", "related_object_link", "event_data", ) search_fields = ("user__email", "user__first_name", "user__last_name") actions = ["export_events"] list_per_page = 50 def user_link(self, obj): return format_html( '<a href="{}">{}</a>'.format( reverse("admin:auth_user_change", args=(obj.user.id,)), obj.user.get_full_name(), ) ) user_link.short_description = "User" def related_object_link(self, obj): if obj.related_object is None: return None try: url = reverse( admin_urlname(obj.related_object._meta, "change"), args=(obj.related_object.id,), ) except NoReverseMatch: return obj.related_object return format_html('<a href="{}">{}</a>'.format(url, obj.related_object)) related_object_link.short_description = "Related Object" def event_data(self, obj): return format_html("<pre>{0}</pre>", json.dumps(obj.extra, indent=2)) def get_actions(self, request): # Disable bulk delete actions = super().get_actions(request) if "delete_selected" in actions: del actions["delete_selected"] return actions def has_add_permission(self, request): return False def has_change_permission(self, request, obj=None): return False def has_delete_permission(self, request, obj=None): return False def export_events(self, request, queryset): field_names = ["timestamp", "user", "event_type", "related_object", "extra"] response = HttpResponse(content_type="text/csv") response["Content-Disposition"] = "attachment; filename=event-log-{}.csv".format( datetime.now().strftime("%Y-%m-%d-%H-%M-%S") ) writer = csv.DictWriter(response, field_names, quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() for eventlog in queryset: writer.writerow( { "timestamp": eventlog.timestamp, "user": eventlog.user.get_full_name(), "event_type": eventlog.get_event_type_display(), "related_object": eventlog.related_object, "extra": json.dumps(eventlog.extra), } ) return response export_events.short_description = "Export Selected"
32.395833
89
0.619614
import csv import json from datetime import datetime from django.contrib import admin from django.contrib.admin.templatetags.admin_urls import admin_urlname from django.http import HttpResponse from django.urls import reverse, NoReverseMatch from django.utils.html import format_html from dataworkspace.apps.eventlog.models import EventLog @admin.register(EventLog) class EventLogAdmin(admin.ModelAdmin): list_display = ("timestamp", "user_link", "event_type", "related_object_link") list_filter = ("event_type",) list_display_links = ["timestamp"] fields = ( "timestamp", "user_link", "event_type", "related_object_link", "event_data", ) search_fields = ("user__email", "user__first_name", "user__last_name") actions = ["export_events"] list_per_page = 50 def user_link(self, obj): return format_html( '<a href="{}">{}</a>'.format( reverse("admin:auth_user_change", args=(obj.user.id,)), obj.user.get_full_name(), ) ) user_link.short_description = "User" def related_object_link(self, obj): if obj.related_object is None: return None try: url = reverse( admin_urlname(obj.related_object._meta, "change"), args=(obj.related_object.id,), ) except NoReverseMatch: return obj.related_object return format_html('<a href="{}">{}</a>'.format(url, obj.related_object)) related_object_link.short_description = "Related Object" def event_data(self, obj): return format_html("<pre>{0}</pre>", json.dumps(obj.extra, indent=2)) def get_actions(self, request): actions = super().get_actions(request) if "delete_selected" in actions: del actions["delete_selected"] return actions def has_add_permission(self, request): return False def has_change_permission(self, request, obj=None): return False def has_delete_permission(self, request, obj=None): return False def export_events(self, request, queryset): field_names = ["timestamp", "user", "event_type", "related_object", "extra"] response = HttpResponse(content_type="text/csv") response["Content-Disposition"] = "attachment; filename=event-log-{}.csv".format( datetime.now().strftime("%Y-%m-%d-%H-%M-%S") ) writer = csv.DictWriter(response, field_names, quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() for eventlog in queryset: writer.writerow( { "timestamp": eventlog.timestamp, "user": eventlog.user.get_full_name(), "event_type": eventlog.get_event_type_display(), "related_object": eventlog.related_object, "extra": json.dumps(eventlog.extra), } ) return response export_events.short_description = "Export Selected"
true
true
1c436436f2326798aaf205de74c074ae0cbd9838
5,245
py
Python
policies.py
kyuhyoung/grasping-invisible
2aaaeb9e28995628ec038a79496453be9f26ffff
[ "BSD-2-Clause" ]
29
2020-01-30T00:10:59.000Z
2022-03-26T21:25:25.000Z
policies.py
kyuhyoung/grasping-invisible
2aaaeb9e28995628ec038a79496453be9f26ffff
[ "BSD-2-Clause" ]
13
2020-04-28T09:38:58.000Z
2022-03-12T00:15:46.000Z
policies.py
kyuhyoung/grasping-invisible
2aaaeb9e28995628ec038a79496453be9f26ffff
[ "BSD-2-Clause" ]
9
2020-09-15T10:34:31.000Z
2021-06-16T20:51:04.000Z
import os import random from collections import deque from collections import namedtuple import torch.nn as nn import torch.nn.functional as F import numpy as np import torch from scipy import signal from scipy.ndimage.interpolation import shift Transition = namedtuple('Transition', ('inputs', 'labels')) class ReplayMemory(object): def __init__(self, capacity): self.capacity = capacity self.memory = [] self.position = 0 def push(self, *args): """Saves a transition.""" if len(self.memory) < self.capacity: self.memory.append(None) self.memory[self.position] = Transition(*args) self.position = (self.position + 1) % self.capacity def get_data(self): transitions = self.memory data = Transition(*zip(*transitions)) return data.inputs, data.labels def sample(self, batch_size): return random.sample(self.memory, batch_size) def __len__(self): return len(self.memory) class MLP(nn.Module): def __init__(self, in_dim): super(MLP, self).__init__() self.in_dim = in_dim self.bn1 = nn.BatchNorm1d(in_dim) self.fc1 = nn.Linear(in_dim, 16) self.bn2 = nn.BatchNorm1d(16) self.fc2 = nn.Linear(16, 8) self.bn3 = nn.BatchNorm1d(8) self.fc3 = nn.Linear(8, 2) def forward(self, x): x = F.relu(self.fc1(self.bn1(x))) x = F.relu(self.fc2(self.bn2(x))) x = self.fc3(self.bn3(x)) return x def init_weights(m): if type(m) == nn.Linear: torch.nn.init.xavier_uniform_(m.weight) m.bias.data.fill_(0.01) class Coordinator(object): def __init__(self, save_dir, ckpt_file, feat_size=5, buffer_size=200, batch_size=20): self.save_dir = save_dir self.buffer_size = buffer_size self.batch_size = batch_size self.memory = ReplayMemory(buffer_size) self.net = MLP(in_dim=feat_size) self.net.apply(init_weights) if ckpt_file is not None: self.load_networks(ckpt_file) print('Pre-trained coordinator model loaded from: %s' % ckpt_file) self.optimizer = torch.optim.SGD(self.net.parameters(), lr=0.001, momentum=0.9) self.criterion = nn.CrossEntropyLoss() def optimize_model(self): if len(self.memory) < self.batch_size: return None, None transitions = self.memory.sample(self.batch_size) batch = Transition(*zip(*transitions)) self.net.train() self.optimizer.zero_grad() outputs = self.net(torch.tensor(batch.inputs, dtype=torch.float).float()) labels = torch.tensor(batch.labels).float().flatten().long() loss = self.criterion(outputs, labels) loss.backward() self.optimizer.step() lc = loss.cpu().detach().numpy() acc = self.get_accuracy() print('Coordinator training loss %f, acc %f' % (lc, acc)) return lc, acc def predict(self, X): self.net.eval() net_outputs = self.net(torch.tensor(X, dtype=torch.float).view(-1, self.net.in_dim)) return np.argmax(net_outputs.view(-1,2).cpu().detach().numpy(), axis=1) def get_accuracy(self): X_val, y_val = self.memory.get_data() y_pre = self.predict(X_val) accuracy = np.array(y_val == y_pre).mean() return accuracy def save_networks(self, which_epoch): save_filename = 'coordinator-%06d.pth' % which_epoch save_path = os.path.join(self.save_dir, save_filename) torch.save(self.net.cpu().state_dict(), save_path) def load_networks(self, load_path): self.net.load_state_dict(torch.load(load_path)) def gkern(kernlen, std): """Returns a 2D Gaussian kernel array.""" gkern1d = signal.gaussian(kernlen, std=std).reshape(kernlen, 1) gkern2d = np.outer(gkern1d, gkern1d) return gkern2d class Explorer(object): def __init__(self, map_size, buffer_size=3, prob_scaled=0.75, std=25): # assume the map is square, so map_size is a scalar # prob_scaled and std is the fine-tuned parameters for our workspace # compute basic kernel gkernel = 
gkern(kernlen=map_size, std=std) self.kcenter = np.array(np.unravel_index(np.argmax(gkernel), gkernel.shape)) ad_gkernel = (1 - prob_scaled) * gkernel / (np.max(gkernel)) self.bkernel = 1 - ad_gkernel self.ones_kernel = np.ones((map_size, map_size)) # kernel buffer self.kbuffer = deque([], buffer_size) self.kbuffer.append(self.ones_kernel) def get_kernel(self, center): bkernel_shifted = shift(self.bkernel, np.array(center).reshape(-1)-self.kcenter, cval=1.0) return bkernel_shifted def update(self, prev_act_pos): prev_kernel = self.get_kernel(prev_act_pos) self.kbuffer.append(self.ones_kernel) for i in range(len(self.kbuffer)): self.kbuffer[i] = np.multiply(self.kbuffer[i], prev_kernel) def reset(self): self.kbuffer.clear() self.kbuffer.append(self.bkernel) def get_action_maps(self, prior): post = np.multiply(prior, self.kbuffer[0]) return post / np.max(post)
31.787879
98
0.638322
import os import random from collections import deque from collections import namedtuple import torch.nn as nn import torch.nn.functional as F import numpy as np import torch from scipy import signal from scipy.ndimage.interpolation import shift Transition = namedtuple('Transition', ('inputs', 'labels')) class ReplayMemory(object): def __init__(self, capacity): self.capacity = capacity self.memory = [] self.position = 0 def push(self, *args): if len(self.memory) < self.capacity: self.memory.append(None) self.memory[self.position] = Transition(*args) self.position = (self.position + 1) % self.capacity def get_data(self): transitions = self.memory data = Transition(*zip(*transitions)) return data.inputs, data.labels def sample(self, batch_size): return random.sample(self.memory, batch_size) def __len__(self): return len(self.memory) class MLP(nn.Module): def __init__(self, in_dim): super(MLP, self).__init__() self.in_dim = in_dim self.bn1 = nn.BatchNorm1d(in_dim) self.fc1 = nn.Linear(in_dim, 16) self.bn2 = nn.BatchNorm1d(16) self.fc2 = nn.Linear(16, 8) self.bn3 = nn.BatchNorm1d(8) self.fc3 = nn.Linear(8, 2) def forward(self, x): x = F.relu(self.fc1(self.bn1(x))) x = F.relu(self.fc2(self.bn2(x))) x = self.fc3(self.bn3(x)) return x def init_weights(m): if type(m) == nn.Linear: torch.nn.init.xavier_uniform_(m.weight) m.bias.data.fill_(0.01) class Coordinator(object): def __init__(self, save_dir, ckpt_file, feat_size=5, buffer_size=200, batch_size=20): self.save_dir = save_dir self.buffer_size = buffer_size self.batch_size = batch_size self.memory = ReplayMemory(buffer_size) self.net = MLP(in_dim=feat_size) self.net.apply(init_weights) if ckpt_file is not None: self.load_networks(ckpt_file) print('Pre-trained coordinator model loaded from: %s' % ckpt_file) self.optimizer = torch.optim.SGD(self.net.parameters(), lr=0.001, momentum=0.9) self.criterion = nn.CrossEntropyLoss() def optimize_model(self): if len(self.memory) < self.batch_size: return None, None transitions = self.memory.sample(self.batch_size) batch = Transition(*zip(*transitions)) self.net.train() self.optimizer.zero_grad() outputs = self.net(torch.tensor(batch.inputs, dtype=torch.float).float()) labels = torch.tensor(batch.labels).float().flatten().long() loss = self.criterion(outputs, labels) loss.backward() self.optimizer.step() lc = loss.cpu().detach().numpy() acc = self.get_accuracy() print('Coordinator training loss %f, acc %f' % (lc, acc)) return lc, acc def predict(self, X): self.net.eval() net_outputs = self.net(torch.tensor(X, dtype=torch.float).view(-1, self.net.in_dim)) return np.argmax(net_outputs.view(-1,2).cpu().detach().numpy(), axis=1) def get_accuracy(self): X_val, y_val = self.memory.get_data() y_pre = self.predict(X_val) accuracy = np.array(y_val == y_pre).mean() return accuracy def save_networks(self, which_epoch): save_filename = 'coordinator-%06d.pth' % which_epoch save_path = os.path.join(self.save_dir, save_filename) torch.save(self.net.cpu().state_dict(), save_path) def load_networks(self, load_path): self.net.load_state_dict(torch.load(load_path)) def gkern(kernlen, std): gkern1d = signal.gaussian(kernlen, std=std).reshape(kernlen, 1) gkern2d = np.outer(gkern1d, gkern1d) return gkern2d class Explorer(object): def __init__(self, map_size, buffer_size=3, prob_scaled=0.75, std=25): gkernel = gkern(kernlen=map_size, std=std) self.kcenter = np.array(np.unravel_index(np.argmax(gkernel), gkernel.shape)) ad_gkernel = (1 - prob_scaled) * gkernel / (np.max(gkernel)) self.bkernel = 1 - ad_gkernel self.ones_kernel = 
np.ones((map_size, map_size)) self.kbuffer = deque([], buffer_size) self.kbuffer.append(self.ones_kernel) def get_kernel(self, center): bkernel_shifted = shift(self.bkernel, np.array(center).reshape(-1)-self.kcenter, cval=1.0) return bkernel_shifted def update(self, prev_act_pos): prev_kernel = self.get_kernel(prev_act_pos) self.kbuffer.append(self.ones_kernel) for i in range(len(self.kbuffer)): self.kbuffer[i] = np.multiply(self.kbuffer[i], prev_kernel) def reset(self): self.kbuffer.clear() self.kbuffer.append(self.bkernel) def get_action_maps(self, prior): post = np.multiply(prior, self.kbuffer[0]) return post / np.max(post)
true
true
1c436493685d5365b2b9572faad7ced80b6cdd9a
2,794
py
Python
selfdrive/car/mock/interface.py
u8511049/dragonpilot
cc0d4c938dc2aff089a0c15e0532de90ff342659
[ "MIT" ]
1
2020-08-04T14:19:30.000Z
2020-08-04T14:19:30.000Z
selfdrive/car/mock/interface.py
u8511049/dragonpilot
cc0d4c938dc2aff089a0c15e0532de90ff342659
[ "MIT" ]
1
2021-09-29T22:44:24.000Z
2021-09-29T22:44:24.000Z
selfdrive/car/mock/interface.py
u8511049/dragonpilot
cc0d4c938dc2aff089a0c15e0532de90ff342659
[ "MIT" ]
2
2020-03-10T19:11:34.000Z
2020-08-04T14:19:30.000Z
#!/usr/bin/env python3 from cereal import car from selfdrive.config import Conversions as CV from selfdrive.swaglog import cloudlog import cereal.messaging as messaging from selfdrive.car import gen_empty_fingerprint from selfdrive.car.interfaces import CarInterfaceBase # mocked car interface to work with chffrplus TS = 0.01 # 100Hz YAW_FR = 0.2 # ~0.8s time constant on yaw rate filter # low pass gain LPG = 2 * 3.1415 * YAW_FR * TS / (1 + 2 * 3.1415 * YAW_FR * TS) class CarInterface(CarInterfaceBase): def __init__(self, CP, CarController, CarState): self.CP = CP self.CC = CarController cloudlog.debug("Using Mock Car Interface") # TODO: subscribe to phone sensor self.sensor = messaging.sub_sock('sensorEvents') self.gps = messaging.sub_sock('gpsLocation') self.speed = 0. self.prev_speed = 0. self.yaw_rate = 0. self.yaw_rate_meas = 0. @staticmethod def compute_gb(accel, speed): return accel @staticmethod def get_params(candidate, fingerprint=gen_empty_fingerprint(), has_relay=False, car_fw=[]): ret = CarInterfaceBase.get_std_params(candidate, fingerprint, has_relay) ret.carName = "mock" ret.safetyModel = car.CarParams.SafetyModel.noOutput ret.mass = 1700. ret.rotationalInertia = 2500. ret.wheelbase = 2.70 ret.centerToFront = ret.wheelbase * 0.5 ret.steerRatio = 13. # reasonable ret.tireStiffnessFront = 1e6 # very stiff to neglect slip ret.tireStiffnessRear = 1e6 # very stiff to neglect slip return ret # returns a car.CarState def update(self, c, can_strings): self.dp_load_params('mock') # get basic data from phone and gps since CAN isn't connected sensors = messaging.recv_sock(self.sensor) if sensors is not None: for sensor in sensors.sensorEvents: if sensor.type == 4: # gyro self.yaw_rate_meas = -sensor.gyro.v[0] gps = messaging.recv_sock(self.gps) if gps is not None: self.prev_speed = self.speed self.speed = gps.gpsLocation.speed # create message ret = car.CarState.new_message() # speeds ret.vEgo = self.speed ret.vEgoRaw = self.speed a = self.speed - self.prev_speed ret.aEgo = a ret.brakePressed = a < -0.5 ret.standstill = self.speed < 0.01 ret.wheelSpeeds.fl = self.speed ret.wheelSpeeds.fr = self.speed ret.wheelSpeeds.rl = self.speed ret.wheelSpeeds.rr = self.speed self.yawRate = LPG * self.yaw_rate_meas + (1. - LPG) * self.yaw_rate curvature = self.yaw_rate / max(self.speed, 1.) ret.steeringAngle = curvature * self.CP.steerRatio * self.CP.wheelbase * CV.RAD_TO_DEG events = [] ret.events = events return ret.as_reader() def apply(self, c): # in mock no carcontrols return []
29.410526
93
0.687903
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.car import gen_empty_fingerprint
from selfdrive.car.interfaces import CarInterfaceBase

TS = 0.01
YAW_FR = 0.2

LPG = 2 * 3.1415 * YAW_FR * TS / (1 + 2 * 3.1415 * YAW_FR * TS)


class CarInterface(CarInterfaceBase):
  def __init__(self, CP, CarController, CarState):
    self.CP = CP
    self.CC = CarController

    cloudlog.debug("Using Mock Car Interface")

    self.sensor = messaging.sub_sock('sensorEvents')
    self.gps = messaging.sub_sock('gpsLocation')

    self.speed = 0.
    self.prev_speed = 0.
    self.yaw_rate = 0.
    self.yaw_rate_meas = 0.

  @staticmethod
  def compute_gb(accel, speed):
    return accel

  @staticmethod
  def get_params(candidate, fingerprint=gen_empty_fingerprint(), has_relay=False, car_fw=[]):
    ret = CarInterfaceBase.get_std_params(candidate, fingerprint, has_relay)
    ret.carName = "mock"
    ret.safetyModel = car.CarParams.SafetyModel.noOutput
    ret.mass = 1700.
    ret.rotationalInertia = 2500.
    ret.wheelbase = 2.70
    ret.centerToFront = ret.wheelbase * 0.5
    ret.steerRatio = 13.
    ret.tireStiffnessFront = 1e6
    ret.tireStiffnessRear = 1e6
    return ret

  def update(self, c, can_strings):
    self.dp_load_params('mock')
    sensors = messaging.recv_sock(self.sensor)
    if sensors is not None:
      for sensor in sensors.sensorEvents:
        if sensor.type == 4:  # gyro
          self.yaw_rate_meas = -sensor.gyro.v[0]

    gps = messaging.recv_sock(self.gps)
    if gps is not None:
      self.prev_speed = self.speed
      self.speed = gps.gpsLocation.speed

    # create message
    ret = car.CarState.new_message()

    # speeds
    ret.vEgo = self.speed
    ret.vEgoRaw = self.speed
    a = self.speed - self.prev_speed
    ret.aEgo = a
    ret.brakePressed = a < -0.5
    ret.standstill = self.speed < 0.01
    ret.wheelSpeeds.fl = self.speed
    ret.wheelSpeeds.fr = self.speed
    ret.wheelSpeeds.rl = self.speed
    ret.wheelSpeeds.rr = self.speed

    self.yawRate = LPG * self.yaw_rate_meas + (1. - LPG) * self.yaw_rate
    curvature = self.yaw_rate / max(self.speed, 1.)
    ret.steeringAngle = curvature * self.CP.steerRatio * self.CP.wheelbase * CV.RAD_TO_DEG

    events = []
    ret.events = events

    return ret.as_reader()

  def apply(self, c):
    # in mock no carcontrols
    return []
true
true
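The mock interface above smooths the gyro-derived yaw rate with a first-order low-pass filter whose gain is LPG. A short self-contained sketch of that update rule follows; the measurement sequence is made up for illustration.

TS = 0.01                                  # 100 Hz sample time, as in the record above
YAW_FR = 0.2                               # filter cutoff in Hz
LPG = 2 * 3.1415 * YAW_FR * TS / (1 + 2 * 3.1415 * YAW_FR * TS)

yaw_rate = 0.0
for yaw_rate_meas in (0.0, 0.5, 0.45, 0.55, 0.5, 0.5):   # hypothetical gyro readings
    # blend the new measurement with the previous estimate, as update() does
    yaw_rate = LPG * yaw_rate_meas + (1.0 - LPG) * yaw_rate
    print(round(yaw_rate, 5))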
1c4365c916fd11ebbac04860062e52fb7cd37f81
765
py
Python
from_3b1b/on_hold/eop/chapter1/think_about_coin.py
sanjaydatasciencedojo/manim
603a1a21dbb5eca325ed670f46ea72401a8edf1d
[ "MIT" ]
null
null
null
from_3b1b/on_hold/eop/chapter1/think_about_coin.py
sanjaydatasciencedojo/manim
603a1a21dbb5eca325ed670f46ea72401a8edf1d
[ "MIT" ]
null
null
null
from_3b1b/on_hold/eop/chapter1/think_about_coin.py
sanjaydatasciencedojo/manim
603a1a21dbb5eca325ed670f46ea72401a8edf1d
[ "MIT" ]
null
null
null
from active_projects.eop.reusable_imports import *
from manimlib.imports import *


class RandyThinksAboutCoin(PiCreatureScene):

    def construct(self):
        randy = self.get_primary_pi_creature()
        randy.center()
        self.add(randy)
        self.wait()

        h_or_t = BinaryOption(UprightHeads().scale(3), UprightTails().scale(3),
            text_scale = 1.5)
        self.think(h_or_t, direction = LEFT)

        v = 0.3
        self.play(
            h_or_t[0].shift,v*UP,
            h_or_t[2].shift,v*DOWN,
        )
        self.play(
            h_or_t[0].shift,2*v*DOWN,
            h_or_t[2].shift,2*v*UP,
        )
        self.play(
            h_or_t[0].shift,v*UP,
            h_or_t[2].shift,v*DOWN,
        )
        self.wait()
23.181818
79
0.542484
from active_projects.eop.reusable_imports import *
from manimlib.imports import *


class RandyThinksAboutCoin(PiCreatureScene):

    def construct(self):
        randy = self.get_primary_pi_creature()
        randy.center()
        self.add(randy)
        self.wait()

        h_or_t = BinaryOption(UprightHeads().scale(3), UprightTails().scale(3),
            text_scale = 1.5)
        self.think(h_or_t, direction = LEFT)

        v = 0.3
        self.play(
            h_or_t[0].shift,v*UP,
            h_or_t[2].shift,v*DOWN,
        )
        self.play(
            h_or_t[0].shift,2*v*DOWN,
            h_or_t[2].shift,2*v*UP,
        )
        self.play(
            h_or_t[0].shift,v*UP,
            h_or_t[2].shift,v*DOWN,
        )
        self.wait()
true
true
1c436727ce1a44e7f9cbc3035c82def6d153f509
37,345
py
Python
nexus/lib/generic.py
eugeneswalker/qmcpack
352ff27f163bb92e0c232c48bec8ae7951ed9d8c
[ "NCSA" ]
null
null
null
nexus/lib/generic.py
eugeneswalker/qmcpack
352ff27f163bb92e0c232c48bec8ae7951ed9d8c
[ "NCSA" ]
11
2020-05-09T20:57:21.000Z
2020-06-10T00:00:17.000Z
nexus/lib/generic.py
williamfgc/qmcpack
732b473841e7823a21ab55ff397eed059f0f2e96
[ "NCSA" ]
null
null
null
################################################################## ## (c) Copyright 2015- by Jaron T. Krogel ## ################################################################## #====================================================================# # generic.py # # Base class for all Nexus classes (obj). Support for hidden # # data UI (hidden). # # # # Content summary: # # obj # # Base class for all Nexus classes. # # Inherits from AllAbilities and wraps all functions for UI. # # Also basic working object/class for generic use. # # Can function like a standard dict, also mixes in parts of # # the list interface. # # # # generic # # More efficient implementation of AllAbilities+obj interface. # # Intended to allow for method namespace infringement without # # loss of access to functionality. # # Limited use so far. # # # # hidden # # Like generic, but allows for hidden storage. # # Can be used, e.g., to make an ordered object class. # # See use in qmcpack_input.py. # # # #====================================================================# import sys import traceback from copy import deepcopy import pickle from random import randint from utilities import sorted_py2 class generic_settings: devlog = sys.stdout raise_error = False #end class generic_settings class NexusError(Exception): None #end class NexusError exit_call = sys.exit def nocopy(value): return value #end def nocopy sorted_generic = sorted_py2 def log(*items,**kwargs): indent = None logfile = generic_settings.devlog if len(kwargs)>0: indent = kwargs.pop('indent' ,None ) logfile = kwargs.pop('logfile',logfile) n = kwargs.pop('n',0) if n!=0: if indent is None: indent = n*' ' else: indent = n*indent #end if #end if if len(kwargs)>0: valid = 'indent logfile n'.split() error('Invalid keyword arguments provided.\nInvalid keywords: {0}\nValid options are: {1}'.format(sorted(kwargs.keys()),valid),'log') #end if #end if if len(items)==1 and isinstance(items[0],str): s = items[0] else: s='' for item in items: s+=str(item)+' ' #end for #end if if len(s)>0: if isinstance(indent,str): s=indent+s.replace('\n','\n'+indent) #end if s += '\n' #end if logfile.write(s) #end def log def message(msg,header=None,post_header=' message:',indent=' ',logfile=None): if logfile is None: logfile = generic_settings.devlog #end if if header is None: header = post_header.lstrip() else: header += post_header #end if log('\n '+header,logfile=logfile) log(msg.rstrip(),indent=indent,logfile=logfile) #end def message def warn(msg,header=None,indent=' ',logfile=None): if logfile is None: logfile = generic_settings.devlog #end if post_header=' warning:' message(msg,header,post_header,indent,logfile) #end def warn def error(msg,header=None,exit=True,trace=True,indent=' ',logfile=None): if generic_settings.raise_error: raise NexusError(msg) #end if if logfile is None: logfile = generic_settings.devlog #end if post_header=' error:' message(msg,header,post_header,indent,logfile) if exit: log(' exiting.\n') if trace: traceback.print_stack() #end if exit_call() #end if #end def error class object_interface(object): _logfile = sys.stdout def __len__(self): return len(self.__dict__) #end def __len__ def __contains__(self,name): return name in self.__dict__ #end def def __getitem__(self,name): return self.__dict__[name] #end def __getitem__ def __setitem__(self,name,value): self.__dict__[name]=value #end def __setitem__ def __delitem__(self,name): del self.__dict__[name] #end def __delitem__ def __iter__(self): for item in self.__dict__: yield self.__dict__[item] #end for 
#end def __iter__ def __repr__(self): s='' for k in sorted_generic(self._keys()): if not isinstance(k,str) or k[0]!='_': v=self.__dict__[k] if hasattr(v,'__class__'): s+=' {0:<20} {1:<20}\n'.format(str(k),v.__class__.__name__) else: s+=' {0:<20} {1:<20}\n'.format(str(k),type(v)) #end if #end if #end for return s #end def __repr__ def __str__(self,nindent=1): pad = ' ' npad = nindent*pad s='' normal = [] qable = [] for k,v in self._items(): if not isinstance(k,str) or k[0]!='_': if isinstance(v,object_interface): qable.append(k) else: normal.append(k) #end if #end if #end for normal = sorted_generic(normal) qable = sorted_generic(qable) indent = npad+18*' ' for k in normal: v = self[k] vstr = str(v).replace('\n','\n'+indent) s+=npad+'{0:<15} = '.format(str(k))+vstr+'\n' #end for for k in qable: v = self[k] s+=npad+str(k)+'\n' s+=v.__str__(nindent+1) if isinstance(k,str): s+=npad+'end '+k+'\n' #end if #end for return s #end def __str__ def __eq__(self,other): if not hasattr(other,'__dict__'): return False #end if eq = True for sname in self.__dict__: if sname not in other.__dict__: return False #end if svar = self.__dict__[sname] ovar = other.__dict__[sname] stype = type(svar) otype = type(ovar) if stype!=otype: return False #end if eqval = svar==ovar if isinstance(eqval,bool): eq &= eqval else: try: # accommodate numpy arrays implicitly eq &= eqval.all() except: return False #end try #end if #end for return eq #end def __eq__ def tree(self,depth=None,all=False,types=False,nindent=1): if depth==nindent-1: return '' #end if pad = ' ' npad = nindent*pad s='' normal = [] qable = [] for k,v in self._items(): if not isinstance(k,str) or k[0]!='_': if isinstance(v,object_interface): qable.append(k) else: normal.append(k) #end if #end if #end for normal.sort() qable.sort() indent = npad+18*' ' if all: for k in normal: v = self[k] if types: s+=npad+'{0:<15} = '.format(k) if hasattr(v,'__class__'): s+='{0:<20}'.format(v.__class__.__name__) else: s+='{0:<20}'.format(type(v)) #end if else: s+=npad+str(k) #end if s+='\n' #end for #end if if all and depth!=nindent: for k in qable: v = self[k] s+=npad+str(k)+'\n' s+=v.tree(depth,all,types,nindent+1) if isinstance(k,str): s+=npad+'end '+k+'\n' #end if #end for else: for k in qable: v = self[k] if types: s+=npad+'{0:<15} = '.format(k) if hasattr(v,'__class__'): s+='{0:<20}'.format(v.__class__.__name__) else: s+='{0:<20}'.format(type(v)) #end if else: s+=npad+str(k) #end if s+='\n' s+=v.tree(depth,all,types,nindent+1) #end for #end if return s #end def tree def data_repr(self,nindent=1,ret_str_keys=False): pad = ' ' npad = nindent*pad normal = [] qable = [] str_keys = True for k,v in self._items(): k_str = isinstance(k,str) str_keys &= k_str if not k_str or k[0]!='_': if isinstance(v,object_interface): qable.append(k) else: normal.append(k) #end if #end if #end for normal = sorted_generic(normal) qable = sorted_generic(qable) if str_keys: nkmax = 0 for k in normal: nkmax = max(nkmax,len(k)) #end for for k in qable: nkmax = max(nkmax,len(k)) #end for k_fmt = '{0:<'+str(nkmax)+'} = ' k_delim = '=' k_func = str else: nkmax = 20 k_fmt = '{0:<20} : ' k_delim = ':' k_func = repr o_delim = '' #end if print(str_keys,list(self.keys())) indent = npad+(nkmax+3)*' ' if nindent==1: if str_keys: s = 'd = obj(\n' else: s = 'd = obj({\n' #end if else: s='' #end if for k in normal: v = self[k] vstr = (repr(v)+',').replace('\n','\n'+indent) s+=npad+k_fmt.format(k_func(k))+vstr+'\n' #end for for k in qable: v = self[k] sv,contains_str_keys = 
v.data_repr(nindent+1,ret_str_keys=True) if contains_str_keys: o_open = '' o_close = '' else: o_open = '{' o_close = '}' #end if s+=npad+k_func(k)+' {} obj({}\n'.format(k_delim,o_open) s+=sv s+=npad+pad+'{}),\n'.format(o_close) #end for if nindent==1: if str_keys: s += pad + ')\n' else: s += pad + '})\n' #end if #end if if not ret_str_keys: return s else: return s,str_keys #end if #end def data_repr # dict interface def keys(self): return self.__dict__.keys() #end def keys def values(self): return self.__dict__.values() #end def values def items(self): return self.__dict__.items() #end def items def copy(self): return deepcopy(self) #end def copy def clear(self): self.__dict__.clear() #end def clear # save/load def save(self,fpath=None): if fpath is None: fpath='./'+self.__class__.__name__+'.p' #end if fobj = open(fpath,'wb') binary = pickle.HIGHEST_PROTOCOL pickle.dump(self,fobj,binary) fobj.close() del fobj del binary return #end def save def load(self,fpath=None): if fpath is None: fpath='./'+self.__class__.__name__+'.p' #end if fobj = open(fpath,'rb') try: tmp = pickle.load(fobj) except: tmp = pickle.load(fobj,encoding='latin1') #end try fobj.close() d = self.__dict__ d.clear() for k,v in tmp.__dict__.items(): d[k] = v #end for del fobj del tmp return #end def load # log, warning, and error messages def open_log(self,filepath): self._logfile = open(filepath,'w') #end def open_log def close_log(self): self._logfile.close() #end def close_log def write(self,s): self._logfile.write(s) #end def write def log(self,*items,**kwargs): if 'logfile' not in kwargs: kwargs['logfile'] = self._logfile #end if log(*items,**kwargs) #end def log def warn(self,message,header=None): if header is None: header=self.__class__.__name__ #end if warn(message,header,logfile=self._logfile) #end def warn def error(self,message,header=None,exit=True,trace=True): if header==None: header = self.__class__.__name__ #end if error(message,header,exit,trace,logfile=self._logfile) #end def error @classmethod def class_log(cls,message): log(message,logfile=cls._logfile) #end def class_log @classmethod def class_warn(cls,message,header=None,post_header=' Warning:'): if header==None: header=cls.__name__ #end if warn(message,header,logfile=cls._logfile) #end def class_warn @classmethod def class_error(cls,message,header=None,exit=True,trace=True,post_header=' Error:'): if header==None: header = cls.__name__ #end if error(message,header,exit,trace,logfile=cls._logfile) #end def class_error @classmethod def class_has(cls,k): return hasattr(cls,k) #end def classmethod @classmethod def class_keys(cls): return cls.__dict__.keys() #end def class_keys @classmethod def class_items(cls): return cls.__dict__.items() #end def class_items @classmethod def class_get(cls,k): return getattr(cls,k) #end def class_set @classmethod def class_set(cls,**kwargs): for k,v in kwargs.items(): setattr(cls,k,v) #end for #end def class_set @classmethod def class_set_single(cls,k,v): setattr(cls,k,v) #end def class_set_single @classmethod def class_set_optional(cls,**kwargs): for k,v in kwargs.items(): if not hasattr(cls,k): setattr(cls,k,v) #end if #end for #end def class_set_optional # access preserving functions # dict interface def _keys(self,*args,**kwargs): return object_interface.keys(self,*args,**kwargs) def _values(self,*args,**kwargs): object_interface.values(self,*args,**kwargs) def _items(self,*args,**kwargs): return object_interface.items(self,*args,**kwargs) def _copy(self,*args,**kwargs): return object_interface.copy(self,*args,**kwargs) 
def _clear(self,*args,**kwargs): object_interface.clear(self,*args,**kwargs) # save/load def _save(self,*args,**kwargs): object_interface.save(self,*args,**kwargs) def _load(self,*args,**kwargs): object_interface.load(self,*args,**kwargs) # log, warning, and error messages def _open_log(self,*args,**kwargs): object_interface.open_log(self,*args,**kwargs) def _close_log(self,*args,**kwargs): object_interface.close_log(self,*args,**kwargs) def _write(self,*args,**kwargs): object_interface.write(self,*args,**kwargs) def _log(self,*args,**kwargs): object_interface.log(self,*args,**kwargs) def _error(self,*args,**kwargs): object_interface.error(self,*args,**kwargs) def _warn(self,*args,**kwargs): object_interface.warn(self,*args,**kwargs) #end class object_interface class obj(object_interface): def __init__(self,*vars,**kwargs): for var in vars: if isinstance(var,(dict,object_interface)): for k,v in var.items(): self[k] = v #end for else: self[var] = None #end if #end for for k,v in kwargs.items(): self[k] = v #end for #end def __init__ # list interface def append(self,value): self[len(self)] = value #end def append # return representations def list(self,*keys): nkeys = len(keys) if nkeys==0: keys = self._sorted_keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] #end if values = [] for key in keys: values.append(self[key]) #end if return values #end def list def list_optional(self,*keys): nkeys = len(keys) if nkeys==0: keys = self._sorted_keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] #end if values = [] for key in keys: if key in self: values.append(self[key]) else: values.append(None) #end if #end if return values #end def list_optional def tuple(self,*keys): return tuple(obj.list(self,*keys)) #end def tuple def dict(self,*keys): nkeys = len(keys) if nkeys==0: keys = self._keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] #end if d = dict() for k in keys: d[k] = self[k] #end for return d #end def dict def to_dict(self): d = dict() for k,v in self._items(): if isinstance(v,obj): d[k] = v._to_dict() else: d[k] = v #end if #end for return d #end def to_dict def obj(self,*keys): nkeys = len(keys) if nkeys==0: keys = self._keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] #end if o = obj() for k in keys: o[k] = self[k] #end for return o #end def obj def to_obj(self): o = obj() for k,v in self._items(): if isinstance(v,obj): o[k] = v._to_obj() else: o[k] = v #end if #end for return o #end def to_obj # list extensions def first(self): return self[min(self._keys())] #end def first def last(self): return self[max(self._keys())] #end def last def select_random(self): return self[randint(0,len(self)-1)] #end def select_random # dict extensions def sorted_keys(self): return sorted_generic(self._keys()) #end def sorted_keys def random_key(self): key = None nkeys = len(self) if nkeys>0: key = list(self._keys())[randint(0,nkeys-1)] #end if return key #end def random_key def set(self,*objs,**kwargs): for key,value in kwargs.items(): self[key]=value #end for if len(objs)>0: for o in objs: for k,v in o.items(): self[k] = v #end for #end for #end if return self #end def set def set_optional(self,*objs,**kwargs): for key,value in kwargs.items(): if key not in self: self[key]=value #end if #end for if len(objs)>0: for o in objs: for k,v in o.items(): if k not in self: self[k] = v #end if #end for #end for #end if return self #end def set_optional def get(self,key,value=None): # follow dict interface, no plural if key 
in self: value = self[key] #end if return value #end def get def get_optional(self,key,value=None): if key in self: value = self[key] #end if return value #end def get_optional def get_required(self,key): if key in self: value = self[key] else: obj.error(self,'a required key is not present\nkey required: {0}\nkeys present: {1}'.format(key,self._sorted_keys())) #end if return value #end def get_required def delete(self,*keys): nkeys = len(keys) single = False if nkeys==0: keys = self._sorted_keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] elif nkeys==1: single = True #end if values = [] for key in keys: values.append(self[key]) del self[key] #end for if single: return values[0] else: return values #end if #end def delete def delete_optional(self,key,value=None): if key in self: value = self[key] del self[key] #end if return value #end def delete_optional def delete_required(self,key): if key in self: value = self[key] del self[key] else: obj.error(self,'a required key is not present\nkey required: {0}\nkeys present: {1}'.format(key,self._sorted_keys())) #end if return value #end def delete_required def add(self,key,value): self[key] = value #end def add def add_optional(self,key,value): if key not in self: self[key] = value #end if #end def add_optional def transfer_from(self,other,keys=None,copy=False,overwrite=True): if keys is None: if isinstance(other,object_interface): keys = other._keys() else: keys = other.keys() #end if #end if if copy: copier = deepcopy else: copier = nocopy #end if if overwrite: for k in keys: self[k]=copier(other[k]) #end for else: for k in keys: if k not in self: self[k]=copier(other[k]) #end if #end for #end if #end def transfer_from def transfer_to(self,other,keys=None,copy=False,overwrite=True): if keys is None: keys = self._keys() #end if if copy: copier = deepcopy else: copier = nocopy #end if if overwrite: for k in keys: other[k]=copier(self[k]) #end for else: for k in keys: if k not in self: other[k]=copier(self[k]) #end if #end for #end if #end def transfer_to def move_from(self,other,keys=None,optional=False): if keys is None: if isinstance(other,object_interface): keys = list(other._keys()) else: keys = list(other.keys()) #end if #end if if not optional: for k in keys: self[k]=other[k] del other[k] #end for else: for k in keys: if k in other: self[k]=other[k] del other[k] #end if #end for #end if #end def move_from def move_to(self,other,keys=None,optional=False): if keys is None: keys = list(self._keys()) #end if if not optional: for k in keys: other[k]=self[k] del self[k] #end for else: for k in keys: if k in self: other[k]=self[k] del self[k] #end if #end for #end if #end def move_to def move_from_optional(self,other,keys=None): self.move_from(other,keys,optional=True) #end def move_from_optional def move_to_optional(self,other,keys=None): self.move_to(other,keys,optional=True) #end def move_to_optional def copy_from(self,other,keys=None,deep=True): obj.transfer_from(self,other,keys,copy=deep) #end def copy_from def copy_to(self,other,keys=None,deep=True): obj.transfer_to(self,other,keys,copy=deep) #end def copy_to def extract(self,keys=None,optional=False): ext = obj() ext.move_from(self,keys,optional=optional) return ext #end def extract def extract_optional(self,keys=None): return self.extract(keys,optional=True) #end def extract_optional def check_required(self,keys,exit=True): if not isinstance(keys,set): keys = set(keys) #end if missing = keys-set(self.keys()) if exit and len(missing)>0: self._error('required keys are 
missing\nmissing keys: {0}'.format(sorted_generic(missing))) #end if return missing #end def check_required def check_types(self,types,optional=False,exit=True): kfail = None tfail = None if not optional: for k,t in types.items(): if not isinstance(self[k],t): kfail = k tfail = t break #end if #end for else: for k,t in types.items(): if k in self and not isinstance(self[k],t): kfail = k tfail = t break #end if #end for #end if if exit and kfail is not None: self._error('incorrect type encountered for key value\ntype required: {0}\ntype encountered: {1}\ninvalid key: {2}'.format(tfail.__name__,self[kfail].__class__.__name__,kfail)) #end if return kfail,tfail #end def check_types def check_types_optional(self,types,exit=True): return self.check_types(types,exit=exit,optional=True) #end def check_types_optional def shallow_copy(self): new = self.__class__() for k,v in self._items(): new[k] = v #end for return new #end def shallow_copy def inverse(self): new = self.__class__() for k,v in self._items(): new[v] = k #end for return new #end def inverse def path_exists(self,path): o = self if isinstance(path,str): path = path.split('/') #end if for p in path: if not p in o: return False #end if o = o[p] #end for return True #end def path_exists def set_path(self,path,value=None): o = self cls = self.__class__ if isinstance(path,str): path = path.split('/') #end if for p in path[0:-1]: if not p in o: o[p] = cls() #end if o = o[p] #end for o[path[-1]] = value #end def set_path def get_path(self,path,value=None): o = self if isinstance(path,str): path = path.split('/') #end if for p in path[0:-1]: if not p in o: return value #end if o = o[p] #end for lp = path[-1] if lp not in o: return value else: return o[lp] #end if #end def get_path def serial(self,s=None,path=None): first = s is None if first: s = obj() path = '' #end if for k,v in self._items(): p = path+str(k) if isinstance(v,obj): if len(v)==0: s[p]=v else: v._serial(s,p+'/') #end if else: s[p]=v #end if #end for if first: return s #end if #end def serial # access preserving functions # list interface def _append(self,*args,**kwargs): obj.append(self,*args,**kwargs) # return representations def _list(self,*args,**kwargs): return obj.list(self,*args,**kwargs) def _list_optional(self,*args,**kwargs): return obj.list_optional(self,*args,**kwargs) def _tuple(self,*args,**kwargs): return obj.tuple(self,*args,**kwargs) def _dict(self,*args,**kwargs): return obj.dict(self,*args,**kwargs) def _to_dict(self,*args,**kwargs): return obj.to_dict(self,*args,**kwargs) def _obj(self,*args,**kwargs): return obj.obj(self,*args,**kwargs) def _to_obj(self,*args,**kwargs): return obj.to_obj(self,*args,**kwargs) # list extensions def _first(self,*args,**kwargs): return obj.first(self,*args,**kwargs) def _last(self,*args,**kwargs): return obj.last(self,*args,**kwargs) def _select_random(self,*args,**kwargs): return obj.select_random(self,*args,**kwargs) # dict extensions def _sorted_keys(self,*args,**kwargs): return obj.sorted_keys(self,*args,**kwargs) def _random_key(self,*args,**kwargs): obj.random_key(self,*args,**kwargs) def _set(self,*args,**kwargs): obj.set(self,*args,**kwargs) def _set_optional(self,*args,**kwargs): obj.set_optional(self,*args,**kwargs) def _get(self,*args,**kwargs): obj.get(self,*args,**kwargs) def _get_optional(self,*args,**kwargs): obj.get_optional(self,*args,**kwargs) def _get_required(self,*args,**kwargs): obj.get_required(self,*args,**kwargs) def _delete(self,*args,**kwargs): obj.delete(self,*args,**kwargs) def 
_delete_optional(self,*args,**kwargs): obj.delete_optional(self,*args,**kwargs) def _delete_required(self,*args,**kwargs): obj.delete_required(self,*args,**kwargs) def _add(self,*args,**kwargs): obj.add(self,*args,**kwargs) def _add_optional(self,*args,**kwargs): obj.add_optional(self,*args,**kwargs) def _transfer_from(self,*args,**kwargs): obj.transfer_from(self,*args,**kwargs) def _transfer_to(self,*args,**kwargs): obj.transfer_to(self,*args,**kwargs) def _move_from(self,*args,**kwargs): obj.move_from(self,*args,**kwargs) def _move_to(self,*args,**kwargs): obj.move_to(self,*args,**kwargs) def _move_from_optional(self,*args,**kwargs): obj.move_from_optional(self,*args,**kwargs) def _move_to_optional(self,*args,**kwargs): obj.move_to_optional(self,*args,**kwargs) def _copy_from(self,*args,**kwargs): obj.copy_from(self,*args,**kwargs) def _copy_to(self,*args,**kwargs): obj.copy_to(self,*args,**kwargs) def _extract(self,*args,**kwargs): obj.extract(self,*args,**kwargs) def _extract_optional(self,*args,**kwargs): obj.extract_optional(self,*args,**kwargs) def _check_required(self,*args,**kwargs): obj.check_required(self,*args,**kwargs) def _check_types(self,*args,**kwargs): obj.check_types(self,*args,**kwargs) def _check_types_optional(self,*args,**kwargs): obj.check_types_optional(self,*args,**kwargs) def _shallow_copy(self,*args,**kwargs): obj.shallow_copy(self,*args,**kwargs) def _inverse(self,*args,**kwargs): return obj.inverse(self,*args,**kwargs) def _path_exists(self,*args,**kwargs): obj.path_exists(self,*args,**kwargs) def _set_path(self,*args,**kwargs): obj.set_path(self,*args,**kwargs) def _get_path(self,*args,**kwargs): obj.get_path(self,*args,**kwargs) def _serial(self,*args,**kwargs): return obj.serial(self,*args,**kwargs) #end class obj class hobj(obj): def __init__(self,*args,**kwargs): obj.__init__(self,*args,**kwargs) #end def __init__ @property def _dict(self): return self.__dict__ #end def _dict @property def _alt(self): return self.__dict__ #end def _alt def __len__(self): return len(self._dict) #end def __len__ def __contains__(self,name): return name in self._dict #end def __contains__ def __getitem__(self,name): return self._dict[name] #end def __getitem__ def __setitem__(self,name,value): self._dict[name] = value #end def __setitem__ def __delitem__(self,name): del self._dict[name] #end def __delitem__ def __iter__(self): d = self._dict for item in d.__dict__: yield d[item] #end for #end def __iter__ def keys(self): return self._dict.keys() #end def keys def values(self): return self._dict.values() #end def keys def items(self): return self._dict.items() #end def items def clear(self): self._dict.clear() #end def clear # access preserving functions # dict interface def _keys(self,*args,**kwargs): return hobj.keys(self,*args,**kwargs) def _values(self,*args,**kwargs): hobj.values(self,*args,**kwargs) def _items(self,*args,**kwargs): return hobj.items(self,*args,**kwargs) def _clear(self,*args,**kwargs): hobj.clear(self,*args,**kwargs) #end class hobj class hidden(hobj): def __init__(self,*vals,**kwargs): d = object.__getattribute__(self,'__dict__') d['_hidden_'] = hobj() d['_public_'] = hobj() hobj.__init__(self,*vals,**kwargs) #end def __init__ @property def _dict(self): return self.__dict__['_public_'] #end def __get_dict @property def _alt(self): return self.__dict__['_hidden_'] #end def __alt def __getattribute__(self,name): d = object.__getattribute__(self,'__dict__') if '_public_' in d: p = d['_public_'] if name in p: return p[name] else: return 
object.__getattribute__(self,name) #end if else: return object.__getattribute__(self,name) #end if #end def __getattribute__ def __setattr__(self,name,value): self._dict[name] = value #end def __setattr__ def __delattr__(self,name): del self._dict[name] #end def __delattr__ def hidden(self): return self.__dict__['_hidden_'] #end def hidden def public(self): return self.__dict__['_public_'] #end def public def _hidden(self): return hidden.hidden(self) #end def _hidden def _public(self): return hidden.public(self) #end def _public def open_log(self,filepath): self._alt._open_log(filepath) #end def open_log def close_log(self): self._alt._close_log() #end def close_log def write(self,s): self._alt._write(s) #end def write def log(self,*items,**kwargs): self._alt._log(*items,**kwargs) #end def log def __repr__(self): s='' for k in self._sorted_keys(): if not isinstance(k,str) or k[0]!='_': v=self._dict[k] if hasattr(v,'__class__'): s+=' {0:<20} {1:<20}\n'.format(k,v.__class__.__name__) else: s+=' {0:<20} {1:<20}\n'.format(k,type(v)) #end if #end if #end for return s #end def __repr__ # log, warning, and error messages def _open_log(self,*args,**kwargs): hidden.open_log(self,*args,**kwargs) def _close_log(self,*args,**kwargs): hidden.close_log(self,*args,**kwargs) def _write(self,*args,**kwargs): hidden.write(self,*args,**kwargs) def _log(self,*args,**kwargs): hidden.log(self,*args,**kwargs) #end class hidden
27.622041
188
0.502664
t in other.__dict__: return False svar = self.__dict__[sname] ovar = other.__dict__[sname] stype = type(svar) otype = type(ovar) if stype!=otype: return False eqval = svar==ovar if isinstance(eqval,bool): eq &= eqval else: try: eq &= eqval.all() except: return False return eq def tree(self,depth=None,all=False,types=False,nindent=1): if depth==nindent-1: return '' pad = ' ' npad = nindent*pad s='' normal = [] qable = [] for k,v in self._items(): if not isinstance(k,str) or k[0]!='_': if isinstance(v,object_interface): qable.append(k) else: normal.append(k) normal.sort() qable.sort() indent = npad+18*' ' if all: for k in normal: v = self[k] if types: s+=npad+'{0:<15} = '.format(k) if hasattr(v,'__class__'): s+='{0:<20}'.format(v.__class__.__name__) else: s+='{0:<20}'.format(type(v)) else: s+=npad+str(k) s+='\n' if all and depth!=nindent: for k in qable: v = self[k] s+=npad+str(k)+'\n' s+=v.tree(depth,all,types,nindent+1) if isinstance(k,str): s+=npad+'end '+k+'\n' else: for k in qable: v = self[k] if types: s+=npad+'{0:<15} = '.format(k) if hasattr(v,'__class__'): s+='{0:<20}'.format(v.__class__.__name__) else: s+='{0:<20}'.format(type(v)) else: s+=npad+str(k) s+='\n' s+=v.tree(depth,all,types,nindent+1) return s def data_repr(self,nindent=1,ret_str_keys=False): pad = ' ' npad = nindent*pad normal = [] qable = [] str_keys = True for k,v in self._items(): k_str = isinstance(k,str) str_keys &= k_str if not k_str or k[0]!='_': if isinstance(v,object_interface): qable.append(k) else: normal.append(k) normal = sorted_generic(normal) qable = sorted_generic(qable) if str_keys: nkmax = 0 for k in normal: nkmax = max(nkmax,len(k)) for k in qable: nkmax = max(nkmax,len(k)) k_fmt = '{0:<'+str(nkmax)+'} = ' k_delim = '=' k_func = str else: nkmax = 20 k_fmt = '{0:<20} : ' k_delim = ':' k_func = repr o_delim = '' print(str_keys,list(self.keys())) indent = npad+(nkmax+3)*' ' if nindent==1: if str_keys: s = 'd = obj(\n' else: s = 'd = obj({\n' else: s='' for k in normal: v = self[k] vstr = (repr(v)+',').replace('\n','\n'+indent) s+=npad+k_fmt.format(k_func(k))+vstr+'\n' for k in qable: v = self[k] sv,contains_str_keys = v.data_repr(nindent+1,ret_str_keys=True) if contains_str_keys: o_open = '' o_close = '' else: o_open = '{' o_close = '}' s+=npad+k_func(k)+' {} obj({}\n'.format(k_delim,o_open) s+=sv s+=npad+pad+'{}),\n'.format(o_close) if nindent==1: if str_keys: s += pad + ')\n' else: s += pad + '})\n' if not ret_str_keys: return s else: return s,str_keys def keys(self): return self.__dict__.keys() def values(self): return self.__dict__.values() def items(self): return self.__dict__.items() def copy(self): return deepcopy(self) def clear(self): self.__dict__.clear() def save(self,fpath=None): if fpath is None: fpath='./'+self.__class__.__name__+'.p' fobj = open(fpath,'wb') binary = pickle.HIGHEST_PROTOCOL pickle.dump(self,fobj,binary) fobj.close() del fobj del binary return def load(self,fpath=None): if fpath is None: fpath='./'+self.__class__.__name__+'.p' fobj = open(fpath,'rb') try: tmp = pickle.load(fobj) except: tmp = pickle.load(fobj,encoding='latin1') fobj.close() d = self.__dict__ d.clear() for k,v in tmp.__dict__.items(): d[k] = v del fobj del tmp return def open_log(self,filepath): self._logfile = open(filepath,'w') def close_log(self): self._logfile.close() def write(self,s): self._logfile.write(s) def log(self,*items,**kwargs): if 'logfile' not in kwargs: kwargs['logfile'] = self._logfile log(*items,**kwargs) def warn(self,message,header=None): if header is None: header=self.__class__.__name__ 
warn(message,header,logfile=self._logfile) def error(self,message,header=None,exit=True,trace=True): if header==None: header = self.__class__.__name__ error(message,header,exit,trace,logfile=self._logfile) @classmethod def class_log(cls,message): log(message,logfile=cls._logfile) @classmethod def class_warn(cls,message,header=None,post_header=' Warning:'): if header==None: header=cls.__name__ warn(message,header,logfile=cls._logfile) @classmethod def class_error(cls,message,header=None,exit=True,trace=True,post_header=' Error:'): if header==None: header = cls.__name__ error(message,header,exit,trace,logfile=cls._logfile) @classmethod def class_has(cls,k): return hasattr(cls,k) @classmethod def class_keys(cls): return cls.__dict__.keys() @classmethod def class_items(cls): return cls.__dict__.items() @classmethod def class_get(cls,k): return getattr(cls,k) @classmethod def class_set(cls,**kwargs): for k,v in kwargs.items(): setattr(cls,k,v) @classmethod def class_set_single(cls,k,v): setattr(cls,k,v) @classmethod def class_set_optional(cls,**kwargs): for k,v in kwargs.items(): if not hasattr(cls,k): setattr(cls,k,v) def _keys(self,*args,**kwargs): return object_interface.keys(self,*args,**kwargs) def _values(self,*args,**kwargs): object_interface.values(self,*args,**kwargs) def _items(self,*args,**kwargs): return object_interface.items(self,*args,**kwargs) def _copy(self,*args,**kwargs): return object_interface.copy(self,*args,**kwargs) def _clear(self,*args,**kwargs): object_interface.clear(self,*args,**kwargs) def _save(self,*args,**kwargs): object_interface.save(self,*args,**kwargs) def _load(self,*args,**kwargs): object_interface.load(self,*args,**kwargs) def _open_log(self,*args,**kwargs): object_interface.open_log(self,*args,**kwargs) def _close_log(self,*args,**kwargs): object_interface.close_log(self,*args,**kwargs) def _write(self,*args,**kwargs): object_interface.write(self,*args,**kwargs) def _log(self,*args,**kwargs): object_interface.log(self,*args,**kwargs) def _error(self,*args,**kwargs): object_interface.error(self,*args,**kwargs) def _warn(self,*args,**kwargs): object_interface.warn(self,*args,**kwargs) class obj(object_interface): def __init__(self,*vars,**kwargs): for var in vars: if isinstance(var,(dict,object_interface)): for k,v in var.items(): self[k] = v else: self[var] = None for k,v in kwargs.items(): self[k] = v def append(self,value): self[len(self)] = value def list(self,*keys): nkeys = len(keys) if nkeys==0: keys = self._sorted_keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] values = [] for key in keys: values.append(self[key]) return values def list_optional(self,*keys): nkeys = len(keys) if nkeys==0: keys = self._sorted_keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] values = [] for key in keys: if key in self: values.append(self[key]) else: values.append(None) return values def tuple(self,*keys): return tuple(obj.list(self,*keys)) def dict(self,*keys): nkeys = len(keys) if nkeys==0: keys = self._keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] d = dict() for k in keys: d[k] = self[k] return d def to_dict(self): d = dict() for k,v in self._items(): if isinstance(v,obj): d[k] = v._to_dict() else: d[k] = v return d def obj(self,*keys): nkeys = len(keys) if nkeys==0: keys = self._keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] o = obj() for k in keys: o[k] = self[k] return o def to_obj(self): o = obj() for k,v in self._items(): if isinstance(v,obj): o[k] = 
v._to_obj() else: o[k] = v return o def first(self): return self[min(self._keys())] def last(self): return self[max(self._keys())] def select_random(self): return self[randint(0,len(self)-1)] def sorted_keys(self): return sorted_generic(self._keys()) def random_key(self): key = None nkeys = len(self) if nkeys>0: key = list(self._keys())[randint(0,nkeys-1)] return key def set(self,*objs,**kwargs): for key,value in kwargs.items(): self[key]=value if len(objs)>0: for o in objs: for k,v in o.items(): self[k] = v return self def set_optional(self,*objs,**kwargs): for key,value in kwargs.items(): if key not in self: self[key]=value if len(objs)>0: for o in objs: for k,v in o.items(): if k not in self: self[k] = v return self def get(self,key,value=None): if key in self: value = self[key] return value def get_optional(self,key,value=None): if key in self: value = self[key] return value def get_required(self,key): if key in self: value = self[key] else: obj.error(self,'a required key is not present\nkey required: {0}\nkeys present: {1}'.format(key,self._sorted_keys())) return value def delete(self,*keys): nkeys = len(keys) single = False if nkeys==0: keys = self._sorted_keys() elif nkeys==1 and isinstance(keys[0],(list,tuple)): keys = keys[0] elif nkeys==1: single = True values = [] for key in keys: values.append(self[key]) del self[key] if single: return values[0] else: return values def delete_optional(self,key,value=None): if key in self: value = self[key] del self[key] return value def delete_required(self,key): if key in self: value = self[key] del self[key] else: obj.error(self,'a required key is not present\nkey required: {0}\nkeys present: {1}'.format(key,self._sorted_keys())) return value def add(self,key,value): self[key] = value def add_optional(self,key,value): if key not in self: self[key] = value def transfer_from(self,other,keys=None,copy=False,overwrite=True): if keys is None: if isinstance(other,object_interface): keys = other._keys() else: keys = other.keys() if copy: copier = deepcopy else: copier = nocopy if overwrite: for k in keys: self[k]=copier(other[k]) else: for k in keys: if k not in self: self[k]=copier(other[k]) def transfer_to(self,other,keys=None,copy=False,overwrite=True): if keys is None: keys = self._keys() if copy: copier = deepcopy else: copier = nocopy if overwrite: for k in keys: other[k]=copier(self[k]) else: for k in keys: if k not in self: other[k]=copier(self[k]) def move_from(self,other,keys=None,optional=False): if keys is None: if isinstance(other,object_interface): keys = list(other._keys()) else: keys = list(other.keys()) if not optional: for k in keys: self[k]=other[k] del other[k] else: for k in keys: if k in other: self[k]=other[k] del other[k] def move_to(self,other,keys=None,optional=False): if keys is None: keys = list(self._keys()) if not optional: for k in keys: other[k]=self[k] del self[k] else: for k in keys: if k in self: other[k]=self[k] del self[k] def move_from_optional(self,other,keys=None): self.move_from(other,keys,optional=True) def move_to_optional(self,other,keys=None): self.move_to(other,keys,optional=True) def copy_from(self,other,keys=None,deep=True): obj.transfer_from(self,other,keys,copy=deep) def copy_to(self,other,keys=None,deep=True): obj.transfer_to(self,other,keys,copy=deep) def extract(self,keys=None,optional=False): ext = obj() ext.move_from(self,keys,optional=optional) return ext def extract_optional(self,keys=None): return self.extract(keys,optional=True) def check_required(self,keys,exit=True): if not 
isinstance(keys,set): keys = set(keys) missing = keys-set(self.keys()) if exit and len(missing)>0: self._error('required keys are missing\nmissing keys: {0}'.format(sorted_generic(missing))) return missing def check_types(self,types,optional=False,exit=True): kfail = None tfail = None if not optional: for k,t in types.items(): if not isinstance(self[k],t): kfail = k tfail = t break else: for k,t in types.items(): if k in self and not isinstance(self[k],t): kfail = k tfail = t break if exit and kfail is not None: self._error('incorrect type encountered for key value\ntype required: {0}\ntype encountered: {1}\ninvalid key: {2}'.format(tfail.__name__,self[kfail].__class__.__name__,kfail)) return kfail,tfail def check_types_optional(self,types,exit=True): return self.check_types(types,exit=exit,optional=True) def shallow_copy(self): new = self.__class__() for k,v in self._items(): new[k] = v return new def inverse(self): new = self.__class__() for k,v in self._items(): new[v] = k return new def path_exists(self,path): o = self if isinstance(path,str): path = path.split('/') for p in path: if not p in o: return False o = o[p] return True def set_path(self,path,value=None): o = self cls = self.__class__ if isinstance(path,str): path = path.split('/') for p in path[0:-1]: if not p in o: o[p] = cls() o = o[p] o[path[-1]] = value def get_path(self,path,value=None): o = self if isinstance(path,str): path = path.split('/') for p in path[0:-1]: if not p in o: return value o = o[p] lp = path[-1] if lp not in o: return value else: return o[lp] def serial(self,s=None,path=None): first = s is None if first: s = obj() path = '' for k,v in self._items(): p = path+str(k) if isinstance(v,obj): if len(v)==0: s[p]=v else: v._serial(s,p+'/') else: s[p]=v if first: return s def _append(self,*args,**kwargs): obj.append(self,*args,**kwargs) def _list(self,*args,**kwargs): return obj.list(self,*args,**kwargs) def _list_optional(self,*args,**kwargs): return obj.list_optional(self,*args,**kwargs) def _tuple(self,*args,**kwargs): return obj.tuple(self,*args,**kwargs) def _dict(self,*args,**kwargs): return obj.dict(self,*args,**kwargs) def _to_dict(self,*args,**kwargs): return obj.to_dict(self,*args,**kwargs) def _obj(self,*args,**kwargs): return obj.obj(self,*args,**kwargs) def _to_obj(self,*args,**kwargs): return obj.to_obj(self,*args,**kwargs) def _first(self,*args,**kwargs): return obj.first(self,*args,**kwargs) def _last(self,*args,**kwargs): return obj.last(self,*args,**kwargs) def _select_random(self,*args,**kwargs): return obj.select_random(self,*args,**kwargs) def _sorted_keys(self,*args,**kwargs): return obj.sorted_keys(self,*args,**kwargs) def _random_key(self,*args,**kwargs): obj.random_key(self,*args,**kwargs) def _set(self,*args,**kwargs): obj.set(self,*args,**kwargs) def _set_optional(self,*args,**kwargs): obj.set_optional(self,*args,**kwargs) def _get(self,*args,**kwargs): obj.get(self,*args,**kwargs) def _get_optional(self,*args,**kwargs): obj.get_optional(self,*args,**kwargs) def _get_required(self,*args,**kwargs): obj.get_required(self,*args,**kwargs) def _delete(self,*args,**kwargs): obj.delete(self,*args,**kwargs) def _delete_optional(self,*args,**kwargs): obj.delete_optional(self,*args,**kwargs) def _delete_required(self,*args,**kwargs): obj.delete_required(self,*args,**kwargs) def _add(self,*args,**kwargs): obj.add(self,*args,**kwargs) def _add_optional(self,*args,**kwargs): obj.add_optional(self,*args,**kwargs) def _transfer_from(self,*args,**kwargs): obj.transfer_from(self,*args,**kwargs) def 
_transfer_to(self,*args,**kwargs): obj.transfer_to(self,*args,**kwargs) def _move_from(self,*args,**kwargs): obj.move_from(self,*args,**kwargs) def _move_to(self,*args,**kwargs): obj.move_to(self,*args,**kwargs) def _move_from_optional(self,*args,**kwargs): obj.move_from_optional(self,*args,**kwargs) def _move_to_optional(self,*args,**kwargs): obj.move_to_optional(self,*args,**kwargs) def _copy_from(self,*args,**kwargs): obj.copy_from(self,*args,**kwargs) def _copy_to(self,*args,**kwargs): obj.copy_to(self,*args,**kwargs) def _extract(self,*args,**kwargs): obj.extract(self,*args,**kwargs) def _extract_optional(self,*args,**kwargs): obj.extract_optional(self,*args,**kwargs) def _check_required(self,*args,**kwargs): obj.check_required(self,*args,**kwargs) def _check_types(self,*args,**kwargs): obj.check_types(self,*args,**kwargs) def _check_types_optional(self,*args,**kwargs): obj.check_types_optional(self,*args,**kwargs) def _shallow_copy(self,*args,**kwargs): obj.shallow_copy(self,*args,**kwargs) def _inverse(self,*args,**kwargs): return obj.inverse(self,*args,**kwargs) def _path_exists(self,*args,**kwargs): obj.path_exists(self,*args,**kwargs) def _set_path(self,*args,**kwargs): obj.set_path(self,*args,**kwargs) def _get_path(self,*args,**kwargs): obj.get_path(self,*args,**kwargs) def _serial(self,*args,**kwargs): return obj.serial(self,*args,**kwargs) class hobj(obj): def __init__(self,*args,**kwargs): obj.__init__(self,*args,**kwargs) @property def _dict(self): return self.__dict__ @property def _alt(self): return self.__dict__ def __len__(self): return len(self._dict) def __contains__(self,name): return name in self._dict def __getitem__(self,name): return self._dict[name] def __setitem__(self,name,value): self._dict[name] = value def __delitem__(self,name): del self._dict[name] def __iter__(self): d = self._dict for item in d.__dict__: yield d[item] def keys(self): return self._dict.keys() def values(self): return self._dict.values() def items(self): return self._dict.items() def clear(self): self._dict.clear() def _keys(self,*args,**kwargs): return hobj.keys(self,*args,**kwargs) def _values(self,*args,**kwargs): hobj.values(self,*args,**kwargs) def _items(self,*args,**kwargs): return hobj.items(self,*args,**kwargs) def _clear(self,*args,**kwargs): hobj.clear(self,*args,**kwargs) class hidden(hobj): def __init__(self,*vals,**kwargs): d = object.__getattribute__(self,'__dict__') d['_hidden_'] = hobj() d['_public_'] = hobj() hobj.__init__(self,*vals,**kwargs) @property def _dict(self): return self.__dict__['_public_'] @property def _alt(self): return self.__dict__['_hidden_'] def __getattribute__(self,name): d = object.__getattribute__(self,'__dict__') if '_public_' in d: p = d['_public_'] if name in p: return p[name] else: return object.__getattribute__(self,name) else: return object.__getattribute__(self,name) def __setattr__(self,name,value): self._dict[name] = value def __delattr__(self,name): del self._dict[name] def hidden(self): return self.__dict__['_hidden_'] def public(self): return self.__dict__['_public_'] def _hidden(self): return hidden.hidden(self) def _public(self): return hidden.public(self) def open_log(self,filepath): self._alt._open_log(filepath) def close_log(self): self._alt._close_log() def write(self,s): self._alt._write(s) def log(self,*items,**kwargs): self._alt._log(*items,**kwargs) def __repr__(self): s='' for k in self._sorted_keys(): if not isinstance(k,str) or k[0]!='_': v=self._dict[k] if hasattr(v,'__class__'): s+=' {0:<20} 
{1:<20}\n'.format(k,v.__class__.__name__) else: s+=' {0:<20} {1:<20}\n'.format(k,type(v)) return s def _open_log(self,*args,**kwargs): hidden.open_log(self,*args,**kwargs) def _close_log(self,*args,**kwargs): hidden.close_log(self,*args,**kwargs) def _write(self,*args,**kwargs): hidden.write(self,*args,**kwargs) def _log(self,*args,**kwargs): hidden.log(self,*args,**kwargs)
true
true
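The generic.py record above defines obj, a dict-backed object whose keys double as attributes. A small usage sketch follows, assuming the Nexus lib directory is on the Python path; the field names and values are invented for illustration.

from generic import obj

a = obj(x=1, y=2)
a.z = obj(name='inner', vals=[1, 2, 3])   # attribute and item access share one __dict__
print(a.x, a['y'])                        # -> 1 2
print(a.get('missing', 'fallback'))       # dict-style get with a default
flat = a.serial()                         # flatten nested objs into path-like keys
print(sorted(flat.keys()))                # -> ['x', 'y', 'z/name', 'z/vals']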
1c4367cf95f9922bb13c21a0d1b1367e1787deb5
63,124
py
Python
sdk/python/pulumi_azure_native/documentdb/v20210615/database_account.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/documentdb/v20210615/database_account.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/documentdb/v20210615/database_account.py
polivbr/pulumi-azure-native
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['DatabaseAccountArgs', 'DatabaseAccount'] @pulumi.input_type class DatabaseAccountArgs: def __init__(__self__, *, database_account_offer_type: pulumi.Input['DatabaseAccountOfferType'], locations: pulumi.Input[Sequence[pulumi.Input['LocationArgs']]], resource_group_name: pulumi.Input[str], account_name: Optional[pulumi.Input[str]] = None, analytical_storage_configuration: Optional[pulumi.Input['AnalyticalStorageConfigurationArgs']] = None, api_properties: Optional[pulumi.Input['ApiPropertiesArgs']] = None, backup_policy: Optional[pulumi.Input[Union['ContinuousModeBackupPolicyArgs', 'PeriodicModeBackupPolicyArgs']]] = None, capabilities: Optional[pulumi.Input[Sequence[pulumi.Input['CapabilityArgs']]]] = None, connector_offer: Optional[pulumi.Input[Union[str, 'ConnectorOffer']]] = None, consistency_policy: Optional[pulumi.Input['ConsistencyPolicyArgs']] = None, cors: Optional[pulumi.Input[Sequence[pulumi.Input['CorsPolicyArgs']]]] = None, create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None, default_identity: Optional[pulumi.Input[str]] = None, disable_key_based_metadata_write_access: Optional[pulumi.Input[bool]] = None, disable_local_auth: Optional[pulumi.Input[bool]] = None, enable_analytical_storage: Optional[pulumi.Input[bool]] = None, enable_automatic_failover: Optional[pulumi.Input[bool]] = None, enable_cassandra_connector: Optional[pulumi.Input[bool]] = None, enable_free_tier: Optional[pulumi.Input[bool]] = None, enable_multiple_write_locations: Optional[pulumi.Input[bool]] = None, identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None, ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input['IpAddressOrRangeArgs']]]] = None, is_virtual_network_filter_enabled: Optional[pulumi.Input[bool]] = None, key_vault_key_uri: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]] = None, location: Optional[pulumi.Input[str]] = None, network_acl_bypass: Optional[pulumi.Input['NetworkAclBypass']] = None, network_acl_bypass_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None, restore_parameters: Optional[pulumi.Input['RestoreParametersArgs']] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]] = None): """ The set of arguments for constructing a DatabaseAccount resource. :param pulumi.Input['DatabaseAccountOfferType'] database_account_offer_type: The offer type for the database :param pulumi.Input[Sequence[pulumi.Input['LocationArgs']]] locations: An array that contains the georeplication locations enabled for the Cosmos DB account. :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[str] account_name: Cosmos DB database account name. :param pulumi.Input['AnalyticalStorageConfigurationArgs'] analytical_storage_configuration: Analytical storage specific properties. 
:param pulumi.Input['ApiPropertiesArgs'] api_properties: API specific properties. Currently, supported only for MongoDB API. :param pulumi.Input[Union['ContinuousModeBackupPolicyArgs', 'PeriodicModeBackupPolicyArgs']] backup_policy: The object representing the policy for taking backups on an account. :param pulumi.Input[Sequence[pulumi.Input['CapabilityArgs']]] capabilities: List of Cosmos DB capabilities for the account :param pulumi.Input[Union[str, 'ConnectorOffer']] connector_offer: The cassandra connector offer type for the Cosmos DB database C* account. :param pulumi.Input['ConsistencyPolicyArgs'] consistency_policy: The consistency policy for the Cosmos DB account. :param pulumi.Input[Sequence[pulumi.Input['CorsPolicyArgs']]] cors: The CORS policy for the Cosmos DB database account. :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Enum to indicate the mode of account creation. :param pulumi.Input[str] default_identity: The default identity for accessing key vault used in features like customer managed keys. The default identity needs to be explicitly set by the users. It can be "FirstPartyIdentity", "SystemAssignedIdentity" and more. :param pulumi.Input[bool] disable_key_based_metadata_write_access: Disable write operations on metadata resources (databases, containers, throughput) via account keys :param pulumi.Input[bool] disable_local_auth: Opt-out of local authentication and ensure only MSI and AAD can be used exclusively for authentication. :param pulumi.Input[bool] enable_analytical_storage: Flag to indicate whether to enable storage analytics. :param pulumi.Input[bool] enable_automatic_failover: Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage. Automatic failover will result in a new write region for the account and is chosen based on the failover priorities configured for the account. :param pulumi.Input[bool] enable_cassandra_connector: Enables the cassandra connector on the Cosmos DB C* account :param pulumi.Input[bool] enable_free_tier: Flag to indicate whether Free Tier is enabled. :param pulumi.Input[bool] enable_multiple_write_locations: Enables the account to write in multiple locations :param pulumi.Input['ManagedServiceIdentityArgs'] identity: Identity for the resource. :param pulumi.Input[Sequence[pulumi.Input['IpAddressOrRangeArgs']]] ip_rules: List of IpRules. :param pulumi.Input[bool] is_virtual_network_filter_enabled: Flag to indicate whether to enable/disable Virtual Network ACL rules. :param pulumi.Input[str] key_vault_key_uri: The URI of the key vault :param pulumi.Input[Union[str, 'DatabaseAccountKind']] kind: Indicates the type of database account. This can only be set at database account creation. :param pulumi.Input[str] location: The location of the resource group to which the resource belongs. :param pulumi.Input['NetworkAclBypass'] network_acl_bypass: Indicates what services are allowed to bypass firewall checks. :param pulumi.Input[Sequence[pulumi.Input[str]]] network_acl_bypass_resource_ids: An array that contains the Resource Ids for Network Acl Bypass for the Cosmos DB account. :param pulumi.Input[Union[str, 'PublicNetworkAccess']] public_network_access: Whether requests from Public Network are allowed :param pulumi.Input['RestoreParametersArgs'] restore_parameters: Parameters to indicate the information about the restore. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. 
These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". :param pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]] virtual_network_rules: List of Virtual Network ACL rules configured for the Cosmos DB account. """ pulumi.set(__self__, "database_account_offer_type", database_account_offer_type) pulumi.set(__self__, "locations", locations) pulumi.set(__self__, "resource_group_name", resource_group_name) if account_name is not None: pulumi.set(__self__, "account_name", account_name) if analytical_storage_configuration is not None: pulumi.set(__self__, "analytical_storage_configuration", analytical_storage_configuration) if api_properties is not None: pulumi.set(__self__, "api_properties", api_properties) if backup_policy is not None: pulumi.set(__self__, "backup_policy", backup_policy) if capabilities is not None: pulumi.set(__self__, "capabilities", capabilities) if connector_offer is not None: pulumi.set(__self__, "connector_offer", connector_offer) if consistency_policy is not None: pulumi.set(__self__, "consistency_policy", consistency_policy) if cors is not None: pulumi.set(__self__, "cors", cors) if create_mode is None: create_mode = 'Default' if create_mode is not None: pulumi.set(__self__, "create_mode", create_mode) if default_identity is not None: pulumi.set(__self__, "default_identity", default_identity) if disable_key_based_metadata_write_access is not None: pulumi.set(__self__, "disable_key_based_metadata_write_access", disable_key_based_metadata_write_access) if disable_local_auth is not None: pulumi.set(__self__, "disable_local_auth", disable_local_auth) if enable_analytical_storage is not None: pulumi.set(__self__, "enable_analytical_storage", enable_analytical_storage) if enable_automatic_failover is not None: pulumi.set(__self__, "enable_automatic_failover", enable_automatic_failover) if enable_cassandra_connector is not None: pulumi.set(__self__, "enable_cassandra_connector", enable_cassandra_connector) if enable_free_tier is not None: pulumi.set(__self__, "enable_free_tier", enable_free_tier) if enable_multiple_write_locations is not None: pulumi.set(__self__, "enable_multiple_write_locations", enable_multiple_write_locations) if identity is not None: pulumi.set(__self__, "identity", identity) if ip_rules is not None: pulumi.set(__self__, "ip_rules", ip_rules) if is_virtual_network_filter_enabled is not None: pulumi.set(__self__, "is_virtual_network_filter_enabled", is_virtual_network_filter_enabled) if key_vault_key_uri is not None: pulumi.set(__self__, "key_vault_key_uri", key_vault_key_uri) if kind is None: kind = 'GlobalDocumentDB' if kind is not None: pulumi.set(__self__, "kind", kind) if location is not None: pulumi.set(__self__, "location", location) if network_acl_bypass is not None: pulumi.set(__self__, "network_acl_bypass", network_acl_bypass) if network_acl_bypass_resource_ids is not None: pulumi.set(__self__, "network_acl_bypass_resource_ids", network_acl_bypass_resource_ids) if public_network_access is not None: pulumi.set(__self__, "public_network_access", public_network_access) if restore_parameters is not None: pulumi.set(__self__, "restore_parameters", restore_parameters) 
if tags is not None: pulumi.set(__self__, "tags", tags) if virtual_network_rules is not None: pulumi.set(__self__, "virtual_network_rules", virtual_network_rules) @property @pulumi.getter(name="databaseAccountOfferType") def database_account_offer_type(self) -> pulumi.Input['DatabaseAccountOfferType']: """ The offer type for the database """ return pulumi.get(self, "database_account_offer_type") @database_account_offer_type.setter def database_account_offer_type(self, value: pulumi.Input['DatabaseAccountOfferType']): pulumi.set(self, "database_account_offer_type", value) @property @pulumi.getter def locations(self) -> pulumi.Input[Sequence[pulumi.Input['LocationArgs']]]: """ An array that contains the georeplication locations enabled for the Cosmos DB account. """ return pulumi.get(self, "locations") @locations.setter def locations(self, value: pulumi.Input[Sequence[pulumi.Input['LocationArgs']]]): pulumi.set(self, "locations", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group. The name is case insensitive. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="accountName") def account_name(self) -> Optional[pulumi.Input[str]]: """ Cosmos DB database account name. """ return pulumi.get(self, "account_name") @account_name.setter def account_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "account_name", value) @property @pulumi.getter(name="analyticalStorageConfiguration") def analytical_storage_configuration(self) -> Optional[pulumi.Input['AnalyticalStorageConfigurationArgs']]: """ Analytical storage specific properties. """ return pulumi.get(self, "analytical_storage_configuration") @analytical_storage_configuration.setter def analytical_storage_configuration(self, value: Optional[pulumi.Input['AnalyticalStorageConfigurationArgs']]): pulumi.set(self, "analytical_storage_configuration", value) @property @pulumi.getter(name="apiProperties") def api_properties(self) -> Optional[pulumi.Input['ApiPropertiesArgs']]: """ API specific properties. Currently, supported only for MongoDB API. """ return pulumi.get(self, "api_properties") @api_properties.setter def api_properties(self, value: Optional[pulumi.Input['ApiPropertiesArgs']]): pulumi.set(self, "api_properties", value) @property @pulumi.getter(name="backupPolicy") def backup_policy(self) -> Optional[pulumi.Input[Union['ContinuousModeBackupPolicyArgs', 'PeriodicModeBackupPolicyArgs']]]: """ The object representing the policy for taking backups on an account. """ return pulumi.get(self, "backup_policy") @backup_policy.setter def backup_policy(self, value: Optional[pulumi.Input[Union['ContinuousModeBackupPolicyArgs', 'PeriodicModeBackupPolicyArgs']]]): pulumi.set(self, "backup_policy", value) @property @pulumi.getter def capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CapabilityArgs']]]]: """ List of Cosmos DB capabilities for the account """ return pulumi.get(self, "capabilities") @capabilities.setter def capabilities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CapabilityArgs']]]]): pulumi.set(self, "capabilities", value) @property @pulumi.getter(name="connectorOffer") def connector_offer(self) -> Optional[pulumi.Input[Union[str, 'ConnectorOffer']]]: """ The cassandra connector offer type for the Cosmos DB database C* account. 
""" return pulumi.get(self, "connector_offer") @connector_offer.setter def connector_offer(self, value: Optional[pulumi.Input[Union[str, 'ConnectorOffer']]]): pulumi.set(self, "connector_offer", value) @property @pulumi.getter(name="consistencyPolicy") def consistency_policy(self) -> Optional[pulumi.Input['ConsistencyPolicyArgs']]: """ The consistency policy for the Cosmos DB account. """ return pulumi.get(self, "consistency_policy") @consistency_policy.setter def consistency_policy(self, value: Optional[pulumi.Input['ConsistencyPolicyArgs']]): pulumi.set(self, "consistency_policy", value) @property @pulumi.getter def cors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CorsPolicyArgs']]]]: """ The CORS policy for the Cosmos DB database account. """ return pulumi.get(self, "cors") @cors.setter def cors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CorsPolicyArgs']]]]): pulumi.set(self, "cors", value) @property @pulumi.getter(name="createMode") def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]: """ Enum to indicate the mode of account creation. """ return pulumi.get(self, "create_mode") @create_mode.setter def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]): pulumi.set(self, "create_mode", value) @property @pulumi.getter(name="defaultIdentity") def default_identity(self) -> Optional[pulumi.Input[str]]: """ The default identity for accessing key vault used in features like customer managed keys. The default identity needs to be explicitly set by the users. It can be "FirstPartyIdentity", "SystemAssignedIdentity" and more. """ return pulumi.get(self, "default_identity") @default_identity.setter def default_identity(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "default_identity", value) @property @pulumi.getter(name="disableKeyBasedMetadataWriteAccess") def disable_key_based_metadata_write_access(self) -> Optional[pulumi.Input[bool]]: """ Disable write operations on metadata resources (databases, containers, throughput) via account keys """ return pulumi.get(self, "disable_key_based_metadata_write_access") @disable_key_based_metadata_write_access.setter def disable_key_based_metadata_write_access(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disable_key_based_metadata_write_access", value) @property @pulumi.getter(name="disableLocalAuth") def disable_local_auth(self) -> Optional[pulumi.Input[bool]]: """ Opt-out of local authentication and ensure only MSI and AAD can be used exclusively for authentication. """ return pulumi.get(self, "disable_local_auth") @disable_local_auth.setter def disable_local_auth(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "disable_local_auth", value) @property @pulumi.getter(name="enableAnalyticalStorage") def enable_analytical_storage(self) -> Optional[pulumi.Input[bool]]: """ Flag to indicate whether to enable storage analytics. """ return pulumi.get(self, "enable_analytical_storage") @enable_analytical_storage.setter def enable_analytical_storage(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_analytical_storage", value) @property @pulumi.getter(name="enableAutomaticFailover") def enable_automatic_failover(self) -> Optional[pulumi.Input[bool]]: """ Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage. Automatic failover will result in a new write region for the account and is chosen based on the failover priorities configured for the account. 
""" return pulumi.get(self, "enable_automatic_failover") @enable_automatic_failover.setter def enable_automatic_failover(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_automatic_failover", value) @property @pulumi.getter(name="enableCassandraConnector") def enable_cassandra_connector(self) -> Optional[pulumi.Input[bool]]: """ Enables the cassandra connector on the Cosmos DB C* account """ return pulumi.get(self, "enable_cassandra_connector") @enable_cassandra_connector.setter def enable_cassandra_connector(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_cassandra_connector", value) @property @pulumi.getter(name="enableFreeTier") def enable_free_tier(self) -> Optional[pulumi.Input[bool]]: """ Flag to indicate whether Free Tier is enabled. """ return pulumi.get(self, "enable_free_tier") @enable_free_tier.setter def enable_free_tier(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_free_tier", value) @property @pulumi.getter(name="enableMultipleWriteLocations") def enable_multiple_write_locations(self) -> Optional[pulumi.Input[bool]]: """ Enables the account to write in multiple locations """ return pulumi.get(self, "enable_multiple_write_locations") @enable_multiple_write_locations.setter def enable_multiple_write_locations(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "enable_multiple_write_locations", value) @property @pulumi.getter def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]: """ Identity for the resource. """ return pulumi.get(self, "identity") @identity.setter def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]): pulumi.set(self, "identity", value) @property @pulumi.getter(name="ipRules") def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpAddressOrRangeArgs']]]]: """ List of IpRules. """ return pulumi.get(self, "ip_rules") @ip_rules.setter def ip_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpAddressOrRangeArgs']]]]): pulumi.set(self, "ip_rules", value) @property @pulumi.getter(name="isVirtualNetworkFilterEnabled") def is_virtual_network_filter_enabled(self) -> Optional[pulumi.Input[bool]]: """ Flag to indicate whether to enable/disable Virtual Network ACL rules. """ return pulumi.get(self, "is_virtual_network_filter_enabled") @is_virtual_network_filter_enabled.setter def is_virtual_network_filter_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "is_virtual_network_filter_enabled", value) @property @pulumi.getter(name="keyVaultKeyUri") def key_vault_key_uri(self) -> Optional[pulumi.Input[str]]: """ The URI of the key vault """ return pulumi.get(self, "key_vault_key_uri") @key_vault_key_uri.setter def key_vault_key_uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "key_vault_key_uri", value) @property @pulumi.getter def kind(self) -> Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]]: """ Indicates the type of database account. This can only be set at database account creation. """ return pulumi.get(self, "kind") @kind.setter def kind(self, value: Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]]): pulumi.set(self, "kind", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ The location of the resource group to which the resource belongs. 
""" return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter(name="networkAclBypass") def network_acl_bypass(self) -> Optional[pulumi.Input['NetworkAclBypass']]: """ Indicates what services are allowed to bypass firewall checks. """ return pulumi.get(self, "network_acl_bypass") @network_acl_bypass.setter def network_acl_bypass(self, value: Optional[pulumi.Input['NetworkAclBypass']]): pulumi.set(self, "network_acl_bypass", value) @property @pulumi.getter(name="networkAclBypassResourceIds") def network_acl_bypass_resource_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ An array that contains the Resource Ids for Network Acl Bypass for the Cosmos DB account. """ return pulumi.get(self, "network_acl_bypass_resource_ids") @network_acl_bypass_resource_ids.setter def network_acl_bypass_resource_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "network_acl_bypass_resource_ids", value) @property @pulumi.getter(name="publicNetworkAccess") def public_network_access(self) -> Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]: """ Whether requests from Public Network are allowed """ return pulumi.get(self, "public_network_access") @public_network_access.setter def public_network_access(self, value: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]): pulumi.set(self, "public_network_access", value) @property @pulumi.getter(name="restoreParameters") def restore_parameters(self) -> Optional[pulumi.Input['RestoreParametersArgs']]: """ Parameters to indicate the information about the restore. """ return pulumi.get(self, "restore_parameters") @restore_parameters.setter def restore_parameters(self, value: Optional[pulumi.Input['RestoreParametersArgs']]): pulumi.set(self, "restore_parameters", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="virtualNetworkRules") def virtual_network_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]]: """ List of Virtual Network ACL rules configured for the Cosmos DB account. 
""" return pulumi.get(self, "virtual_network_rules") @virtual_network_rules.setter def virtual_network_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]]): pulumi.set(self, "virtual_network_rules", value) class DatabaseAccount(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, account_name: Optional[pulumi.Input[str]] = None, analytical_storage_configuration: Optional[pulumi.Input[pulumi.InputType['AnalyticalStorageConfigurationArgs']]] = None, api_properties: Optional[pulumi.Input[pulumi.InputType['ApiPropertiesArgs']]] = None, backup_policy: Optional[pulumi.Input[Union[pulumi.InputType['ContinuousModeBackupPolicyArgs'], pulumi.InputType['PeriodicModeBackupPolicyArgs']]]] = None, capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CapabilityArgs']]]]] = None, connector_offer: Optional[pulumi.Input[Union[str, 'ConnectorOffer']]] = None, consistency_policy: Optional[pulumi.Input[pulumi.InputType['ConsistencyPolicyArgs']]] = None, cors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CorsPolicyArgs']]]]] = None, create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None, database_account_offer_type: Optional[pulumi.Input['DatabaseAccountOfferType']] = None, default_identity: Optional[pulumi.Input[str]] = None, disable_key_based_metadata_write_access: Optional[pulumi.Input[bool]] = None, disable_local_auth: Optional[pulumi.Input[bool]] = None, enable_analytical_storage: Optional[pulumi.Input[bool]] = None, enable_automatic_failover: Optional[pulumi.Input[bool]] = None, enable_cassandra_connector: Optional[pulumi.Input[bool]] = None, enable_free_tier: Optional[pulumi.Input[bool]] = None, enable_multiple_write_locations: Optional[pulumi.Input[bool]] = None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None, ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpAddressOrRangeArgs']]]]] = None, is_virtual_network_filter_enabled: Optional[pulumi.Input[bool]] = None, key_vault_key_uri: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]] = None, location: Optional[pulumi.Input[str]] = None, locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LocationArgs']]]]] = None, network_acl_bypass: Optional[pulumi.Input['NetworkAclBypass']] = None, network_acl_bypass_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, restore_parameters: Optional[pulumi.Input[pulumi.InputType['RestoreParametersArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualNetworkRuleArgs']]]]] = None, __props__=None): """ An Azure Cosmos DB database account. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] account_name: Cosmos DB database account name. :param pulumi.Input[pulumi.InputType['AnalyticalStorageConfigurationArgs']] analytical_storage_configuration: Analytical storage specific properties. :param pulumi.Input[pulumi.InputType['ApiPropertiesArgs']] api_properties: API specific properties. Currently, supported only for MongoDB API. 
:param pulumi.Input[Union[pulumi.InputType['ContinuousModeBackupPolicyArgs'], pulumi.InputType['PeriodicModeBackupPolicyArgs']]] backup_policy: The object representing the policy for taking backups on an account. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CapabilityArgs']]]] capabilities: List of Cosmos DB capabilities for the account :param pulumi.Input[Union[str, 'ConnectorOffer']] connector_offer: The cassandra connector offer type for the Cosmos DB database C* account. :param pulumi.Input[pulumi.InputType['ConsistencyPolicyArgs']] consistency_policy: The consistency policy for the Cosmos DB account. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CorsPolicyArgs']]]] cors: The CORS policy for the Cosmos DB database account. :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Enum to indicate the mode of account creation. :param pulumi.Input['DatabaseAccountOfferType'] database_account_offer_type: The offer type for the database :param pulumi.Input[str] default_identity: The default identity for accessing key vault used in features like customer managed keys. The default identity needs to be explicitly set by the users. It can be "FirstPartyIdentity", "SystemAssignedIdentity" and more. :param pulumi.Input[bool] disable_key_based_metadata_write_access: Disable write operations on metadata resources (databases, containers, throughput) via account keys :param pulumi.Input[bool] disable_local_auth: Opt-out of local authentication and ensure only MSI and AAD can be used exclusively for authentication. :param pulumi.Input[bool] enable_analytical_storage: Flag to indicate whether to enable storage analytics. :param pulumi.Input[bool] enable_automatic_failover: Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage. Automatic failover will result in a new write region for the account and is chosen based on the failover priorities configured for the account. :param pulumi.Input[bool] enable_cassandra_connector: Enables the cassandra connector on the Cosmos DB C* account :param pulumi.Input[bool] enable_free_tier: Flag to indicate whether Free Tier is enabled. :param pulumi.Input[bool] enable_multiple_write_locations: Enables the account to write in multiple locations :param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: Identity for the resource. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpAddressOrRangeArgs']]]] ip_rules: List of IpRules. :param pulumi.Input[bool] is_virtual_network_filter_enabled: Flag to indicate whether to enable/disable Virtual Network ACL rules. :param pulumi.Input[str] key_vault_key_uri: The URI of the key vault :param pulumi.Input[Union[str, 'DatabaseAccountKind']] kind: Indicates the type of database account. This can only be set at database account creation. :param pulumi.Input[str] location: The location of the resource group to which the resource belongs. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LocationArgs']]]] locations: An array that contains the georeplication locations enabled for the Cosmos DB account. :param pulumi.Input['NetworkAclBypass'] network_acl_bypass: Indicates what services are allowed to bypass firewall checks. :param pulumi.Input[Sequence[pulumi.Input[str]]] network_acl_bypass_resource_ids: An array that contains the Resource Ids for Network Acl Bypass for the Cosmos DB account. 
:param pulumi.Input[Union[str, 'PublicNetworkAccess']] public_network_access: Whether requests from Public Network are allowed :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[pulumi.InputType['RestoreParametersArgs']] restore_parameters: Parameters to indicate the information about the restore. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualNetworkRuleArgs']]]] virtual_network_rules: List of Virtual Network ACL rules configured for the Cosmos DB account. """ ... @overload def __init__(__self__, resource_name: str, args: DatabaseAccountArgs, opts: Optional[pulumi.ResourceOptions] = None): """ An Azure Cosmos DB database account. :param str resource_name: The name of the resource. :param DatabaseAccountArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(DatabaseAccountArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, account_name: Optional[pulumi.Input[str]] = None, analytical_storage_configuration: Optional[pulumi.Input[pulumi.InputType['AnalyticalStorageConfigurationArgs']]] = None, api_properties: Optional[pulumi.Input[pulumi.InputType['ApiPropertiesArgs']]] = None, backup_policy: Optional[pulumi.Input[Union[pulumi.InputType['ContinuousModeBackupPolicyArgs'], pulumi.InputType['PeriodicModeBackupPolicyArgs']]]] = None, capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CapabilityArgs']]]]] = None, connector_offer: Optional[pulumi.Input[Union[str, 'ConnectorOffer']]] = None, consistency_policy: Optional[pulumi.Input[pulumi.InputType['ConsistencyPolicyArgs']]] = None, cors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CorsPolicyArgs']]]]] = None, create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None, database_account_offer_type: Optional[pulumi.Input['DatabaseAccountOfferType']] = None, default_identity: Optional[pulumi.Input[str]] = None, disable_key_based_metadata_write_access: Optional[pulumi.Input[bool]] = None, disable_local_auth: Optional[pulumi.Input[bool]] = None, enable_analytical_storage: Optional[pulumi.Input[bool]] = None, enable_automatic_failover: Optional[pulumi.Input[bool]] = None, enable_cassandra_connector: Optional[pulumi.Input[bool]] = None, enable_free_tier: Optional[pulumi.Input[bool]] = None, enable_multiple_write_locations: Optional[pulumi.Input[bool]] = None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None, ip_rules: 
Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpAddressOrRangeArgs']]]]] = None, is_virtual_network_filter_enabled: Optional[pulumi.Input[bool]] = None, key_vault_key_uri: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]] = None, location: Optional[pulumi.Input[str]] = None, locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LocationArgs']]]]] = None, network_acl_bypass: Optional[pulumi.Input['NetworkAclBypass']] = None, network_acl_bypass_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, restore_parameters: Optional[pulumi.Input[pulumi.InputType['RestoreParametersArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualNetworkRuleArgs']]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = DatabaseAccountArgs.__new__(DatabaseAccountArgs) __props__.__dict__["account_name"] = account_name __props__.__dict__["analytical_storage_configuration"] = analytical_storage_configuration __props__.__dict__["api_properties"] = api_properties __props__.__dict__["backup_policy"] = backup_policy __props__.__dict__["capabilities"] = capabilities __props__.__dict__["connector_offer"] = connector_offer __props__.__dict__["consistency_policy"] = consistency_policy __props__.__dict__["cors"] = cors if create_mode is None: create_mode = 'Default' __props__.__dict__["create_mode"] = create_mode if database_account_offer_type is None and not opts.urn: raise TypeError("Missing required property 'database_account_offer_type'") __props__.__dict__["database_account_offer_type"] = database_account_offer_type __props__.__dict__["default_identity"] = default_identity __props__.__dict__["disable_key_based_metadata_write_access"] = disable_key_based_metadata_write_access __props__.__dict__["disable_local_auth"] = disable_local_auth __props__.__dict__["enable_analytical_storage"] = enable_analytical_storage __props__.__dict__["enable_automatic_failover"] = enable_automatic_failover __props__.__dict__["enable_cassandra_connector"] = enable_cassandra_connector __props__.__dict__["enable_free_tier"] = enable_free_tier __props__.__dict__["enable_multiple_write_locations"] = enable_multiple_write_locations __props__.__dict__["identity"] = identity __props__.__dict__["ip_rules"] = ip_rules __props__.__dict__["is_virtual_network_filter_enabled"] = is_virtual_network_filter_enabled __props__.__dict__["key_vault_key_uri"] = key_vault_key_uri if kind is None: kind = 'GlobalDocumentDB' __props__.__dict__["kind"] = kind __props__.__dict__["location"] = location if locations is None and not opts.urn: raise TypeError("Missing required property 'locations'") __props__.__dict__["locations"] = locations __props__.__dict__["network_acl_bypass"] = network_acl_bypass __props__.__dict__["network_acl_bypass_resource_ids"] = network_acl_bypass_resource_ids 
__props__.__dict__["public_network_access"] = public_network_access if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["restore_parameters"] = restore_parameters __props__.__dict__["tags"] = tags __props__.__dict__["virtual_network_rules"] = virtual_network_rules __props__.__dict__["document_endpoint"] = None __props__.__dict__["failover_policies"] = None __props__.__dict__["instance_id"] = None __props__.__dict__["name"] = None __props__.__dict__["private_endpoint_connections"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["read_locations"] = None __props__.__dict__["system_data"] = None __props__.__dict__["type"] = None __props__.__dict__["write_locations"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20150401:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150401:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20150408:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150408:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20151106:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20151106:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20160319:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160319:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20160331:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160331:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20190801:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20191212:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20200301:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20200401:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20200901:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20210115:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20210315:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20210415:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:DatabaseAccount"), 
pulumi.Alias(type_="azure-native:documentdb/v20210515:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:DatabaseAccount"), pulumi.Alias(type_="azure-native:documentdb/v20210701preview:DatabaseAccount"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:DatabaseAccount")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(DatabaseAccount, __self__).__init__( 'azure-native:documentdb/v20210615:DatabaseAccount', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'DatabaseAccount': """ Get an existing DatabaseAccount resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = DatabaseAccountArgs.__new__(DatabaseAccountArgs) __props__.__dict__["analytical_storage_configuration"] = None __props__.__dict__["api_properties"] = None __props__.__dict__["backup_policy"] = None __props__.__dict__["capabilities"] = None __props__.__dict__["connector_offer"] = None __props__.__dict__["consistency_policy"] = None __props__.__dict__["cors"] = None __props__.__dict__["create_mode"] = None __props__.__dict__["database_account_offer_type"] = None __props__.__dict__["default_identity"] = None __props__.__dict__["disable_key_based_metadata_write_access"] = None __props__.__dict__["disable_local_auth"] = None __props__.__dict__["document_endpoint"] = None __props__.__dict__["enable_analytical_storage"] = None __props__.__dict__["enable_automatic_failover"] = None __props__.__dict__["enable_cassandra_connector"] = None __props__.__dict__["enable_free_tier"] = None __props__.__dict__["enable_multiple_write_locations"] = None __props__.__dict__["failover_policies"] = None __props__.__dict__["identity"] = None __props__.__dict__["instance_id"] = None __props__.__dict__["ip_rules"] = None __props__.__dict__["is_virtual_network_filter_enabled"] = None __props__.__dict__["key_vault_key_uri"] = None __props__.__dict__["kind"] = None __props__.__dict__["location"] = None __props__.__dict__["locations"] = None __props__.__dict__["name"] = None __props__.__dict__["network_acl_bypass"] = None __props__.__dict__["network_acl_bypass_resource_ids"] = None __props__.__dict__["private_endpoint_connections"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["public_network_access"] = None __props__.__dict__["read_locations"] = None __props__.__dict__["restore_parameters"] = None __props__.__dict__["system_data"] = None __props__.__dict__["tags"] = None __props__.__dict__["type"] = None __props__.__dict__["virtual_network_rules"] = None __props__.__dict__["write_locations"] = None return DatabaseAccount(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="analyticalStorageConfiguration") def analytical_storage_configuration(self) -> pulumi.Output[Optional['outputs.AnalyticalStorageConfigurationResponse']]: """ Analytical storage specific properties. """ return pulumi.get(self, "analytical_storage_configuration") @property @pulumi.getter(name="apiProperties") def api_properties(self) -> pulumi.Output[Optional['outputs.ApiPropertiesResponse']]: """ API specific properties. 
""" return pulumi.get(self, "api_properties") @property @pulumi.getter(name="backupPolicy") def backup_policy(self) -> pulumi.Output[Optional[Any]]: """ The object representing the policy for taking backups on an account. """ return pulumi.get(self, "backup_policy") @property @pulumi.getter def capabilities(self) -> pulumi.Output[Optional[Sequence['outputs.CapabilityResponse']]]: """ List of Cosmos DB capabilities for the account """ return pulumi.get(self, "capabilities") @property @pulumi.getter(name="connectorOffer") def connector_offer(self) -> pulumi.Output[Optional[str]]: """ The cassandra connector offer type for the Cosmos DB database C* account. """ return pulumi.get(self, "connector_offer") @property @pulumi.getter(name="consistencyPolicy") def consistency_policy(self) -> pulumi.Output[Optional['outputs.ConsistencyPolicyResponse']]: """ The consistency policy for the Cosmos DB database account. """ return pulumi.get(self, "consistency_policy") @property @pulumi.getter def cors(self) -> pulumi.Output[Optional[Sequence['outputs.CorsPolicyResponse']]]: """ The CORS policy for the Cosmos DB database account. """ return pulumi.get(self, "cors") @property @pulumi.getter(name="createMode") def create_mode(self) -> pulumi.Output[Optional[str]]: """ Enum to indicate the mode of account creation. """ return pulumi.get(self, "create_mode") @property @pulumi.getter(name="databaseAccountOfferType") def database_account_offer_type(self) -> pulumi.Output[str]: """ The offer type for the Cosmos DB database account. Default value: Standard. """ return pulumi.get(self, "database_account_offer_type") @property @pulumi.getter(name="defaultIdentity") def default_identity(self) -> pulumi.Output[Optional[str]]: """ The default identity for accessing key vault used in features like customer managed keys. The default identity needs to be explicitly set by the users. It can be "FirstPartyIdentity", "SystemAssignedIdentity" and more. """ return pulumi.get(self, "default_identity") @property @pulumi.getter(name="disableKeyBasedMetadataWriteAccess") def disable_key_based_metadata_write_access(self) -> pulumi.Output[Optional[bool]]: """ Disable write operations on metadata resources (databases, containers, throughput) via account keys """ return pulumi.get(self, "disable_key_based_metadata_write_access") @property @pulumi.getter(name="disableLocalAuth") def disable_local_auth(self) -> pulumi.Output[Optional[bool]]: """ Opt-out of local authentication and ensure only MSI and AAD can be used exclusively for authentication. """ return pulumi.get(self, "disable_local_auth") @property @pulumi.getter(name="documentEndpoint") def document_endpoint(self) -> pulumi.Output[str]: """ The connection endpoint for the Cosmos DB database account. """ return pulumi.get(self, "document_endpoint") @property @pulumi.getter(name="enableAnalyticalStorage") def enable_analytical_storage(self) -> pulumi.Output[Optional[bool]]: """ Flag to indicate whether to enable storage analytics. """ return pulumi.get(self, "enable_analytical_storage") @property @pulumi.getter(name="enableAutomaticFailover") def enable_automatic_failover(self) -> pulumi.Output[Optional[bool]]: """ Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage. Automatic failover will result in a new write region for the account and is chosen based on the failover priorities configured for the account. 
""" return pulumi.get(self, "enable_automatic_failover") @property @pulumi.getter(name="enableCassandraConnector") def enable_cassandra_connector(self) -> pulumi.Output[Optional[bool]]: """ Enables the cassandra connector on the Cosmos DB C* account """ return pulumi.get(self, "enable_cassandra_connector") @property @pulumi.getter(name="enableFreeTier") def enable_free_tier(self) -> pulumi.Output[Optional[bool]]: """ Flag to indicate whether Free Tier is enabled. """ return pulumi.get(self, "enable_free_tier") @property @pulumi.getter(name="enableMultipleWriteLocations") def enable_multiple_write_locations(self) -> pulumi.Output[Optional[bool]]: """ Enables the account to write in multiple locations """ return pulumi.get(self, "enable_multiple_write_locations") @property @pulumi.getter(name="failoverPolicies") def failover_policies(self) -> pulumi.Output[Sequence['outputs.FailoverPolicyResponse']]: """ An array that contains the regions ordered by their failover priorities. """ return pulumi.get(self, "failover_policies") @property @pulumi.getter def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]: """ Identity for the resource. """ return pulumi.get(self, "identity") @property @pulumi.getter(name="instanceId") def instance_id(self) -> pulumi.Output[str]: """ A unique identifier assigned to the database account """ return pulumi.get(self, "instance_id") @property @pulumi.getter(name="ipRules") def ip_rules(self) -> pulumi.Output[Optional[Sequence['outputs.IpAddressOrRangeResponse']]]: """ List of IpRules. """ return pulumi.get(self, "ip_rules") @property @pulumi.getter(name="isVirtualNetworkFilterEnabled") def is_virtual_network_filter_enabled(self) -> pulumi.Output[Optional[bool]]: """ Flag to indicate whether to enable/disable Virtual Network ACL rules. """ return pulumi.get(self, "is_virtual_network_filter_enabled") @property @pulumi.getter(name="keyVaultKeyUri") def key_vault_key_uri(self) -> pulumi.Output[Optional[str]]: """ The URI of the key vault """ return pulumi.get(self, "key_vault_key_uri") @property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: """ Indicates the type of database account. This can only be set at database account creation. """ return pulumi.get(self, "kind") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ The location of the resource group to which the resource belongs. """ return pulumi.get(self, "location") @property @pulumi.getter def locations(self) -> pulumi.Output[Sequence['outputs.LocationResponse']]: """ An array that contains all of the locations enabled for the Cosmos DB account. """ return pulumi.get(self, "locations") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the ARM resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="networkAclBypass") def network_acl_bypass(self) -> pulumi.Output[Optional[str]]: """ Indicates what services are allowed to bypass firewall checks. """ return pulumi.get(self, "network_acl_bypass") @property @pulumi.getter(name="networkAclBypassResourceIds") def network_acl_bypass_resource_ids(self) -> pulumi.Output[Optional[Sequence[str]]]: """ An array that contains the Resource Ids for Network Acl Bypass for the Cosmos DB account. 
""" return pulumi.get(self, "network_acl_bypass_resource_ids") @property @pulumi.getter(name="privateEndpointConnections") def private_endpoint_connections(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointConnectionResponse']]: """ List of Private Endpoint Connections configured for the Cosmos DB account. """ return pulumi.get(self, "private_endpoint_connections") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The status of the Cosmos DB account at the time the operation was called. The status can be one of following. 'Creating' – the Cosmos DB account is being created. When an account is in Creating state, only properties that are specified as input for the Create Cosmos DB account operation are returned. 'Succeeded' – the Cosmos DB account is active for use. 'Updating' – the Cosmos DB account is being updated. 'Deleting' – the Cosmos DB account is being deleted. 'Failed' – the Cosmos DB account failed creation. 'DeletionFailed' – the Cosmos DB account deletion failed. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="publicNetworkAccess") def public_network_access(self) -> pulumi.Output[Optional[str]]: """ Whether requests from Public Network are allowed """ return pulumi.get(self, "public_network_access") @property @pulumi.getter(name="readLocations") def read_locations(self) -> pulumi.Output[Sequence['outputs.LocationResponse']]: """ An array that contains of the read locations enabled for the Cosmos DB account. """ return pulumi.get(self, "read_locations") @property @pulumi.getter(name="restoreParameters") def restore_parameters(self) -> pulumi.Output[Optional['outputs.RestoreParametersResponse']]: """ Parameters to indicate the information about the restore. """ return pulumi.get(self, "restore_parameters") @property @pulumi.getter(name="systemData") def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']: """ The system meta data relating to this resource. """ return pulumi.get(self, "system_data") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of Azure resource. """ return pulumi.get(self, "type") @property @pulumi.getter(name="virtualNetworkRules") def virtual_network_rules(self) -> pulumi.Output[Optional[Sequence['outputs.VirtualNetworkRuleResponse']]]: """ List of Virtual Network ACL rules configured for the Cosmos DB account. """ return pulumi.get(self, "virtual_network_rules") @property @pulumi.getter(name="writeLocations") def write_locations(self) -> pulumi.Output[Sequence['outputs.LocationResponse']]: """ An array that contains the write location for the Cosmos DB account. """ return pulumi.get(self, "write_locations")
56.411081
2956
0.699021
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['DatabaseAccountArgs', 'DatabaseAccount'] @pulumi.input_type class DatabaseAccountArgs: def __init__(__self__, *, database_account_offer_type: pulumi.Input['DatabaseAccountOfferType'], locations: pulumi.Input[Sequence[pulumi.Input['LocationArgs']]], resource_group_name: pulumi.Input[str], account_name: Optional[pulumi.Input[str]] = None, analytical_storage_configuration: Optional[pulumi.Input['AnalyticalStorageConfigurationArgs']] = None, api_properties: Optional[pulumi.Input['ApiPropertiesArgs']] = None, backup_policy: Optional[pulumi.Input[Union['ContinuousModeBackupPolicyArgs', 'PeriodicModeBackupPolicyArgs']]] = None, capabilities: Optional[pulumi.Input[Sequence[pulumi.Input['CapabilityArgs']]]] = None, connector_offer: Optional[pulumi.Input[Union[str, 'ConnectorOffer']]] = None, consistency_policy: Optional[pulumi.Input['ConsistencyPolicyArgs']] = None, cors: Optional[pulumi.Input[Sequence[pulumi.Input['CorsPolicyArgs']]]] = None, create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None, default_identity: Optional[pulumi.Input[str]] = None, disable_key_based_metadata_write_access: Optional[pulumi.Input[bool]] = None, disable_local_auth: Optional[pulumi.Input[bool]] = None, enable_analytical_storage: Optional[pulumi.Input[bool]] = None, enable_automatic_failover: Optional[pulumi.Input[bool]] = None, enable_cassandra_connector: Optional[pulumi.Input[bool]] = None, enable_free_tier: Optional[pulumi.Input[bool]] = None, enable_multiple_write_locations: Optional[pulumi.Input[bool]] = None, identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None, ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input['IpAddressOrRangeArgs']]]] = None, is_virtual_network_filter_enabled: Optional[pulumi.Input[bool]] = None, key_vault_key_uri: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]] = None, location: Optional[pulumi.Input[str]] = None, network_acl_bypass: Optional[pulumi.Input['NetworkAclBypass']] = None, network_acl_bypass_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None, restore_parameters: Optional[pulumi.Input['RestoreParametersArgs']] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]] = None): pulumi.set(__self__, "database_account_offer_type", database_account_offer_type) pulumi.set(__self__, "locations", locations) pulumi.set(__self__, "resource_group_name", resource_group_name) if account_name is not None: pulumi.set(__self__, "account_name", account_name) if analytical_storage_configuration is not None: pulumi.set(__self__, "analytical_storage_configuration", analytical_storage_configuration) if api_properties is not None: pulumi.set(__self__, "api_properties", api_properties) if backup_policy is not None: pulumi.set(__self__, "backup_policy", backup_policy) if capabilities is not None: pulumi.set(__self__, "capabilities", capabilities) if connector_offer is not None: pulumi.set(__self__, "connector_offer", connector_offer) if consistency_policy is not None: pulumi.set(__self__, "consistency_policy", consistency_policy) if cors is not None: pulumi.set(__self__, 
"cors", cors) if create_mode is None: create_mode = 'Default' if create_mode is not None: pulumi.set(__self__, "create_mode", create_mode) if default_identity is not None: pulumi.set(__self__, "default_identity", default_identity) if disable_key_based_metadata_write_access is not None: pulumi.set(__self__, "disable_key_based_metadata_write_access", disable_key_based_metadata_write_access) if disable_local_auth is not None: pulumi.set(__self__, "disable_local_auth", disable_local_auth) if enable_analytical_storage is not None: pulumi.set(__self__, "enable_analytical_storage", enable_analytical_storage) if enable_automatic_failover is not None: pulumi.set(__self__, "enable_automatic_failover", enable_automatic_failover) if enable_cassandra_connector is not None: pulumi.set(__self__, "enable_cassandra_connector", enable_cassandra_connector) if enable_free_tier is not None: pulumi.set(__self__, "enable_free_tier", enable_free_tier) if enable_multiple_write_locations is not None: pulumi.set(__self__, "enable_multiple_write_locations", enable_multiple_write_locations) if identity is not None: pulumi.set(__self__, "identity", identity) if ip_rules is not None: pulumi.set(__self__, "ip_rules", ip_rules) if is_virtual_network_filter_enabled is not None: pulumi.set(__self__, "is_virtual_network_filter_enabled", is_virtual_network_filter_enabled) if key_vault_key_uri is not None: pulumi.set(__self__, "key_vault_key_uri", key_vault_key_uri) if kind is None: kind = 'GlobalDocumentDB' if kind is not None: pulumi.set(__self__, "kind", kind) if location is not None: pulumi.set(__self__, "location", location) if network_acl_bypass is not None: pulumi.set(__self__, "network_acl_bypass", network_acl_bypass) if network_acl_bypass_resource_ids is not None: pulumi.set(__self__, "network_acl_bypass_resource_ids", network_acl_bypass_resource_ids) if public_network_access is not None: pulumi.set(__self__, "public_network_access", public_network_access) if restore_parameters is not None: pulumi.set(__self__, "restore_parameters", restore_parameters) if tags is not None: pulumi.set(__self__, "tags", tags) if virtual_network_rules is not None: pulumi.set(__self__, "virtual_network_rules", virtual_network_rules) @property @pulumi.getter(name="databaseAccountOfferType") def database_account_offer_type(self) -> pulumi.Input['DatabaseAccountOfferType']: return pulumi.get(self, "database_account_offer_type") @database_account_offer_type.setter def database_account_offer_type(self, value: pulumi.Input['DatabaseAccountOfferType']): pulumi.set(self, "database_account_offer_type", value) @property @pulumi.getter def locations(self) -> pulumi.Input[Sequence[pulumi.Input['LocationArgs']]]: return pulumi.get(self, "locations") @locations.setter def locations(self, value: pulumi.Input[Sequence[pulumi.Input['LocationArgs']]]): pulumi.set(self, "locations", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="accountName") def account_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "account_name") @account_name.setter def account_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "account_name", value) @property @pulumi.getter(name="analyticalStorageConfiguration") def analytical_storage_configuration(self) -> 
Optional[pulumi.Input['AnalyticalStorageConfigurationArgs']]:
        return pulumi.get(self, "analytical_storage_configuration")

    @analytical_storage_configuration.setter
    def analytical_storage_configuration(self, value: Optional[pulumi.Input['AnalyticalStorageConfigurationArgs']]):
        pulumi.set(self, "analytical_storage_configuration", value)

    @property
    @pulumi.getter(name="apiProperties")
    def api_properties(self) -> Optional[pulumi.Input['ApiPropertiesArgs']]:
        return pulumi.get(self, "api_properties")

    @api_properties.setter
    def api_properties(self, value: Optional[pulumi.Input['ApiPropertiesArgs']]):
        pulumi.set(self, "api_properties", value)

    @property
    @pulumi.getter(name="backupPolicy")
    def backup_policy(self) -> Optional[pulumi.Input[Union['ContinuousModeBackupPolicyArgs', 'PeriodicModeBackupPolicyArgs']]]:
        return pulumi.get(self, "backup_policy")

    @backup_policy.setter
    def backup_policy(self, value: Optional[pulumi.Input[Union['ContinuousModeBackupPolicyArgs', 'PeriodicModeBackupPolicyArgs']]]):
        pulumi.set(self, "backup_policy", value)

    @property
    @pulumi.getter
    def capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CapabilityArgs']]]]:
        return pulumi.get(self, "capabilities")

    @capabilities.setter
    def capabilities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CapabilityArgs']]]]):
        pulumi.set(self, "capabilities", value)

    @property
    @pulumi.getter(name="connectorOffer")
    def connector_offer(self) -> Optional[pulumi.Input[Union[str, 'ConnectorOffer']]]:
        return pulumi.get(self, "connector_offer")

    @connector_offer.setter
    def connector_offer(self, value: Optional[pulumi.Input[Union[str, 'ConnectorOffer']]]):
        pulumi.set(self, "connector_offer", value)

    @property
    @pulumi.getter(name="consistencyPolicy")
    def consistency_policy(self) -> Optional[pulumi.Input['ConsistencyPolicyArgs']]:
        return pulumi.get(self, "consistency_policy")

    @consistency_policy.setter
    def consistency_policy(self, value: Optional[pulumi.Input['ConsistencyPolicyArgs']]):
        pulumi.set(self, "consistency_policy", value)

    @property
    @pulumi.getter
    def cors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CorsPolicyArgs']]]]:
        return pulumi.get(self, "cors")

    @cors.setter
    def cors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CorsPolicyArgs']]]]):
        pulumi.set(self, "cors", value)

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        return pulumi.get(self, "create_mode")

    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)

    @property
    @pulumi.getter(name="defaultIdentity")
    def default_identity(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "default_identity")

    @default_identity.setter
    def default_identity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_identity", value)

    @property
    @pulumi.getter(name="disableKeyBasedMetadataWriteAccess")
    def disable_key_based_metadata_write_access(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "disable_key_based_metadata_write_access")

    @disable_key_based_metadata_write_access.setter
    def disable_key_based_metadata_write_access(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_key_based_metadata_write_access", value)

    @property
    @pulumi.getter(name="disableLocalAuth")
    def disable_local_auth(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "disable_local_auth")

    @disable_local_auth.setter
    def disable_local_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_local_auth", value)

    @property
    @pulumi.getter(name="enableAnalyticalStorage")
    def enable_analytical_storage(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_analytical_storage")

    @enable_analytical_storage.setter
    def enable_analytical_storage(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_analytical_storage", value)

    @property
    @pulumi.getter(name="enableAutomaticFailover")
    def enable_automatic_failover(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_automatic_failover")

    @enable_automatic_failover.setter
    def enable_automatic_failover(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_automatic_failover", value)

    @property
    @pulumi.getter(name="enableCassandraConnector")
    def enable_cassandra_connector(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_cassandra_connector")

    @enable_cassandra_connector.setter
    def enable_cassandra_connector(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_cassandra_connector", value)

    @property
    @pulumi.getter(name="enableFreeTier")
    def enable_free_tier(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_free_tier")

    @enable_free_tier.setter
    def enable_free_tier(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_free_tier", value)

    @property
    @pulumi.getter(name="enableMultipleWriteLocations")
    def enable_multiple_write_locations(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "enable_multiple_write_locations")

    @enable_multiple_write_locations.setter
    def enable_multiple_write_locations(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_multiple_write_locations", value)

    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]):
        pulumi.set(self, "identity", value)

    @property
    @pulumi.getter(name="ipRules")
    def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpAddressOrRangeArgs']]]]:
        return pulumi.get(self, "ip_rules")

    @ip_rules.setter
    def ip_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpAddressOrRangeArgs']]]]):
        pulumi.set(self, "ip_rules", value)

    @property
    @pulumi.getter(name="isVirtualNetworkFilterEnabled")
    def is_virtual_network_filter_enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "is_virtual_network_filter_enabled")

    @is_virtual_network_filter_enabled.setter
    def is_virtual_network_filter_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_virtual_network_filter_enabled", value)

    @property
    @pulumi.getter(name="keyVaultKeyUri")
    def key_vault_key_uri(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key_vault_key_uri")

    @key_vault_key_uri.setter
    def key_vault_key_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_vault_key_uri", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]]:
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="networkAclBypass")
    def network_acl_bypass(self) -> Optional[pulumi.Input['NetworkAclBypass']]:
        return pulumi.get(self, "network_acl_bypass")

    @network_acl_bypass.setter
    def network_acl_bypass(self, value: Optional[pulumi.Input['NetworkAclBypass']]):
        pulumi.set(self, "network_acl_bypass", value)

    @property
    @pulumi.getter(name="networkAclBypassResourceIds")
    def network_acl_bypass_resource_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "network_acl_bypass_resource_ids")

    @network_acl_bypass_resource_ids.setter
    def network_acl_bypass_resource_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "network_acl_bypass_resource_ids", value)

    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]:
        return pulumi.get(self, "public_network_access")

    @public_network_access.setter
    def public_network_access(self, value: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]):
        pulumi.set(self, "public_network_access", value)

    @property
    @pulumi.getter(name="restoreParameters")
    def restore_parameters(self) -> Optional[pulumi.Input['RestoreParametersArgs']]:
        return pulumi.get(self, "restore_parameters")

    @restore_parameters.setter
    def restore_parameters(self, value: Optional[pulumi.Input['RestoreParametersArgs']]):
        pulumi.set(self, "restore_parameters", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="virtualNetworkRules")
    def virtual_network_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]]:
        return pulumi.get(self, "virtual_network_rules")

    @virtual_network_rules.setter
    def virtual_network_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualNetworkRuleArgs']]]]):
        pulumi.set(self, "virtual_network_rules", value)


class DatabaseAccount(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 analytical_storage_configuration: Optional[pulumi.Input[pulumi.InputType['AnalyticalStorageConfigurationArgs']]] = None,
                 api_properties: Optional[pulumi.Input[pulumi.InputType['ApiPropertiesArgs']]] = None,
                 backup_policy: Optional[pulumi.Input[Union[pulumi.InputType['ContinuousModeBackupPolicyArgs'], pulumi.InputType['PeriodicModeBackupPolicyArgs']]]] = None,
                 capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CapabilityArgs']]]]] = None,
                 connector_offer: Optional[pulumi.Input[Union[str, 'ConnectorOffer']]] = None,
                 consistency_policy: Optional[pulumi.Input[pulumi.InputType['ConsistencyPolicyArgs']]] = None,
                 cors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CorsPolicyArgs']]]]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 database_account_offer_type: Optional[pulumi.Input['DatabaseAccountOfferType']] = None,
                 default_identity: Optional[pulumi.Input[str]] = None,
                 disable_key_based_metadata_write_access: Optional[pulumi.Input[bool]] = None,
                 disable_local_auth: Optional[pulumi.Input[bool]] = None,
                 enable_analytical_storage: Optional[pulumi.Input[bool]] = None,
                 enable_automatic_failover: Optional[pulumi.Input[bool]] = None,
                 enable_cassandra_connector: Optional[pulumi.Input[bool]] = None,
                 enable_free_tier: Optional[pulumi.Input[bool]] = None,
                 enable_multiple_write_locations: Optional[pulumi.Input[bool]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpAddressOrRangeArgs']]]]] = None,
                 is_virtual_network_filter_enabled: Optional[pulumi.Input[bool]] = None,
                 key_vault_key_uri: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LocationArgs']]]]] = None,
                 network_acl_bypass: Optional[pulumi.Input['NetworkAclBypass']] = None,
                 network_acl_bypass_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 restore_parameters: Optional[pulumi.Input[pulumi.InputType['RestoreParametersArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualNetworkRuleArgs']]]]] = None,
                 __props__=None):
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DatabaseAccountArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(DatabaseAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 analytical_storage_configuration: Optional[pulumi.Input[pulumi.InputType['AnalyticalStorageConfigurationArgs']]] = None,
                 api_properties: Optional[pulumi.Input[pulumi.InputType['ApiPropertiesArgs']]] = None,
                 backup_policy: Optional[pulumi.Input[Union[pulumi.InputType['ContinuousModeBackupPolicyArgs'], pulumi.InputType['PeriodicModeBackupPolicyArgs']]]] = None,
                 capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CapabilityArgs']]]]] = None,
                 connector_offer: Optional[pulumi.Input[Union[str, 'ConnectorOffer']]] = None,
                 consistency_policy: Optional[pulumi.Input[pulumi.InputType['ConsistencyPolicyArgs']]] = None,
                 cors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CorsPolicyArgs']]]]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 database_account_offer_type: Optional[pulumi.Input['DatabaseAccountOfferType']] = None,
                 default_identity: Optional[pulumi.Input[str]] = None,
                 disable_key_based_metadata_write_access: Optional[pulumi.Input[bool]] = None,
                 disable_local_auth: Optional[pulumi.Input[bool]] = None,
                 enable_analytical_storage: Optional[pulumi.Input[bool]] = None,
                 enable_automatic_failover: Optional[pulumi.Input[bool]] = None,
                 enable_cassandra_connector: Optional[pulumi.Input[bool]] = None,
                 enable_free_tier: Optional[pulumi.Input[bool]] = None,
                 enable_multiple_write_locations: Optional[pulumi.Input[bool]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
                 ip_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpAddressOrRangeArgs']]]]] = None,
                 is_virtual_network_filter_enabled: Optional[pulumi.Input[bool]] = None,
                 key_vault_key_uri: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[Union[str, 'DatabaseAccountKind']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LocationArgs']]]]] = None,
                 network_acl_bypass: Optional[pulumi.Input['NetworkAclBypass']] = None,
                 network_acl_bypass_resource_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 restore_parameters: Optional[pulumi.Input[pulumi.InputType['RestoreParametersArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_network_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualNetworkRuleArgs']]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DatabaseAccountArgs.__new__(DatabaseAccountArgs)

            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["analytical_storage_configuration"] = analytical_storage_configuration
            __props__.__dict__["api_properties"] = api_properties
            __props__.__dict__["backup_policy"] = backup_policy
            __props__.__dict__["capabilities"] = capabilities
            __props__.__dict__["connector_offer"] = connector_offer
            __props__.__dict__["consistency_policy"] = consistency_policy
            __props__.__dict__["cors"] = cors
            if create_mode is None:
                create_mode = 'Default'
            __props__.__dict__["create_mode"] = create_mode
            if database_account_offer_type is None and not opts.urn:
                raise TypeError("Missing required property 'database_account_offer_type'")
            __props__.__dict__["database_account_offer_type"] = database_account_offer_type
            __props__.__dict__["default_identity"] = default_identity
            __props__.__dict__["disable_key_based_metadata_write_access"] = disable_key_based_metadata_write_access
            __props__.__dict__["disable_local_auth"] = disable_local_auth
            __props__.__dict__["enable_analytical_storage"] = enable_analytical_storage
            __props__.__dict__["enable_automatic_failover"] = enable_automatic_failover
            __props__.__dict__["enable_cassandra_connector"] = enable_cassandra_connector
            __props__.__dict__["enable_free_tier"] = enable_free_tier
            __props__.__dict__["enable_multiple_write_locations"] = enable_multiple_write_locations
            __props__.__dict__["identity"] = identity
            __props__.__dict__["ip_rules"] = ip_rules
            __props__.__dict__["is_virtual_network_filter_enabled"] = is_virtual_network_filter_enabled
            __props__.__dict__["key_vault_key_uri"] = key_vault_key_uri
            if kind is None:
                kind = 'GlobalDocumentDB'
            __props__.__dict__["kind"] = kind
            __props__.__dict__["location"] = location
            if locations is None and not opts.urn:
                raise TypeError("Missing required property 'locations'")
            __props__.__dict__["locations"] = locations
            __props__.__dict__["network_acl_bypass"] = network_acl_bypass
            __props__.__dict__["network_acl_bypass_resource_ids"] = network_acl_bypass_resource_ids
            __props__.__dict__["public_network_access"] = public_network_access
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["restore_parameters"] = restore_parameters
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_network_rules"] = virtual_network_rules
            __props__.__dict__["document_endpoint"] = None
            __props__.__dict__["failover_policies"] = None
            __props__.__dict__["instance_id"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["private_endpoint_connections"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["read_locations"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
            __props__.__dict__["write_locations"] = None
        alias_opts = pulumi.ResourceOptions(aliases=[
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20150401:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20150401:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20150408:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20150408:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20151106:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20151106:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20160319:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20160319:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20160331:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20160331:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20190801:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20191212:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20200301:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20200401:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20200601preview:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20200901:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20210115:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20210301preview:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20210315:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20210401preview:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20210415:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20210515:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:DatabaseAccount"),
            pulumi.Alias(type_="azure-native:documentdb/v20210701preview:DatabaseAccount"),
            pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:DatabaseAccount")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(DatabaseAccount, __self__).__init__(
            'azure-native:documentdb/v20210615:DatabaseAccount',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'DatabaseAccount':
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = DatabaseAccountArgs.__new__(DatabaseAccountArgs)

        __props__.__dict__["analytical_storage_configuration"] = None
        __props__.__dict__["api_properties"] = None
        __props__.__dict__["backup_policy"] = None
        __props__.__dict__["capabilities"] = None
        __props__.__dict__["connector_offer"] = None
        __props__.__dict__["consistency_policy"] = None
        __props__.__dict__["cors"] = None
        __props__.__dict__["create_mode"] = None
        __props__.__dict__["database_account_offer_type"] = None
        __props__.__dict__["default_identity"] = None
        __props__.__dict__["disable_key_based_metadata_write_access"] = None
        __props__.__dict__["disable_local_auth"] = None
        __props__.__dict__["document_endpoint"] = None
        __props__.__dict__["enable_analytical_storage"] = None
        __props__.__dict__["enable_automatic_failover"] = None
        __props__.__dict__["enable_cassandra_connector"] = None
        __props__.__dict__["enable_free_tier"] = None
        __props__.__dict__["enable_multiple_write_locations"] = None
        __props__.__dict__["failover_policies"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["instance_id"] = None
        __props__.__dict__["ip_rules"] = None
        __props__.__dict__["is_virtual_network_filter_enabled"] = None
        __props__.__dict__["key_vault_key_uri"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["locations"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["network_acl_bypass"] = None
        __props__.__dict__["network_acl_bypass_resource_ids"] = None
        __props__.__dict__["private_endpoint_connections"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["public_network_access"] = None
        __props__.__dict__["read_locations"] = None
        __props__.__dict__["restore_parameters"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["virtual_network_rules"] = None
        __props__.__dict__["write_locations"] = None
        return DatabaseAccount(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="analyticalStorageConfiguration")
    def analytical_storage_configuration(self) -> pulumi.Output[Optional['outputs.AnalyticalStorageConfigurationResponse']]:
        return pulumi.get(self, "analytical_storage_configuration")

    @property
    @pulumi.getter(name="apiProperties")
    def api_properties(self) -> pulumi.Output[Optional['outputs.ApiPropertiesResponse']]:
        return pulumi.get(self, "api_properties")

    @property
    @pulumi.getter(name="backupPolicy")
    def backup_policy(self) -> pulumi.Output[Optional[Any]]:
        return pulumi.get(self, "backup_policy")

    @property
    @pulumi.getter
    def capabilities(self) -> pulumi.Output[Optional[Sequence['outputs.CapabilityResponse']]]:
        return pulumi.get(self, "capabilities")

    @property
    @pulumi.getter(name="connectorOffer")
    def connector_offer(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "connector_offer")

    @property
    @pulumi.getter(name="consistencyPolicy")
    def consistency_policy(self) -> pulumi.Output[Optional['outputs.ConsistencyPolicyResponse']]:
        return pulumi.get(self, "consistency_policy")

    @property
    @pulumi.getter
    def cors(self) -> pulumi.Output[Optional[Sequence['outputs.CorsPolicyResponse']]]:
        return pulumi.get(self, "cors")

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "create_mode")

    @property
    @pulumi.getter(name="databaseAccountOfferType")
    def database_account_offer_type(self) -> pulumi.Output[str]:
        return pulumi.get(self, "database_account_offer_type")

    @property
    @pulumi.getter(name="defaultIdentity")
    def default_identity(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "default_identity")

    @property
    @pulumi.getter(name="disableKeyBasedMetadataWriteAccess")
    def disable_key_based_metadata_write_access(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "disable_key_based_metadata_write_access")

    @property
    @pulumi.getter(name="disableLocalAuth")
    def disable_local_auth(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "disable_local_auth")

    @property
    @pulumi.getter(name="documentEndpoint")
    def document_endpoint(self) -> pulumi.Output[str]:
        return pulumi.get(self, "document_endpoint")

    @property
    @pulumi.getter(name="enableAnalyticalStorage")
    def enable_analytical_storage(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "enable_analytical_storage")

    @property
    @pulumi.getter(name="enableAutomaticFailover")
    def enable_automatic_failover(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "enable_automatic_failover")

    @property
    @pulumi.getter(name="enableCassandraConnector")
    def enable_cassandra_connector(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "enable_cassandra_connector")

    @property
    @pulumi.getter(name="enableFreeTier")
    def enable_free_tier(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "enable_free_tier")

    @property
    @pulumi.getter(name="enableMultipleWriteLocations")
    def enable_multiple_write_locations(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "enable_multiple_write_locations")

    @property
    @pulumi.getter(name="failoverPolicies")
    def failover_policies(self) -> pulumi.Output[Sequence['outputs.FailoverPolicyResponse']]:
        return pulumi.get(self, "failover_policies")

    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Output[str]:
        return pulumi.get(self, "instance_id")

    @property
    @pulumi.getter(name="ipRules")
    def ip_rules(self) -> pulumi.Output[Optional[Sequence['outputs.IpAddressOrRangeResponse']]]:
        return pulumi.get(self, "ip_rules")

    @property
    @pulumi.getter(name="isVirtualNetworkFilterEnabled")
    def is_virtual_network_filter_enabled(self) -> pulumi.Output[Optional[bool]]:
        return pulumi.get(self, "is_virtual_network_filter_enabled")

    @property
    @pulumi.getter(name="keyVaultKeyUri")
    def key_vault_key_uri(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "key_vault_key_uri")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def locations(self) -> pulumi.Output[Sequence['outputs.LocationResponse']]:
        return pulumi.get(self, "locations")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkAclBypass")
    def network_acl_bypass(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "network_acl_bypass")

    @property
    @pulumi.getter(name="networkAclBypassResourceIds")
    def network_acl_bypass_resource_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
        return pulumi.get(self, "network_acl_bypass_resource_ids")

    @property
    @pulumi.getter(name="privateEndpointConnections")
    def private_endpoint_connections(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointConnectionResponse']]:
        return pulumi.get(self, "private_endpoint_connections")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "public_network_access")

    @property
    @pulumi.getter(name="readLocations")
    def read_locations(self) -> pulumi.Output[Sequence['outputs.LocationResponse']]:
        return pulumi.get(self, "read_locations")

    @property
    @pulumi.getter(name="restoreParameters")
    def restore_parameters(self) -> pulumi.Output[Optional['outputs.RestoreParametersResponse']]:
        return pulumi.get(self, "restore_parameters")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualNetworkRules")
    def virtual_network_rules(self) -> pulumi.Output[Optional[Sequence['outputs.VirtualNetworkRuleResponse']]]:
        return pulumi.get(self, "virtual_network_rules")

    @property
    @pulumi.getter(name="writeLocations")
    def write_locations(self) -> pulumi.Output[Sequence['outputs.LocationResponse']]:
        return pulumi.get(self, "write_locations")
true
true
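For context, a minimal usage sketch for the generated resource above. The required inputs follow the `_internal_init` checks in the record (`database_account_offer_type`, `locations`, and `resource_group_name` raise `TypeError` when omitted); the import path is my assumption about how this versioned module is packaged in pulumi-azure-native, and the concrete names and values are hypothetical.

```python
import pulumi
# Assumed packaging of the generated module shown above (API version v20210615).
import pulumi_azure_native.documentdb.v20210615 as documentdb

account = documentdb.DatabaseAccount(
    "exampleAccount",                        # hypothetical Pulumi resource name
    resource_group_name="example-rg",        # required by _internal_init
    database_account_offer_type="Standard",  # required; str or DatabaseAccountOfferType
    locations=[documentdb.LocationArgs(      # required; at least one region
        location_name="westus",
        failover_priority=0,
    )],
)
# document_endpoint is one of the output-only properties set to None at create time.
pulumi.export("endpoint", account.document_endpoint)
```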
1c4369aea349a0415f317871e3e85ce3efe8e7c9
3,971
py
Python
find_trans.py
jacob975/TATIRP
2d81fa280e039aa931c6f8456632a23ef123282a
[ "MIT" ]
null
null
null
find_trans.py
jacob975/TATIRP
2d81fa280e039aa931c6f8456632a23ef123282a
[ "MIT" ]
4
2017-08-22T03:15:22.000Z
2017-12-19T17:55:31.000Z
find_trans.py
jacob975/TATIRP
2d81fa280e039aa931c6f8456632a23ef123282a
[ "MIT" ]
null
null
null
#!/usr/bin/python3
# author: Serebryakov S., 2019
#https://astro.swarthmore.edu/transits.cgi

import os
import json
import requests
from lxml import html

page = requests.get('https://astro.swarthmore.edu/print_transits.cgi?observatory_string=28.758333%3B-17.88%3BAtlantic%2FCanary%3BRoque+de+los+Muchachos%2C+La+Palma&use_utc=1&observatory_latitude=&observatory_longitude=&timezone=UTC&start_date=today&days_to_print=1&days_in_past=0&minimum_start_elevation=+&and_vs_or=or&minimum_end_elevation=+&minimum_depth=0&target_string=&single_object=0&ra=&dec=&epoch=&period=&duration=&target=&show_ephemeris=0&print_html=1&twilight=-12&max_airmass=2.4')
document = html.fromstring(page.content)

data_arr = []
date_par = ""
for tr_elements in document.xpath('//tbody/tr'):
    td_elements = tr_elements.xpath('.//td/text() | .//td/span/text()' )
    target_names = tr_elements.xpath('.//td//span/a/text()')
    tt_magnitude = tr_elements.xpath('.//td//div/div/text()')[0]
    obs_end_time = tr_elements.xpath('.//td//span/span/text()')[0].replace('\n','')
    for i in range(len(td_elements)):
        td_elements[i] = td_elements[i].replace('\n','')
        td_elements[i] = td_elements[i].replace('\t','')
        td_elements[i] = td_elements[i].strip()
    date_par = td_elements.pop(0).split()[1]
    for target in target_names:
        data_arr.append({"date" : date_par ,
                         "target" : target ,
                         "mag" : tt_magnitude,
                         "start-end" : str(td_elements[18])+" - "+str(obs_end_time),
                         "dur" : td_elements[26],
                         "elev" : [ elem.replace('°','') for elem in [td_elements[31],td_elements[34],td_elements[37]] ],
                         "RA,DEC" : [ td_elements[-5],td_elements[-4] ],
                         "depth" : td_elements[-1] })

print("=======================================================================================================================================================================")

target_list = []
for target in data_arr:
    if float(target["mag"]) < 16:
        dec = int(target["RA,DEC"][1][0:3])
        if dec > -30 and dec < 35:
            if int(target["elev"][0]) > 45 and int(target["elev"][1]) > 45:
                target_list.append(target)

if len(target_list) == 0:
    print("couldn't find any target for today...")
else:
    print("||%15s||%15s||%15s||%15s||%15s||%25s||%35s||%15s||"%("date","target","magnitude","start-end","duration","elev","RA,DEC","depth"))
    with open("trastits_tonight","w") as t_file:
        for target in target_list:
            t_file.write(json.dumps(target)+"\n")
            print("||%15s||%15s||%15s||%15s||%15s||%25s||%35s||%15s||"%(target['date'],target['target'],target['mag'],target['start-end'],target['dur'],target['elev'],target['RA,DEC'],target['depth']))

print("=======================================================================================================================================================================")

'''
OUTPUT:
=======================================================================================================================================================================
||           date||         target||      magnitude||      start-end||       duration||                     elev||                             RA,DEC||          depth||
||     2019-08-05||      GJ 9827 c||          10.10||  02:34 - 04:24||           1:49||       ['56', '60', '58']||     ['23:27:04.62', '-01:17:12.5']||            0.4||
||     2019-08-05||       K2-141 b||          11.45||  03:13 - 04:09||           0:56||       ['60', '60', '59']||     ['23:23:39.97', '-01:11:21.4']||            0.4||
=======================================================================================================================================================================
'''
70.910714
492
0.458071
import os
import json
import requests
from lxml import html

page = requests.get('https://astro.swarthmore.edu/print_transits.cgi?observatory_string=28.758333%3B-17.88%3BAtlantic%2FCanary%3BRoque+de+los+Muchachos%2C+La+Palma&use_utc=1&observatory_latitude=&observatory_longitude=&timezone=UTC&start_date=today&days_to_print=1&days_in_past=0&minimum_start_elevation=+&and_vs_or=or&minimum_end_elevation=+&minimum_depth=0&target_string=&single_object=0&ra=&dec=&epoch=&period=&duration=&target=&show_ephemeris=0&print_html=1&twilight=-12&max_airmass=2.4')
document = html.fromstring(page.content)

data_arr = []
date_par = ""
for tr_elements in document.xpath('//tbody/tr'):
    td_elements = tr_elements.xpath('.//td/text() | .//td/span/text()' )
    target_names = tr_elements.xpath('.//td//span/a/text()')
    tt_magnitude = tr_elements.xpath('.//td//div/div/text()')[0]
    obs_end_time = tr_elements.xpath('.//td//span/span/text()')[0].replace('\n','')
    for i in range(len(td_elements)):
        td_elements[i] = td_elements[i].replace('\n','')
        td_elements[i] = td_elements[i].replace('\t','')
        td_elements[i] = td_elements[i].strip()
    date_par = td_elements.pop(0).split()[1]
    for target in target_names:
        data_arr.append({"date" : date_par ,
                         "target" : target ,
                         "mag" : tt_magnitude,
                         "start-end" : str(td_elements[18])+" - "+str(obs_end_time),
                         "dur" : td_elements[26],
                         "elev" : [ elem.replace('°','') for elem in [td_elements[31],td_elements[34],td_elements[37]] ],
                         "RA,DEC" : [ td_elements[-5],td_elements[-4] ],
                         "depth" : td_elements[-1] })

print("=======================================================================================================================================================================")

target_list = []
for target in data_arr:
    if float(target["mag"]) < 16:
        dec = int(target["RA,DEC"][1][0:3])
        if dec > -30 and dec < 35:
            if int(target["elev"][0]) > 45 and int(target["elev"][1]) > 45:
                target_list.append(target)

if len(target_list) == 0:
    print("couldn't find any target for today...")
else:
    print("||%15s||%15s||%15s||%15s||%15s||%25s||%35s||%15s||"%("date","target","magnitude","start-end","duration","elev","RA,DEC","depth"))
    with open("trastits_tonight","w") as t_file:
        for target in target_list:
            t_file.write(json.dumps(target)+"\n")
            print("||%15s||%15s||%15s||%15s||%15s||%25s||%35s||%15s||"%(target['date'],target['target'],target['mag'],target['start-end'],target['dur'],target['elev'],target['RA,DEC'],target['depth']))

print("=======================================================================================================================================================================")
true
true
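A quick aside on the declination cut in the script above: it slices the first three characters of the DEC string and parses them as an integer, which assumes the string carries an explicit sign (as in the sample OUTPUT rows). A self-contained illustration, using a value taken from the record's OUTPUT block:

```python
# Declination parsing as done in find_trans.py. Note int("-01") == -1, so the
# -30 < dec < 35 window is applied to whole degrees only; an unsigned DEC like
# "12:34..." would slice to "12:" and raise ValueError.
dec_str = "-01:17:12.5"
dec = int(dec_str[0:3])
print(dec, dec > -30 and dec < 35)  # -1 True
```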
1c4369fdab4c48c1a95e2314a68965d8415983d5
2,686
py
Python
graphgym/custom_graphgym/train/example.py
itamblyn/pytorch_geometric
67ed16492863378b8434b03713a75924f0cc5df1
[ "MIT" ]
1
2022-01-05T05:46:50.000Z
2022-01-05T05:46:50.000Z
graphgym/custom_graphgym/train/example.py
itamblyn/pytorch_geometric
67ed16492863378b8434b03713a75924f0cc5df1
[ "MIT" ]
null
null
null
graphgym/custom_graphgym/train/example.py
itamblyn/pytorch_geometric
67ed16492863378b8434b03713a75924f0cc5df1
[ "MIT" ]
null
null
null
import torch
import time
import logging

from torch_geometric.graphgym.config import cfg
from torch_geometric.graphgym.loss import compute_loss
from torch_geometric.graphgym.utils.epoch import is_eval_epoch, is_ckpt_epoch
from torch_geometric.graphgym.checkpoint import load_ckpt, save_ckpt, \
    clean_ckpt

from torch_geometric.graphgym.register import register_train


def train_epoch(logger, loader, model, optimizer, scheduler):
    model.train()
    time_start = time.time()
    for batch in loader:
        optimizer.zero_grad()
        batch.to(torch.device(cfg.device))
        pred, true = model(batch)
        loss, pred_score = compute_loss(pred, true)
        loss.backward()
        optimizer.step()
        logger.update_stats(true=true.detach().cpu(),
                            pred=pred_score.detach().cpu(),
                            loss=loss.item(),
                            lr=scheduler.get_last_lr()[0],
                            time_used=time.time() - time_start,
                            params=cfg.params)
        time_start = time.time()
    scheduler.step()


def eval_epoch(logger, loader, model):
    model.eval()
    time_start = time.time()
    for batch in loader:
        batch.to(torch.device(cfg.device))
        pred, true = model(batch)
        loss, pred_score = compute_loss(pred, true)
        logger.update_stats(true=true.detach().cpu(),
                            pred=pred_score.detach().cpu(),
                            loss=loss.item(),
                            lr=0,
                            time_used=time.time() - time_start,
                            params=cfg.params)
        time_start = time.time()


@register_train('example')
def train_example(loggers, loaders, model, optimizer, scheduler):
    start_epoch = 0
    if cfg.train.auto_resume:
        start_epoch = load_ckpt(model, optimizer, scheduler,
                                cfg.train.epoch_resume)
    if start_epoch == cfg.optim.max_epoch:
        logging.info('Checkpoint found, Task already done')
    else:
        logging.info('Start from epoch %s', start_epoch)

    num_splits = len(loggers)
    for cur_epoch in range(start_epoch, cfg.optim.max_epoch):
        train_epoch(loggers[0], loaders[0], model, optimizer, scheduler)
        loggers[0].write_epoch(cur_epoch)
        if is_eval_epoch(cur_epoch):
            for i in range(1, num_splits):
                eval_epoch(loggers[i], loaders[i], model)
                loggers[i].write_epoch(cur_epoch)
        if is_ckpt_epoch(cur_epoch):
            save_ckpt(model, optimizer, scheduler, cur_epoch)
    for logger in loggers:
        logger.close()
    if cfg.train.ckpt_clean:
        clean_ckpt()

    logging.info('Task done, results saved in %s', cfg.run_dir)
36.297297
77
0.627699
import torch
import time
import logging

from torch_geometric.graphgym.config import cfg
from torch_geometric.graphgym.loss import compute_loss
from torch_geometric.graphgym.utils.epoch import is_eval_epoch, is_ckpt_epoch
from torch_geometric.graphgym.checkpoint import load_ckpt, save_ckpt, \
    clean_ckpt

from torch_geometric.graphgym.register import register_train


def train_epoch(logger, loader, model, optimizer, scheduler):
    model.train()
    time_start = time.time()
    for batch in loader:
        optimizer.zero_grad()
        batch.to(torch.device(cfg.device))
        pred, true = model(batch)
        loss, pred_score = compute_loss(pred, true)
        loss.backward()
        optimizer.step()
        logger.update_stats(true=true.detach().cpu(),
                            pred=pred_score.detach().cpu(),
                            loss=loss.item(),
                            lr=scheduler.get_last_lr()[0],
                            time_used=time.time() - time_start,
                            params=cfg.params)
        time_start = time.time()
    scheduler.step()


def eval_epoch(logger, loader, model):
    model.eval()
    time_start = time.time()
    for batch in loader:
        batch.to(torch.device(cfg.device))
        pred, true = model(batch)
        loss, pred_score = compute_loss(pred, true)
        logger.update_stats(true=true.detach().cpu(),
                            pred=pred_score.detach().cpu(),
                            loss=loss.item(),
                            lr=0,
                            time_used=time.time() - time_start,
                            params=cfg.params)
        time_start = time.time()


@register_train('example')
def train_example(loggers, loaders, model, optimizer, scheduler):
    start_epoch = 0
    if cfg.train.auto_resume:
        start_epoch = load_ckpt(model, optimizer, scheduler,
                                cfg.train.epoch_resume)
    if start_epoch == cfg.optim.max_epoch:
        logging.info('Checkpoint found, Task already done')
    else:
        logging.info('Start from epoch %s', start_epoch)

    num_splits = len(loggers)
    for cur_epoch in range(start_epoch, cfg.optim.max_epoch):
        train_epoch(loggers[0], loaders[0], model, optimizer, scheduler)
        loggers[0].write_epoch(cur_epoch)
        if is_eval_epoch(cur_epoch):
            for i in range(1, num_splits):
                eval_epoch(loggers[i], loaders[i], model)
                loggers[i].write_epoch(cur_epoch)
        if is_ckpt_epoch(cur_epoch):
            save_ckpt(model, optimizer, scheduler, cur_epoch)
    for logger in loggers:
        logger.close()
    if cfg.train.ckpt_clean:
        clean_ckpt()

    logging.info('Task done, results saved in %s', cfg.run_dir)
true
true
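The `@register_train('example')` decorator in the file above adds the loop to GraphGym's registry so a runner can select it by name. A minimal sketch of that mechanism, assuming `train_dict` is importable from `torch_geometric.graphgym.register` (the dictionary I believe the decorator writes into); the `'noop'` key and placeholder body are made up for illustration:

```python
from torch_geometric.graphgym.register import register_train, train_dict

@register_train('noop')
def noop_train(loggers, loaders, model, optimizer, scheduler):
    # Placeholder body; a real loop would mirror train_example above.
    pass

# The decorator stores the function under its key, so a runner can look it up.
assert train_dict['noop'] is noop_train
```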
1c436a4ec3351d4cc481420600a5673157ef3b13
4,929
py
Python
Round3/train.py
teejaytanmay/image_object_localization_flipkart
ca0976a7df1280be942d666cdebea110e1a70633
[ "MIT" ]
null
null
null
Round3/train.py
teejaytanmay/image_object_localization_flipkart
ca0976a7df1280be942d666cdebea110e1a70633
[ "MIT" ]
null
null
null
Round3/train.py
teejaytanmay/image_object_localization_flipkart
ca0976a7df1280be942d666cdebea110e1a70633
[ "MIT" ]
null
null
null
# coding: utf-8

import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, MaxPooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model,load_model
from keras.callbacks import ModelCheckpoint,LearningRateScheduler,ReduceLROnPlateau
import utils
import numpy as np
import tensorflow as tf

data_train,box_train,data_test,box_test=utils.getdata()

# metric function
def my_metric(labels,predictions):
    threshhold=0.9
    x=predictions[:,0]*20
    x=tf.maximum(tf.minimum(x,192.0),0.0)
    y=predictions[:,1]*20
    y=tf.maximum(tf.minimum(y,144.0),0.0)
    width=predictions[:,2]*20
    width=tf.maximum(tf.minimum(width,192.0),0.0)
    height=predictions[:,3]*20
    height=tf.maximum(tf.minimum(height,144.0),0.0)
    label_x=labels[:,0]
    label_y=labels[:,1]
    label_width=labels[:,2]
    label_height=labels[:,3]
    a1=tf.multiply(width,height)
    a2=tf.multiply(label_width,label_height)
    x1=tf.maximum(x,label_x)
    y1=tf.maximum(y,label_y)
    x2=tf.minimum(x+width,label_x+label_width)
    y2=tf.minimum(y+height,label_y+label_height)
    IoU=tf.abs(tf.multiply((x1-x2),(y1-y2)))/(a1+a2-tf.abs(tf.multiply((x1-x2),(y1-y2))))
    condition=tf.less(threshhold,IoU)
    sum=tf.where(condition,tf.ones(tf.shape(condition)),tf.zeros(tf.shape(condition)))
    return tf.reduce_mean(sum)

# loss function
def smooth_l1_loss(true_box,pred_box):
    loss=0.0
    for i in range(4):
        residual=tf.abs(true_box[:,i]-pred_box[:,i])
        condition=tf.less(residual,1.0)
        small_res=0.5*tf.square(residual)
        large_res=residual-0.5
        loss=loss+tf.where(condition,small_res,large_res)
    return tf.reduce_mean(loss)

def resnet_block(inputs,num_filters,kernel_size,strides,activation='relu'):
    x=Conv2D(num_filters,kernel_size=kernel_size,strides=strides,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(inputs)
    x=BatchNormalization()(x)
    if(activation):
        x=Activation('relu')(x)
    return x

def resnet18():
    inputs=Input((192,144,3))
    # conv1
    x=resnet_block(inputs,64,[7,7],2)
    # conv2
    x=MaxPooling2D([3,3],2,'same')(x)
    for i in range(2):
        a=resnet_block(x,64,[3,3],1)
        b=resnet_block(a,64,[3,3],1,activation=None)
        x=keras.layers.add([x,b])
        x=Activation('relu')(x)
    # conv3
    a=resnet_block(x,128,[1,1],2)
    b=resnet_block(a,128,[3,3],1,activation=None)
    x=Conv2D(128,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    a=resnet_block(x,128,[3,3],1)
    b=resnet_block(a,128,[3,3],1,activation=None)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    # conv4
    a=resnet_block(x,256,[1,1],2)
    b=resnet_block(a,256,[3,3],1,activation=None)
    x=Conv2D(256,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    a=resnet_block(x,256,[3,3],1)
    b=resnet_block(a,256,[3,3],1,activation=None)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    # conv5
    a=resnet_block(x,512,[1,1],2)
    b=resnet_block(a,512,[3,3],1,activation=None)
    x=Conv2D(512,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    a=resnet_block(x,512,[3,3],1)
    b=resnet_block(a,512,[3,3],1,activation=None)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    x=AveragePooling2D(pool_size=1,data_format="channels_last")(x)    # out:1*1*512
    y=Flatten()(x)    # out:512
    y=Dense(1000,kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(y)
    outputs=Dense(4,kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(y)
    model=Model(inputs=inputs,outputs=outputs)
    return model

model = resnet18()
model.compile(loss=smooth_l1_loss,optimizer=Adam(),metrics=[my_metric])
model.summary()

def lr_sch(epoch):
    #200 total
    if epoch <50:
        return 1e-3
    if 50<=epoch<100:
        return 1e-4
    if epoch>=100:
        return 1e-5

lr_scheduler=LearningRateScheduler(lr_sch)
lr_reducer=ReduceLROnPlateau(monitor='val_my_metric',factor=0.2,patience=5,mode='max',min_lr=1e-3)
checkpoint=ModelCheckpoint('model5.h5',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')
model_details=model.fit(data_train,box_train,batch_size=128,epochs=55,shuffle=True,validation_split=0.1,callbacks=[lr_scheduler,lr_reducer,checkpoint],verbose=1)
model.save('model.h5')
scores=model.evaluate(data_test,box_test,verbose=1)
print('Test loss : ',scores[0])
print('Test accuracy : ',scores[1])
utils.plot_model(model_details)
31.596154
161
0.695476
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, MaxPooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model,load_model
from keras.callbacks import ModelCheckpoint,LearningRateScheduler,ReduceLROnPlateau
import utils
import numpy as np
import tensorflow as tf

data_train,box_train,data_test,box_test=utils.getdata()

def my_metric(labels,predictions):
    threshhold=0.9
    x=predictions[:,0]*20
    x=tf.maximum(tf.minimum(x,192.0),0.0)
    y=predictions[:,1]*20
    y=tf.maximum(tf.minimum(y,144.0),0.0)
    width=predictions[:,2]*20
    width=tf.maximum(tf.minimum(width,192.0),0.0)
    height=predictions[:,3]*20
    height=tf.maximum(tf.minimum(height,144.0),0.0)
    label_x=labels[:,0]
    label_y=labels[:,1]
    label_width=labels[:,2]
    label_height=labels[:,3]
    a1=tf.multiply(width,height)
    a2=tf.multiply(label_width,label_height)
    x1=tf.maximum(x,label_x)
    y1=tf.maximum(y,label_y)
    x2=tf.minimum(x+width,label_x+label_width)
    y2=tf.minimum(y+height,label_y+label_height)
    IoU=tf.abs(tf.multiply((x1-x2),(y1-y2)))/(a1+a2-tf.abs(tf.multiply((x1-x2),(y1-y2))))
    condition=tf.less(threshhold,IoU)
    sum=tf.where(condition,tf.ones(tf.shape(condition)),tf.zeros(tf.shape(condition)))
    return tf.reduce_mean(sum)

def smooth_l1_loss(true_box,pred_box):
    loss=0.0
    for i in range(4):
        residual=tf.abs(true_box[:,i]-pred_box[:,i])
        condition=tf.less(residual,1.0)
        small_res=0.5*tf.square(residual)
        large_res=residual-0.5
        loss=loss+tf.where(condition,small_res,large_res)
    return tf.reduce_mean(loss)

def resnet_block(inputs,num_filters,kernel_size,strides,activation='relu'):
    x=Conv2D(num_filters,kernel_size=kernel_size,strides=strides,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(inputs)
    x=BatchNormalization()(x)
    if(activation):
        x=Activation('relu')(x)
    return x

def resnet18():
    inputs=Input((192,144,3))
    x=resnet_block(inputs,64,[7,7],2)
    x=MaxPooling2D([3,3],2,'same')(x)
    for i in range(2):
        a=resnet_block(x,64,[3,3],1)
        b=resnet_block(a,64,[3,3],1,activation=None)
        x=keras.layers.add([x,b])
        x=Activation('relu')(x)
    a=resnet_block(x,128,[1,1],2)
    b=resnet_block(a,128,[3,3],1,activation=None)
    x=Conv2D(128,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    a=resnet_block(x,128,[3,3],1)
    b=resnet_block(a,128,[3,3],1,activation=None)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    a=resnet_block(x,256,[1,1],2)
    b=resnet_block(a,256,[3,3],1,activation=None)
    x=Conv2D(256,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    a=resnet_block(x,256,[3,3],1)
    b=resnet_block(a,256,[3,3],1,activation=None)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    a=resnet_block(x,512,[1,1],2)
    b=resnet_block(a,512,[3,3],1,activation=None)
    x=Conv2D(512,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    a=resnet_block(x,512,[3,3],1)
    b=resnet_block(a,512,[3,3],1,activation=None)
    x=keras.layers.add([x,b])
    x=Activation('relu')(x)
    x=AveragePooling2D(pool_size=1,data_format="channels_last")(x)
    y=Flatten()(x)
    y=Dense(1000,kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(y)
    outputs=Dense(4,kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(y)
    model=Model(inputs=inputs,outputs=outputs)
    return model

model = resnet18()
model.compile(loss=smooth_l1_loss,optimizer=Adam(),metrics=[my_metric])
model.summary()

def lr_sch(epoch):
    if epoch <50:
        return 1e-3
    if 50<=epoch<100:
        return 1e-4
    if epoch>=100:
        return 1e-5

lr_scheduler=LearningRateScheduler(lr_sch)
lr_reducer=ReduceLROnPlateau(monitor='val_my_metric',factor=0.2,patience=5,mode='max',min_lr=1e-3)
checkpoint=ModelCheckpoint('model5.h5',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')
model_details=model.fit(data_train,box_train,batch_size=128,epochs=55,shuffle=True,validation_split=0.1,callbacks=[lr_scheduler,lr_reducer,checkpoint],verbose=1)
model.save('model.h5')
scores=model.evaluate(data_test,box_test,verbose=1)
print('Test loss : ',scores[0])
print('Test accuracy : ',scores[1])
utils.plot_model(model_details)
true
true
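As a cross-check on the IoU expression inside `my_metric` above: it uses |(x1-x2)*(y1-y2)| for the intersection without clamping, so it can report a spurious overlap for disjoint boxes. A plain-Python version with the usual clamped intersection for comparison (boxes as (x, y, w, h); this is an illustration, not part of the original file):

```python
def iou(a, b):
    # Clamped intersection; returns 0.0 when the boxes do not overlap.
    inter_w = max(0.0, min(a[0] + a[2], b[0] + b[2]) - max(a[0], b[0]))
    inter_h = max(0.0, min(a[1] + a[3], b[1] + b[3]) - max(a[1], b[1]))
    inter = inter_w * inter_h
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / union

print(iou((0, 0, 10, 10), (5, 5, 10, 10)))   # 25 / 175 ≈ 0.143
print(iou((0, 0, 10, 10), (20, 20, 5, 5)))   # 0.0; the unclamped TF formula would not return 0 here
```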
1c436b649a82cf4960e00d0baa54f3341444e559
2,087
py
Python
malib/trainers/utils.py
wwxFromTju/malib
7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2
[ "MIT" ]
6
2021-05-19T10:25:36.000Z
2021-12-27T03:30:33.000Z
malib/trainers/utils.py
wwxFromTju/malib
7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2
[ "MIT" ]
1
2021-05-29T04:51:37.000Z
2021-05-30T06:18:10.000Z
malib/trainers/utils.py
ying-wen/malib_deprecated
875338b81c4d87064ad31201f461ef742db05f25
[ "MIT" ]
1
2021-05-31T16:16:12.000Z
2021-05-31T16:16:12.000Z
# Created by yingwen at 2019-06-30
from copy import deepcopy

import numpy as np
import tensorflow as tf


def add_target_actions(batch_n, agents, batch_size, multiagent=True):
    target_actions_n = []
    for i, agent in enumerate(agents):
        target_actions_n.append(
            agent.act(batch_n[i]["next_observations"], use_target=True)
        )
    if multiagent:
        for i in range(len(agents)):
            target_actions = target_actions_n[i]
            opponent_target_actions = np.reshape(
                np.delete(deepcopy(target_actions_n), i, 0), (batch_size, -1)
            )
            target_actions = np.concatenate(
                (target_actions, opponent_target_actions), 1
            )
            # assert target_actions.shape[0] == batch_size
            batch_n[i]["target_actions"] = target_actions
    else:
        batch_n[0]["target_actions"] = target_actions_n[0]
    return batch_n


def add_recent_batches(batches, agents, batch_size):
    for batch, agent in zip(batches, agents):
        recent_batch = agent.replay_buffer.recent_batch(batch_size)
        batch["recent_observations"] = recent_batch["observations"]
        batch["recent_actions"] = recent_batch["actions"]
        if "opponent_actions" in recent_batch.keys():
            batch["recent_opponent_actions"] = recent_batch["opponent_actions"]
    return batches


def add_annealing(batches, step, annealing_scale=1.0):
    annealing = 0.1 + np.exp(-0.1 * max(step - 10, 0)) * 500
    annealing = annealing_scale * annealing
    for batch in batches:
        batch["annealing"] = tf.constant(annealing, dtype=tf.float32)
    return batches


def get_batches(agents, batch_size):
    assert len(agents) > 1
    batches = []
    indices = agents[0].replay_buffer.random_indices(batch_size)
    for agent in agents:
        batch = agent.replay_buffer.batch_by_indices(indices)
        batches.append(batch)
    return batches


get_extra_experiences = {
    "annealing": add_annealing,
    "recent_experiences": add_recent_batches,
    "target_actions": add_target_actions,
}
33.66129
79
0.668903
from copy import deepcopy

import numpy as np
import tensorflow as tf


def add_target_actions(batch_n, agents, batch_size, multiagent=True):
    target_actions_n = []
    for i, agent in enumerate(agents):
        target_actions_n.append(
            agent.act(batch_n[i]["next_observations"], use_target=True)
        )
    if multiagent:
        for i in range(len(agents)):
            target_actions = target_actions_n[i]
            opponent_target_actions = np.reshape(
                np.delete(deepcopy(target_actions_n), i, 0), (batch_size, -1)
            )
            target_actions = np.concatenate(
                (target_actions, opponent_target_actions), 1
            )
            batch_n[i]["target_actions"] = target_actions
    else:
        batch_n[0]["target_actions"] = target_actions_n[0]
    return batch_n


def add_recent_batches(batches, agents, batch_size):
    for batch, agent in zip(batches, agents):
        recent_batch = agent.replay_buffer.recent_batch(batch_size)
        batch["recent_observations"] = recent_batch["observations"]
        batch["recent_actions"] = recent_batch["actions"]
        if "opponent_actions" in recent_batch.keys():
            batch["recent_opponent_actions"] = recent_batch["opponent_actions"]
    return batches


def add_annealing(batches, step, annealing_scale=1.0):
    annealing = 0.1 + np.exp(-0.1 * max(step - 10, 0)) * 500
    annealing = annealing_scale * annealing
    for batch in batches:
        batch["annealing"] = tf.constant(annealing, dtype=tf.float32)
    return batches


def get_batches(agents, batch_size):
    assert len(agents) > 1
    batches = []
    indices = agents[0].replay_buffer.random_indices(batch_size)
    for agent in agents:
        batch = agent.replay_buffer.batch_by_indices(indices)
        batches.append(batch)
    return batches


get_extra_experiences = {
    "annealing": add_annealing,
    "recent_experiences": add_recent_batches,
    "target_actions": add_target_actions,
}
true
true
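The `add_annealing` helper above attaches a scalar schedule to each batch; evaluated on its own, it sits near 500 until `step` passes 10 and then decays exponentially toward the 0.1 floor (before the `annealing_scale` factor). A quick stand-alone check:

```python
import numpy as np

for step in (0, 10, 50, 100):
    annealing = 0.1 + np.exp(-0.1 * max(step - 10, 0)) * 500
    print(step, round(float(annealing), 4))
# 0 500.1, 10 500.1, 50 9.2578, 100 0.1617
```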
1c436c37a9e54c42d49e6960d46a6ddee487ea78
414
py
Python
day2/test_program_alarm.py
marinmuso/adventofcode
41320aef44b4ade2f36392e5ed77363dbd5c6e79
[ "Apache-2.0" ]
null
null
null
day2/test_program_alarm.py
marinmuso/adventofcode
41320aef44b4ade2f36392e5ed77363dbd5c6e79
[ "Apache-2.0" ]
null
null
null
day2/test_program_alarm.py
marinmuso/adventofcode
41320aef44b4ade2f36392e5ed77363dbd5c6e79
[ "Apache-2.0" ]
null
null
null
import pytest

from program_alarm import process_opcodes


@pytest.mark.parametrize("test_input, expected", [
    ([1, 0, 0, 0, 99], [2, 0, 0, 0, 99]),
    ([2, 3, 0, 3, 99], [2, 3, 0, 6, 99]),
    ([2, 4, 4, 5, 99, 0], [2, 4, 4, 5, 99, 9801]),
    ([1, 1, 1, 4, 99, 5, 6, 0, 99], [30, 1, 1, 4, 2, 5, 6, 0, 99])
])
def test_process_opcodes(test_input, expected):
    assert process_opcodes(test_input) == expected
29.571429
66
0.560386
import pytest

from program_alarm import process_opcodes


@pytest.mark.parametrize("test_input, expected", [
    ([1, 0, 0, 0, 99], [2, 0, 0, 0, 99]),
    ([2, 3, 0, 3, 99], [2, 3, 0, 6, 99]),
    ([2, 4, 4, 5, 99, 0], [2, 4, 4, 5, 99, 9801]),
    ([1, 1, 1, 4, 99, 5, 6, 0, 99], [30, 1, 1, 4, 2, 5, 6, 0, 99])
])
def test_process_opcodes(test_input, expected):
    assert process_opcodes(test_input) == expected
true
true
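The module under test, `program_alarm`, is not part of this record, but the parametrized cases pin down its contract (Advent of Code 2019 day 2 intcode: opcodes 1 = add, 2 = multiply, 99 = halt). A sketch of a `process_opcodes` that passes all four cases above:

```python
def process_opcodes(program):
    # Walk the program in 4-slot instructions: (opcode, src_a, src_b, dst).
    ip = 0
    while program[ip] != 99:
        op, a, b, dst = program[ip:ip + 4]
        if op == 1:
            program[dst] = program[a] + program[b]
        elif op == 2:
            program[dst] = program[a] * program[b]
        else:
            raise ValueError(f"unknown opcode {op}")
        ip += 4
    return program
```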
1c436cbbda96f2d2fcc0119360c4a52d77484b57
1,436
py
Python
models/fix_sclstm16.py
latte488/smth-smth-v2
8504d10a994458769707108cbbe62adde81ca5aa
[ "Apache-2.0", "MIT" ]
2
2021-08-17T14:16:54.000Z
2021-11-06T06:06:28.000Z
models/fix_sclstm16.py
latte488/smth-smth-v2
8504d10a994458769707108cbbe62adde81ca5aa
[ "Apache-2.0", "MIT" ]
null
null
null
models/fix_sclstm16.py
latte488/smth-smth-v2
8504d10a994458769707108cbbe62adde81ca5aa
[ "Apache-2.0", "MIT" ]
1
2021-11-07T12:27:24.000Z
2021-11-07T12:27:24.000Z
import torch
from torch import nn
import selected_dropout

rnn_units = 16

class Model(nn.Module):

    def __init__(self, column_units):
        super(Model, self).__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.rnn = nn.LSTMCell(16 * (32 // 2) * (32 // 2), rnn_units)
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(rnn_units, column_units),
        )

    def forward(self, x):
        x = x[0]
        device = x.device
        b, c, t, h, w = 0, 1, 2, 3, 4
        x = x.permute(b, t, c, h, w).clone()
        b, t, c, h, w = x.shape
        x = x.view(-1, c, h, w)
        x = self.cnn(x)
        x = x.view(b, t, -1)
        hx = torch.zeros(b, rnn_units).to(device)
        cx = torch.zeros(b, rnn_units).to(device)
        hxs = torch.zeros(b, t, rnn_units).to(device)
        for i in range(t):
            hx, cx = self.rnn(x[:, i, :], (hx, cx))
            hxs[:, i, :] = hx
        self.h = hxs.detach()
        b, t, f = hxs.shape
        x = torch.stack([self.classifier(hxs[:, i, :]) for i in range(t)])
        return x

if __name__ == '__main__':
    # The original smoke test constructed an undefined CLSTM class and passed a
    # bare tensor in (batch, time, channels, H, W) layout; Model is the class
    # defined above, takes column_units, and forward expects its input wrapped
    # in a sequence with (batch, channels, time, H, W) layout.
    model = Model(column_units=10)
    inputs = [torch.randn(8, 3, 10, 32, 32)]
    outputs = model(inputs)
    print(outputs.shape)
29.306122
74
0.510446
import torch
from torch import nn
import selected_dropout

rnn_units = 16

class Model(nn.Module):

    def __init__(self, column_units):
        super(Model, self).__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.rnn = nn.LSTMCell(16 * (32 // 2) * (32 // 2), rnn_units)
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(rnn_units, column_units),
        )

    def forward(self, x):
        x = x[0]
        device = x.device
        b, c, t, h, w = 0, 1, 2, 3, 4
        x = x.permute(b, t, c, h, w).clone()
        b, t, c, h, w = x.shape
        x = x.view(-1, c, h, w)
        x = self.cnn(x)
        x = x.view(b, t, -1)
        hx = torch.zeros(b, rnn_units).to(device)
        cx = torch.zeros(b, rnn_units).to(device)
        hxs = torch.zeros(b, t, rnn_units).to(device)
        for i in range(t):
            hx, cx = self.rnn(x[:, i, :], (hx, cx))
            hxs[:, i, :] = hx
        self.h = hxs.detach()
        b, t, f = hxs.shape
        x = torch.stack([self.classifier(hxs[:, i, :]) for i in range(t)])
        return x

if __name__ == '__main__':
    model = Model(column_units=10)
    inputs = [torch.randn(8, 3, 10, 32, 32)]
    outputs = model(inputs)
    print(outputs.shape)
true
true
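Continuing the fixed smoke test in the record above (so `model` and `outputs` are assumed to already exist): `forward` also stashes the detached per-step hidden states in `self.h`, which is how the file exposes the LSTM trajectory to callers.

```python
# Shapes follow from the smoke test above: batch 8, 10 time steps, 16 units.
print(model.h.shape)   # torch.Size([8, 10, 16])  -> (batch, time, rnn_units)
print(outputs.shape)   # torch.Size([10, 8, 10])  -> (time, batch, column_units)
```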
1c436cfd7b87bf108a646221ae9e344c7add96a8
80
py
Python
tiletanic/__init__.py
umeier/tiletanic
461958452a793e781607deddf9828f8e2f55d248
[ "MIT" ]
19
2015-11-09T16:10:20.000Z
2020-11-27T21:28:35.000Z
tiletanic/__init__.py
umeier/tiletanic
461958452a793e781607deddf9828f8e2f55d248
[ "MIT" ]
4
2015-11-06T19:30:08.000Z
2020-03-28T19:10:25.000Z
tiletanic/__init__.py
umeier/tiletanic
461958452a793e781607deddf9828f8e2f55d248
[ "MIT" ]
9
2015-12-22T19:51:28.000Z
2021-01-27T22:52:32.000Z
from .base import Tile, Coords, CoordsBbox
from . import tileschemes, tilecover
26.666667
42
0.8
from .base import Tile, Coords, CoordsBbox
from . import tileschemes, tilecover
true
true
1c436dd9bb8061c46ffe29a26b483f322f9063aa
98
py
Python
chat/apps.py
Kevin1289/Clique.io_site
01e1d5505b293419da75aedb40bdefb9086696a6
[ "MIT" ]
1
2020-09-01T05:15:33.000Z
2020-09-01T05:15:33.000Z
chat/apps.py
Kevin1289/Clique.io_site
01e1d5505b293419da75aedb40bdefb9086696a6
[ "MIT" ]
null
null
null
chat/apps.py
Kevin1289/Clique.io_site
01e1d5505b293419da75aedb40bdefb9086696a6
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class ChatConfig(AppConfig):
    name = 'django_private_chat'
16.333333
33
0.77551
from django.apps import AppConfig


class ChatConfig(AppConfig):
    name = 'django_private_chat'
true
true
1c436e9788ed48c1ecb285aa39a09207734ffac6
1,068
py
Python
992. Subarrays with K Different Integers/solution4.py
sunshot/LeetCode
8f6503201831055f1d49ed3abb25be44a13ec317
[ "MIT" ]
null
null
null
992. Subarrays with K Different Integers/solution4.py
sunshot/LeetCode
8f6503201831055f1d49ed3abb25be44a13ec317
[ "MIT" ]
null
null
null
992. Subarrays with K Different Integers/solution4.py
sunshot/LeetCode
8f6503201831055f1d49ed3abb25be44a13ec317
[ "MIT" ]
null
null
null
from typing import List

class Solution:
    def subarraysWithKDistinct(self, A: List[int], K: int) -> int:
        ans = 0
        d_cnt = {}
        start = 0
        start_k = 0
        # start is the starting of the window
        # start_k is the starting point of k distinct integers
        # in the window start_k-start number are repeated
        # so 1+start_k - start number of subarrays are possible
        for x in A:
            d_cnt[x] = d_cnt.get(x, 0) + 1
            if len(d_cnt) == K+1:
                del d_cnt[A[start_k]]
                start_k += 1
                start = start_k
            if len(d_cnt) == K:
                while d_cnt[A[start_k]] > 1:
                    d_cnt[A[start_k]] -= 1
                    start_k += 1
                # Until d_cnt[A[start_k]] = 1
                ans += start_k - start + 1
        return ans

if __name__== '__main__':
    solution = Solution()
    A = [1,2,1,3,4]
    K = 3
    result = solution.subarraysWithKDistinct(A, K)
    print(result)
29.666667
66
0.48221
from typing import List

class Solution:
    def subarraysWithKDistinct(self, A: List[int], K: int) -> int:
        ans = 0
        d_cnt = {}
        start = 0
        start_k = 0
        for x in A:
            d_cnt[x] = d_cnt.get(x, 0) + 1
            if len(d_cnt) == K+1:
                del d_cnt[A[start_k]]
                start_k += 1
                start = start_k
            if len(d_cnt) == K:
                while d_cnt[A[start_k]] > 1:
                    d_cnt[A[start_k]] -= 1
                    start_k += 1
                ans += start_k - start + 1
        return ans

if __name__== '__main__':
    solution = Solution()
    A = [1,2,1,3,4]
    K = 3
    result = solution.subarraysWithKDistinct(A, K)
    print(result)
true
true
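For reference, the same count is often computed as atMost(K) minus atMost(K-1) with a plain sliding window; a self-contained version of that formulation, shown here only as a cross-check, agrees with the solution above on its own test case:

```python
from collections import defaultdict

def subarrays_with_k_distinct(nums, k):
    # Exactly-K distinct equals atMost(K) - atMost(K - 1).
    def at_most(limit):
        count = defaultdict(int)
        left = res = 0
        for right, x in enumerate(nums):
            count[x] += 1
            while len(count) > limit:
                count[nums[left]] -= 1
                if count[nums[left]] == 0:
                    del count[nums[left]]
                left += 1
            res += right - left + 1   # subarrays ending at `right`
        return res
    return at_most(k) - at_most(k - 1)

print(subarrays_with_k_distinct([1, 2, 1, 3, 4], 3))  # 3, matching the record's output
```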
1c436ef56a0d3571f86e1fca49e76adc1d362e2d
4,610
py
Python
benchmark/startQiskit_QC3054.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
benchmark/startQiskit_QC3054.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
benchmark/startQiskit_QC3054.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
# qubit number=4
# total number=46

import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx


def bitwise_xor(s: str, t: str) -> str:
    length = len(s)
    res = []
    for i in range(length):
        res.append(str(int(s[i]) ^ int(t[i])))
    return ''.join(res[::-1])


def bitwise_dot(s: str, t: str) -> str:
    length = len(s)
    res = 0
    for i in range(length):
        res += int(s[i]) * int(t[i])
    return str(res % 2)


def build_oracle(n: int, f) -> QuantumCircuit:
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle


def make_circuit(n:int,f) -> QuantumCircuit:
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=23
    prog.rx(-0.6848671984825748,input_qubit[1]) # number=26
    prog.cz(input_qubit[0],input_qubit[3]) # number=24
    prog.h(input_qubit[3]) # number=25
    prog.h(input_qubit[3]) # number=37
    prog.cz(input_qubit[0],input_qubit[3]) # number=38
    prog.h(input_qubit[3]) # number=39
    prog.cx(input_qubit[0],input_qubit[3]) # number=30
    prog.cx(input_qubit[0],input_qubit[3]) # number=40
    prog.x(input_qubit[3]) # number=41
    prog.cx(input_qubit[0],input_qubit[3]) # number=42
    prog.cx(input_qubit[0],input_qubit[3]) # number=32
    prog.h(input_qubit[3]) # number=33
    prog.cz(input_qubit[0],input_qubit[3]) # number=34
    prog.h(input_qubit[3]) # number=35
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[0]) # number=5

    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.cx(input_qubit[3],input_qubit[0]) # number=20
    prog.cx(input_qubit[3],input_qubit[0]) # number=43
    prog.z(input_qubit[3]) # number=44
    prog.cx(input_qubit[3],input_qubit[0]) # number=45
    prog.h(input_qubit[0]) # number=27
    prog.cz(input_qubit[3],input_qubit[0]) # number=28
    prog.h(input_qubit[0]) # number=29
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.h(input_qubit[1]) # number=36
    prog.y(input_qubit[2]) # number=11
    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog


if __name__ == '__main__':
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    writefile = open("../data/startQiskit_QC3054.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
36.015625
165
0.657918
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx


def bitwise_xor(s: str, t: str) -> str:
    length = len(s)
    res = []
    for i in range(length):
        res.append(str(int(s[i]) ^ int(t[i])))
    return ''.join(res[::-1])


def bitwise_dot(s: str, t: str) -> str:
    length = len(s)
    res = 0
    for i in range(length):
        res += int(s[i]) * int(t[i])
    return str(res % 2)


def build_oracle(n: int, f) -> QuantumCircuit:
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    return oracle


def make_circuit(n: int, f) -> QuantumCircuit:
    input_qubit = QuantumRegister(n, "qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3])
    prog.rx(-0.6848671984825748, input_qubit[1])
    prog.cz(input_qubit[0], input_qubit[3])
    prog.h(input_qubit[3])
    prog.h(input_qubit[3])
    prog.cz(input_qubit[0], input_qubit[3])
    prog.h(input_qubit[3])
    prog.cx(input_qubit[0], input_qubit[3])
    prog.cx(input_qubit[0], input_qubit[3])
    prog.x(input_qubit[3])
    prog.cx(input_qubit[0], input_qubit[3])
    prog.cx(input_qubit[0], input_qubit[3])
    prog.h(input_qubit[3])
    prog.cz(input_qubit[0], input_qubit[3])
    prog.h(input_qubit[3])
    prog.cx(input_qubit[0], input_qubit[3])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    prog.y(input_qubit[3])
    prog.h(input_qubit[0])

    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(), [input_qubit[i] for i in range(n-1)] + [input_qubit[n-1]])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.cx(input_qubit[3], input_qubit[0])
    prog.cx(input_qubit[3], input_qubit[0])
    prog.z(input_qubit[3])
    prog.cx(input_qubit[3], input_qubit[0])
    prog.h(input_qubit[0])
    prog.cz(input_qubit[3], input_qubit[0])
    prog.h(input_qubit[0])
    prog.h(input_qubit[3])
    prog.h(input_qubit[0])
    prog.y(input_qubit[2])
    prog.h(input_qubit[1])
    prog.y(input_qubit[2])

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog


if __name__ == '__main__':
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4, f)
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))

    sample_shot = 8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)

    writefile = open("../data/startQiskit_QC3054.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
true
true
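A quick way to check this record's circuit without IBMQ credentials is a local simulator run. A minimal sketch, reusing the record's own make_circuit, bitwise_dot, and bitwise_xor, and assuming the same legacy qiskit API the file already imports (BasicAer and execute, which were removed in later qiskit releases):

from qiskit import BasicAer, execute

# same f as the record's __main__: an inner-product oracle over a = "111"
f = lambda rep: bitwise_xor(bitwise_dot("111", rep), "0")
prog = make_circuit(4, f)
local_backend = BasicAer.get_backend('qasm_simulator')  # local, no IBMQ account needed
counts = execute(prog, backend=local_backend, shots=1000).result().get_counts()
print(counts)  # histogram over the 4 measured bits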
1c436f5695176a99b1087e41e25bf532fa3c7feb
18,608
py
Python
tensornets/contrib_layers/optimizers.py
mehrdad-shokri/tensornets
e36eff73e5fc984977c5ceadefc1adb089e7bab5
[ "MIT" ]
1,057
2017-10-13T08:30:16.000Z
2022-03-13T19:20:22.000Z
tensornets/contrib_layers/optimizers.py
mehrdad-shokri/tensornets
e36eff73e5fc984977c5ceadefc1adb089e7bab5
[ "MIT" ]
62
2017-11-13T00:10:56.000Z
2021-11-04T09:22:43.000Z
tensornets/contrib_layers/optimizers.py
mehrdad-shokri/tensornets
e36eff73e5fc984977c5ceadefc1adb089e7bab5
[ "MIT" ]
210
2017-11-02T08:41:49.000Z
2022-03-13T19:20:25.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Optimizer ops for use in layers and tf.learn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from .. import contrib_framework from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables as vars_ from tensorflow.python.summary import summary from tensorflow.python.training import moving_averages from tensorflow.python.training import optimizer as optimizer_ from tensorflow.python.training import training as train OPTIMIZER_CLS_NAMES = { "Adagrad": train.AdagradOptimizer, "Adam": train.AdamOptimizer, "Ftrl": train.FtrlOptimizer, "Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint: disable=line-too-long "RMSProp": train.RMSPropOptimizer, "SGD": train.GradientDescentOptimizer, } OPTIMIZER_SUMMARIES = [ "learning_rate", "loss", "gradients", "gradient_norm", "global_gradient_norm", ] def optimize_loss(loss, global_step, learning_rate, optimizer, gradient_noise_scale=None, gradient_multipliers=None, clip_gradients=None, learning_rate_decay_fn=None, update_ops=None, variables=None, name=None, summaries=None, colocate_gradients_with_ops=False, increment_global_step=True): """Given loss and parameters for optimizer, returns a training op. Various ways of passing optimizers include: - by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES for full list. E.g. `optimize_loss(..., optimizer='Adam')`. - by function taking learning rate `Tensor` as argument and returning an `Optimizer` instance. E.g. `optimize_loss(..., optimizer=lambda lr: tf.compat.v1.train.MomentumOptimizer(lr, momentum=0.5))`. Alternatively, if `learning_rate` is `None`, the function takes no arguments. E.g. `optimize_loss(..., learning_rate=None, optimizer=lambda: tf.compat.v1.train.MomentumOptimizer(0.5, momentum=0.5))`. - by a subclass of `Optimizer` having a single-argument constructor (the argument is the learning rate), such as AdamOptimizer or AdagradOptimizer. E.g. `optimize_loss(..., optimizer=tf.compat.v1.train.AdagradOptimizer)`. - by an instance of a subclass of `Optimizer`. E.g., `optimize_loss(..., optimizer=tf.compat.v1.train.AdagradOptimizer(0.5))`. Args: loss: Scalar `Tensor`. global_step: Scalar int `Tensor`, step counter to update on each step unless `increment_global_step` is `False`. If not supplied, it will be fetched from the default graph (see `tf.compat.v1.train.get_global_step` for details). 
If it has not been created, no step will be incremented with each weight update. `learning_rate_decay_fn` requires `global_step`. learning_rate: float or `Tensor`, magnitude of update per each training step. Can be `None`. optimizer: string, class or optimizer instance, used as trainer. string should be name of optimizer, like 'SGD', 'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant. class should be sub-class of `tf.Optimizer` that implements `compute_gradients` and `apply_gradients` functions. optimizer instance should be instantiation of `tf.Optimizer` sub-class and have `compute_gradients` and `apply_gradients` functions. gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this value. gradient_multipliers: dict of variables or variable names to floats. If present, gradients for specified variables will be multiplied by given constant. clip_gradients: float, callable or `None`. If a float is provided, a global clipping is applied to prevent the norm of the gradient from exceeding this value. Alternatively, a callable can be provided, e.g., `adaptive_clipping_fn()`. This callable takes a list of `(gradients, variables)` tuples and returns the same thing with the gradients modified. learning_rate_decay_fn: function, takes `learning_rate` and `global_step` `Tensor`s, returns `Tensor`. Can be used to implement any learning rate decay functions. For example: `tf.compat.v1.train.exponential_decay`. Ignored if `learning_rate` is not supplied. update_ops: list of update `Operation`s to execute at each step. If `None`, uses elements of UPDATE_OPS collection. The order of execution between `update_ops` and `loss` is non-deterministic. variables: list of variables to optimize or `None` to use all trainable variables. name: The name for this operation is used to scope operations and summaries. summaries: List of internal quantities to visualize on tensorboard. If not set, the loss, the learning rate, and the global norm of the gradients will be reported. The complete list of possible values is in OPTIMIZER_SUMMARIES. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. increment_global_step: Whether to increment `global_step`. If your model calls `optimize_loss` multiple times per training step (e.g. to optimize different parts of the model), use this arg to avoid incrementing `global_step` more times than necessary. Returns: Training op. Raises: ValueError: if: * `loss` is an invalid type or shape. * `global_step` is an invalid type or shape. * `learning_rate` is an invalid type or value. * `optimizer` has the wrong type. * `clip_gradients` is neither float nor callable. * `learning_rate` and `learning_rate_decay_fn` are supplied, but no `global_step` is available. * `gradients` is empty. """ loss = ops.convert_to_tensor(loss) contrib_framework.assert_scalar(loss) if global_step is None: global_step = train.get_global_step() else: train.assert_global_step(global_step) with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]): # Update ops take UPDATE_OPS collection if not provided. if update_ops is None: update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS)) # Make sure update ops are ran before computing loss. if update_ops: loss = control_flow_ops.with_dependencies(list(update_ops), loss) # Learning rate variable, with possible decay. 
lr = None if learning_rate is not None: if (isinstance(learning_rate, ops.Tensor) and learning_rate.get_shape().ndims == 0): lr = learning_rate elif isinstance(learning_rate, float): if learning_rate < 0.0: raise ValueError("Invalid learning_rate %s.", learning_rate) lr = vs.get_variable( "learning_rate", [], trainable=False, initializer=init_ops.constant_initializer(learning_rate)) else: raise ValueError("Learning rate should be 0d Tensor or float. " "Got %s of type %s" % (str(learning_rate), str(type(learning_rate)))) if summaries is None: summaries = ["loss", "learning_rate", "global_gradient_norm"] else: for summ in summaries: if summ not in OPTIMIZER_SUMMARIES: raise ValueError("Summaries should be one of [%s], you provided %s." % (", ".join(OPTIMIZER_SUMMARIES), summ)) if learning_rate is not None and learning_rate_decay_fn is not None: if global_step is None: raise ValueError("global_step is required for learning_rate_decay_fn.") lr = learning_rate_decay_fn(lr, global_step) if "learning_rate" in summaries: summary.scalar("learning_rate", lr) # Create optimizer, given specified parameters. if isinstance(optimizer, six.string_types): if lr is None: raise ValueError("Learning rate is None, but should be specified if " "optimizer is string (%s)." % optimizer) if optimizer not in OPTIMIZER_CLS_NAMES: raise ValueError( "Optimizer name should be one of [%s], you provided %s." % (", ".join(OPTIMIZER_CLS_NAMES), optimizer)) opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr) elif (isinstance(optimizer, type) and issubclass(optimizer, optimizer_.Optimizer)): if lr is None: raise ValueError("Learning rate is None, but should be specified if " "optimizer is class (%s)." % optimizer) opt = optimizer(learning_rate=lr) elif isinstance(optimizer, optimizer_.Optimizer): opt = optimizer elif callable(optimizer): if learning_rate is not None: opt = optimizer(lr) else: opt = optimizer() if not isinstance(opt, optimizer_.Optimizer): raise ValueError("Unrecognized optimizer: function should return " "subclass of Optimizer. Got %s." % str(opt)) else: raise ValueError("Unrecognized optimizer: should be string, " "subclass of Optimizer, instance of " "subclass of Optimizer or function with one argument. " "Got %s." % str(optimizer)) # All trainable variables, if specific variables are not specified. if variables is None: variables = vars_.trainable_variables() # Compute gradients. gradients = opt.compute_gradients( loss, variables, colocate_gradients_with_ops=colocate_gradients_with_ops) # Optionally add gradient noise. if gradient_noise_scale is not None: gradients = _add_scaled_noise_to_gradients(gradients, gradient_noise_scale) # Multiply some gradients. if gradient_multipliers is not None: gradients = _multiply_gradients(gradients, gradient_multipliers) if not gradients: raise ValueError( "Empty list of (gradient, var) pairs encountered. This is most " "likely to be caused by an improper value of gradient_multipliers.") if "global_gradient_norm" in summaries or "gradient_norm" in summaries: summary.scalar("global_norm/gradient_norm", clip_ops.global_norm(list(zip(*gradients))[0])) # Optionally clip gradients by global norm. if isinstance(clip_gradients, float): gradients = _clip_gradients_by_norm(gradients, clip_gradients) elif callable(clip_gradients): gradients = clip_gradients(gradients) elif clip_gradients is not None: raise ValueError("Unknown type %s for clip_gradients" % type(clip_gradients)) # Add scalar summary for loss. 
if "loss" in summaries: summary.scalar("loss", loss) # Add histograms for variables, gradients and gradient norms. for gradient, variable in gradients: if isinstance(gradient, ops.IndexedSlices): grad_values = gradient.values else: grad_values = gradient if grad_values is not None: var_name = variable.name.replace(":", "_") if "gradients" in summaries: summary.histogram("gradients/%s" % var_name, grad_values) if "gradient_norm" in summaries: summary.scalar("gradient_norm/%s" % var_name, clip_ops.global_norm([grad_values])) if clip_gradients is not None and ("global_gradient_norm" in summaries or "gradient_norm" in summaries): summary.scalar("global_norm/clipped_gradient_norm", clip_ops.global_norm(list(zip(*gradients))[0])) # Create gradient updates. grad_updates = opt.apply_gradients( gradients, global_step=global_step if increment_global_step else None, name="train") # Ensure the train_tensor computes grad_updates. train_tensor = control_flow_ops.with_dependencies([grad_updates], loss) return train_tensor def _clip_gradients_by_norm(grads_and_vars, clip_gradients): """Clips gradients by global norm.""" gradients, variables = zip(*grads_and_vars) clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients) return list(zip(clipped_gradients, variables)) def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name): """Find max_norm given norm and previous average.""" with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]): log_norm = math_ops.log(norm + epsilon) def moving_average(name, value, decay): moving_average_variable = vs.get_variable( name, shape=value.get_shape(), dtype=value.dtype, initializer=init_ops.zeros_initializer(), trainable=False) return moving_averages.assign_moving_average( moving_average_variable, value, decay, zero_debias=False) # quicker adaptation at the beginning if global_step is not None: n = math_ops.cast(global_step, dtypes.float32) decay = math_ops.minimum(decay, n / (n + 1.)) # update averages mean = moving_average("mean", log_norm, decay) sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay) variance = sq_mean - math_ops.square(mean) std = math_ops.sqrt(math_ops.maximum(epsilon, variance)) max_norms = math_ops.exp(mean + std_factor * std) return max_norms, mean def adaptive_clipping_fn(std_factor=2., decay=0.95, static_max_norm=None, global_step=None, report_summary=False, epsilon=1e-8, name=None): """Adapt the clipping value using statistics on the norms. Implement adaptive gradient as presented in section 3.2.1 of https://arxiv.org/abs/1412.1602. Keeps a moving average of the mean and std of the log(norm) of the gradient. If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be rescaled such that the global norm becomes `exp(mean)`. Args: std_factor: Python scaler (or tensor). `max_norm = exp(mean + std_factor*std)` decay: The smoothing factor of the moving averages. static_max_norm: If provided, will threshold the norm to this value as an extra safety. global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`. This provides a quicker adaptation of the mean for the first steps. report_summary: If `True`, will add histogram summaries of the `max_norm`. epsilon: Small value chosen to avoid zero variance. name: The name for this operation is used to scope operations and summaries. Returns: A function for applying gradient clipping. 
""" def gradient_clipping(grads_and_vars): """Internal function for adaptive clipping.""" grads, variables = zip(*grads_and_vars) norm = clip_ops.global_norm(grads) max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name) # reports the max gradient norm for debugging if report_summary: summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm) # factor will be 1. if norm is smaller than max_norm factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm), math_ops.exp(log_mean) / norm) if static_max_norm is not None: factor = math_ops.minimum(static_max_norm / norm, factor) # apply factor clipped_grads = [] for grad in grads: if grad is None: clipped_grads.append(None) elif isinstance(grad, ops.IndexedSlices): clipped_grads.append( ops.IndexedSlices(grad.values * factor, grad.indices, grad.dense_shape)) else: clipped_grads.append(grad * factor) return list(zip(clipped_grads, variables)) return gradient_clipping def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale): """Adds scaled noise from a 0-mean normal distribution to gradients.""" gradients, variables = zip(*grads_and_vars) noisy_gradients = [] for gradient in gradients: if gradient is None: noisy_gradients.append(None) continue if isinstance(gradient, ops.IndexedSlices): gradient_shape = gradient.dense_shape else: gradient_shape = gradient.get_shape() noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale noisy_gradients.append(gradient + noise) return list(zip(noisy_gradients, variables)) def _multiply_gradients(grads_and_vars, gradient_multipliers): """Multiply specified gradients.""" multiplied_grads_and_vars = [] for grad, var in grads_and_vars: if (grad is not None and (var in gradient_multipliers or var.name in gradient_multipliers)): key = var if var in gradient_multipliers else var.name multiplier = gradient_multipliers[key] if isinstance(grad, ops.IndexedSlices): grad_values = grad.values * multiplier grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape) else: grad *= math_ops.cast(multiplier, grad.dtype) multiplied_grads_and_vars.append((grad, var)) return multiplied_grads_and_vars
42.195011
124
0.680729
from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from .. import contrib_framework from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables as vars_ from tensorflow.python.summary import summary from tensorflow.python.training import moving_averages from tensorflow.python.training import optimizer as optimizer_ from tensorflow.python.training import training as train OPTIMIZER_CLS_NAMES = { "Adagrad": train.AdagradOptimizer, "Adam": train.AdamOptimizer, "Ftrl": train.FtrlOptimizer, "Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), "RMSProp": train.RMSPropOptimizer, "SGD": train.GradientDescentOptimizer, } OPTIMIZER_SUMMARIES = [ "learning_rate", "loss", "gradients", "gradient_norm", "global_gradient_norm", ] def optimize_loss(loss, global_step, learning_rate, optimizer, gradient_noise_scale=None, gradient_multipliers=None, clip_gradients=None, learning_rate_decay_fn=None, update_ops=None, variables=None, name=None, summaries=None, colocate_gradients_with_ops=False, increment_global_step=True): loss = ops.convert_to_tensor(loss) contrib_framework.assert_scalar(loss) if global_step is None: global_step = train.get_global_step() else: train.assert_global_step(global_step) with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]): if update_ops is None: update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS)) if update_ops: loss = control_flow_ops.with_dependencies(list(update_ops), loss) lr = None if learning_rate is not None: if (isinstance(learning_rate, ops.Tensor) and learning_rate.get_shape().ndims == 0): lr = learning_rate elif isinstance(learning_rate, float): if learning_rate < 0.0: raise ValueError("Invalid learning_rate %s.", learning_rate) lr = vs.get_variable( "learning_rate", [], trainable=False, initializer=init_ops.constant_initializer(learning_rate)) else: raise ValueError("Learning rate should be 0d Tensor or float. " "Got %s of type %s" % (str(learning_rate), str(type(learning_rate)))) if summaries is None: summaries = ["loss", "learning_rate", "global_gradient_norm"] else: for summ in summaries: if summ not in OPTIMIZER_SUMMARIES: raise ValueError("Summaries should be one of [%s], you provided %s." % (", ".join(OPTIMIZER_SUMMARIES), summ)) if learning_rate is not None and learning_rate_decay_fn is not None: if global_step is None: raise ValueError("global_step is required for learning_rate_decay_fn.") lr = learning_rate_decay_fn(lr, global_step) if "learning_rate" in summaries: summary.scalar("learning_rate", lr) if isinstance(optimizer, six.string_types): if lr is None: raise ValueError("Learning rate is None, but should be specified if " "optimizer is string (%s)." % optimizer) if optimizer not in OPTIMIZER_CLS_NAMES: raise ValueError( "Optimizer name should be one of [%s], you provided %s." 
% (", ".join(OPTIMIZER_CLS_NAMES), optimizer)) opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr) elif (isinstance(optimizer, type) and issubclass(optimizer, optimizer_.Optimizer)): if lr is None: raise ValueError("Learning rate is None, but should be specified if " "optimizer is class (%s)." % optimizer) opt = optimizer(learning_rate=lr) elif isinstance(optimizer, optimizer_.Optimizer): opt = optimizer elif callable(optimizer): if learning_rate is not None: opt = optimizer(lr) else: opt = optimizer() if not isinstance(opt, optimizer_.Optimizer): raise ValueError("Unrecognized optimizer: function should return " "subclass of Optimizer. Got %s." % str(opt)) else: raise ValueError("Unrecognized optimizer: should be string, " "subclass of Optimizer, instance of " "subclass of Optimizer or function with one argument. " "Got %s." % str(optimizer)) if variables is None: variables = vars_.trainable_variables() gradients = opt.compute_gradients( loss, variables, colocate_gradients_with_ops=colocate_gradients_with_ops) if gradient_noise_scale is not None: gradients = _add_scaled_noise_to_gradients(gradients, gradient_noise_scale) if gradient_multipliers is not None: gradients = _multiply_gradients(gradients, gradient_multipliers) if not gradients: raise ValueError( "Empty list of (gradient, var) pairs encountered. This is most " "likely to be caused by an improper value of gradient_multipliers.") if "global_gradient_norm" in summaries or "gradient_norm" in summaries: summary.scalar("global_norm/gradient_norm", clip_ops.global_norm(list(zip(*gradients))[0])) if isinstance(clip_gradients, float): gradients = _clip_gradients_by_norm(gradients, clip_gradients) elif callable(clip_gradients): gradients = clip_gradients(gradients) elif clip_gradients is not None: raise ValueError("Unknown type %s for clip_gradients" % type(clip_gradients)) if "loss" in summaries: summary.scalar("loss", loss) for gradient, variable in gradients: if isinstance(gradient, ops.IndexedSlices): grad_values = gradient.values else: grad_values = gradient if grad_values is not None: var_name = variable.name.replace(":", "_") if "gradients" in summaries: summary.histogram("gradients/%s" % var_name, grad_values) if "gradient_norm" in summaries: summary.scalar("gradient_norm/%s" % var_name, clip_ops.global_norm([grad_values])) if clip_gradients is not None and ("global_gradient_norm" in summaries or "gradient_norm" in summaries): summary.scalar("global_norm/clipped_gradient_norm", clip_ops.global_norm(list(zip(*gradients))[0])) grad_updates = opt.apply_gradients( gradients, global_step=global_step if increment_global_step else None, name="train") train_tensor = control_flow_ops.with_dependencies([grad_updates], loss) return train_tensor def _clip_gradients_by_norm(grads_and_vars, clip_gradients): gradients, variables = zip(*grads_and_vars) clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients) return list(zip(clipped_gradients, variables)) def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name): with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]): log_norm = math_ops.log(norm + epsilon) def moving_average(name, value, decay): moving_average_variable = vs.get_variable( name, shape=value.get_shape(), dtype=value.dtype, initializer=init_ops.zeros_initializer(), trainable=False) return moving_averages.assign_moving_average( moving_average_variable, value, decay, zero_debias=False) if global_step is not None: n = math_ops.cast(global_step, dtypes.float32) decay = math_ops.minimum(decay, n 
/ (n + 1.)) mean = moving_average("mean", log_norm, decay) sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay) variance = sq_mean - math_ops.square(mean) std = math_ops.sqrt(math_ops.maximum(epsilon, variance)) max_norms = math_ops.exp(mean + std_factor * std) return max_norms, mean def adaptive_clipping_fn(std_factor=2., decay=0.95, static_max_norm=None, global_step=None, report_summary=False, epsilon=1e-8, name=None): def gradient_clipping(grads_and_vars): grads, variables = zip(*grads_and_vars) norm = clip_ops.global_norm(grads) max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name) if report_summary: summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm) factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm), math_ops.exp(log_mean) / norm) if static_max_norm is not None: factor = math_ops.minimum(static_max_norm / norm, factor) clipped_grads = [] for grad in grads: if grad is None: clipped_grads.append(None) elif isinstance(grad, ops.IndexedSlices): clipped_grads.append( ops.IndexedSlices(grad.values * factor, grad.indices, grad.dense_shape)) else: clipped_grads.append(grad * factor) return list(zip(clipped_grads, variables)) return gradient_clipping def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale): gradients, variables = zip(*grads_and_vars) noisy_gradients = [] for gradient in gradients: if gradient is None: noisy_gradients.append(None) continue if isinstance(gradient, ops.IndexedSlices): gradient_shape = gradient.dense_shape else: gradient_shape = gradient.get_shape() noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale noisy_gradients.append(gradient + noise) return list(zip(noisy_gradients, variables)) def _multiply_gradients(grads_and_vars, gradient_multipliers): multiplied_grads_and_vars = [] for grad, var in grads_and_vars: if (grad is not None and (var in gradient_multipliers or var.name in gradient_multipliers)): key = var if var in gradient_multipliers else var.name multiplier = gradient_multipliers[key] if isinstance(grad, ops.IndexedSlices): grad_values = grad.values * multiplier grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape) else: grad *= math_ops.cast(multiplier, grad.dtype) multiplied_grads_and_vars.append((grad, var)) return multiplied_grads_and_vars
true
true
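The optimize_loss docstring above enumerates the accepted optimizer forms. A minimal graph-mode usage sketch, assuming TF 1.x behavior via tf.compat.v1 and that this module is importable under the record's own path (tensornets.contrib_layers); the variable names are illustrative only:

import tensorflow.compat.v1 as tf
from tensornets.contrib_layers import optimizers

tf.disable_v2_behavior()

w = tf.get_variable("w", [], initializer=tf.constant_initializer(5.0))
loss = tf.square(w - 3.0)  # scalar loss, as optimize_loss requires
global_step = tf.train.get_or_create_global_step()

train_op = optimizers.optimize_loss(
    loss, global_step,
    learning_rate=0.1,
    optimizer="SGD",  # string lookup in OPTIMIZER_CLS_NAMES
    clip_gradients=optimizers.adaptive_clipping_fn(std_factor=2., decay=0.95))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(20):
        sess.run(train_op)  # each run applies clipped gradients and bumps global_step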
1c436f90b8d2bf1ddeb5aaa0ae0d9ea423af407f
2,692
py
Python
sarpy/io/product/converter.py
jordan-heemskerk/sarpy
9684c3e58a8e0d0db0cb9b16a5adb79fe9852a0a
[ "MIT" ]
null
null
null
sarpy/io/product/converter.py
jordan-heemskerk/sarpy
9684c3e58a8e0d0db0cb9b16a5adb79fe9852a0a
[ "MIT" ]
null
null
null
sarpy/io/product/converter.py
jordan-heemskerk/sarpy
9684c3e58a8e0d0db0cb9b16a5adb79fe9852a0a
[ "MIT" ]
null
null
null
""" This module provide utilities for opening any files analogous to Sensor Independent Derived Data, namely detected images in the ground plane. """ import os import sys import pkgutil from importlib import import_module from sarpy.io.general.base import BaseReader __classification__ = "UNCLASSIFIED" __author__ = "Thomas McCullough" ########### # Module variables _openers = [] _parsed_openers = False def register_opener(open_func): """ Provide a new opener. Parameters ---------- open_func This is required to be a function which takes a single argument (file name). This function should return a sarpy.io.general.base.BaseReader instance if the referenced file is viable for the underlying type, and None otherwise. Returns ------- None """ if not callable(open_func): raise TypeError('open_func must be a callable') if open_func not in _openers: _openers.append(open_func) def parse_openers(): """ Automatically find the viable openers (i.e. :func:`is_a`) in the various modules. Returns ------- """ global _parsed_openers if _parsed_openers: return _parsed_openers = True def check_module(mod_name): # import the module import_module(mod_name) # fetch the module from the modules dict module = sys.modules[mod_name] # see if it has an is_a function, if so, register it if hasattr(module, 'is_a'): register_opener(module.is_a) # walk down any subpackages path, fil = os.path.split(module.__file__) if not fil.startswith('__init__.py'): # there are no subpackages return for sub_module in pkgutil.walk_packages([path, ]): _, sub_module_name, _ = sub_module sub_name = "{}.{}".format(mod_name, sub_module_name) check_module(sub_name) check_module('sarpy.io.product') def open_product(file_name): """ Given a file, try to find and return the appropriate reader object. Parameters ---------- file_name : str Returns ------- BaseReader Raises ------ IOError """ if not os.path.exists(file_name): raise IOError('File {} does not exist.'.format(file_name)) # parse openers, if not already done parse_openers() # see if we can find a reader though trial and error for opener in _openers: reader = opener(file_name) if reader is not None: return reader # If for loop completes, no matching file format was found. raise IOError('Unable to determine product image format.')
24.252252
85
0.641902
import os
import sys
import pkgutil
from importlib import import_module

from sarpy.io.general.base import BaseReader

__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"


_openers = []
_parsed_openers = False


def register_opener(open_func):
    if not callable(open_func):
        raise TypeError('open_func must be a callable')
    if open_func not in _openers:
        _openers.append(open_func)


def parse_openers():
    global _parsed_openers
    if _parsed_openers:
        return
    _parsed_openers = True

    def check_module(mod_name):
        import_module(mod_name)
        module = sys.modules[mod_name]
        if hasattr(module, 'is_a'):
            register_opener(module.is_a)
        path, fil = os.path.split(module.__file__)
        if not fil.startswith('__init__.py'):
            return
        for sub_module in pkgutil.walk_packages([path, ]):
            _, sub_module_name, _ = sub_module
            sub_name = "{}.{}".format(mod_name, sub_module_name)
            check_module(sub_name)

    check_module('sarpy.io.product')


def open_product(file_name):
    if not os.path.exists(file_name):
        raise IOError('File {} does not exist.'.format(file_name))
    parse_openers()
    for opener in _openers:
        reader = opener(file_name)
        if reader is not None:
            return reader

    raise IOError('Unable to determine product image format.')
true
true
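Given the registry contract documented in register_opener, a sketch of plugging in a custom format follows; is_my_format, the b'MYFM' magic bytes, and MyReader are all hypothetical, while the import path matches this record's file location:

from sarpy.io.product.converter import register_opener, open_product

def is_my_format(file_name):
    # return a reader if the file matches, None otherwise, per the contract
    with open(file_name, 'rb') as fi:
        header = fi.read(4)
    if header == b'MYFM':            # hypothetical magic bytes
        return MyReader(file_name)   # hypothetical BaseReader subclass
    return None

register_opener(is_my_format)
# open_product('image.myfm') will now try is_my_format alongside the auto-discovered openers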
1c436faf2475e151b87920566c243934785ebf80
1,664
py
Python
buff/trinket/spider.py
dannl/hunter-sim-classic
e32cccc8431cc3e78b08067dd58e10fec52aac6a
[ "MIT" ]
null
null
null
buff/trinket/spider.py
dannl/hunter-sim-classic
e32cccc8431cc3e78b08067dd58e10fec52aac6a
[ "MIT" ]
null
null
null
buff/trinket/spider.py
dannl/hunter-sim-classic
e32cccc8431cc3e78b08067dd58e10fec52aac6a
[ "MIT" ]
null
null
null
from buff import LastingBuff


class Spider(LastingBuff):

    def __init__(self):
        super().__init__('spider', 2 * 60, 15)

    def equip(self, engine, char_state):
        char_state.crit += 0.01

    def dequip(self, engine, char_state):
        char_state.crit -= 0.01

    def timeout(self, rotation, engine, char_state):
        char_state.remove_haste(1.2)

    def perform_impl(self, rotation, engine, char_state):
        char_state.apply_haste(1.2, engine.current_priority(), engine.current_priority() + self.duration)
        # rotation.rapid.next_available = max(engine.current_priority() + 20, rotation.rapid.next_available)

    def trigger(self, rotation, engine, char_state):
        if self.next_available > engine.current_priority():
            return False
        if rotation.is_casting():
            return False
        trinkets = rotation.current_trinket
        if 'bugs' in trinkets:
            bugs = rotation.get_trinket('bugs')
            if not (0 < bugs.next_available - engine.current_priority() < 3 * 60 - 13):
                return False
        if 'zug' in trinkets:
            zug = rotation.get_trinket('zug')
            if not (0 < zug.next_available - engine.current_priority() < 3 * 60 - 0.43):
                return False
        if 'sand_bug' in trinkets:
            sand_bugs = rotation.get_trinket('sand_bug')
            if not (0 < sand_bugs.next_available - engine.current_priority() < 2 * 60 - 4):
                return False
        rotation.statistics.add_start(self.name, engine.current_priority())
        self.perform(rotation, engine, char_state)
        return True
37.818182
108
0.616587
from buff import LastingBuff


class Spider(LastingBuff):

    def __init__(self):
        super().__init__('spider', 2 * 60, 15)

    def equip(self, engine, char_state):
        char_state.crit += 0.01

    def dequip(self, engine, char_state):
        char_state.crit -= 0.01

    def timeout(self, rotation, engine, char_state):
        char_state.remove_haste(1.2)

    def perform_impl(self, rotation, engine, char_state):
        char_state.apply_haste(1.2, engine.current_priority(), engine.current_priority() + self.duration)

    def trigger(self, rotation, engine, char_state):
        if self.next_available > engine.current_priority():
            return False
        if rotation.is_casting():
            return False
        trinkets = rotation.current_trinket
        if 'bugs' in trinkets:
            bugs = rotation.get_trinket('bugs')
            if not (0 < bugs.next_available - engine.current_priority() < 3 * 60 - 13):
                return False
        if 'zug' in trinkets:
            zug = rotation.get_trinket('zug')
            if not (0 < zug.next_available - engine.current_priority() < 3 * 60 - 0.43):
                return False
        if 'sand_bug' in trinkets:
            sand_bugs = rotation.get_trinket('sand_bug')
            if not (0 < sand_bugs.next_available - engine.current_priority() < 2 * 60 - 4):
                return False
        rotation.statistics.add_start(self.name, engine.current_priority())
        self.perform(rotation, engine, char_state)
        return True
true
true
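The three window checks in Spider.trigger share one shape: skip firing if a competing trinket comes back within its cooldown minus some leeway. A sketch making that arithmetic explicit, with hypothetical names:

def blocks_use(other_next_available, now, other_cooldown, leeway):
    # True when the other trinket returns within (cooldown - leeway) seconds,
    # i.e. firing this trinket now would collide with the other's next use.
    return 0 < other_next_available - now < other_cooldown - leeway

# e.g. the 'bugs' check above is equivalent to
# blocks_use(bugs.next_available, engine.current_priority(), 3 * 60, 13)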
1c437097aa7324512cba0b35f6327c391f210265
9,375
py
Python
src/plotmanx/reporting.py
tokenchain/plotman
666e4e34eecda90aa94004ff042fefebfbf3fead
[ "Apache-2.0" ]
null
null
null
src/plotmanx/reporting.py
tokenchain/plotman
666e4e34eecda90aa94004ff042fefebfbf3fead
[ "Apache-2.0" ]
null
null
null
src/plotmanx/reporting.py
tokenchain/plotman
666e4e34eecda90aa94004ff042fefebfbf3fead
[ "Apache-2.0" ]
null
null
null
import math import os import psutil import texttable as tt # from somewhere? from . import archive, job, manager, plot_util, configuration def abbr_path(path, putative_prefix): if putative_prefix and path.startswith(putative_prefix): return os.path.relpath(path, putative_prefix) else: return path def phase_str(phase_pair): (ph, subph) = phase_pair return ((str(ph) if ph is not None else '?') + ':' + (str(subph) if subph is not None else '?')) def phases_str(phases, max_num=None): """ Take a list of phase-subphase pairs and return them as a compact string """ if not max_num or len(phases) <= max_num: return ' '.join([phase_str(pair) for pair in phases]) else: n_first = math.floor(max_num / 2) n_last = max_num - n_first n_elided = len(phases) - (n_first + n_last) first = ' '.join([phase_str(pair) for pair in phases[:n_first]]) elided = " [+%d] " % n_elided last = ' '.join([phase_str(pair) for pair in phases[n_first + n_elided:]]) return first + elided + last def n_at_ph(jobs, ph): return sum([1 for j in jobs if j.progress() == ph]) def n_to_char(n): n_to_char_map = dict(enumerate(" .:;!")) if n < 0: return 'X' # Should never be negative elif n >= len(n_to_char_map): n = len(n_to_char_map) - 1 return n_to_char_map[n] def n_to_char_emo(n): n_to_char_map = dict(enumerate(" 12345")) if n < 0: return 'X️' # Should never be negative elif n >= len(n_to_char_map): n = len(n_to_char_map) - 1 return n_to_char_map[n] def job_viz(jobs): # TODO: Rewrite this in a way that ensures we count every job # even if the reported phases don't line up with expectations. result = '' result += '(1)' for i in range(0, 8): result += n_to_char_emo(n_at_ph(jobs, (1, i))) result += '(2)' for i in range(0, 8): result += n_to_char_emo(n_at_ph(jobs, (2, i))) result += '(3)' for i in range(0, 7): result += n_to_char_emo(n_at_ph(jobs, (3, i))) result += '(4)' result += n_to_char_emo(n_at_ph(jobs, (4, 0))) return result def status_report(jobs, width, height=None, tmp_prefix='', dst_prefix=''): """ Height, if provided, will limit the number of rows in the table, showing first and last rows, row numbers and an elipsis in the middle. 
""" abbreviate_jobs_list = False n_begin_rows = 0 n_end_rows = 0 if height and height < len(jobs) + 1: # One row for header abbreviate_jobs_list = True if abbreviate_jobs_list: n_rows = height - 2 # Minus one for header, one for ellipsis n_begin_rows = int(n_rows / 2) n_end_rows = n_rows - n_begin_rows tab = tt.Texttable() headings = ['plot id', 'k', 'tmp', 'dst', 'wall', 'phase', 'tmp', 'pid', 'stat', 'mem', 'user', 'sys', 'io', 'freezed', 'logfile'] headingwidth = len(headings) if height: headings.insert(0, '#') tab.header(headings) tab.set_cols_dtype('t' * len(headings)) tab.set_cols_align('r' * len(headings)) tab.set_header_align('r' * len(headings)) for i, j in enumerate(sorted(jobs, key=job.Job.get_time_wall)): # Elipsis row if abbreviate_jobs_list and i == n_begin_rows: row = ['...'] + ([''] * headingwidth) # Omitted row elif abbreviate_jobs_list and i > n_begin_rows and i < (len(jobs) - n_end_rows): continue # Regular row else: try: with j.proc.oneshot(): row = [j.plot_id[:8], j.k, abbr_path(j.tmpdir, tmp_prefix), abbr_path(j.dstdir, dst_prefix), plot_util.time_format(j.get_time_wall()), phase_str(j.progress()), plot_util.human_format(j.get_tmp_usage(), 0), j.proc.pid, j.get_run_status(), plot_util.human_format(j.get_mem_usage(), 1), plot_util.time_format(j.get_time_user()), plot_util.time_format(j.get_time_sys()), plot_util.time_format(j.get_time_iowait()), plot_util.is_freezed(j), os.path.basename(j.logfile) ] except (psutil.NoSuchProcess, psutil.AccessDenied): # In case the job has disappeared row = [j.plot_id[:8]] + (['--'] * (headingwidth - 1)) if height: row.insert(0, '%3d' % i) tab.add_row(row) tab.set_max_width(width) tab.set_deco(0) # No borders # return ('tmp dir prefix: %s ; dst dir prefix: %s\n' % (tmp_prefix, dst_prefix) return tab.draw() def tmp_dir_report(jobs, dir_cfg, sched_cfg, width, start_row=None, end_row=None, prefix=''): '''start_row, end_row let you split the table up if you want''' tab = tt.Texttable() headings = ['tmp', 'ready', 'phases'] tab.header(headings) tab.set_cols_dtype('t' * len(headings)) tab.set_cols_align('r' * (len(headings) - 1) + 'l') for i, d in enumerate(sorted(dir_cfg.tmp)): if (start_row and i < start_row) or (end_row and i >= end_row): continue phases = sorted(job.job_phases_for_tmpdir(d, jobs)) ready = manager.phases_permit_new_job(phases, d, sched_cfg, dir_cfg) row = [abbr_path(d, prefix), 'OK' if ready else '--', phases_str(phases)] tab.add_row(row) tab.set_max_width(width) tab.set_deco(tt.Texttable.BORDER | tt.Texttable.HEADER) tab.set_deco(0) # No borders return tab.draw() def dst_dir_report(jobs, dstdirs, width, prefix=''): tab = tt.Texttable() dir2oldphase = manager.dstdirs_to_furthest_phase(jobs) dir2newphase = manager.dstdirs_to_youngest_phase(jobs) headings = ['dst', 'plots', 'GBfree', 'inbnd phases', 'pri'] tab.header(headings) tab.set_cols_dtype('t' * len(headings)) for d in sorted(dstdirs): # TODO: This logic is replicated in archive.py's priority computation, # maybe by moving more of the logic in to directory.py eldest_ph = dir2oldphase.get(d, (0, 0)) phases = job.job_phases_for_dstdir(d, jobs) dir_plots = plot_util.list_k32_plots(d) gb_free = int(plot_util.df_b(d) / plot_util.GB) n_plots = len(dir_plots) priority = archive.compute_priority(eldest_ph, gb_free, n_plots) row = [abbr_path(d, prefix), n_plots, gb_free, phases_str(phases, 5), priority] tab.add_row(row) tab.set_max_width(width) tab.set_deco(tt.Texttable.BORDER | tt.Texttable.HEADER) tab.set_deco(0) # No borders return tab.draw() def 
arch_dir_report(archdir_freebytes, width, prefix=''): cells = ['%s:%5dGB' % (abbr_path(d, prefix), int(int(space) / plot_util.GB)) for (d, space) in sorted(archdir_freebytes.items())] if not cells: return '' n_columns = int(width / (len(max(cells, key=len)) + 3)) tab = tt.Texttable() tab.set_max_width(width) for row in plot_util.column_wrap(cells, n_columns, filler=''): tab.add_row(row) tab.set_cols_align('r' * (n_columns)) tab.set_deco(tt.Texttable.VLINES) return tab.draw() # TODO: remove this def dirs_report(jobs, dir_cfg, sched_cfg, width): (is_dst, dst_dir) = configuration.get_dst_directories(dir_cfg) """ reports = [ tmp_dir_report(jobs, dir_cfg, sched_cfg, width), dst_dir_report(jobs, dir_cfg.dst, width), ] if dir_cfg.archive is not None: reports.extend([ 'archive dirs free space:', arch_dir_report(archive.get_archdir_freebytes(dir_cfg.archive), width), ]) """ # return '\n'.join(reports) + '\n' return ( tmp_dir_report(jobs, dir_cfg, sched_cfg, width) + '\n' + dst_dir_report(jobs, dst_dir, width) + '\n' + 'archive dirs free space:\n' + arch_dir_report(archive.get_archdir_freebytes(dir_cfg.archive), width) + '\n' ) def jsondata(jobs, tmp_prefix='', dst_prefix='') -> list: jobsr = list() for i, j in enumerate(sorted(jobs, key=job.Job.get_time_wall)): with j.proc.oneshot(): dictionary = { 'plotid': j.plot_id[:8], 'k': j.k, 'tmp': abbr_path(j.tmpdir, tmp_prefix), 'dst': abbr_path(j.dstdir, dst_prefix), 'wall': plot_util.time_format(j.get_time_wall()), 'phase': phase_str(j.progress()), 'tmpdisk': plot_util.human_format(j.get_tmp_usage(), 0), 'pid': j.proc.pid, 'stat': j.get_run_status(), 'mem': plot_util.human_format(j.get_mem_usage(), 1), 'user': plot_util.time_format(j.get_time_user()), 'sys': plot_util.time_format(j.get_time_sys()), 'io': plot_util.time_format(j.get_time_iowait()), 'freezed': plot_util.is_freezed(j), 'logfile': os.path.basename(j.logfile) } jobsr.append(dictionary) return jobsr
35.11236
134
0.58464
import math
import os

import psutil
import texttable as tt

from . import archive, job, manager, plot_util, configuration


def abbr_path(path, putative_prefix):
    if putative_prefix and path.startswith(putative_prefix):
        return os.path.relpath(path, putative_prefix)
    else:
        return path


def phase_str(phase_pair):
    (ph, subph) = phase_pair
    return ((str(ph) if ph is not None else '?') + ':' +
            (str(subph) if subph is not None else '?'))


def phases_str(phases, max_num=None):
    if not max_num or len(phases) <= max_num:
        return ' '.join([phase_str(pair) for pair in phases])
    else:
        n_first = math.floor(max_num / 2)
        n_last = max_num - n_first
        n_elided = len(phases) - (n_first + n_last)
        first = ' '.join([phase_str(pair) for pair in phases[:n_first]])
        elided = " [+%d] " % n_elided
        last = ' '.join([phase_str(pair) for pair in phases[n_first + n_elided:]])
        return first + elided + last


def n_at_ph(jobs, ph):
    return sum([1 for j in jobs if j.progress() == ph])


def n_to_char(n):
    n_to_char_map = dict(enumerate(" .:;!"))
    if n < 0:
        return 'X'
    elif n >= len(n_to_char_map):
        n = len(n_to_char_map) - 1
    return n_to_char_map[n]


def n_to_char_emo(n):
    n_to_char_map = dict(enumerate(" 12345"))
    if n < 0:
        return 'X️'
    elif n >= len(n_to_char_map):
        n = len(n_to_char_map) - 1
    return n_to_char_map[n]


def job_viz(jobs):
    result = ''
    result += '(1)'
    for i in range(0, 8):
        result += n_to_char_emo(n_at_ph(jobs, (1, i)))
    result += '(2)'
    for i in range(0, 8):
        result += n_to_char_emo(n_at_ph(jobs, (2, i)))
    result += '(3)'
    for i in range(0, 7):
        result += n_to_char_emo(n_at_ph(jobs, (3, i)))
    result += '(4)'
    result += n_to_char_emo(n_at_ph(jobs, (4, 0)))
    return result


def status_report(jobs, width, height=None, tmp_prefix='', dst_prefix=''):
    abbreviate_jobs_list = False
    n_begin_rows = 0
    n_end_rows = 0
    if height and height < len(jobs) + 1:  # One row for header
        abbreviate_jobs_list = True

    if abbreviate_jobs_list:
        n_rows = height - 2  # Minus one for header, one for ellipsis
        n_begin_rows = int(n_rows / 2)
        n_end_rows = n_rows - n_begin_rows

    tab = tt.Texttable()
    headings = ['plot id', 'k', 'tmp', 'dst', 'wall', 'phase', 'tmp', 'pid', 'stat', 'mem', 'user', 'sys', 'io', 'freezed', 'logfile']
    headingwidth = len(headings)
    if height:
        headings.insert(0, '#')
    tab.header(headings)
    tab.set_cols_dtype('t' * len(headings))
    tab.set_cols_align('r' * len(headings))
    tab.set_header_align('r' * len(headings))
    for i, j in enumerate(sorted(jobs, key=job.Job.get_time_wall)):
        # Elipsis row
        if abbreviate_jobs_list and i == n_begin_rows:
            row = ['...'] + ([''] * headingwidth)
        # Omitted row
        elif abbreviate_jobs_list and i > n_begin_rows and i < (len(jobs) - n_end_rows):
            continue
        # Regular row
        else:
            try:
                with j.proc.oneshot():
                    row = [j.plot_id[:8],
                           j.k,
                           abbr_path(j.tmpdir, tmp_prefix),
                           abbr_path(j.dstdir, dst_prefix),
                           plot_util.time_format(j.get_time_wall()),
                           phase_str(j.progress()),
                           plot_util.human_format(j.get_tmp_usage(), 0),
                           j.proc.pid,
                           j.get_run_status(),
                           plot_util.human_format(j.get_mem_usage(), 1),
                           plot_util.time_format(j.get_time_user()),
                           plot_util.time_format(j.get_time_sys()),
                           plot_util.time_format(j.get_time_iowait()),
                           plot_util.is_freezed(j),
                           os.path.basename(j.logfile)
                           ]
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                # In case the job has disappeared
                row = [j.plot_id[:8]] + (['--'] * (headingwidth - 1))

        if height:
            row.insert(0, '%3d' % i)

        tab.add_row(row)

    tab.set_max_width(width)
    tab.set_deco(0)  # No borders
    # return ('tmp dir prefix: %s ; dst dir prefix: %s\n' % (tmp_prefix, dst_prefix)
    return tab.draw()


def tmp_dir_report(jobs, dir_cfg, sched_cfg, width,
                   start_row=None, end_row=None, prefix=''):
    tab = tt.Texttable()
    headings = ['tmp', 'ready', 'phases']
    tab.header(headings)
    tab.set_cols_dtype('t' * len(headings))
    tab.set_cols_align('r' * (len(headings) - 1) + 'l')
    for i, d in enumerate(sorted(dir_cfg.tmp)):
        if (start_row and i < start_row) or (end_row and i >= end_row):
            continue
        phases = sorted(job.job_phases_for_tmpdir(d, jobs))
        ready = manager.phases_permit_new_job(phases, d, sched_cfg, dir_cfg)
        row = [abbr_path(d, prefix), 'OK' if ready else '--', phases_str(phases)]
        tab.add_row(row)

    tab.set_max_width(width)
    tab.set_deco(tt.Texttable.BORDER | tt.Texttable.HEADER)
    tab.set_deco(0)  # No borders
    return tab.draw()


def dst_dir_report(jobs, dstdirs, width, prefix=''):
    tab = tt.Texttable()
    dir2oldphase = manager.dstdirs_to_furthest_phase(jobs)
    dir2newphase = manager.dstdirs_to_youngest_phase(jobs)
    headings = ['dst', 'plots', 'GBfree', 'inbnd phases', 'pri']
    tab.header(headings)
    tab.set_cols_dtype('t' * len(headings))
    for d in sorted(dstdirs):
        # TODO: This logic is replicated in archive.py's priority computation,
        eldest_ph = dir2oldphase.get(d, (0, 0))
        phases = job.job_phases_for_dstdir(d, jobs)
        dir_plots = plot_util.list_k32_plots(d)
        gb_free = int(plot_util.df_b(d) / plot_util.GB)
        n_plots = len(dir_plots)
        priority = archive.compute_priority(eldest_ph, gb_free, n_plots)
        row = [abbr_path(d, prefix), n_plots, gb_free, phases_str(phases, 5), priority]
        tab.add_row(row)
    tab.set_max_width(width)
    tab.set_deco(tt.Texttable.BORDER | tt.Texttable.HEADER)
    tab.set_deco(0)
    return tab.draw()


def arch_dir_report(archdir_freebytes, width, prefix=''):
    cells = ['%s:%5dGB' % (abbr_path(d, prefix), int(int(space) / plot_util.GB))
             for (d, space) in sorted(archdir_freebytes.items())]
    if not cells:
        return ''

    n_columns = int(width / (len(max(cells, key=len)) + 3))
    tab = tt.Texttable()
    tab.set_max_width(width)
    for row in plot_util.column_wrap(cells, n_columns, filler=''):
        tab.add_row(row)
    tab.set_cols_align('r' * (n_columns))
    tab.set_deco(tt.Texttable.VLINES)
    return tab.draw()


def dirs_report(jobs, dir_cfg, sched_cfg, width):
    (is_dst, dst_dir) = configuration.get_dst_directories(dir_cfg)
    return (
        tmp_dir_report(jobs, dir_cfg, sched_cfg, width) + '\n' +
        dst_dir_report(jobs, dst_dir, width) + '\n' +
        'archive dirs free space:\n' +
        arch_dir_report(archive.get_archdir_freebytes(dir_cfg.archive), width) + '\n'
    )


def jsondata(jobs, tmp_prefix='', dst_prefix='') -> list:
    jobsr = list()
    for i, j in enumerate(sorted(jobs, key=job.Job.get_time_wall)):
        with j.proc.oneshot():
            dictionary = {
                'plotid': j.plot_id[:8],
                'k': j.k,
                'tmp': abbr_path(j.tmpdir, tmp_prefix),
                'dst': abbr_path(j.dstdir, dst_prefix),
                'wall': plot_util.time_format(j.get_time_wall()),
                'phase': phase_str(j.progress()),
                'tmpdisk': plot_util.human_format(j.get_tmp_usage(), 0),
                'pid': j.proc.pid,
                'stat': j.get_run_status(),
                'mem': plot_util.human_format(j.get_mem_usage(), 1),
                'user': plot_util.time_format(j.get_time_user()),
                'sys': plot_util.time_format(j.get_time_sys()),
                'io': plot_util.time_format(j.get_time_iowait()),
                'freezed': plot_util.is_freezed(j),
                'logfile': os.path.basename(j.logfile)
            }
            jobsr.append(dictionary)
    return jobsr
true
true
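The elision logic in phases_str above is easiest to see with concrete input; a short sketch reusing the function as defined, with made-up phase pairs:

pairs = [(1, 2), (1, 5), (2, 0), (2, 3), (3, 1), (3, 4), (4, 0)]
# max_num=4 keeps floor(4/2)=2 leading and 2 trailing entries and elides the middle 3
print(phases_str(pairs, max_num=4))  # -> "1:2 1:5 [+3] 3:4 4:0"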
1c4371ed067b7cc5d0e0120d86bfb0013fc0a9a5
2,906
py
Python
netbox/virtualization/api/views.py
mrevilme/netbox
51f6d2f45e1bb9d53da6a7f7733673a458c50060
[ "Apache-2.0" ]
1
2021-05-04T12:53:09.000Z
2021-05-04T12:53:09.000Z
netbox/virtualization/api/views.py
emersonfelipesp/netbox
fecca5ad83fb6b48a2f15982dfd3242653f105f9
[ "Apache-2.0" ]
null
null
null
netbox/virtualization/api/views.py
emersonfelipesp/netbox
fecca5ad83fb6b48a2f15982dfd3242653f105f9
[ "Apache-2.0" ]
null
null
null
from rest_framework.routers import APIRootView from dcim.models import Device from extras.api.views import ConfigContextQuerySetMixin, CustomFieldModelViewSet, ModelViewSet from utilities.utils import count_related from virtualization import filters from virtualization.models import Cluster, ClusterGroup, ClusterType, VirtualMachine, VMInterface from . import serializers class VirtualizationRootView(APIRootView): """ Virtualization API root view """ def get_view_name(self): return 'Virtualization' # # Clusters # class ClusterTypeViewSet(CustomFieldModelViewSet): queryset = ClusterType.objects.annotate( cluster_count=count_related(Cluster, 'type') ) serializer_class = serializers.ClusterTypeSerializer filterset_class = filters.ClusterTypeFilterSet class ClusterGroupViewSet(CustomFieldModelViewSet): queryset = ClusterGroup.objects.annotate( cluster_count=count_related(Cluster, 'group') ) serializer_class = serializers.ClusterGroupSerializer filterset_class = filters.ClusterGroupFilterSet class ClusterViewSet(CustomFieldModelViewSet): queryset = Cluster.objects.prefetch_related( 'type', 'group', 'tenant', 'site', 'tags' ).annotate( device_count=count_related(Device, 'cluster'), virtualmachine_count=count_related(VirtualMachine, 'cluster') ) serializer_class = serializers.ClusterSerializer filterset_class = filters.ClusterFilterSet # # Virtual machines # class VirtualMachineViewSet(ConfigContextQuerySetMixin, CustomFieldModelViewSet): queryset = VirtualMachine.objects.prefetch_related( 'cluster__site', 'role', 'tenant', 'platform', 'primary_ip4', 'primary_ip6', 'tags' ) filterset_class = filters.VirtualMachineFilterSet def get_serializer_class(self): """ Select the specific serializer based on the request context. If the `brief` query param equates to True, return the NestedVirtualMachineSerializer If the `exclude` query param includes `config_context` as a value, return the VirtualMachineSerializer Else, return the VirtualMachineWithConfigContextSerializer """ request = self.get_serializer_context()['request'] if request.query_params.get('brief', False): return serializers.NestedVirtualMachineSerializer elif 'config_context' in request.query_params.get('exclude', []): return serializers.VirtualMachineSerializer return serializers.VirtualMachineWithConfigContextSerializer class VMInterfaceViewSet(ModelViewSet): queryset = VMInterface.objects.prefetch_related( 'virtual_machine', 'parent', 'tags', 'tagged_vlans', 'ip_addresses' ) serializer_class = serializers.VMInterfaceSerializer filterset_class = filters.VMInterfaceFilterSet brief_prefetch_fields = ['virtual_machine']
33.022727
110
0.753269
from rest_framework.routers import APIRootView from dcim.models import Device from extras.api.views import ConfigContextQuerySetMixin, CustomFieldModelViewSet, ModelViewSet from utilities.utils import count_related from virtualization import filters from virtualization.models import Cluster, ClusterGroup, ClusterType, VirtualMachine, VMInterface from . import serializers class VirtualizationRootView(APIRootView): def get_view_name(self): return 'Virtualization' class ClusterTypeViewSet(CustomFieldModelViewSet): queryset = ClusterType.objects.annotate( cluster_count=count_related(Cluster, 'type') ) serializer_class = serializers.ClusterTypeSerializer filterset_class = filters.ClusterTypeFilterSet class ClusterGroupViewSet(CustomFieldModelViewSet): queryset = ClusterGroup.objects.annotate( cluster_count=count_related(Cluster, 'group') ) serializer_class = serializers.ClusterGroupSerializer filterset_class = filters.ClusterGroupFilterSet class ClusterViewSet(CustomFieldModelViewSet): queryset = Cluster.objects.prefetch_related( 'type', 'group', 'tenant', 'site', 'tags' ).annotate( device_count=count_related(Device, 'cluster'), virtualmachine_count=count_related(VirtualMachine, 'cluster') ) serializer_class = serializers.ClusterSerializer filterset_class = filters.ClusterFilterSet class VirtualMachineViewSet(ConfigContextQuerySetMixin, CustomFieldModelViewSet): queryset = VirtualMachine.objects.prefetch_related( 'cluster__site', 'role', 'tenant', 'platform', 'primary_ip4', 'primary_ip6', 'tags' ) filterset_class = filters.VirtualMachineFilterSet def get_serializer_class(self): request = self.get_serializer_context()['request'] if request.query_params.get('brief', False): return serializers.NestedVirtualMachineSerializer elif 'config_context' in request.query_params.get('exclude', []): return serializers.VirtualMachineSerializer return serializers.VirtualMachineWithConfigContextSerializer class VMInterfaceViewSet(ModelViewSet): queryset = VMInterface.objects.prefetch_related( 'virtual_machine', 'parent', 'tags', 'tagged_vlans', 'ip_addresses' ) serializer_class = serializers.VMInterfaceSerializer filterset_class = filters.VMInterfaceFilterSet brief_prefetch_fields = ['virtual_machine']
true
true
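VirtualMachineViewSet.get_serializer_class above keys entirely off two query parameters; assuming NetBox's usual URL naming for this endpoint (the path below is illustrative, not taken from the record), the mapping is:

# GET /api/virtualization/virtual-machines/?brief=true
#     -> NestedVirtualMachineSerializer
# GET /api/virtualization/virtual-machines/?exclude=config_context
#     -> VirtualMachineSerializer (skips rendering config contexts)
# GET /api/virtualization/virtual-machines/
#     -> VirtualMachineWithConfigContextSerializer (the default)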
1c43729c8f65039b3f467f122673f02a5cc4cbc1
11,256
py
Python
saleor/payment/gateways/np_atobarai/tests/test_api_helpers.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
1,392
2021-10-06T15:54:28.000Z
2022-03-31T20:50:55.000Z
saleor/payment/gateways/np_atobarai/tests/test_api_helpers.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
888
2021-10-06T10:48:54.000Z
2022-03-31T11:00:30.000Z
saleor/payment/gateways/np_atobarai/tests/test_api_helpers.py
victor-abz/saleor
f8e2b49703d995d4304d5a690dbe9c83631419d0
[ "CC-BY-4.0" ]
538
2021-10-07T16:21:27.000Z
2022-03-31T22:58:57.000Z
from dataclasses import fields
from decimal import Decimal
from unittest.mock import DEFAULT, Mock, patch, sentinel

import pytest
from posuto import Posuto

from .....order.fetch import OrderLineInfo
from ....interface import AddressData, RefundData
from ....utils import price_to_minor_unit
from .. import api_helpers, errors
from ..api_helpers import get_goods, get_goods_with_refunds


def test_register_no_billing_address(config, np_payment_data):
    # given
    np_payment_data.billing = None

    # when
    np_response = api_helpers.register(config, np_payment_data)

    # then
    assert not np_response.result
    assert np_response.error_codes == [f"{errors.NO_BILLING_ADDRESS}"]


def test_register_no_shipping_address(config, np_payment_data):
    # given
    np_payment_data.shipping = None

    # when
    np_response = api_helpers.register(config, np_payment_data)

    # then
    assert not np_response.result
    assert np_response.error_codes == [f"{errors.NO_SHIPPING_ADDRESS}"]


INVALID = sentinel.INVALID


def format_address_side_effect(config, address):
    return None if address is INVALID else DEFAULT


@patch(
    "saleor.payment.gateways.np_atobarai.api_helpers.format_address",
    new=Mock(side_effect=format_address_side_effect),
)
@patch("saleor.payment.gateways.np_atobarai.api_helpers._request", new=Mock())
def test_register_invalid_billing_address(config, np_payment_data):
    # given
    np_payment_data.billing = INVALID

    # when
    np_response = api_helpers.register(config, np_payment_data)

    # then
    assert not np_response.result
    assert np_response.error_codes == [f"{errors.BILLING_ADDRESS_INVALID}"]


@patch(
    "saleor.payment.gateways.np_atobarai.api_helpers.format_address",
    new=Mock(side_effect=format_address_side_effect),
)
@patch("saleor.payment.gateways.np_atobarai.api_helpers._request", new=Mock())
def test_register_invalid_shipping_address(config, np_payment_data):
    # given
    np_payment_data.shipping = INVALID

    # when
    np_response = api_helpers.register(config, np_payment_data)

    # then
    assert not np_response.result
    assert np_response.error_codes == [f"{errors.SHIPPING_ADDRESS_INVALID}"]


def test_format_name(np_address_data):
    # given
    double_byte_space = "\u3000"

    # when
    formatted_name = api_helpers.format_name(np_address_data)

    # then
    assert formatted_name == (
        f"{np_address_data.last_name}"
        f"{double_byte_space}"
        f"{np_address_data.first_name}"
    )


def test_format_address_do_not_fill(config, np_address_data):
    # given
    config.fill_missing_address = False

    # when
    formatted_address = api_helpers.format_address(config, np_address_data)

    # then
    assert formatted_address == (
        f"{np_address_data.country_area}"
        f"{np_address_data.street_address_1}"
        f"{np_address_data.street_address_2}"
    )


def test_format_address_fill(config, np_address_data):
    # when
    formatted_address = api_helpers.format_address(config, np_address_data)

    # then
    pp = Posuto()
    japanese_address = pp.get(np_address_data.postal_code)
    assert formatted_address == (
        f"{np_address_data.country_area}"
        f"{japanese_address.city}"
        f"{japanese_address.neighborhood}"
        f"{np_address_data.street_address_1}"
        f"{np_address_data.street_address_2}"
    )


def test_format_address_fill_invalid_postal_code(config, np_address_data):
    # given
    np_address_data.postal_code = ""

    # when
    formatted_address = api_helpers.format_address(config, np_address_data)

    # then
    assert formatted_address is None


def test_format_address_proper_formatting(config):
    # given
    config.fill_missing_address = False
    address_data = AddressData(**{f.name: f.name for f in fields(AddressData)})

    # when
    formatted_address = api_helpers.format_address(config, address_data)

    # then
    assert formatted_address == (
        f"{address_data.country_area}"
        f"{address_data.street_address_1}"
        f"{address_data.street_address_2}"
    )


@pytest.mark.parametrize("sku_as_name", [True, False])
def test_get_goods(
    config,
    np_payment_data,
    sku_as_name,
):
    # given
    config.sku_as_name = sku_as_name

    # when
    goods = get_goods(config, np_payment_data)

    # then
    assert goods == [
        {
            "goods_name": line.product_sku if sku_as_name else line.product_name,
            "goods_price": int(
                price_to_minor_unit(line.amount, np_payment_data.currency)
            ),
            "quantity": line.quantity,
        }
        for line in np_payment_data.lines_data.lines
    ] + [
        {
            "goods_name": "Shipping",
            "goods_price": int(
                price_to_minor_unit(
                    np_payment_data.lines_data.shipping_amount, np_payment_data.currency
                )
            ),
            "quantity": 1,
        },
    ]


@pytest.mark.parametrize(
    "refund_amount, discount_goods",
    [
        (Decimal("0.00"), []),
        (
            Decimal("5.00"),
            [{"goods_name": "Discount", "goods_price": -500, "quantity": 1}],
        ),
    ],
)
@pytest.mark.parametrize("sku_as_name", [True, False])
def test_get_goods_with_refunds(
    config,
    payment_dummy,
    np_payment_data,
    sku_as_name,
    refund_amount,
    discount_goods,
):
    # given
    config.sku_as_name = sku_as_name
    np_payment_data.amount = refund_amount
    np_payment_data.refund_data = RefundData(
        refund_amount_is_automatically_calculated=False
    )

    # when
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )

    # then
    assert (
        goods
        == [
            {
                "goods_name": line.product_sku if sku_as_name else line.product_name,
                "goods_price": int(
                    price_to_minor_unit(line.amount, np_payment_data.currency)
                ),
                "quantity": line.quantity,
            }
            for line in np_payment_data.lines_data.lines
        ]
        + [
            {
                "goods_name": "Shipping",
                "goods_price": int(
                    price_to_minor_unit(
                        np_payment_data.lines_data.shipping_amount,
                        np_payment_data.currency,
                    )
                ),
                "quantity": 1,
            },
        ]
        + discount_goods
    )
    manual_refund_amount = refund_amount or Decimal("0.00")
    assert (
        billed_amount
        == sum(line.amount * line.quantity for line in np_payment_data.lines_data.lines)
        + np_payment_data.lines_data.voucher_amount
        + np_payment_data.lines_data.shipping_amount
        - manual_refund_amount
    )


@pytest.fixture
def order_lines(order_with_lines):
    return list(order_with_lines.lines.all())


def test_get_goods_with_refunds_manual_product_refund_product_refund(
    create_refund, order_with_lines, config, np_payment_data, payment_dummy, order_lines
):
    # given
    line_to_refund = order_lines[0]
    create_refund(
        order_with_lines,
        order_lines=[
            OrderLineInfo(
                line=line_to_refund, quantity=1, variant=line_to_refund.variant
            )
        ],
        manual_refund_amount=Decimal("3.00"),
    )

    # when
    np_payment_data.refund_data = RefundData(
        order_lines_to_refund=[
            OrderLineInfo(
                line=line_to_refund, quantity=1, variant=line_to_refund.variant
            ),
        ]
    )
    np_payment_data.amount = line_to_refund.unit_price_gross_amount
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )

    # then
    expected_billed_amount = order_with_lines.total_gross_amount - (
        Decimal("3.00") + line_to_refund.unit_price_gross_amount
    )
    assert billed_amount == expected_billed_amount
    assert goods[0]["quantity"] == line_to_refund.quantity - 1
    for goods_line, order_line in zip(goods[1:], order_lines[1:]):
        assert goods_line["quantity"] == order_line.quantity


def test_get_goods_with_refunds_product_refund_shipping_refund(
    create_refund, order_with_lines, config, np_payment_data, payment_dummy, order_lines
):
    # given
    line_to_refund = order_lines[0]
    create_refund(
        order_with_lines,
        order_lines=[
            OrderLineInfo(
                line=line_to_refund, quantity=1, variant=line_to_refund.variant
            )
        ],
    )

    # when
    np_payment_data.refund_data = RefundData(refund_shipping_costs=True)
    np_payment_data.amount = order_with_lines.shipping_price_gross_amount
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )

    # then
    expected_billed_amount = order_with_lines.total_gross_amount - (
        line_to_refund.unit_price_gross_amount
        + order_with_lines.shipping_price_gross_amount
    )
    assert billed_amount == expected_billed_amount
    assert goods[0]["quantity"] == line_to_refund.quantity - 1
    for goods_line, order_line in zip(goods[1:], order_lines[1:]):
        assert goods_line["quantity"] == order_line.quantity


def test_get_goods_with_refunds_manual_shipping_misc_refund(
    create_refund, order_with_lines, config, np_payment_data, payment_dummy, order_lines
):
    # given
    create_refund(
        order_with_lines,
        refund_shipping_costs=True,
        manual_refund_amount=Decimal("5.30"),
    )

    # when
    np_payment_data.refund_data = RefundData(refund_shipping_costs=True)
    np_payment_data.amount = Decimal("4.30")
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )

    # then
    expected_billed_amount = order_with_lines.total_gross_amount - (
        Decimal("5.30") + Decimal("4.30")
    )
    assert billed_amount == expected_billed_amount
    for goods_line, order_line in zip(goods, order_lines):
        assert goods_line["quantity"] == order_line.quantity


def test_get_goods_with_refunds_shipping_refund_manual_product_refund(
    create_refund, order_with_lines, config, np_payment_data, payment_dummy, order_lines
):
    # given
    create_refund(
        order_with_lines,
        refund_shipping_costs=True,
    )

    # when
    line_to_refund = order_lines[0]
    np_payment_data.refund_data = RefundData(
        order_lines_to_refund=[
            OrderLineInfo(
                line=line_to_refund,
                quantity=1,
                variant=line_to_refund.variant,
            )
        ],
        refund_amount_is_automatically_calculated=False,
    )
    np_payment_data.amount = Decimal("8.20")
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )

    # then
    expected_billed_amount = order_with_lines.total_gross_amount - (
        order_with_lines.shipping_price_gross_amount + Decimal("8.20")
    )
    assert billed_amount == expected_billed_amount
    for goods_line, order_line in zip(goods, order_lines):
        assert goods_line["quantity"] == order_line.quantity
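The goods_price assertions above rely on price_to_minor_unit turning a Decimal amount into the currency's minor units (which is why a 5.00 discount shows up as -500 in a two-decimal currency). A minimal sketch of that conversion, assuming a plain decimal-exponent argument rather than saleor's actual currency lookup:

from decimal import Decimal

def to_minor_units(amount: Decimal, exponent: int) -> int:
    # Shift the decimal point right by `exponent` places: 5.00 -> 500 for a
    # two-decimal currency such as USD; zero-decimal currencies pass through.
    return int(amount.scaleb(exponent).to_integral_value())

assert to_minor_units(Decimal("5.00"), 2) == 500
assert to_minor_units(Decimal("500"), 0) == 500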
28.714286
88
0.676173
from dataclasses import fields
from decimal import Decimal
from unittest.mock import DEFAULT, Mock, patch, sentinel

import pytest
from posuto import Posuto

from .....order.fetch import OrderLineInfo
from ....interface import AddressData, RefundData
from ....utils import price_to_minor_unit
from .. import api_helpers, errors
from ..api_helpers import get_goods, get_goods_with_refunds


def test_register_no_billing_address(config, np_payment_data):
    np_payment_data.billing = None
    np_response = api_helpers.register(config, np_payment_data)
    assert not np_response.result
    assert np_response.error_codes == [f"{errors.NO_BILLING_ADDRESS}"]


def test_register_no_shipping_address(config, np_payment_data):
    np_payment_data.shipping = None
    np_response = api_helpers.register(config, np_payment_data)
    assert not np_response.result
    assert np_response.error_codes == [f"{errors.NO_SHIPPING_ADDRESS}"]


INVALID = sentinel.INVALID


def format_address_side_effect(config, address):
    return None if address is INVALID else DEFAULT


@patch(
    "saleor.payment.gateways.np_atobarai.api_helpers.format_address",
    new=Mock(side_effect=format_address_side_effect),
)
@patch("saleor.payment.gateways.np_atobarai.api_helpers._request", new=Mock())
def test_register_invalid_billing_address(config, np_payment_data):
    np_payment_data.billing = INVALID
    np_response = api_helpers.register(config, np_payment_data)
    assert not np_response.result
    assert np_response.error_codes == [f"{errors.BILLING_ADDRESS_INVALID}"]


@patch(
    "saleor.payment.gateways.np_atobarai.api_helpers.format_address",
    new=Mock(side_effect=format_address_side_effect),
)
@patch("saleor.payment.gateways.np_atobarai.api_helpers._request", new=Mock())
def test_register_invalid_shipping_address(config, np_payment_data):
    np_payment_data.shipping = INVALID
    np_response = api_helpers.register(config, np_payment_data)
    assert not np_response.result
    assert np_response.error_codes == [f"{errors.SHIPPING_ADDRESS_INVALID}"]


def test_format_name(np_address_data):
    double_byte_space = "\u3000"
    formatted_name = api_helpers.format_name(np_address_data)
    assert formatted_name == (
        f"{np_address_data.last_name}"
        f"{double_byte_space}"
        f"{np_address_data.first_name}"
    )


def test_format_address_do_not_fill(config, np_address_data):
    config.fill_missing_address = False
    formatted_address = api_helpers.format_address(config, np_address_data)
    assert formatted_address == (
        f"{np_address_data.country_area}"
        f"{np_address_data.street_address_1}"
        f"{np_address_data.street_address_2}"
    )


def test_format_address_fill(config, np_address_data):
    formatted_address = api_helpers.format_address(config, np_address_data)
    pp = Posuto()
    japanese_address = pp.get(np_address_data.postal_code)
    assert formatted_address == (
        f"{np_address_data.country_area}"
        f"{japanese_address.city}"
        f"{japanese_address.neighborhood}"
        f"{np_address_data.street_address_1}"
        f"{np_address_data.street_address_2}"
    )


def test_format_address_fill_invalid_postal_code(config, np_address_data):
    np_address_data.postal_code = ""
    formatted_address = api_helpers.format_address(config, np_address_data)
    assert formatted_address is None


def test_format_address_proper_formatting(config):
    config.fill_missing_address = False
    address_data = AddressData(**{f.name: f.name for f in fields(AddressData)})
    formatted_address = api_helpers.format_address(config, address_data)
    assert formatted_address == (
        f"{address_data.country_area}"
        f"{address_data.street_address_1}"
        f"{address_data.street_address_2}"
    )


@pytest.mark.parametrize("sku_as_name", [True, False])
def test_get_goods(
    config,
    np_payment_data,
    sku_as_name,
):
    config.sku_as_name = sku_as_name
    goods = get_goods(config, np_payment_data)
    assert goods == [
        {
            "goods_name": line.product_sku if sku_as_name else line.product_name,
            "goods_price": int(
                price_to_minor_unit(line.amount, np_payment_data.currency)
            ),
            "quantity": line.quantity,
        }
        for line in np_payment_data.lines_data.lines
    ] + [
        {
            "goods_name": "Shipping",
            "goods_price": int(
                price_to_minor_unit(
                    np_payment_data.lines_data.shipping_amount, np_payment_data.currency
                )
            ),
            "quantity": 1,
        },
    ]


@pytest.mark.parametrize(
    "refund_amount, discount_goods",
    [
        (Decimal("0.00"), []),
        (
            Decimal("5.00"),
            [{"goods_name": "Discount", "goods_price": -500, "quantity": 1}],
        ),
    ],
)
@pytest.mark.parametrize("sku_as_name", [True, False])
def test_get_goods_with_refunds(
    config,
    payment_dummy,
    np_payment_data,
    sku_as_name,
    refund_amount,
    discount_goods,
):
    config.sku_as_name = sku_as_name
    np_payment_data.amount = refund_amount
    np_payment_data.refund_data = RefundData(
        refund_amount_is_automatically_calculated=False
    )
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )
    assert (
        goods
        == [
            {
                "goods_name": line.product_sku if sku_as_name else line.product_name,
                "goods_price": int(
                    price_to_minor_unit(line.amount, np_payment_data.currency)
                ),
                "quantity": line.quantity,
            }
            for line in np_payment_data.lines_data.lines
        ]
        + [
            {
                "goods_name": "Shipping",
                "goods_price": int(
                    price_to_minor_unit(
                        np_payment_data.lines_data.shipping_amount,
                        np_payment_data.currency,
                    )
                ),
                "quantity": 1,
            },
        ]
        + discount_goods
    )
    manual_refund_amount = refund_amount or Decimal("0.00")
    assert (
        billed_amount
        == sum(line.amount * line.quantity for line in np_payment_data.lines_data.lines)
        + np_payment_data.lines_data.voucher_amount
        + np_payment_data.lines_data.shipping_amount
        - manual_refund_amount
    )


@pytest.fixture
def order_lines(order_with_lines):
    return list(order_with_lines.lines.all())


def test_get_goods_with_refunds_manual_product_refund_product_refund(
    create_refund, order_with_lines, config, np_payment_data, payment_dummy, order_lines
):
    line_to_refund = order_lines[0]
    create_refund(
        order_with_lines,
        order_lines=[
            OrderLineInfo(
                line=line_to_refund, quantity=1, variant=line_to_refund.variant
            )
        ],
        manual_refund_amount=Decimal("3.00"),
    )
    np_payment_data.refund_data = RefundData(
        order_lines_to_refund=[
            OrderLineInfo(
                line=line_to_refund, quantity=1, variant=line_to_refund.variant
            ),
        ]
    )
    np_payment_data.amount = line_to_refund.unit_price_gross_amount
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )
    expected_billed_amount = order_with_lines.total_gross_amount - (
        Decimal("3.00") + line_to_refund.unit_price_gross_amount
    )
    assert billed_amount == expected_billed_amount
    assert goods[0]["quantity"] == line_to_refund.quantity - 1
    for goods_line, order_line in zip(goods[1:], order_lines[1:]):
        assert goods_line["quantity"] == order_line.quantity


def test_get_goods_with_refunds_product_refund_shipping_refund(
    create_refund, order_with_lines, config, np_payment_data, payment_dummy, order_lines
):
    line_to_refund = order_lines[0]
    create_refund(
        order_with_lines,
        order_lines=[
            OrderLineInfo(
                line=line_to_refund, quantity=1, variant=line_to_refund.variant
            )
        ],
    )
    np_payment_data.refund_data = RefundData(refund_shipping_costs=True)
    np_payment_data.amount = order_with_lines.shipping_price_gross_amount
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )
    expected_billed_amount = order_with_lines.total_gross_amount - (
        line_to_refund.unit_price_gross_amount
        + order_with_lines.shipping_price_gross_amount
    )
    assert billed_amount == expected_billed_amount
    assert goods[0]["quantity"] == line_to_refund.quantity - 1
    for goods_line, order_line in zip(goods[1:], order_lines[1:]):
        assert goods_line["quantity"] == order_line.quantity


def test_get_goods_with_refunds_manual_shipping_misc_refund(
    create_refund, order_with_lines, config, np_payment_data, payment_dummy, order_lines
):
    create_refund(
        order_with_lines,
        refund_shipping_costs=True,
        manual_refund_amount=Decimal("5.30"),
    )
    np_payment_data.refund_data = RefundData(refund_shipping_costs=True)
    np_payment_data.amount = Decimal("4.30")
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )
    expected_billed_amount = order_with_lines.total_gross_amount - (
        Decimal("5.30") + Decimal("4.30")
    )
    assert billed_amount == expected_billed_amount
    for goods_line, order_line in zip(goods, order_lines):
        assert goods_line["quantity"] == order_line.quantity


def test_get_goods_with_refunds_shipping_refund_manual_product_refund(
    create_refund, order_with_lines, config, np_payment_data, payment_dummy, order_lines
):
    create_refund(
        order_with_lines,
        refund_shipping_costs=True,
    )
    line_to_refund = order_lines[0]
    np_payment_data.refund_data = RefundData(
        order_lines_to_refund=[
            OrderLineInfo(
                line=line_to_refund,
                quantity=1,
                variant=line_to_refund.variant,
            )
        ],
        refund_amount_is_automatically_calculated=False,
    )
    np_payment_data.amount = Decimal("8.20")
    goods, billed_amount = get_goods_with_refunds(
        config, payment_dummy, np_payment_data
    )
    expected_billed_amount = order_with_lines.total_gross_amount - (
        order_with_lines.shipping_price_gross_amount + Decimal("8.20")
    )
    assert billed_amount == expected_billed_amount
    for goods_line, order_line in zip(goods, order_lines):
        assert goods_line["quantity"] == order_line.quantity
true
true
1c43741fca9fb5451f7dc34b89f19edff48f1a2f
3,072
py
Python
kfold.py
bhadreshpsavani/TAPER-EHR
ab938749756fcaaef52a7002a074421f483e3562
[ "MIT" ]
12
2020-04-10T02:24:20.000Z
2021-11-09T22:52:24.000Z
kfold.py
bhadreshpsavani/TAPER-EHR
ab938749756fcaaef52a7002a074421f483e3562
[ "MIT" ]
7
2020-05-03T10:03:29.000Z
2022-02-09T23:38:21.000Z
kfold.py
bhadreshpsavani/TAPER-EHR
ab938749756fcaaef52a7002a074421f483e3562
[ "MIT" ]
10
2020-06-14T09:37:35.000Z
2022-02-04T22:21:16.000Z
import numpy as np
import pandas as pd
import os
import pickle
import argparse
from sklearn.model_selection import KFold
from utils.data_utils import *
from collections import defaultdict


def filt_code(data, code_type, min_=5):
    """ Filter code sets based on frequency count

    Args:
        min_: (int) minimum number of occurrences required to keep a code in the final dict
        data: patient dict mapping ids to lists of admissions
    """
    codes = defaultdict(lambda: 0)
    for k, v in data.items():
        for vv in v:
            for cc in set(vv[code_type]):
                codes[cc] += 1

    keys = set(codes.keys())
    for k in keys:
        if (codes[k] < min_):
            del codes[k]
    return codes


def ret_filtered_code(codes, filt):
    return set([codes[i] for i in range(len(codes)) if codes[i] in filt])


if __name__ == '__main__':
    """Generates Kfold splits based on patient ids."""
    parser = argparse.ArgumentParser(description='Process Mimic-iii CSV Files')
    parser.add_argument('-p', '--path', default=None, type=str, help='path to mimic-iii csvs')
    parser.add_argument('-s', '--save', default=None, type=str, help='path to dump output')
    parser.add_argument('-seed', '--seed', default=1, type=int, help='numpy seed used to create datasplit')
    parser.add_argument('-k', '--kfold', default=7, type=int, help='kfold split')
    parser.add_argument('-filter_codes', action='store_true', help='filter codes based on frequency count')
    parser.add_argument('-min_adm', type=int, help='min number of admissions filter; -filter_codes must be specified')

    args = parser.parse_args()
    np.random.seed(args.seed)

    data = pickle.load(open(args.path, 'rb'))
    data_info = data['info']
    data_data = data['data']

    if (args.filter_codes):
        proc_codes = filt_code(data_data, 'procedures')
        diag_codes = filt_code(data_data, 'diagnoses')
        med_codes = filt_code(data_data, 'medications')

        # Iterate over a snapshot of the items: deleting keys while iterating
        # over data_data.items() directly (as the original code did) raises a
        # RuntimeError on Python 3.
        for k, v in list(data_data.items()):
            if (len(v) < args.min_adm):
                del data_data[k]
                continue
            for i in range(len(v)):
                v[i]['procedures'] = list(ret_filtered_code(v[i]['procedures'], proc_codes))
                v[i]['diagnoses'] = list(ret_filtered_code(v[i]['diagnoses'], diag_codes))
                v[i]['medications'] = list(ret_filtered_code(v[i]['medications'], med_codes))

        data_temp = {}
        data_temp['info'] = data_info
        data_temp['data'] = data_data
        try:
            # Note: `data` already points at the mutated data_data, so the
            # dumped payload reflects the filtering above.
            with open(os.path.abspath(os.path.join(args.save, '..', 'data_filtered.pkl')), 'wb') as handle:
                pickle.dump(data, handle)
        except:
            import pdb; pdb.set_trace()

    pids = np.asarray(list(data_data.keys()))
    np.random.shuffle(pids)
    kf = KFold(args.kfold, random_state=None, shuffle=False)

    if (not os.path.isdir(args.save)):
        os.makedirs(args.save)

    for idx, ids in enumerate(kf.split(pids)):
        ids = (pids[ids[0]], pids[ids[1]])
        pickle.dump(ids, open(os.path.join(args.save, 'split_{}.pkl'.format(idx)), 'wb'))
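A hypothetical downstream usage of the split files written above (the path and the data_data dict are assumptions; each pickle holds a (train_ids, test_ids) pair of numpy arrays):

import pickle

with open('splits/split_0.pkl', 'rb') as handle:
    train_ids, test_ids = pickle.load(handle)
# Partition the patient dictionary (loaded as in the script) by fold membership.
train_data = {pid: data_data[pid] for pid in train_ids}
test_data = {pid: data_data[pid] for pid in test_ids}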
36.571429
108
0.62207
import numpy as np
import pandas as pd
import os
import pickle
import argparse
from sklearn.model_selection import KFold
from utils.data_utils import *
from collections import defaultdict


def filt_code(data, code_type, min_=5):
    codes = defaultdict(lambda: 0)
    for k, v in data.items():
        for vv in v:
            for cc in set(vv[code_type]):
                codes[cc] += 1

    keys = set(codes.keys())
    for k in keys:
        if (codes[k] < min_):
            del codes[k]
    return codes


def ret_filtered_code(codes, filt):
    return set([codes[i] for i in range(len(codes)) if codes[i] in filt])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process Mimic-iii CSV Files')
    parser.add_argument('-p', '--path', default=None, type=str, help='path to mimic-iii csvs')
    parser.add_argument('-s', '--save', default=None, type=str, help='path to dump output')
    parser.add_argument('-seed', '--seed', default=1, type=int, help='numpy seed used to create datasplit')
    parser.add_argument('-k', '--kfold', default=7, type=int, help='kfold split')
    parser.add_argument('-filter_codes', action='store_true', help='filter codes based on frequency count')
    parser.add_argument('-min_adm', type=int, help='min number of admissions filter; -filter_codes must be specified')

    args = parser.parse_args()
    np.random.seed(args.seed)

    data = pickle.load(open(args.path, 'rb'))
    data_info = data['info']
    data_data = data['data']

    if (args.filter_codes):
        proc_codes = filt_code(data_data, 'procedures')
        diag_codes = filt_code(data_data, 'diagnoses')
        med_codes = filt_code(data_data, 'medications')

        for k, v in list(data_data.items()):
            if (len(v) < args.min_adm):
                del data_data[k]
                continue
            for i in range(len(v)):
                v[i]['procedures'] = list(ret_filtered_code(v[i]['procedures'], proc_codes))
                v[i]['diagnoses'] = list(ret_filtered_code(v[i]['diagnoses'], diag_codes))
                v[i]['medications'] = list(ret_filtered_code(v[i]['medications'], med_codes))

        data_temp = {}
        data_temp['info'] = data_info
        data_temp['data'] = data_data
        try:
            with open(os.path.abspath(os.path.join(args.save, '..', 'data_filtered.pkl')), 'wb') as handle:
                pickle.dump(data, handle)
        except:
            import pdb; pdb.set_trace()

    pids = np.asarray(list(data_data.keys()))
    np.random.shuffle(pids)
    kf = KFold(args.kfold, random_state=None, shuffle=False)

    if (not os.path.isdir(args.save)):
        os.makedirs(args.save)

    for idx, ids in enumerate(kf.split(pids)):
        ids = (pids[ids[0]], pids[ids[1]])
        pickle.dump(ids, open(os.path.join(args.save, 'split_{}.pkl'.format(idx)), 'wb'))
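Because the script seeds numpy, shuffles the patient ids once, and then applies KFold with shuffle=False, fold membership is reproducible for a fixed seed. A self-contained check of that property (a sketch, not taken from the repository):

import numpy as np
from sklearn.model_selection import KFold

def fold_ids(seed, n=20, k=5):
    # Reproduce the script's pattern: seeded shuffle, then deterministic KFold.
    np.random.seed(seed)
    pids = np.arange(n)
    np.random.shuffle(pids)
    return [pids[test_idx].tolist() for _, test_idx in KFold(k).split(pids)]

assert fold_ids(seed=1) == fold_ids(seed=1)  # same seed, identical folds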
true
true
1c437445200f13523ac4b91a040d4bb026052069
134,889
py
Python
h2o-py/h2o/frame.py
DEVESHTARASIA/h2o-3
9bd73fcedb4236b7ea8f214b36ca95f3e00d4548
[ "Apache-2.0" ]
null
null
null
h2o-py/h2o/frame.py
DEVESHTARASIA/h2o-3
9bd73fcedb4236b7ea8f214b36ca95f3e00d4548
[ "Apache-2.0" ]
null
null
null
h2o-py/h2o/frame.py
DEVESHTARASIA/h2o-3
9bd73fcedb4236b7ea8f214b36ca95f3e00d4548
[ "Apache-2.0" ]
null
null
null
# -*- encoding: utf-8 -*- """ H2O data frame. :copyright: (c) 2016 H2O.ai :license: Apache License Version 2.0 (see LICENSE for details) """ from __future__ import absolute_import, division, print_function, unicode_literals import csv import datetime import functools import os import sys import tempfile import traceback import warnings from io import StringIO from types import FunctionType import requests import h2o from h2o.display import H2ODisplay from h2o.exceptions import H2OTypeError, H2OValueError from h2o.expr import ExprNode from h2o.group_by import GroupBy from h2o.job import H2OJob from h2o.utils.compatibility import * # NOQA from h2o.utils.compatibility import viewitems, viewvalues from h2o.utils.config import get_config_value from h2o.utils.shared_utils import (_handle_numpy_array, _handle_pandas_data_frame, _handle_python_dicts, _handle_python_lists, _is_list, _is_str_list, _py_tmp_key, _quoted, can_use_pandas, quote, normalize_slice, slice_is_normalized, check_frame_id) from h2o.utils.typechecks import (assert_is_type, assert_satisfies, Enum, I, is_type, numeric, numpy_ndarray, numpy_datetime, pandas_dataframe, pandas_timestamp, scipy_sparse, U) __all__ = ("H2OFrame", ) class H2OFrame(object): """ Primary data store for H2O. H2OFrame is similar to pandas' ``DataFrame``, or R's ``data.frame``. One of the critical distinction is that the data is generally not held in memory, instead it is located on a (possibly remote) H2O cluster, and thus ``H2OFrame`` represents a mere handle to that data. """ #------------------------------------------------------------------------------------------------------------------- # Construction #------------------------------------------------------------------------------------------------------------------- def __init__(self, python_obj=None, destination_frame=None, header=0, separator=",", column_names=None, column_types=None, na_strings=None): """ Create a new H2OFrame object, possibly from some other object. :param python_obj: object that will be converted to an ``H2OFrame``. This could have multiple types: - None: create an empty H2OFrame - A list/tuple of strings or numbers: create a single-column H2OFrame containing the contents of this list. - A dictionary of ``{name: list}`` pairs: create an H2OFrame with multiple columns, each column having the provided ``name`` and contents from ``list``. If the source dictionary is not an OrderedDict, then the columns in the H2OFrame may appear shuffled. - A list of lists of strings/numbers: construct an H2OFrame from a rectangular table of values, with inner lists treated as rows of the table. I.e. ``H2OFrame([[1, 'a'], [2, 'b'], [3, 'c']])`` will create a frame with 3 rows and 2 columns, one numeric and one string. - A Pandas dataframe, or a Numpy ndarray: create a matching H2OFrame. - A Scipy sparse matrix: create a matching sparse H2OFrame. :param int header: if ``python_obj`` is a list of lists, this parameter can be used to indicate whether the first row of the data represents headers. The value of -1 means the first row is data, +1 means the first row is the headers, 0 (default) allows H2O to guess whether the first row contains data or headers. :param List[str] column_names: explicit list of column names for the new H2OFrame. This will override any column names derived from the data. If the python_obj does not contain explicit column names, and this parameter is not given, then the columns will be named "C1", "C2", "C3", etc. :param column_types: explicit column types for the new H2OFrame. 
This could be either a list of types for each column, or a dictionary of {column name: column type} pairs. In the latter case you may override types for only few columns, and let H2O choose the types of the rest. :param na_strings: List of strings in the input data that should be interpreted as missing values. This could be given on a per-column basis, either as a list-of-lists, or as a dictionary {column name: list of nas}. :param str destination_frame: (internal) name of the target DKV key in the H2O backend. :param str separator: (deprecated) """ coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric", "categorical", "factor", "enum", "time") assert_is_type(python_obj, None, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse) assert_is_type(destination_frame, None, str) assert_is_type(header, -1, 0, 1) assert_is_type(separator, I(str, lambda s: len(s) == 1)) assert_is_type(column_names, None, [str]) assert_is_type(column_types, None, [coltype], {str: coltype}) assert_is_type(na_strings, None, [str], [[str]], {str: [str]}) check_frame_id(destination_frame) self._ex = ExprNode() self._ex._children = None self._is_frame = True # Indicate that this is an actual frame, allowing typechecks to be made if python_obj is not None: self._upload_python_object(python_obj, destination_frame, header, separator, column_names, column_types, na_strings) @staticmethod def _expr(expr, cache=None): # TODO: merge this method with `__init__` fr = H2OFrame() fr._ex = expr if cache is not None: fr._ex._cache.fill_from(cache) return fr def _upload_python_object(self, python_obj, destination_frame=None, header=0, separator=",", column_names=None, column_types=None, na_strings=None): assert_is_type(python_obj, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse) if is_type(python_obj, scipy_sparse): self._upload_sparse_matrix(python_obj, destination_frame=destination_frame) return # TODO: all these _handlers should really belong to this class, not to shared_utils. 
processor = (_handle_pandas_data_frame if is_type(python_obj, pandas_dataframe) else _handle_numpy_array if is_type(python_obj, numpy_ndarray) else _handle_python_dicts if is_type(python_obj, dict) else _handle_python_lists) col_header, data_to_write = processor(python_obj, header) if col_header is None or data_to_write is None: raise H2OValueError("No data to write") if not column_names: column_names = col_header # create a temporary file that will be written to tmp_handle, tmp_path = tempfile.mkstemp(suffix=".csv") tmp_file = os.fdopen(tmp_handle, 'w') # create a new csv writer object thingy csv_writer = csv.writer(tmp_file, dialect="excel", quoting=csv.QUOTE_NONNUMERIC) csv_writer.writerow(column_names) if data_to_write and isinstance(data_to_write[0], dict): for row in data_to_write: csv_writer.writerow([row.get(k, None) for k in col_header]) else: csv_writer.writerows(data_to_write) tmp_file.close() # close the streams self._upload_parse(tmp_path, destination_frame, 1, separator, column_names, column_types, na_strings) os.remove(tmp_path) # delete the tmp file def _upload_sparse_matrix(self, matrix, destination_frame=None): import scipy.sparse as sp if not sp.issparse(matrix): raise H2OValueError("A sparse matrix expected, got %s" % type(matrix)) tmp_handle, tmp_path = tempfile.mkstemp(suffix=".svmlight") out = os.fdopen(tmp_handle, "wt") if destination_frame is None: destination_frame = _py_tmp_key(h2o.connection().session_id) # sp.find(matrix) returns (row indices, column indices, values) of the non-zero elements of A. Unfortunately # there is no guarantee that those elements are returned in the correct order, so need to sort data = zip(*sp.find(matrix)) if not isinstance(data, list): data = list(data) # possibly convert from iterator to a list data.sort() idata = 0 # index of the next element to be consumed from `data` for irow in range(matrix.shape[0]): if idata < len(data) and data[idata][0] == irow and data[idata][1] == 0: y = data[idata][2] idata += 1 else: y = 0 out.write(str(y)) while idata < len(data) and data[idata][0] == irow: out.write(" ") out.write(str(data[idata][1])) out.write(":") out.write(str(data[idata][2])) idata += 1 out.write("\n") out.close() ret = h2o.api("POST /3/PostFile", filename=tmp_path) os.remove(tmp_path) rawkey = ret["destination_frame"] p = {"source_frames": [rawkey], "destination_frame": destination_frame} H2OJob(h2o.api("POST /3/ParseSVMLight", data=p), "Parse").poll() self._ex._cache._id = destination_frame self._ex._cache.fill() @staticmethod def get_frame(frame_id): """ Retrieve an existing H2OFrame from the H2O cluster using the frame's id. :param str frame_id: id of the frame to retrieve :returns: an existing H2OFrame with the id provided; or None if such frame doesn't exist. 
""" fr = H2OFrame() fr._ex._cache._id = frame_id try: fr._ex._cache.fill() except EnvironmentError: return None return fr def refresh(self): """Reload frame information from the backend H2O server.""" self._ex._cache.flush() self._frame(fill_cache=True) #------------------------------------------------------------------------------------------------------------------- # Frame properties #------------------------------------------------------------------------------------------------------------------- @property def names(self): """The list of column names (List[str]).""" if not self._ex._cache.names_valid(): self._ex._cache.flush() self._frame(fill_cache=True) return list(self._ex._cache.names) @names.setter def names(self, value): self.set_names(value) @property def nrows(self): """Number of rows in the dataframe (int).""" if not self._ex._cache.nrows_valid(): self._ex._cache.flush() self._frame(fill_cache=True) return self._ex._cache.nrows @property def ncols(self): """Number of columns in the dataframe (int).""" if not self._ex._cache.ncols_valid(): self._ex._cache.flush() self._frame(fill_cache=True) return self._ex._cache.ncols @property def shape(self): """Number of rows and columns in the dataframe as a tuple ``(nrows, ncols)``.""" return self.nrows, self.ncols @property def types(self): """The dictionary of column name/type pairs.""" if not self._ex._cache.types_valid(): self._ex._cache.flush() self._frame(fill_cache=True) return dict(self._ex._cache.types) @property def frame_id(self): """Internal id of the frame (str).""" return self._frame()._ex._cache._id @frame_id.setter def frame_id(self, newid): check_frame_id(newid) if self._ex._cache._id is None: h2o.assign(self, newid) else: oldname = self.frame_id self._ex._cache._id = newid h2o.rapids("(rename \"{}\" \"{}\")".format(oldname, newid)) def type(self, col): """ The type for the given column. :param col: either a name, or an index of the column to look up :returns: type of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``. :raises H2OValueError: if such column does not exist in the frame. 
""" assert_is_type(col, int, str) if not self._ex._cache.types_valid() or not self._ex._cache.names_valid(): self._ex._cache.flush() self._frame(fill_cache=True) types = self._ex._cache.types if is_type(col, str): if col in types: return types[col] else: names = self._ex._cache.names if -len(names) <= col < len(names): return types[names[col]] raise H2OValueError("Column '%r' does not exist in the frame" % col) def _import_parse(self, path, pattern, destination_frame, header, separator, column_names, column_types, na_strings): if is_type(path, str) and "://" not in path: path = os.path.abspath(path) rawkey = h2o.lazy_import(path, pattern) self._parse(rawkey, destination_frame, header, separator, column_names, column_types, na_strings) return self def _upload_parse(self, path, destination_frame, header, sep, column_names, column_types, na_strings): ret = h2o.api("POST /3/PostFile", filename=path) rawkey = ret["destination_frame"] self._parse(rawkey, destination_frame, header, sep, column_names, column_types, na_strings) return self def _parse(self, rawkey, destination_frame="", header=None, separator=None, column_names=None, column_types=None, na_strings=None): setup = h2o.parse_setup(rawkey, destination_frame, header, separator, column_names, column_types, na_strings) return self._parse_raw(setup) def _parse_raw(self, setup): # Parse parameters (None values provided by setup) p = {"destination_frame": None, "parse_type": None, "separator": None, "single_quotes": None, "check_header": None, "number_columns": None, "chunk_size": None, "delete_on_done": True, "blocking": False, "column_types": None, } if setup["column_names"]: p["column_names"] = None if setup["na_strings"]: p["na_strings"] = None p.update({k: v for k, v in viewitems(setup) if k in p}) # Extract only 'name' from each src in the array of srcs p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']] H2OJob(h2o.api("POST /3/Parse", data=p), "Parse").poll() # Need to return a Frame here for nearly all callers # ... but job stats returns only a dest_key, requiring another REST call to get nrow/ncol self._ex._cache._id = p["destination_frame"] self._ex._cache.fill() def filter_na_cols(self, frac=0.2): """ Filter columns with proportion of NAs greater or equals than ``frac``. :param float frac: Maximum fraction of NAs in the column to keep. :returns: A list of indices of columns that have fewer NAs than ``frac``. If all columns are filtered, None is returned. """ return ExprNode("filterNACols", self, frac)._eager_scalar() def columns_by_type(self, coltype="numeric"): """ Extract columns of the specified type from the frame. :param str coltype: A character string indicating which column type to filter by. This must be one of the following: - ``"numeric"`` - Numeric, but not categorical or time - ``"categorical"`` - Integer, with a categorical/factor String mapping - ``"string"`` - String column - ``"time"`` - Long msec since the Unix Epoch - with a variety of display/parse options - ``"uuid"`` - UUID - ``"bad"`` - No none-NA rows (triple negative! all NAs or zero rows) :returns: list of indices of columns that have the requested type """ assert_is_type(coltype, "numeric", "categorical", "string", "time", "uuid", "bad") assert_is_type(self, H2OFrame) return ExprNode("columnsByType", self, coltype)._eager_scalar() def __iter__(self): return (self[i] for i in range(self.ncol)) def __unicode__(self): if sys.gettrace() is None: if self._ex is None: return "This H2OFrame has been removed." 
table = self._frame(fill_cache=True)._ex._cache._tabulate("simple", False) nrows = "%d %s" % (self.nrow, "row" if self.nrow == 1 else "rows") ncols = "%d %s" % (self.ncol, "column" if self.ncol == 1 else "columns") return "%s\n\n[%s x %s]" % (table, nrows, ncols) return "" def __repr__(self): if sys.gettrace() is None: # PUBDEV-2278: using <method>? from IPython caused everything to dump stk = traceback.extract_stack() if not ("IPython" in stk[-2][0] and "info" == stk[-2][2]): self.show() return "" def show(self, use_pandas=False): """ Used by the H2OFrame.__repr__ method to print or display a snippet of the data frame. If called from IPython, displays an html'ized result. Else prints a tabulate'd result. """ if self._ex is None: print("This H2OFrame has been removed.") return if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill() if H2ODisplay._in_ipy(): import IPython.display if use_pandas and can_use_pandas(): IPython.display.display(self.head().as_data_frame(fill_cache=True)) else: IPython.display.display_html(self._ex._cache._tabulate("html", False), raw=True) else: if use_pandas and can_use_pandas(): print(self.head().as_data_frame(fill_cache=True)) else: s = self.__unicode__() stk = traceback.extract_stack() if "IPython" in stk[-3][0]: s = "\n%s" % s try: print(s) except UnicodeEncodeError: print(s.encode("ascii", "replace")) def summary(self, return_data=False): """ Display summary information about the frame. Summary includes min/mean/max/sigma and other rollup data. :param bool return_data: Return a dictionary of the summary output """ if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill() if not return_data: if H2ODisplay._in_ipy(): import IPython.display IPython.display.display_html(self._ex._cache._tabulate("html", True), raw=True) else: print(self._ex._cache._tabulate("simple", True)) else: return self._ex._cache._data def describe(self, chunk_summary=False): """ Generate an in-depth description of this H2OFrame. This will print to the console the dimensions of the frame; names/types/summary statistics for each column; and finally first ten rows of the frame. :param bool chunk_summary: Retrieve the chunk summary along with the distribution summary """ res = h2o.api("GET /3/Frames/%s" % self.frame_id, data={"row_count": 10})["frames"][0] self._ex._cache._fill_data(res) print("Rows:{}".format(self.nrow)) print("Cols:{}".format(self.ncol)) #The chunk & distribution summaries are not cached, so must be pulled if chunk_summary=True. if chunk_summary: res["chunk_summary"].show() res["distribution_summary"].show() print("\n") self.summary() def _frame(self, rows=10, fill_cache=False): self._ex._eager_frame() if fill_cache: self._ex._cache.fill(rows=rows) return self def head(self, rows=10, cols=200): """ Return the first ``rows`` and ``cols`` of the frame as a new H2OFrame. :param int rows: maximum number of rows to return :param int cols: maximum number of columns to return :returns: a new H2OFrame cut from the top left corner of the current frame, and having dimensions at most ``rows`` x ``cols``. """ assert_is_type(rows, int) assert_is_type(cols, int) nrows = min(self.nrows, rows) ncols = min(self.ncols, cols) newdt = self[:nrows, :ncols] return newdt._frame(rows=nrows, fill_cache=True) def tail(self, rows=10, cols=200): """ Return the last ``rows`` and ``cols`` of the frame as a new H2OFrame. 
:param int rows: maximum number of rows to return :param int cols: maximum number of columns to return :returns: a new H2OFrame cut from the bottom left corner of the current frame, and having dimensions at most ``rows`` x ``cols``. """ assert_is_type(rows, int) assert_is_type(cols, int) nrows = min(self.nrows, rows) ncols = min(self.ncols, cols) start_idx = self.nrows - nrows newdt = self[start_idx:start_idx + nrows, :ncols] return newdt._frame(rows=nrows, fill_cache=True) def logical_negation(self): """ Returns new H2OFrame equal to elementwise Logical NOT applied to the current frame. """ return H2OFrame._expr(expr=ExprNode("not", self), cache=self._ex._cache) def _unop(self, op, rtype="real"): if self._is_frame: for cname, ctype in self.types.items(): if ctype not in {"int", "real", "bool"}: raise H2OValueError("Function %s cannot be applied to %s column '%s'" % (op, ctype, cname)) ret = H2OFrame._expr(expr=ExprNode(op, self), cache=self._ex._cache) ret._ex._cache._names = ["%s(%s)" % (op, name) for name in self._ex._cache._names] ret._ex._cache._types = {name: rtype for name in ret._ex._cache._names} return ret # Binary operations def __add__(self, rhs): return _binop(self, "+", rhs) def __sub__(self, rhs): return _binop(self, "-", rhs) def __mul__(self, rhs): return _binop(self, "*", rhs) def __div__(self, rhs): return _binop(self, "/", rhs) def __truediv__(self, rhs): return _binop(self, "/", rhs) def __floordiv__(self, rhs): return _binop(self, "intDiv", rhs) def __mod__(self, rhs): return _binop(self, "%", rhs) def __or__(self, rhs): return _binop(self, "|", rhs, rtype="bool") def __and__(self, rhs): return _binop(self, "&", rhs, rtype="bool") def __ge__(self, rhs): return _binop(self, ">=", rhs, rtype="bool") def __gt__(self, rhs): return _binop(self, ">", rhs, rtype="bool") def __le__(self, rhs): return _binop(self, "<=", rhs, rtype="bool") def __lt__(self, rhs): return _binop(self, "<", rhs, rtype="bool") def __eq__(self, rhs): if rhs is None: rhs = float("nan") return _binop(self, "==", rhs, rtype="bool") def __ne__(self, rhs): if rhs is None: rhs = float("nan") return _binop(self, "!=", rhs, rtype="bool") def __pow__(self, rhs): return _binop(self, "^", rhs) def __contains__(self, lhs): return all((t == self).any() for t in lhs) if _is_list(lhs) else (lhs == self).any() # rops def __rmod__(self, lhs): return _binop(lhs, "%", self) def __radd__(self, lhs): return _binop(lhs, "+", self) def __rsub__(self, lhs): return _binop(lhs, "-", self) def __rand__(self, lhs): return _binop(lhs, "&", self, rtype="bool") def __ror__(self, lhs): return _binop(lhs, "|", self, rtype="bool") def __rtruediv__(self, lhs): return _binop(lhs, "/", self) def __rdiv__(self, lhs): return _binop(lhs, "/", self) def __rfloordiv__(self, lhs): return _binop(lhs, "intDiv", self, rtype="int") def __rmul__(self, lhs): return _binop(lhs, "*", self) def __rpow__(self, lhs): return _binop(lhs, "^", self) # unops def __abs__(self): return self._unop("abs") def __invert__(self): return self._unop("!!", rtype="bool") def __nonzero__(self): if self.nrows > 1 or self.ncols > 1: raise H2OValueError( 'This operation is not supported on an H2OFrame. Try using parentheses. ' 'Did you mean & (logical and), | (logical or), or ~ (logical not)?') else: return self.__len__() def __int__(self): return int(self.flatten()) def __float__(self): return float(self.flatten()) def flatten(self): """ Convert a 1x1 frame into a scalar. :returns: content of this 1x1 frame as a scalar (``int``, ``float``, or ``str``). 
:raises H2OValueError: if current frame has shape other than 1x1 """ if self.shape != (1, 1): raise H2OValueError("Not a 1x1 Frame") return ExprNode("flatten", self)._eager_scalar() def getrow(self): """ Convert a 1xn frame into an n-element list. :returns: content of this 1xn frame as a Python list. :raises H2OValueError: if current frame has more than one row. """ if self.nrows != 1: raise H2OValueError("This method can only be applied to single-row frames") return ExprNode("getrow", self)._eager_scalar() def mult(self, matrix): """ Multiply this frame, viewed as a matrix, by another matrix. :param matrix: another frame that you want to multiply the current frame by; must be compatible with the current frame (i.e. its number of rows must be the same as number of columns in the current frame). :returns: new H2OFrame, which is the result of multiplying the current frame by ``matrix``. """ if self.ncols != matrix.nrows: raise H2OValueError("Matrix is not compatible for multiplication with the current frame") return H2OFrame._expr(expr=ExprNode("x", self, matrix)) def cos(self): """Return new H2OFrame equal to elementwise cosine of the current frame.""" return self._unop("cos") def sin(self): """Return new H2OFrame equal to elementwise sine of the current frame.""" return self._unop("sin") def tan(self): """Return new H2OFrame equal to elementwise tangent of the current frame.""" return self._unop("tan") def acos(self): """Return new H2OFrame equal to elementwise arc cosine of the current frame.""" return self._unop("acos") def asin(self): """Return new H2OFrame equal to elementwise arc sine of the current frame.""" return self._unop("asin") def atan(self): """Return new H2OFrame equal to elementwise arc tangent of the current frame.""" return self._unop("atan") def cosh(self): """Make new H2OFrame with values equal to the hyperbolic cosines of the values in the current frame.""" return self._unop("cosh") def sinh(self): """Return new H2OFrame equal to elementwise hyperbolic sine of the current frame.""" return self._unop("sinh") def tanh(self): """Return new H2OFrame equal to elementwise hyperbolic tangent of the current frame.""" return self._unop("tanh") def acosh(self): """Return new H2OFrame equal to elementwise inverse hyperbolic cosine of the current frame.""" return self._unop("acosh") def asinh(self): """Return new H2OFrame equal to elementwise inverse hyperbolic sine of the current frame.""" return self._unop("asinh") def atanh(self): """Return new H2OFrame equal to elementwise inverse hyperbolic tangent of the current frame.""" return self._unop("atanh") def cospi(self): """Return new H2OFrame equal to elementwise cosine of the current frame multiplied by Pi.""" return self._unop("cospi") def sinpi(self): """Return new H2OFrame equal to elementwise sine of the current frame multiplied by Pi.""" return self._unop("sinpi") def tanpi(self): """Return new H2OFrame equal to elementwise tangent of the current frame multiplied by Pi.""" return self._unop("tanpi") def abs(self): """Return new H2OFrame equal to elementwise absolute value of the current frame.""" return self._unop("abs") def sign(self): """Return new H2OFrame equal to signs of the values in the frame: -1 , +1, or 0.""" return self._unop("sign", rtype="int") def sqrt(self): """Return new H2OFrame equal to elementwise square root of the current frame.""" return self._unop("sqrt") def trunc(self): """ Apply the numeric truncation function. ``trunc(x)`` is the integer obtained from ``x`` by dropping its decimal tail. 
This is equal to ``floor(x)`` if ``x`` is positive, and ``ceil(x)`` if ``x`` is negative. Truncation is also called "rounding towards zero". :returns: new H2OFrame of truncated values of the original frame. """ return self._unop("trunc", rtype="int") def ceil(self): """ Apply the ceiling function to the current frame. ``ceil(x)`` is the smallest integer greater or equal to ``x``. :returns: new H2OFrame of ceiling values of the original frame. """ return self._unop("ceiling", rtype="int") def floor(self): """ Apply the floor function to the current frame. ``floor(x)`` is the largest integer smaller or equal to ``x``. :returns: new H2OFrame of floor values of the original frame. """ return self._unop("floor", rtype="int") def log(self): """Return new H2OFrame equals to elementwise natural logarithm of the current frame.""" return self._unop("log") def log10(self): """Return new H2OFrame equals to elementwise decimal logarithm of the current frame.""" return self._unop("log10") def log1p(self): """Return new H2OFrame equals to elementwise ``ln(1 + x)`` for each ``x`` in the current frame.""" return self._unop("log1p") def log2(self): """Return new H2OFrame equals to elementwise binary logarithm of the current frame.""" return self._unop("log2") def exp(self): """Return new H2OFrame equals to elementwise exponent (i.e. ``e^x``) of the current frame.""" return self._unop("exp") def expm1(self): """Return new H2OFrame equals to elementwise exponent minus 1 (i.e. ``e^x - 1``) of the current frame.""" return self._unop("expm1") def gamma(self): """Return new H2OFrame equals to elementwise gamma function of the current frame.""" return self._unop("gamma") def lgamma(self): """Return new H2OFrame equals to elementwise logarithm of the gamma function of the current frame.""" return self._unop("lgamma") def digamma(self): """Return new H2OFrame equals to elementwise digamma function of the current frame.""" return self._unop("digamma") def trigamma(self): """Return new H2OFrame equals to elementwise trigamma function of the current frame.""" return self._unop("trigamma") @staticmethod def moment(year=None, month=None, day=None, hour=None, minute=None, second=None, msec=None, date=None, time=None): """ Create a time column from individual components. Each parameter should be either an integer, or a single-column H2OFrame containing the corresponding time parts for each row. The "date" part of the timestamp can be specified using either the tuple ``(year, month, day)``, or an explicit ``date`` parameter. The "time" part of the timestamp is optional, but can be specified either via the ``time`` parameter, or via the ``(hour, minute, second, msec)`` tuple. :param year: the year part of the constructed date :param month: the month part of the constructed date :param day: the day-of-the-month part of the constructed date :param hour: the hours part of the constructed date :param minute: the minutes part of the constructed date :param second: the seconds part of the constructed date :param msec: the milliseconds part of the constructed date :param date date: construct the timestamp from the Python's native ``datetime.date`` (or ``datetime.datetime``) object. If the object passed is of type ``date``, then you can specify the time part using either the ``time`` argument, or ``hour`` ... ``msec`` arguments (but not both). If the object passed is of type ``datetime``, then no other arguments can be provided. :param time time: construct the timestamp from this Python's native ``datetime.time`` object. 
This argument cannot be used alone, it should be supplemented with either ``date`` argument, or ``year`` ... ``day`` tuple. :returns: H2OFrame with one column containing the date constructed from the provided arguments. """ assert_is_type(date, None, datetime.date, numpy_datetime, pandas_timestamp) assert_is_type(time, None, datetime.time) assert_is_type(year, None, int, H2OFrame) assert_is_type(month, None, int, H2OFrame) assert_is_type(day, None, int, H2OFrame) assert_is_type(hour, None, int, H2OFrame) assert_is_type(minute, None, int, H2OFrame) assert_is_type(second, None, int, H2OFrame) assert_is_type(msec, None, int, H2OFrame) if time is not None: if hour is not None or minute is not None or second is not None or msec is not None: raise H2OValueError("Arguments hour, minute, second, msec cannot be used together with `time`.") hour = time.hour minute = time.minute second = time.second msec = time.microsecond // 1000 if date is not None: if is_type(date, pandas_timestamp): date = date.to_pydatetime() if is_type(date, numpy_datetime): date = date.astype("M8[ms]").astype("O") if year is not None or month is not None or day is not None: raise H2OValueError("Arguments year, month and day cannot be used together with `date`.") year = date.year month = date.month day = date.day if isinstance(date, datetime.datetime): if time is not None: raise H2OValueError("Argument `time` cannot be used together with `date` of datetime type.") if hour is not None or minute is not None or second is not None or msec is not None: raise H2OValueError("Arguments hour, minute, second, msec cannot be used together with `date` " "of datetime type.") hour = date.hour minute = date.minute second = date.second msec = date.microsecond // 1000 if year is None or month is None or day is None: raise H2OValueError("Either arguments (`year`, `month` and `day`) or the `date` are required.") if hour is None: hour = 0 if minute is None: minute = 0 if second is None: second = 0 if msec is None: msec = 0 local_vars = locals() res_nrows = None for n in ["year", "month", "day", "hour", "minute", "second", "msec"]: x = local_vars[n] if isinstance(x, H2OFrame): if x.ncols != 1: raise H2OValueError("Argument `%s` is a frame with more than 1 column" % n) if x.type(0) not in {"int", "real"}: raise H2OValueError("Column `%s` is not numeric (type = %s)" % (n, x.type(0))) if res_nrows is None: res_nrows = x.nrows if x.nrows == 0 or x.nrows != res_nrows: raise H2OValueError("Incompatible column `%s` having %d rows" % (n, x.nrows)) if res_nrows is None: res_nrows = 1 res = H2OFrame._expr(ExprNode("moment", year, month, day, hour, minute, second, msec)) res._ex._cache._names = ["name"] res._ex._cache._types = {"name": "time"} res._ex._cache._nrows = res_nrows res._ex._cache._ncols = 1 return res def unique(self): """ Extract the unique values in the column. :returns: H2OFrame of just the unique values in the column. """ return H2OFrame._expr(expr=ExprNode("unique", self)) def levels(self): """ Get the factor levels. :returns: A list of lists, one list per column, of levels. """ lol = H2OFrame._expr(expr=ExprNode("levels", self)).as_data_frame(False) lol.pop(0) # Remove column headers lol = list(zip(*lol)) return [[ll for ll in l if ll != ''] for l in lol] def nlevels(self): """ Get the number of factor levels for each categorical column. :returns: A list of the number of levels per column. 
""" levels = self.levels() return [len(l) for l in levels] if levels else 0 def set_level(self, level): """ A method to set all column values to one of the levels. :param str level: The level at which the column will be set (a string) :returns: H2OFrame with entries set to the desired level. """ return H2OFrame._expr(expr=ExprNode("setLevel", self, level), cache=self._ex._cache) def set_levels(self, levels): """ Replace the levels of a categorical column. New levels must be aligned with the old domain. This call has copy-on-write semantics. :param List[str] levels: A list of strings specifying the new levels. The number of new levels must match the number of old levels. :returns: A single-column H2OFrame with the desired levels. """ assert_is_type(levels, [str]) return H2OFrame._expr(expr=ExprNode("setDomain", self, False, levels), cache=self._ex._cache) def set_names(self, names): """ Change names of all columns in the frame. :param List[str] names: The list of new names for every column in the frame. """ assert_is_type(names, [str]) assert_satisfies(names, len(names) == self.ncol) self._ex = ExprNode("colnames=", self, range(self.ncol), names) # Update-in-place, but still lazy return self def set_name(self, col=None, name=None): """ Set a new name for a column. :param col: index or name of the column whose name is to be set; may be skipped for 1-column frames :param name: the new name of the column """ assert_is_type(col, None, int, str) assert_is_type(name, str) ncols = self.ncols col_index = None if is_type(col, int): if not(-ncols <= col < ncols): raise H2OValueError("Index %d is out of bounds for a frame with %d columns" % (col, ncols)) col_index = (col + ncols) % ncols # handle negative indices elif is_type(col, str): if col not in self.names: raise H2OValueError("Column %s doesn't exist in the frame." % col) col_index = self.names.index(col) # lookup the name else: assert col is None if ncols != 1: raise H2OValueError("The frame has %d columns; please specify which one to rename" % ncols) col_index = 0 if name != self.names[col_index] and name in self.types: raise H2OValueError("Column '%s' already exists in the frame" % name) oldname = self.names[col_index] old_cache = self._ex._cache self._ex = ExprNode("colnames=", self, col_index, name) # Update-in-place, but still lazy self._ex._cache.fill_from(old_cache) if self.names is None: self._frame()._ex._cache.fill() else: self._ex._cache._names = self.names[:col] + [name] + self.names[col + 1:] self._ex._cache._types[name] = self._ex._cache._types.pop(oldname) return def as_date(self, format): """ Convert the frame (containing strings / categoricals) into the ``date`` format. :param str format: the format string (e.g. "YYYY-mm-dd") :returns: new H2OFrame with "date" column types """ fr = H2OFrame._expr(expr=ExprNode("as.Date", self, format), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def cumsum(self, axis=0): """ Compute cumulative sum over rows / columns of the frame. :param int axis: 0 for column-wise, 1 for row-wise :returns: new H2OFrame with cumulative sums of the original frame. """ return H2OFrame._expr(expr=ExprNode("cumsum", self, axis), cache=self._ex._cache) def cumprod(self, axis=0): """ Compute cumulative product over rows / columns of the frame. :param int axis: 0 for column-wise, 1 for row-wise :returns: new H2OFrame with cumulative products of the original frame. 
""" return H2OFrame._expr(expr=ExprNode("cumprod", self, axis), cache=self._ex._cache) def cummin(self, axis=0): """ Compute cumulative minimum over rows / columns of the frame. :param int axis: 0 for column-wise, 1 for row-wise :returns: new H2OFrame with running minimums of the original frame. """ return H2OFrame._expr(expr=ExprNode("cummin", self, axis), cache=self._ex._cache) def cummax(self, axis=0): """ Compute cumulative maximum over rows / columns of the frame. :param int axis: 0 for column-wise, 1 for row-wise :returns: new H2OFrame with running maximums of the original frame. """ return H2OFrame._expr(expr=ExprNode("cummax", self, axis), cache=self._ex._cache) def prod(self, na_rm=False): """ Compute the product of all values in the frame. :param bool na_rm: If True then NAs will be ignored during the computation. :returns: product of all values in the frame (a float) """ return ExprNode("prod.na" if na_rm else "prod", self)._eager_scalar() def any(self): """Return True if any element in the frame is either True or NA.""" return bool(ExprNode("any", self)._eager_scalar()) def any_na_rm(self): """Return True if any value in the frame is non-zero (disregarding all NAs).""" return bool(ExprNode("any.na", self)._eager_scalar()) def all(self): """Return True if every element in the frame is either True or NA.""" return bool(ExprNode("all", self)._eager_scalar()) def isnumeric(self): """ Test which columns in the frame are numeric. :returns: a list of True/False indicating for each column in the frame whether it is numeric. """ return [bool(o) for o in ExprNode("is.numeric", self)._eager_scalar()] def isstring(self): """ Test which columns in the frame are string. :returns: a list of True/False indicating for each column in the frame whether it is numeric. """ return [bool(o) for o in ExprNode("is.character", self)._eager_scalar()] def isin(self, item): """ Test whether elements of an H2OFrame are contained in the ``item``. :param items: An item or a list of items to compare the H2OFrame against. :returns: An H2OFrame of 0s and 1s showing whether each element in the original H2OFrame is contained in item. """ if is_type(item, list, tuple, set): return functools.reduce(H2OFrame.__or__, (self == i for i in item)) else: return self == item def kfold_column(self, n_folds=3, seed=-1): """ Build a fold assignments column for cross-validation. This method will produce a column having the same data layout as the source frame. :param int n_folds: An integer specifying the number of validation sets to split the training data into. :param int seed: Seed for random numbers as fold IDs are randomly assigned. :returns: A single column H2OFrame with the fold assignments. """ return H2OFrame._expr(expr=ExprNode("kfold_column", self, n_folds, seed))._frame() # want this to be eager! def modulo_kfold_column(self, n_folds=3): """ Build a fold assignments column for cross-validation. Rows are assigned a fold according to the current row number modulo ``n_folds``. :param int n_folds: An integer specifying the number of validation sets to split the training data into. :returns: A single-column H2OFrame with the fold assignments. """ return H2OFrame._expr(expr=ExprNode("modulo_kfold_column", self, n_folds))._frame() # want this to be eager! def stratified_kfold_column(self, n_folds=3, seed=-1): """ Build a fold assignment column with the constraint that each fold has the same class distribution as the fold column. :param int n_folds: The number of folds to build. 
:param int seed: A seed for the random number generator. :returns: A single column H2OFrame with the fold assignments. """ return H2OFrame._expr( expr=ExprNode("stratified_kfold_column", self, n_folds, seed))._frame() # want this to be eager! def structure(self): """Compactly display the internal structure of an H2OFrame.""" df = self.as_data_frame(use_pandas=False) cn = df.pop(0) nr = self.nrow nc = self.ncol width = max([len(c) for c in cn]) isfactor = self.isfactor() numlevels = self.nlevels() lvls = self.levels() print("H2OFrame: '{}' \nDimensions: {} obs. of {} variables".format(self.frame_id, nr, nc)) for i in range(nc): print("$ {} {}: ".format(cn[i], ' ' * (width - max(0, len(cn[i])))), end=' ') if isfactor[i]: nl = numlevels[i] print("Factor w/ {} level(s) {} ".format(nl, '"' + '","'.join(lvls[i]) + '"'), end='\n') else: print("num {}".format(" ".join(it[0] if it else "nan" for it in h2o.as_list(self[:10, i], False)[1:]))) def as_data_frame(self, use_pandas=True, header=True): """ Obtain the dataset as a python-local object. :param bool use_pandas: If True (default) then return the H2OFrame as a pandas DataFrame (requires that the ``pandas`` library was installed). If False, then return the contents of the H2OFrame as plain nested list, in a row-wise order. :param bool header: If True (default), then column names will be appended as the first row in list :returns: A python object (a list of lists of strings, each list is a row, if use_pandas=False, otherwise a pandas DataFrame) containing this H2OFrame instance's data. """ if can_use_pandas() and use_pandas: import pandas return pandas.read_csv(StringIO(self.get_frame_data()), low_memory=False) frame = [row for row in csv.reader(StringIO(self.get_frame_data()))] if not header: frame.pop(0) return frame def get_frame_data(self): """ Get frame data as a string in csv format. This will create a multiline string, where each line will contain a separate row of frame's data, with individual values separated by commas. """ return h2o.api("GET /3/DownloadDataset", data={"frame_id": self.frame_id, "hex_string": False}) def __getitem__(self, item): """ Frame slicing, supports row and column slicing. :param item: selector of a subframe. This can be one of the following: - an int, indicating selection of a single column at the specified index (0-based) - a string, selecting a column with the given name - a list of ints or strings, selecting several columns with the given indices / names - a slice, selecting columns with the indices within this slice - a single-column boolean frame, selecting rows for which the selector is true - a 2-element tuple, where the first element is a row selector, and the second element is the column selector. Here the row selector may be one of: an int, a list of ints, a slice, or a boolean frame. The column selector is similarly one of: an int, a list of ints, a string, a list of strings, or a slice. It is also possible to use the empty slice (``:``) to select all elements within one of the dimensions. :returns: A new frame comprised of some rows / columns of the source frame. :examples: >>> fr[2] # All rows, 3rd column >>> fr[-2] # All rows, 2nd column from end >>> fr[:, -1] # All rows, last column >>> fr[0:5, :] # First 5 rows, all columns >>> fr[fr[0] > 1, :] # Only rows where first cell is greater than 1, all columns >>> fr[[1, 5, 6]] # Columns 2, 6, and 7 >>> fr[0:50, [1,2,3]] # First 50 rows, columns 2, 3, and 4 """ # Select columns based on a string, a list of strings, an int or a slice. 
# Note that the python column selector handles the case of negative # selections, or out-of-range selections - without having to compute # self._ncols in the front-end - which would force eager evaluation just to # range check in the front-end. new_ncols = -1 new_nrows = -1 new_names = None new_types = None fr = None flatten = False if isinstance(item, slice): item = normalize_slice(item, self.ncols) if is_type(item, str, int, list, slice): new_ncols, new_names, new_types, item = self._compute_ncol_update(item) new_nrows = self.nrow fr = H2OFrame._expr(expr=ExprNode("cols_py", self, item)) elif isinstance(item, (ExprNode, H2OFrame)): new_ncols = self.ncol new_names = self.names new_types = self.types new_nrows = -1 # have a "big" predicate column -- update cache later on... fr = H2OFrame._expr(expr=ExprNode("rows", self, item)) elif isinstance(item, tuple): rows, cols = item allrows = allcols = False if isinstance(cols, slice): cols = normalize_slice(cols, self.ncols) allcols = cols == slice(0, self.ncols, 1) if isinstance(rows, slice): rows = normalize_slice(rows, self.nrows) allrows = rows == slice(0, self.nrows, 1) if allrows and allcols: return self # fr[:,:] -> all rows and columns.. return self if allrows: new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols) new_nrows = self.nrow fr = H2OFrame._expr(expr=ExprNode("cols_py", self, cols)) # fr[:,cols] -> really just a column slice if allcols: new_ncols = self.ncols new_names = self.names new_types = self.types new_nrows, rows = self._compute_nrow_update(rows) fr = H2OFrame._expr(expr=ExprNode("rows", self, rows)) # fr[rows,:] -> really just a row slices if not allrows and not allcols: new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols) new_nrows, rows = self._compute_nrow_update(rows) fr = H2OFrame._expr(expr=ExprNode("rows", ExprNode("cols_py", self, cols), rows)) flatten = is_type(rows, int) and is_type(cols, str, int) else: raise ValueError("Unexpected __getitem__ selector: " + str(type(item)) + " " + str(item.__class__)) assert fr is not None # Pythonic: if the row & col selector turn into ints (or a single col # name), then extract the single element out of the Frame. Otherwise # return a Frame, EVEN IF the selectors are e.g. slices-of-1-value. 
if flatten: return fr.flatten() fr._ex._cache.ncols = new_ncols fr._ex._cache.nrows = new_nrows fr._ex._cache.names = new_names fr._ex._cache.types = new_types fr._is_frame = self._is_frame return fr def _compute_ncol_update(self, item): # computes new ncol, names, and types try: new_ncols = -1 if isinstance(item, list): new_ncols = len(item) if _is_str_list(item): new_types = {k: self.types[k] for k in item} new_names = item else: new_names = [self.names[i] for i in item] new_types = {name: self.types[name] for name in new_names} elif isinstance(item, slice): assert slice_is_normalized(item) new_names = self.names[item] new_types = {name: self.types[name] for name in new_names} elif is_type(item, str, int): new_ncols = 1 if is_type(item, str): new_names = [item] new_types = None if item not in self.types else {item: self.types[item]} else: new_names = [self.names[item]] new_types = {new_names[0]: self.types[new_names[0]]} else: raise ValueError("Unexpected type: " + str(type(item))) return (new_ncols, new_names, new_types, item) except: return (-1, None, None, item) def _compute_nrow_update(self, item): try: new_nrows = -1 if isinstance(item, list): new_nrows = len(item) elif isinstance(item, slice): assert slice_is_normalized(item) new_nrows = (item.stop - item.start + item.step - 1) // item.step elif isinstance(item, H2OFrame): new_nrows = -1 else: new_nrows = 1 return [new_nrows, item] except: return [-1, item] def __setitem__(self, item, value): """ Replace, update or add column(s) in an H2OFrame. :param item: A 0-based index of a column, or a column name, or a list of column names, or a slice. Alternatively, this may also be a two-element tuple where the first element in the tuple is a row selector, and the second element is a row selector. Finally, this can also be a boolean frame indicating which rows/columns to modify. If ``item`` is a column name that does not exist in the frame, then a new column will be appended to the current frame. :param value: The value replacing elements at positions given by ``item``. This can be either a constant, or another frame. """ # TODO: add far stronger type checks, so that we never run in a situation where the server has to # tell us that we requested an illegal operation. 
assert_is_type(item, str, int, tuple, list, H2OFrame) assert_is_type(value, None, numeric, str, H2OFrame) col_expr = None row_expr = None colname = None # When set, we are doing an append if is_type(item, str): # String column name, could be new or old if item in self.names: col_expr = self.names.index(item) # Update an existing column else: col_expr = self.ncols colname = item # New, append elif is_type(item, int): if not(-self.ncols <= item < self.ncols): raise H2OValueError("Incorrect column index: %d" % item) col_expr = item # Column by number if col_expr < 0: col_expr += self.ncols elif isinstance(item, tuple): # Both row and col specifiers # Need more type checks row_expr = item[0] col_expr = item[1] if is_type(col_expr, str): # Col by name if col_expr not in self.names: # Append colname = col_expr col_expr = self.ncol elif is_type(col_expr, int): if not(-self.ncols <= col_expr < self.ncols): raise H2OValueError("Incorrect column index: %d" % item) if col_expr < 0: col_expr += self.ncols elif isinstance(col_expr, slice): # Col by slice if col_expr.start is None and col_expr.stop is None: col_expr = slice(0, self.ncol) # Slice of all if isinstance(row_expr, slice): start = row_expr.start step = row_expr.step stop = row_expr.stop if start is None: start = 0 if stop is None: stop = self.nrows row_expr = slice(start, stop, step) elif isinstance(item, H2OFrame): row_expr = item # Row slicing elif isinstance(item, list): col_expr = item if value is None: value = float("nan") value_is_own_subframe = isinstance(value, H2OFrame) and self._is_frame_in_self(value) old_cache = self._ex._cache if colname is None: self._ex = ExprNode(":=", self, value, col_expr, row_expr) self._ex._cache.fill_from(old_cache) if isinstance(value, H2OFrame) and \ value._ex._cache.types_valid() and \ self._ex._cache.types_valid(): self._ex._cache._types.update(value._ex._cache.types) else: self._ex._cache.types = None else: self._ex = ExprNode("append", self, value, colname) self._ex._cache.fill_from(old_cache) self._ex._cache.names = self.names + [colname] self._ex._cache._ncols += 1 if self._ex._cache.types_valid() and isinstance(value, H2OFrame) and value._ex._cache.types_valid(): self._ex._cache._types[colname] = list(viewvalues(value._ex._cache.types))[0] else: self._ex._cache.types = None if value_is_own_subframe: value._ex = None # wipe out to keep ref counts correct def _is_frame_in_self(self, frame): if self._ex is frame._ex: return True if frame._ex._children is None: return False return any(self._is_expr_in_self(ch) for ch in frame._ex._children) def _is_expr_in_self(self, expr): if not isinstance(expr, ExprNode): return False if self._ex is expr: return True if expr._children is None: return False return any(self._is_expr_in_self(ch) for ch in expr._children) def drop(self, index, axis=1): """ Drop a single column or row or a set of columns or rows from a H2OFrame. Dropping a column or row is not in-place. Indices of rows and columns are zero-based. :param index: A list of column indices, column names, or row indices to drop; or a string to drop a single column by name; or an int to drop a single column by index. :param int axis: If 1 (default), then drop columns; if 0 then drop rows. :returns: a new H2OFrame with the respective dropped columns or rows. The original H2OFrame remains unchanged. 
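:examples:
A minimal sketch; column names below are illustrative:
>>> fr = h2o.H2OFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
>>> fr_cols = fr.drop(["a", "c"])     # new frame containing only column "b"
>>> fr_rows = fr.drop([0], axis=0)    # new frame without the first row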
""" if axis == 1: if not isinstance(index, list): #If input is a string, i.e., "C1": if is_type(index, str): #Check if index is an actual column(s) in the frame if index not in self.names: raise H2OValueError("Column(s) selected to drop are not in original frame: %r" % index) index = self.names.index(index) #If input is an int indicating a column index, i.e., 3: elif is_type(index, int): #Check if index is an actual column index in the frame if index > self.ncol: raise H2OValueError("Column index selected to drop is not part of the frame: %r" % index) if index < 0: raise H2OValueError("Column index selected to drop is not positive: %r" % index) fr = H2OFrame._expr(expr=ExprNode("cols", self, -(index + 1)), cache=self._ex._cache) fr._ex._cache.ncols -= 1 fr._ex._cache.names = self.names[:index] + self.names[index + 1:] fr._ex._cache.types = {name: self.types[name] for name in fr._ex._cache.names} return fr elif isinstance(index, list): #If input is an int array indicating a column index, i.e., [3] or [1,2,3]: if is_type(index, [int]): if max(index) > self.ncol: raise H2OValueError("Column index selected to drop is not part of the frame: %r" % index) if min(index) < 0: raise H2OValueError("Column index selected to drop is not positive: %r" % index) for i in range(len(index)): index[i] = -(index[i] + 1) #If index is a string array, i.e., ["C1", "C2"] elif is_type(index, [str]): #Check if index is an actual column(s) in the frame if not set(index).issubset(self.names): raise H2OValueError("Column(s) selected to drop are not in original frame: %r" % index) for i in range(len(index)): index[i] = -(self.names.index(index[i]) + 1) fr = H2OFrame._expr(expr=ExprNode("cols", self, index), cache=self._ex._cache) fr._ex._cache.ncols -= len(index) fr._ex._cache.names = [i for i in self.names if self.names.index(i) not in list(map(lambda x: abs(x) - 1, index))] fr._ex._cache.types = {name: fr.types[name] for name in fr._ex._cache.names} else: raise ValueError("Invalid column index types. Must either be a list of all int indexes, " "a string list of all column names, a single int index, or" "a single string for dropping columns.") return fr elif axis == 0: if is_type(index, [int]): #Check if index is an actual column index in the frame if max(index) > self.nrow: raise H2OValueError("Row index selected to drop is not part of the frame: %r" % index) if min(index) < 0: raise H2OValueError("Row index selected to drop is not positive: %r" % index) index = [-(x + 1) for x in index] fr = H2OFrame._expr(expr=ExprNode("rows", self, index), cache=self._ex._cache) fr._ex._cache.nrows -= len(index) else: raise ValueError("Invalid row indexes. Must be a list of int row indexes to drop from the H2OFrame.") return fr def pop(self, i): """ Pop a column from the H2OFrame at index i. :param i: The index (int) or name (str) of the column to pop. :returns: an H2OFrame containing the column dropped from the current frame; the current frame is modified in-place and loses the column. 
""" if is_type(i, str): i = self.names.index(i) col = H2OFrame._expr(expr=ExprNode("cols", self, i)) old_cache = self._ex._cache self._ex = ExprNode("cols", self, -(i + 1)) self._ex._cache.ncols -= 1 self._ex._cache.names = old_cache.names[:i] + old_cache.names[i + 1:] self._ex._cache.types = {name: old_cache.types[name] for name in self._ex._cache.names} self._ex._cache._data = None col._ex._cache.ncols = 1 col._ex._cache.names = [old_cache.names[i]] return col def quantile(self, prob=None, combine_method="interpolate", weights_column=None): """ Compute quantiles. :param List[float] prob: list of probabilities for which quantiles should be computed. :param str combine_method: for even samples this setting determines how to combine quantiles. This can be one of ``"interpolate"``, ``"average"``, ``"low"``, ``"high"``. :param weights_column: optional weights for each row. If not given, all rows are assumed to have equal importance. This parameter can be either the name of column containing the observation weights in this frame, or a single-column separate H2OFrame of observation weights. :returns: a new H2OFrame containing the quantiles and probabilities. """ if len(self) == 0: return self if prob is None: prob = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99] if weights_column is None: weights_column = "_" else: assert_is_type(weights_column, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == self.nrow)) if isinstance(weights_column, H2OFrame): merged = self.cbind(weights_column) weights_column = merged.names[-1] return H2OFrame._expr(expr=ExprNode("quantile", merged, prob, combine_method, weights_column)) return H2OFrame._expr(expr=ExprNode("quantile", self, prob, combine_method, weights_column)) def concat(self, frames, axis=1): """ Append multiple H2OFrames to this frame, column-wise or row-wise. :param List[H2OFrame] frames: list of frames that should be appended to the current frame. :param int axis: if 1 then append column-wise (default), if 0 then append row-wise. :returns: an H2OFrame of the combined datasets. """ if len(frames) == 0: raise ValueError("Input list of frames is empty! Nothing to concat.") if axis == 1: df = self.cbind(frames) else: df = self.rbind(frames) return df def cbind(self, data): """ Append data to this frame column-wise. :param H2OFrame data: append columns of frame ``data`` to the current frame. You can also cbind a number, in which case it will get converted into a constant column. :returns: new H2OFrame with all frames in ``data`` appended column-wise. """ assert_is_type(data, H2OFrame, numeric, [H2OFrame, numeric]) frames = [data] if not isinstance(data, list) else data new_cols = list(self.columns) new_types = dict(self.types) for frame in frames: if isinstance(frame, H2OFrame): if frame.nrow != self.nrow: raise H2OValueError("Cannot bind a dataframe with %d rows to a data frame with %d rows: " "the number of rows should match" % (frame.nrow, self.nrow)) new_cols += frame.columns new_types.update(frame.types) else: new_cols += [None] unique_cols = set(new_cols) fr = H2OFrame._expr(expr=ExprNode("cbind", self, *frames), cache=self._ex._cache) fr._ex._cache.ncols = len(new_cols) if len(new_cols) == len(unique_cols) and None not in unique_cols: fr._ex._cache.names = new_cols fr._ex._cache.types = new_types else: # Invalidate names and types since they contain duplicate / unknown names, and the server will choose those. fr._ex._cache.names = None fr._ex._cache.types = None return fr def rbind(self, data): """ Append data to this frame row-wise. 
:param data: an H2OFrame or a list of H2OFrame's to be combined with current frame row-wise. :returns: this H2OFrame with all frames in data appended row-wise. """ assert_is_type(data, H2OFrame, [H2OFrame]) frames = [data] if not isinstance(data, list) else data for frame in frames: if frame.ncol != self.ncol: raise H2OValueError("Cannot row-bind a dataframe with %d columns to a data frame with %d columns: " "the columns must match" % (frame.ncol, self.ncol)) if frame.columns != self.columns or frame.types != self.types: raise H2OValueError("Column names and types must match for rbind() to work") fr = H2OFrame._expr(expr=ExprNode("rbind", self, *frames), cache=self._ex._cache) fr._ex._cache.nrows = self.nrow + sum(frame.nrow for frame in frames) return fr def split_frame(self, ratios=None, destination_frames=None, seed=None): """ Split a frame into distinct subsets of size determined by the given ratios. The number of subsets is always 1 more than the number of ratios given. Note that this does not give an exact split. H2O is designed to be efficient on big data using a probabilistic splitting method rather than an exact split. For example when specifying a split of 0.75/0.25, H2O will produce a test/train split with an expected value of 0.75/0.25 rather than exactly 0.75/0.25. On small datasets, the sizes of the resulting splits will deviate from the expected value more than on big data, where they will be very close to exact. :param List[float] ratios: The fractions of rows for each split. :param List[str] destination_frames: The names of the split frames. :param int seed: seed for the random number generator :returns: A list of H2OFrames """ assert_is_type(ratios, [numeric], None) assert_is_type(destination_frames, [str], None) assert_is_type(seed, int, None) if ratios is None: ratios = [0.75] if not ratios: raise ValueError("Ratios array may not be empty") if destination_frames is not None: if len(ratios) + 1 != len(destination_frames): raise ValueError("The number of provided destination_frames must be one more " "than the number of provided ratios") num_slices = len(ratios) + 1 boundaries = [] last_boundary = 0 i = 0 while i < num_slices - 1: ratio = ratios[i] if ratio < 0: raise ValueError("Ratio must be greater than 0") boundary = last_boundary + ratio if boundary >= 1.0: raise ValueError("Ratios must add up to less than 1.0") boundaries.append(boundary) last_boundary = boundary i += 1 splits = [] tmp_runif = self.runif(seed) tmp_runif.frame_id = "%s_splitter" % _py_tmp_key(h2o.connection().session_id) i = 0 while i < num_slices: if i == 0: # lower_boundary is 0.0 upper_boundary = boundaries[i] tmp_slice = self[(tmp_runif <= upper_boundary), :] elif i == num_slices - 1: lower_boundary = boundaries[i - 1] # upper_boundary is 1.0 tmp_slice = self[(tmp_runif > lower_boundary), :] else: lower_boundary = boundaries[i - 1] upper_boundary = boundaries[i] tmp_slice = self[((tmp_runif > lower_boundary) & (tmp_runif <= upper_boundary)), :] if destination_frames is None: splits.append(tmp_slice) else: destination_frame_id = destination_frames[i] tmp_slice.frame_id = destination_frame_id splits.append(tmp_slice) i += 1 del tmp_runif return splits def group_by(self, by): """ Return a new ``GroupBy`` object using this frame and the desired grouping columns. The returned groups are sorted by the natural group-by column sort. :param by: The columns to group on (either a single column name, or a list of column names, or a list of column indices). 
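:examples:
A minimal sketch, assuming the ``GroupBy`` object's ``sum()`` / ``get_frame()`` API; the data is illustrative:
>>> fr = h2o.H2OFrame({"g": ["x", "x", "y"], "v": [1, 2, 3]})
>>> fr.group_by("g").sum("v").get_frame()   # one row per group with the sum of "v"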
""" assert_is_type(by, str, int, [str, int]) return GroupBy(self, by) def sort(self, by): """ Return a new Frame that is sorted by column(s) in ascending order. A fully distributed and parallel sort. However, the original frame must not contain any String columns. :param by: The column to sort by (either a single column name, or a list of column names, or a list of column indices) :return: a new sorted Frame """ assert_is_type(by, str, int, [str, int]) if type(by) != list: by = [by] for c in by: if self.type(c) not in ["enum","time","int"]: raise H2OValueError("Sort by column: " + str(c) + " not of enum, time, or int type") return H2OFrame._expr(expr=ExprNode("sort",self,by)) def fillna(self,method="forward",axis=0,maxlen=1): """ Return a new Frame that fills NA along a given axis and along a given direction with a maximum fill length :param method: ``"forward"`` or ``"backward"`` :param axis: 0 for columnar-wise or 1 for row-wise fill :param maxlen: Max number of consecutive NA's to fill :return: """ assert_is_type(axis, 0, 1) assert_is_type(method,str) assert_is_type(maxlen, int) return H2OFrame._expr(expr=ExprNode("h2o.fillna",self,method,axis,maxlen)) def impute(self, column=-1, method="mean", combine_method="interpolate", by=None, group_by_frame=None, values=None): """ Impute missing values into the frame, modifying it in-place. :param int column: Index of the column to impute, or -1 to impute the entire frame. :param str method: The method of imputation: ``"mean"``, ``"median"``, or ``"mode"``. :param str combine_method: When the method is ``"median"``, this setting dictates how to combine quantiles for even samples. One of ``"interpolate"``, ``"average"``, ``"low"``, ``"high"``. :param by: The list of columns to group on. :param H2OFrame group_by_frame: Impute the values with this pre-computed grouped frame. :param List values: The list of impute values, one per column. None indicates to skip the column. :returns: A list of values used in the imputation or the group-by result used in imputation. """ if is_type(column, str): column = self.names.index(column) if is_type(by, str): by = self.names.index(by) if values is None: values = "_" else: assert len(values) == len(self.columns), "Length of values does not match length of columns" # convert string values to categorical num values values2 = [] for i in range(0,len(values)): if self.type(i) == "enum": try: values2.append(self.levels()[i].index(values[i])) except: raise H2OValueError("Impute value of: " + values[i] + " not found in existing levels of" " column: " + self.col_names[i]) else: values2.append(values[i]) values = values2 if group_by_frame is None: group_by_frame = "_" # This code below is needed to ensure the frame (self) exists on the server. Without it, self._ex._cache.fill() # fails with an assertion that ._id is None. # This code should be removed / reworked once we have a more consistent strategy of dealing with frames. self._ex._eager_frame() if by is not None or group_by_frame is not "_": res = H2OFrame._expr( expr=ExprNode("h2o.impute", self, column, method, combine_method, by, group_by_frame, values))._frame() else: res = ExprNode("h2o.impute", self, column, method, combine_method, by, group_by_frame, values)._eager_scalar() self._ex._cache.flush() self._ex._cache.fill(10) return res def merge(self, other, all_x=False, all_y=False, by_x=None, by_y=None, method="auto"): """ Merge two datasets based on common column names. :param H2OFrame other: The frame to merge to the current one. 
By default, must have at least one column in common with
this frame, and all columns in common are used as the merge key. If you want to use only a
subset of the columns in common, rename the other columns so the columns are unique in the
merged result.
:param bool all_x: If True, include all rows from the left/self frame.
:param bool all_y: If True, include all rows from the right/other frame.
:param by_x: list of columns in the current frame to use as a merge key.
:param by_y: list of columns in the ``other`` frame to use as a merge key. Should have the same number of
columns as in the ``by_x`` list.
:param str method: the merge method to use (defaults to ``"auto"``).
:returns: New H2OFrame with the result of merging the current frame with the ``other`` frame.
"""
if by_x is None and by_y is None:
common_names = list(set(self.names) & set(other.names))
if not common_names:
raise H2OValueError("No columns in common to merge on!")
if by_x is None:
by_x = [self.names.index(c) for c in common_names]
else:
by_x = _getValidCols(by_x, self)
if by_y is None:
by_y = [other.names.index(c) for c in common_names]
else:
by_y = _getValidCols(by_y, other)
return H2OFrame._expr(expr=ExprNode("merge", self, other, all_x, all_y, by_x, by_y, method))
def relevel(self, y):
"""
Reorder levels of an H2O factor.
The levels of a factor are reordered such that the reference level is at level 0, and all remaining levels are
moved down as needed.
:param str y: The reference level
:returns: New reordered factor column
"""
return H2OFrame._expr(expr=ExprNode("relevel", self, quote(y)))
def insert_missing_values(self, fraction=0.1, seed=None):
"""
Insert missing values into the current frame, modifying it in-place.
Randomly replaces a user-specified fraction of entries in an H2O dataset with missing values.
:param float fraction: A number between 0 and 1 indicating the fraction of entries to replace with missing.
:param int seed: The seed for the random number generator used to determine which values to make missing.
:returns: the original H2OFrame with missing values inserted.
"""
kwargs = {}
kwargs['dataset'] = self.frame_id  # Eager; forces eval now for following REST call
kwargs['fraction'] = fraction
if seed is not None: kwargs['seed'] = seed
job = {}
job['job'] = h2o.api("POST /3/MissingInserter", data=kwargs)
H2OJob(job, job_type="Insert Missing Values").poll()
self._ex._cache.flush()
return self
def min(self):
"""The minimum value of all frame entries."""
return ExprNode("min", self)._eager_scalar()
def max(self):
"""The maximum value of all frame entries."""
return ExprNode("max", self)._eager_scalar()
def sum(self, skipna=True, axis=0, **kwargs):
"""
Compute the frame's sum by-column (or by-row).
:param bool skipna: If True (default), then NAs are ignored during the computation. Otherwise presence
of NAs renders the entire result NA.
:param int axis: Direction of sum computation. If 0 (default), then sum is computed columnwise, and the result
is a frame with 1 row and number of columns as in the original frame. If 1, then sum is computed rowwise
and the result is a frame with 1 column (called "sum"), and number of rows equal to the number of rows
in the original frame.
:returns: either a list of sums of values per column (old semantic); or an H2OFrame containing sums of values
per-column/per-row in the original frame (new semantic). The new semantic is triggered by either
providing the ``return_frame=True`` parameter, or having the ``general.allow_breaking_changes`` config
option turned on.
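:examples:
A minimal sketch; the inline data is illustrative:
>>> fr = h2o.H2OFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> fr.sum(skipna=True, axis=0, return_frame=True)   # 1-row frame of per-column sums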
""" assert_is_type(skipna, bool) assert_is_type(axis, 0, 1) # Deprecated since 2016-10-14, if "na_rm" in kwargs: warnings.warn("Parameter na_rm is deprecated; use skipna instead", category=DeprecationWarning) na_rm = kwargs.pop("na_rm") assert_is_type(na_rm, bool) skipna = na_rm # don't assign to skipna directly, to help with error reporting # Determine whether to return a frame or a list return_frame = get_config_value("general.allow_breaking_changes", False) if "return_frame" in kwargs: return_frame = kwargs.pop("return_frame") assert_is_type(return_frame, bool) if kwargs: raise H2OValueError("Unknown parameters %r" % list(kwargs)) if return_frame: return H2OFrame._expr(ExprNode("sumaxis", self, skipna, axis)) else: return ExprNode("sumNA" if skipna else "sum", self)._eager_scalar() def mean(self, skipna=True, axis=0, **kwargs): """ Compute the frame's means by-column (or by-row). :param bool skipna: If True (default), then NAs are ignored during the computation. Otherwise presence of NAs renders the entire result NA. :param int axis: Direction of mean computation. If 0 (default), then mean is computed columnwise, and the result is a frame with 1 row and number of columns as in the original frame. If 1, then mean is computed rowwise and the result is a frame with 1 column (called "mean"), and number of rows equal to the number of rows in the original frame. :returns: either a list of mean values per-column (old semantic); or an H2OFrame containing mean values per-column/per-row from the original frame (new semantic). The new semantic is triggered by either providing the ``return_frame=True`` parameter, or having the ``general.allow_breaking_changed`` config option turned on. """ assert_is_type(skipna, bool) assert_is_type(axis, 0, 1) # Deprecated since 2016-10-14, if "na_rm" in kwargs: warnings.warn("Parameter na_rm is deprecated; use skipna instead", category=DeprecationWarning) na_rm = kwargs.pop("na_rm") assert_is_type(na_rm, bool) skipna = na_rm # don't assign to skipna directly, to help with error reporting # Determine whether to return a frame or a list return_frame = get_config_value("general.allow_breaking_changes", False) if "return_frame" in kwargs: return_frame = kwargs.pop("return_frame") assert_is_type(return_frame, bool) if kwargs: raise H2OValueError("Unknown parameters %r" % list(kwargs)) new_frame = H2OFrame._expr(ExprNode("mean", self, skipna, axis)) if return_frame: return new_frame else: return new_frame.getrow() def skewness(self, na_rm=False): """ Compute the skewness of each column in the frame. :param bool na_rm: If True, then ignore NAs during the computation. :returns: A list containing the skewness for each column (NaN for non-numeric columns). """ return ExprNode("skewness", self, na_rm)._eager_scalar() def kurtosis(self, na_rm=False): """ Compute the kurtosis of each column in the frame. We calculate the common kurtosis, such that kurtosis(normal distribution) is 3. :param bool na_rm: If True, then ignore NAs during the computation. :returns: A list containing the kurtosis for each column (NaN for non-numeric columns). """ return ExprNode("kurtosis", self, na_rm)._eager_scalar() def nacnt(self): """ Count of NAs for each column in this H2OFrame. :returns: A list of the na counts (one entry per column). """ return ExprNode("naCnt", self)._eager_scalar() def median(self, na_rm=False): """ Compute the median of each column in the frame. :param bool na_rm: If True, then ignore NAs during the computation. 
:returns: A list containing the median for each column (NaN for non-numeric columns). """ return ExprNode("median", self, na_rm)._eager_scalar() def var(self, y=None, na_rm=False, use=None): """ Compute the variance-covariance matrix of one or two H2OFrames. :param H2OFrame y: If this parameter is given, then a covariance matrix between the columns of the target frame and the columns of ``y`` is computed. If this parameter is not provided then the covariance matrix of the target frame is returned. If target frame has just a single column, then return the scalar variance instead of the matrix. Single rows are treated as single columns. :param str use: A string indicating how to handle missing values. This could be one of the following: - ``"everything"``: outputs NaNs whenever one of its contributing observations is missing - ``"all.obs"``: presence of missing observations will throw an error - ``"complete.obs"``: discards missing values along with all observations in their rows so that only complete observations are used :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is ``"everything"``; and if False then default ``use`` is ``"complete.obs"``. This parameter has no effect if ``use`` is given explicitly. :returns: An H2OFrame of the covariance matrix of the columns of this frame (if ``y`` is not given), or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows or single columns, then the variance is returned as a scalar. """ symmetric = False if y is None: y = self symmetric = True if use is None: use = "complete.obs" if na_rm else "everything" if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1): return ExprNode("var", self, y, use, symmetric)._eager_scalar() return H2OFrame._expr(expr=ExprNode("var", self, y, use, symmetric))._frame() def sd(self, na_rm=False): """ Compute the standard deviation for each column in the frame. :param bool na_rm: if True, then NAs will be removed from the computation. :returns: A list containing the standard deviation for each column (NaN for non-numeric columns). """ return ExprNode("sd", self, na_rm)._eager_scalar() def cor(self, y=None, na_rm=False, use=None): """ Compute the correlation matrix of one or two H2OFrames. :param H2OFrame y: If this parameter is provided, then compute correlation between the columns of ``y`` and the columns of the current frame. If this parameter is not given, then just compute the correlation matrix for the columns of the current frame. :param str use: A string indicating how to handle missing values. This could be one of the following: - ``"everything"``: outputs NaNs whenever one of its contributing observations is missing - ``"all.obs"``: presence of missing observations will throw an error - ``"complete.obs"``: discards missing values along with all observations in their rows so that only complete observations are used :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is ``"everything"``; and if False then default ``use`` is ``"complete.obs"``. This parameter has no effect if ``use`` is given explicitly. :returns: An H2OFrame of the correlation matrix of the columns of this frame (if ``y`` is not given), or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows or single columns, then the correlation is returned as a scalar. 
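:examples:
A minimal sketch; the inline data is illustrative:
>>> fr = h2o.H2OFrame({"a": [1, 2, 3, 4], "b": [2, 4, 6, 8]})
>>> fr.cor()              # 2x2 correlation matrix as an H2OFrame
>>> fr["a"].cor(fr["b"])  # scalar correlation between two single columns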
""" assert_is_type(y, H2OFrame, None) assert_is_type(na_rm, bool) assert_is_type(use, None, "everything", "all.obs", "complete.obs") if y is None: y = self if use is None: use = "complete.obs" if na_rm else "everything" if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1): return ExprNode("cor", self, y, use)._eager_scalar() return H2OFrame._expr(expr=ExprNode("cor", self, y, use))._frame() def distance(self, y, measure=None): """ Compute a pairwise distance measure between all rows of two numeric H2OFrames. :param H2OFrame y: Frame containing queries (small) :param str use: A string indicating what distance measure to use. Must be one of: - ``"l1"``: Absolute distance (L1-norm, >=0) - ``"l2"``: Euclidean distance (L2-norm, >=0) - ``"cosine"``: Cosine similarity (-1...1) - ``"cosine_sq"``: Squared Cosine similarity (0...1) :examples: >>> >>> iris_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv")) >>> references = iris_h2o[10:150,0:4 >>> queries = iris_h2o[0:10,0:4] >>> A = references.distance(queries, "l1") >>> B = references.distance(queries, "l2") >>> C = references.distance(queries, "cosine") >>> D = references.distance(queries, "cosine_sq") >>> E = queries.distance(references, "l1") >>> (E.transpose() == A).all() :returns: An H2OFrame of the matrix containing pairwise distance / similarity between the rows of this frame (N x p) and ``y`` (M x p), with dimensions (N x M). """ assert_is_type(y, H2OFrame) if measure is None: measure = "l2" return H2OFrame._expr(expr=ExprNode("distance", self, y, measure))._frame() def strdistance(self, y, measure=None): """ Compute element-wise string distances between two H2OFrames. Both frames need to have the same shape and only contain string/factor columns. :param H2OFrame y: A comparison frame. :param str measure: A string identifier indicating what string distance measure to use. Must be one of: - ``"lv"``: Levenshtein distance - ``"lcs"``: Longest common substring distance - ``"qgram"``: q-gram distance - ``"jaccard"``: Jaccard distance between q-gram profiles - ``"jw"``: Jaro, or Jaro-Winker distance - ``"soundex"``: Distance based on soundex encoding :examples: >>> >>> x = h2o.H2OFrame.from_python(['Martha', 'Dwayne', 'Dixon'], column_types=['factor']) >>> y = h2o.H2OFrame.from_python(['Marhta', 'Duane', 'Dicksonx'], column_types=['string']) >>> x.strdistance(y, measure="jw") :returns: An H2OFrame of the matrix containing element-wise distance between the strings of this frame and ``y``. The returned frame has the same shape as the input frames. """ assert_is_type(y, H2OFrame) assert_is_type(measure, Enum('lv', 'lcs', 'qgram', 'jaccard', 'jw', 'soundex')) return H2OFrame._expr(expr=ExprNode("strDistance", self, y, measure))._frame() def asfactor(self): """ Convert columns in the current frame to categoricals. :returns: new H2OFrame with columns of the "enum" type. """ for colname in self.names: t = self.types[colname] if t not in {"bool", "int", "string", "enum"}: raise H2OValueError("Only 'int' or 'string' are allowed for " "asfactor(), got %s:%s " % (colname, t)) fr = H2OFrame._expr(expr=ExprNode("as.factor", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {name: "enum" for name in self.types} else: raise H2OTypeError("Types are not available in result") return fr def isfactor(self): """ Test which columns in the current frame are categorical. :returns: a list of True/False indicating for each column in the frame whether it is categorical. 
""" return [bool(o) for o in ExprNode("is.factor", self)._eager_scalar()] def anyfactor(self): """Return True if there are any categorical columns in the frame.""" return bool(ExprNode("any.factor", self)._eager_scalar()) def categories(self): """ Return the list of levels for an enum (categorical) column. This function can only be applied to single-column categorical frame. """ if self.ncols != 1: raise H2OValueError("This operation only applies to a single factor column") if self.types[self.names[0]] != "enum": raise H2OValueError("Input is not a factor. This operation only applies to a single factor column") return self.levels()[0] def transpose(self): """ Transpose rows and columns of this frame. :returns: new H2OFrame where with rows/columns from the original frame transposed. """ return H2OFrame._expr(expr=ExprNode("t", self)) def strsplit(self, pattern): """ Split the strings in the target column on the given regular expression pattern. :param str pattern: The split pattern. :returns: H2OFrame containing columns of the split strings. """ fr = H2OFrame._expr(expr=ExprNode("strsplit", self, pattern)) fr._ex._cache.nrows = self.nrow return fr def tokenize(self, split): """ Tokenize String tokenize() is similar to strsplit(), the difference between them is that tokenize() will store the tokenized text into a single column making it easier for additional processing (filtering stop words, word2vec algo, ...). :param str split The regular expression to split on. @return An H2OFrame with a single column representing the tokenized Strings. Original rows of the input DF are separated by NA. """ fr = H2OFrame._expr(expr=ExprNode("tokenize", self, split)) return fr def countmatches(self, pattern): """ For each string in the frame, count the occurrences of the provided pattern. The pattern here is a plain string, not a regular expression. We will search for the occurrences of the pattern as a substring in element of the frame. This function is applicable to frames containing only string or categorical columns. :param str pattern: The pattern to count matches on in each string. This can also be a list of strings, in which case all of them will be searched for. :returns: numeric H2OFrame with the same shape as the original, containing counts of matches of the pattern for each cell in the original frame. """ assert_is_type(pattern, str, [str]) fr = H2OFrame._expr(expr=ExprNode("countmatches", self, pattern)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncols = self.ncol return fr def trim(self): """ Trim white space on the left and right of strings in a single-column H2OFrame. :returns: H2OFrame with trimmed strings. """ fr = H2OFrame._expr(expr=ExprNode("trim", self)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def substring(self, start_index, end_index=None): """ For each string, return a new string that is a substring of the original string. If end_index is not specified, then the substring extends to the end of the original string. If the start_index is longer than the length of the string, or is greater than or equal to the end_index, an empty string is returned. Negative start_index is coerced to 0. :param int start_index: The index of the original string at which to start the substring, inclusive. :param int end_index: The index of the original string at which to end the substring, exclusive. :returns: An H2OFrame containing the specified substrings. 
""" fr = H2OFrame._expr(expr=ExprNode("substring", self, start_index, end_index)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def lstrip(self, set=" "): """ Return a copy of the column with leading characters removed. The set argument is a string specifying the set of characters to be removed. If omitted, the set argument defaults to removing whitespace. :param str set: The set of characters to lstrip from strings in column :returns: a new H2OFrame with the same shape as the original frame and having all its values trimmed from the left (equivalent of Python's ``str.lstrip()``). """ # work w/ None; parity with python lstrip if set is None: set = " " fr = H2OFrame._expr(expr=ExprNode("lstrip", self, set)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def rstrip(self, set=" "): """ Return a copy of the column with trailing characters removed. The set argument is a string specifying the set of characters to be removed. If omitted, the set argument defaults to removing whitespace. :param str set: The set of characters to rstrip from strings in column :returns: a new H2OFrame with the same shape as the original frame and having all its values trimmed from the right (equivalent of Python's ``str.rstrip()``). """ # work w/ None; parity with python rstrip if set is None: set = " " fr = H2OFrame._expr(expr=ExprNode("rstrip", self, set)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def entropy(self): """ For each string compute its Shannon entropy, if the string is empty the entropy is 0. :returns: an H2OFrame of Shannon entropies. """ fr = H2OFrame._expr(expr=ExprNode("entropy", self)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def num_valid_substrings(self, path_to_words): """ For each string, find the count of all possible substrings with 2 characters or more that are contained in the line-separated text file whose path is given. :param str path_to_words: Path to file that contains a line-separated list of strings considered valid. :returns: An H2OFrame with the number of substrings that are contained in the given word list. """ assert_is_type(path_to_words, str) fr = H2OFrame._expr(expr=ExprNode("num_valid_substrings", self, path_to_words)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def nchar(self): """ Count the length of each string in a single-column H2OFrame of string type. :returns: A single-column H2OFrame containing the per-row character count. """ return H2OFrame._expr(expr=ExprNode("strlen", self)) def table(self, data2=None, dense=True): """ Compute the counts of values appearing in a column, or co-occurence counts between two columns. :param H2OFrame data2: An optional single column to aggregate counts by. :param bool dense: If True (default) then use dense representation, which lists only non-zero counts, 1 combination per row. Set to False to expand counts across all combinations. :returns: H2OFrame of the counts at each combination of factor levels """ return H2OFrame._expr(expr=ExprNode("table", self, data2, dense)) if data2 is not None else H2OFrame._expr( expr=ExprNode("table", self, dense)) def hist(self, breaks="sturges", plot=True, **kwargs): """ Compute a histogram over a numeric column. :param breaks: Can be one of ``"sturges"``, ``"rice"``, ``"sqrt"``, ``"doane"``, ``"fd"``, ``"scott"``; or a single number for the number of breaks; or a list containing the split points, e.g: ``[-50, 213.2123, 9324834]``. 
If breaks is "fd", the MAD is used over the IQR in computing bin width. :param bool plot: If True (default), then a plot will be generated using ``matplotlib``. :returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true, mids, and density; otherwise this method draws a plot and returns nothing. """ server = kwargs.pop("server") if "server" in kwargs else False assert_is_type(breaks, int, [numeric], Enum("sturges", "rice", "sqrt", "doane", "fd", "scott")) assert_is_type(plot, bool) assert_is_type(server, bool) if kwargs: raise H2OValueError("Unknown parameters to hist(): %r" % kwargs) hist = H2OFrame._expr(expr=ExprNode("hist", self, breaks))._frame() if plot: try: import matplotlib if server: matplotlib.use("Agg", warn=False) import matplotlib.pyplot as plt except ImportError: print("ERROR: matplotlib is required to make the histogram plot. " "Set `plot` to False, if a plot is not desired.") return hist["widths"] = hist["breaks"].difflag1() # [2:] because we're removing the title and the first row (which consists of NaNs) lefts = [float(c[0]) for c in h2o.as_list(hist["breaks"], use_pandas=False)[2:]] widths = [float(c[0]) for c in h2o.as_list(hist["widths"], use_pandas=False)[2:]] counts = [float(c[0]) for c in h2o.as_list(hist["counts"], use_pandas=False)[2:]] plt.xlabel(self.names[0]) plt.ylabel("Frequency") plt.title("Histogram of %s" % self.names[0]) plt.bar(left=lefts, width=widths, height=counts, bottom=0) if not server: plt.show() else: hist["density"] = hist["counts"] / (hist["breaks"].difflag1() * hist["counts"].sum()) return hist def isax(self, num_words, max_cardinality, optimize_card=False, **kwargs): """ Compute the iSAX index for DataFrame which is assumed to be numeric time series data. References: - http://www.cs.ucr.edu/~eamonn/SAX.pdf - http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf :param int num_words: Number of iSAX words for the timeseries, i.e. granularity along the time series :param int max_cardinality: Maximum cardinality of the iSAX word. Each word can have less than the max :param bool optimized_card: An optimization flag that will find the max cardinality regardless of what is passed in for ``max_cardinality``. :returns: An H2OFrame with the name of time series, string representation of iSAX word, followed by binary representation. """ if num_words <= 0: raise H2OValueError("num_words must be greater than 0") if max_cardinality <= 0: raise H2OValueError("max_cardinality must be greater than 0") return H2OFrame._expr(expr=ExprNode("isax", self, num_words, max_cardinality, optimize_card)) def pivot(self, index, column, value): """ Pivot the frame designated by the three columns: index, column, and value. Index and column should be of type enum, int, or time. 
For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame :param index: Index is a column that will be the row label :param column: The labels for the columns in the pivoted Frame :param value: The column of values for the given index and column label :returns: """ assert_is_type(index, str) assert_is_type(column, str) assert_is_type(value, str) col_names = self.names if index not in col_names: raise H2OValueError("Index not in H2OFrame") if column not in col_names: raise H2OValueError("Column not in H2OFrame") if value not in col_names: raise H2OValueError("Value column not in H2OFrame") if self.type(column) not in ["enum","time","int"]: raise H2OValueError("'column' argument is not type enum, time or int") if self.type(index) not in ["enum","time","int"]: raise H2OValueError("'index' argument is not type enum, time or int") return H2OFrame._expr(expr=ExprNode("pivot",self,index,column,value)) def topNBottomN(self, column=0, nPercent=10, grabTopN=-1): """ Given a column name or one column index, a percent N, this function will return the top or bottom N% of the values of the column of a frame. The column must be a numerical column. :param column: a string for column name or an integer index :param nPercent: a top or bottom percentage of the column values to return :param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent :returns: a H2OFrame containing two columns. The first column contains the original row indices where the top/bottom values are extracted from. The second column contains the values. """ assert (nPercent >= 0) and (nPercent<=100.0), "nPercent must be between 0.0 and 100.0" assert round(nPercent*0.01*self.nrows)>0, "Increase nPercent. Current value will result in top 0 row." if isinstance(column, int): if (column < 0) or (column>=self.ncols): raise H2OValueError("Invalid column index H2OFrame") else: colIndex = column else: # column is a column name col_names = self.names if column not in col_names: raise H2OValueError("Column name not found H2OFrame") else: colIndex = col_names.index(column) if not(self[colIndex].isnumeric()): raise H2OValueError("Wrong column type! Selected column must be numeric.") return H2OFrame._expr(expr=ExprNode("topn", self, colIndex, nPercent, grabTopN)) def topN(self, column=0, nPercent=10): """ Given a column name or one column index, a percent N, this function will return the top N% of the values of the column of a frame. The column must be a numerical column. :param column: a string for column name or an integer index :param nPercent: a top percentage of the column values to return :returns: a H2OFrame containing two columns. The first column contains the original row indices where the top values are extracted from. The second column contains the top nPercent values. """ return self.topNBottomN(column, nPercent, 1) def bottomN(self, column=0, nPercent=10): """ Given a column name or one column index, a percent N, this function will return the bottom N% of the values of the column of a frame. The column must be a numerical column. :param column: a string for column name or an integer index :param nPercent: a bottom percentage of the column values to return :returns: a H2OFrame containing two columns. The first column contains the original row indices where the bottom values are extracted from. The second column contains the bottom nPercent values. 
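:examples:
A minimal sketch; the inline data is illustrative:
>>> fr = h2o.H2OFrame({"x": list(range(100))})
>>> bottom5 = fr.bottomN("x", 5)   # original row indices plus the smallest 5% of values in "x"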
""" return self.topNBottomN(column, nPercent, -1) def sub(self, pattern, replacement, ignore_case=False): """ Substitute the first occurrence of pattern in a string with replacement. :param str pattern: A regular expression. :param str replacement: A replacement string. :param bool ignore_case: If True then pattern will match case-insensitively. :returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``. """ return H2OFrame._expr(expr=ExprNode("replacefirst", self, pattern, replacement, ignore_case)) def gsub(self, pattern, replacement, ignore_case=False): """ Globally substitute occurrences of pattern in a string with replacement. :param str pattern: A regular expression. :param str replacement: A replacement string. :param bool ignore_case: If True then pattern will match case-insensitively. :returns: an H2OFrame with all occurrences of ``pattern`` in all values replaced with ``replacement``. """ return H2OFrame._expr(expr=ExprNode("replaceall", self, pattern, replacement, ignore_case)) def interaction(self, factors, pairwise, max_factors, min_occurrence, destination_frame=None): """ Categorical Interaction Feature Creation in H2O. Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by the user. :param factors: list of factor columns (either indices or column names). :param bool pairwise: Whether to create pairwise interactions between factors (otherwise create one higher-order interaction). Only applicable if there are 3 or more factors. :param int max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all factor will be made). :param int min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms. :param str destination_frame: (internal) string indicating the key for the frame created. :returns: an H2OFrame """ return h2o.interaction(data=self, factors=factors, pairwise=pairwise, max_factors=max_factors, min_occurrence=min_occurrence, destination_frame=destination_frame) def toupper(self): """ Translate characters from lower to upper case for a particular column. :returns: new H2OFrame with all strings in the current frame converted to the uppercase. """ return H2OFrame._expr(expr=ExprNode("toupper", self), cache=self._ex._cache) def grep(self,pattern, ignore_case = False, invert = False, output_logical = False): """ Searches for matches to argument `pattern` within each element of a string column. Default behavior is to return indices of the elements matching the pattern. Parameter `output_logical` can be used to return a logical vector indicating if the element matches the pattern (1) or not (0). :param str pattern: A character string containing a regular expression. :param bool ignore_case: If True, then case is ignored during matching. :param bool invert: If True, then identify elements that do not match the pattern. :param bool output_logical: If True, then return logical vector of indicators instead of list of matching positions :return: H2OFrame holding the matching positions or a logical list if `output_logical` is enabled. """ return H2OFrame._expr(expr=ExprNode("grep", self, pattern, ignore_case, invert, output_logical)) def tolower(self): """ Translate characters from upper to lower case for a particular column. :returns: new H2OFrame with all strings in the current frame converted to the lowercase. 
""" return H2OFrame._expr(expr=ExprNode("tolower", self), cache=self._ex._cache) def rep_len(self, length_out): """ Create a new frame replicating the current frame. If the source frame has a single column, then the new frame will be replicating rows and its dimensions will be ``length_out x 1``. However if the source frame has more than 1 column, then then new frame will be replicating data in columnwise direction, and its dimensions will be ``nrows x length_out``, where ``nrows`` is the number of rows in the source frame. Also note that if ``length_out`` is smaller than the corresponding dimension of the source frame, then the new frame will actually be a truncated version of the original. :param int length_out: Number of columns (rows) of the resulting H2OFrame :returns: new H2OFrame with repeated data from the current frame. """ return H2OFrame._expr(expr=ExprNode("rep_len", self, length_out)) def scale(self, center=True, scale=True): """ Center and/or scale the columns of the current frame. :param center: If True, then demean the data. If False, no shifting is done. If ``center`` is a list of numbers then shift each column by the corresponding amount. :param scale: If True, then scale the data by each column's standard deviation. If False, no scaling is done. If ``scale`` is a list of numbers, then scale each column by the requested amount. :returns: an H2OFrame with scaled values from the current frame. """ return H2OFrame._expr(expr=ExprNode("scale", self, center, scale), cache=self._ex._cache) def signif(self, digits=6): """ Round doubles/floats to the given number of significant digits. :param int digits: Number of significant digits to retain. :returns: new H2OFrame with rounded values from the original frame. """ return H2OFrame._expr(expr=ExprNode("signif", self, digits), cache=self._ex._cache) def round(self, digits=0): """ Round doubles/floats to the given number of decimal places. :param int digits: The number of decimal places to retain. Rounding to a negative number of decimal places is not supported. For rounding we use the "round half to even" mode (IEC 60559 standard), so that ``round(2.5) = 2`` and ``round(3.5) = 4``. :returns: new H2OFrame with rounded values from the original frame. """ return H2OFrame._expr(expr=ExprNode("round", self, digits), cache=self._ex._cache) def asnumeric(self): """Return new frame with all columns converted to numeric.""" fr = H2OFrame._expr(expr=ExprNode("as.numeric", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "real" for k in fr._ex._cache.types.keys()} return fr def ascharacter(self): """ Convert all columns in the frame into strings. :returns: new H2OFrame with columns of "string" type. """ fr = H2OFrame._expr(expr=ExprNode("as.character", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "string" for k in fr._ex._cache.types.keys()} return fr def na_omit(self): """ Remove rows with NAs from the H2OFrame. :returns: new H2OFrame with all rows from the original frame containing any NAs removed. """ fr = H2OFrame._expr(expr=ExprNode("na.omit", self), cache=self._ex._cache) fr._ex._cache.nrows = -1 return fr def difflag1(self): """ Conduct a diff-1 transform on a numeric frame column. :returns: an H2OFrame where each element is equal to the corresponding element in the source frame minus the previous-row element in the same frame. 
""" fr = H2OFrame._expr(expr=ExprNode("difflag1", self), cache=self._ex._cache) return fr def isna(self): """ For each element in an H2OFrame, determine if it is NA or not. :returns: an H2OFrame of 1s and 0s, where 1s mean the values were NAs. """ fr = H2OFrame._expr(expr=ExprNode("is.na", self)) fr._ex._cache.nrows = self._ex._cache.nrows fr._ex._cache.ncols = self._ex._cache.ncols if self._ex._cache.names: fr._ex._cache.names = ["isNA(%s)" % n for n in self._ex._cache.names] fr._ex._cache.types = {"isNA(%s)" % n: "int" for n in self._ex._cache.names} return fr def year(self): """ Extract the "year" part from a date column. :returns: a single-column H2OFrame containing the "year" part from the source frame. """ fr = H2OFrame._expr(expr=ExprNode("year", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def month(self): """ Extract the "month" part from a date column. :returns: a single-column H2OFrame containing the "month" part from the source frame. """ fr = H2OFrame._expr(expr=ExprNode("month", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def week(self): """ Extract the "week" part from a date column. :returns: a single-column H2OFrame containing the "week" part from the source frame. """ fr = H2OFrame._expr(expr=ExprNode("week", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def day(self): """ Extract the "day" part from a date column. :returns: a single-column H2OFrame containing the "day" part from the source frame. """ fr = H2OFrame._expr(expr=ExprNode("day", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def dayOfWeek(self): """ Extract the "day-of-week" part from a date column. :returns: a single-column H2OFrame containing the "day-of-week" part from the source frame. """ fr = H2OFrame._expr(expr=ExprNode("dayOfWeek", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def hour(self): """ Extract the "hour-of-day" part from a date column. :returns: a single-column H2OFrame containing the "hour-of-day" part from the source frame. """ fr = H2OFrame._expr(expr=ExprNode("hour", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def minute(self): """ Extract the "minute" part from a date column. :returns: a single-column H2OFrame containing the "minute" part from the source frame. """ fr = H2OFrame._expr(expr=ExprNode("minute", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def second(self): """ Extract the "second" part from a date column. :returns: a single-column H2OFrame containing the "second" part from the source frame. """ fr = H2OFrame._expr(expr=ExprNode("second", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def runif(self, seed=None): """ Generate a column of random numbers drawn from a uniform distribution [0,1) and having the same data layout as the source frame. :param int seed: seed for the random number generator. 
:returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1).
        """
        fr = H2OFrame._expr(expr=ExprNode("h2o.runif", self, -1 if seed is None else seed))
        fr._ex._cache.ncols = 1
        fr._ex._cache.nrows = self.nrow
        return fr

    def stratified_split(self, test_frac=0.2, seed=-1):
        """
        Construct a column that can be used to perform a random stratified split.

        :param float test_frac: The fraction of rows that will belong to the "test" split.
        :param int seed: The seed for the random number generator.

        :returns: an H2OFrame having a single categorical column with two levels: ``"train"`` and ``"test"``.

        :examples:
            >>> stratsplit = df["y"].stratified_split(test_frac=0.3, seed=12349453)
            >>> train = df[stratsplit=="train"]
            >>> test = df[stratsplit=="test"]
            >>>
            >>> # check that the distributions among the initial frame, and the
            >>> # train/test frames match
            >>> df["y"].table()["Count"] / df["y"].table()["Count"].sum()
            >>> train["y"].table()["Count"] / train["y"].table()["Count"].sum()
            >>> test["y"].table()["Count"] / test["y"].table()["Count"].sum()
        """
        return H2OFrame._expr(expr=ExprNode('h2o.random_stratified_split', self, test_frac, seed))

    def match(self, table, nomatch=0):
        """
        Create a column of the positions of the first matches of the frame's values in ``table``.

        Only applicable to single-column categorical/string frames.

        :param List table: the list of items to match against
        :param int nomatch: value that should be returned when there is no match.
        :returns: a new H2OFrame containing, for each cell of the source frame, the position in ``table``
            of the first match for that cell's value (or ``nomatch`` when the value does not occur in
            ``table``).
        """
        return H2OFrame._expr(expr=ExprNode("match", self, table, nomatch, None))

    def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):
        """
        Cut a numeric vector into categorical "buckets".

        This method is only applicable to a single-column numeric frame.

        :param List[float] breaks: The cut points in the numeric vector.
        :param List[str] labels: Labels for categorical levels produced. Defaults to set notation of
            intervals defined by the breaks.
        :param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter
            is True, then the interval becomes ``[lo, hi]``.
        :param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.
        :param int dig_lab: Number of digits following the decimal point to consider.

        :returns: Single-column H2OFrame of categorical data.
        """
        assert_is_type(breaks, [numeric])
        if self.ncols != 1: raise H2OValueError("Single-column frame is expected")
        if self.types[self.names[0]] not in {"int", "real"}: raise H2OValueError("A numeric column is expected")
        fr = H2OFrame._expr(expr=ExprNode("cut", self, breaks, labels, include_lowest, right, dig_lab),
                            cache=self._ex._cache)
        fr._ex._cache.types = {k: "enum" for k in self.names}
        return fr

    def which(self):
        """
        Compose the list of row indices for which the frame contains non-zero values.

        Only applicable to integer single-column frames.
        Equivalent to comprehension ``[index for index, value in enumerate(self) if value]``.

        :returns: a new single-column H2OFrame containing indices of those rows in the original frame
            that contained non-zero values.
        """
        return H2OFrame._expr(expr=ExprNode("which", self))

    def idxmax(self, skipna=True, axis=0):
        """
        Get the index of the max value in a column or row.

        :param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence
            of NAs renders the entire result NA.
        :param int axis: Direction of finding the max index.
If 0 (default), then the max index is searched columnwise, and the result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame. :returns: either a list of max index values per-column or an H2OFrame containing max index values per-row from the original frame. """ return H2OFrame._expr(expr=ExprNode("which.max", self, skipna, axis)) def idxmin(self,skipna=True, axis=0): """ Get the index of the min value in a column or row :param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence of NAs renders the entire result NA. :param int axis: Direction of finding the min index. If 0 (default), then the min index is searched columnwise, and the result is a frame with 1 row and number of columns as in the original frame. If 1, then the min index is searched rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame. :returns: either a list of min index values per-column or an H2OFrame containing min index values per-row from the original frame. """ return H2OFrame._expr(expr=ExprNode("which.min", self, skipna, axis)) def ifelse(self, yes, no): """ Equivalent to ``[y if t else n for t,y,n in zip(self,yes,no)]``. Based on the booleans in the test vector, the output has the values of the yes and no vectors interleaved (or merged together). All Frames must have the same row count. Single column frames are broadened to match wider Frames. Scalars are allowed, and are also broadened to match wider frames. :param yes: Frame to use if ``test`` is true; may be a scalar or single column :param no: Frame to use if ``test`` is false; may be a scalar or single column :returns: an H2OFrame of the merged yes/no frames/scalars according to the test input frame. """ return H2OFrame._expr(expr=ExprNode("ifelse", self, yes, no)) def apply(self, fun=None, axis=0): """ Apply a lambda expression to an H2OFrame. :param fun: a lambda expression to be applied per row or per column. :param axis: 0 = apply to each column; 1 = apply to each row :returns: a new H2OFrame with the results of applying ``fun`` to the current frame. """ from .astfun import _bytecode_decompile_lambda assert_is_type(axis, 0, 1) assert_is_type(fun, FunctionType) assert_satisfies(fun, fun.__name__ == "<lambda>") res = _bytecode_decompile_lambda(fun.__code__) return H2OFrame._expr(expr=ExprNode("apply", self, 1 + (axis == 0), *res)) #------------------------------------------------------------------------------------------------------------------- # Synonyms + Deprecated #------------------------------------------------------------------------------------------------------------------- # Here we have all methods that are provided as alternative names to some other names defined above. This also # includes methods that we rename as part of the deprecation process (but keeping the old name for the sake of # backward compatibility). We gather them all down here to have a slightly cleaner code. @staticmethod def mktime(year=1970, month=0, day=0, hour=0, minute=0, second=0, msec=0): """ Deprecated, use :func:`moment` instead. This function was left for backward-compatibility purposes only. It is not very stable, and counterintuitively uses 0-based months and days, so "January 4th, 2001" should be entered as ``mktime(2001, 0, 3)``. 
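
        :examples: (sketch contrasting the 0-based convention with :meth:`moment`)

        >>> ts_old = H2OFrame.mktime(2001, 0, 3)   # January 4th, 2001 (0-based month and day)
        >>> ts_new = H2OFrame.moment(2001, 1, 4)   # same timestamp with natural 1-based values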
""" return H2OFrame._expr(ExprNode("mktime", year, month, day, hour, minute, second, msec)) @property def columns(self): """Same as ``self.names``.""" return self.names @columns.setter def columns(self, value): self.set_names(value) @property def col_names(self): """Same as ``self.names``.""" return self.names @col_names.setter def col_names(self, value): self.set_names(value) def __len__(self): """Number of rows in the dataframe, same as ``self.nrows``.""" return self.nrows @property def nrow(self): """Same as ``self.nrows``.""" return self.nrows @property def ncol(self): """Same as ``self.ncols``.""" return self.ncols @property def dim(self): """Same as ``list(self.shape)``.""" return [self.nrow, self.ncol] #@property #def frame_id(self): # """Same as ``frame.id``.""" # return self.id #@frame_id.setter #def frame_id(self, value): # self.id = value @staticmethod def from_python(python_obj, destination_frame=None, header=0, separator=",", column_names=None, column_types=None, na_strings=None): """[DEPRECATED] Use constructor ``H2OFrame()`` instead.""" return H2OFrame(python_obj, destination_frame, header, separator, column_names, column_types, na_strings) def ischaracter(self): """[DEPRECATED] Use ``frame.isstring()``.""" return self.isstring() #----------------------------------------------------------------------------------------------------------------------- # Helpers #----------------------------------------------------------------------------------------------------------------------- def _getValidCols(by_idx, fr): # so user can input names of the columns as well is idx num tmp = [] for i in by_idx: if type(i) == str: if i not in fr.names: raise H2OValueError("Column: " + i + " not in frame.") tmp.append(fr.names.index(i)) elif type(i) != int: raise H2OValueError("Join on column: " + i + " not of type int") else: tmp.append(i) return list(set(tmp)) def _binop(lhs, op, rhs, rtype=None): assert_is_type(lhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame) assert_is_type(rhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame) if isinstance(lhs, H2OFrame) and isinstance(rhs, H2OFrame) and lhs._is_frame and rhs._is_frame: lrows, lcols = lhs.shape rrows, rcols = rhs.shape compatible = ((lcols == rcols and lrows == rrows) or (lcols == 1 and lrows == rrows) or (lcols == 1 and lrows == 1) or (rcols == 1 and lrows == rrows) or (rcols == 1 and rrows == 1) or (lrows == 1 and lcols == rcols) or (rrows == 1 and lcols == rcols) ) if not compatible: raise H2OValueError("Attempting to operate on incompatible frames: (%d x %d) and (%d x %d)" % (lrows, lcols, rrows, rcols)) if is_type(lhs, pandas_timestamp, numpy_datetime, datetime.date): lhs = H2OFrame.moment(date=lhs) if is_type(rhs, pandas_timestamp, numpy_datetime, datetime.date): rhs = H2OFrame.moment(date=rhs) cache = lhs._ex._cache if isinstance(lhs, H2OFrame) else rhs._ex._cache res = H2OFrame._expr(expr=ExprNode(op, lhs, rhs), cache=cache) if rtype is not None and res._ex._cache._names is not None: res._ex._cache._types = {name: rtype for name in res._ex._cache._names} return res
43.095527
135
0.609086
from __future__ import absolute_import, division, print_function, unicode_literals import csv import datetime import functools import os import sys import tempfile import traceback import warnings from io import StringIO from types import FunctionType import requests import h2o from h2o.display import H2ODisplay from h2o.exceptions import H2OTypeError, H2OValueError from h2o.expr import ExprNode from h2o.group_by import GroupBy from h2o.job import H2OJob from h2o.utils.compatibility import * from h2o.utils.compatibility import viewitems, viewvalues from h2o.utils.config import get_config_value from h2o.utils.shared_utils import (_handle_numpy_array, _handle_pandas_data_frame, _handle_python_dicts, _handle_python_lists, _is_list, _is_str_list, _py_tmp_key, _quoted, can_use_pandas, quote, normalize_slice, slice_is_normalized, check_frame_id) from h2o.utils.typechecks import (assert_is_type, assert_satisfies, Enum, I, is_type, numeric, numpy_ndarray, numpy_datetime, pandas_dataframe, pandas_timestamp, scipy_sparse, U) __all__ = ("H2OFrame", ) class H2OFrame(object): def __init__(self, python_obj=None, destination_frame=None, header=0, separator=",", column_names=None, column_types=None, na_strings=None): coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric", "categorical", "factor", "enum", "time") assert_is_type(python_obj, None, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse) assert_is_type(destination_frame, None, str) assert_is_type(header, -1, 0, 1) assert_is_type(separator, I(str, lambda s: len(s) == 1)) assert_is_type(column_names, None, [str]) assert_is_type(column_types, None, [coltype], {str: coltype}) assert_is_type(na_strings, None, [str], [[str]], {str: [str]}) check_frame_id(destination_frame) self._ex = ExprNode() self._ex._children = None self._is_frame = True if python_obj is not None: self._upload_python_object(python_obj, destination_frame, header, separator, column_names, column_types, na_strings) @staticmethod def _expr(expr, cache=None): fr = H2OFrame() fr._ex = expr if cache is not None: fr._ex._cache.fill_from(cache) return fr def _upload_python_object(self, python_obj, destination_frame=None, header=0, separator=",", column_names=None, column_types=None, na_strings=None): assert_is_type(python_obj, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse) if is_type(python_obj, scipy_sparse): self._upload_sparse_matrix(python_obj, destination_frame=destination_frame) return processor = (_handle_pandas_data_frame if is_type(python_obj, pandas_dataframe) else _handle_numpy_array if is_type(python_obj, numpy_ndarray) else _handle_python_dicts if is_type(python_obj, dict) else _handle_python_lists) col_header, data_to_write = processor(python_obj, header) if col_header is None or data_to_write is None: raise H2OValueError("No data to write") if not column_names: column_names = col_header tmp_handle, tmp_path = tempfile.mkstemp(suffix=".csv") tmp_file = os.fdopen(tmp_handle, 'w') csv_writer = csv.writer(tmp_file, dialect="excel", quoting=csv.QUOTE_NONNUMERIC) csv_writer.writerow(column_names) if data_to_write and isinstance(data_to_write[0], dict): for row in data_to_write: csv_writer.writerow([row.get(k, None) for k in col_header]) else: csv_writer.writerows(data_to_write) tmp_file.close() self._upload_parse(tmp_path, destination_frame, 1, separator, column_names, column_types, na_strings) os.remove(tmp_path) def _upload_sparse_matrix(self, matrix, destination_frame=None): import scipy.sparse as sp if not 
sp.issparse(matrix): raise H2OValueError("A sparse matrix expected, got %s" % type(matrix)) tmp_handle, tmp_path = tempfile.mkstemp(suffix=".svmlight") out = os.fdopen(tmp_handle, "wt") if destination_frame is None: destination_frame = _py_tmp_key(h2o.connection().session_id) data = zip(*sp.find(matrix)) if not isinstance(data, list): data = list(data) data.sort() idata = 0 for irow in range(matrix.shape[0]): if idata < len(data) and data[idata][0] == irow and data[idata][1] == 0: y = data[idata][2] idata += 1 else: y = 0 out.write(str(y)) while idata < len(data) and data[idata][0] == irow: out.write(" ") out.write(str(data[idata][1])) out.write(":") out.write(str(data[idata][2])) idata += 1 out.write("\n") out.close() ret = h2o.api("POST /3/PostFile", filename=tmp_path) os.remove(tmp_path) rawkey = ret["destination_frame"] p = {"source_frames": [rawkey], "destination_frame": destination_frame} H2OJob(h2o.api("POST /3/ParseSVMLight", data=p), "Parse").poll() self._ex._cache._id = destination_frame self._ex._cache.fill() @staticmethod def get_frame(frame_id): fr = H2OFrame() fr._ex._cache._id = frame_id try: fr._ex._cache.fill() except EnvironmentError: return None return fr def refresh(self): self._ex._cache.flush() self._frame(fill_cache=True) @property def names(self): if not self._ex._cache.names_valid(): self._ex._cache.flush() self._frame(fill_cache=True) return list(self._ex._cache.names) @names.setter def names(self, value): self.set_names(value) @property def nrows(self): if not self._ex._cache.nrows_valid(): self._ex._cache.flush() self._frame(fill_cache=True) return self._ex._cache.nrows @property def ncols(self): if not self._ex._cache.ncols_valid(): self._ex._cache.flush() self._frame(fill_cache=True) return self._ex._cache.ncols @property def shape(self): return self.nrows, self.ncols @property def types(self): if not self._ex._cache.types_valid(): self._ex._cache.flush() self._frame(fill_cache=True) return dict(self._ex._cache.types) @property def frame_id(self): return self._frame()._ex._cache._id @frame_id.setter def frame_id(self, newid): check_frame_id(newid) if self._ex._cache._id is None: h2o.assign(self, newid) else: oldname = self.frame_id self._ex._cache._id = newid h2o.rapids("(rename \"{}\" \"{}\")".format(oldname, newid)) def type(self, col): assert_is_type(col, int, str) if not self._ex._cache.types_valid() or not self._ex._cache.names_valid(): self._ex._cache.flush() self._frame(fill_cache=True) types = self._ex._cache.types if is_type(col, str): if col in types: return types[col] else: names = self._ex._cache.names if -len(names) <= col < len(names): return types[names[col]] raise H2OValueError("Column '%r' does not exist in the frame" % col) def _import_parse(self, path, pattern, destination_frame, header, separator, column_names, column_types, na_strings): if is_type(path, str) and "://" not in path: path = os.path.abspath(path) rawkey = h2o.lazy_import(path, pattern) self._parse(rawkey, destination_frame, header, separator, column_names, column_types, na_strings) return self def _upload_parse(self, path, destination_frame, header, sep, column_names, column_types, na_strings): ret = h2o.api("POST /3/PostFile", filename=path) rawkey = ret["destination_frame"] self._parse(rawkey, destination_frame, header, sep, column_names, column_types, na_strings) return self def _parse(self, rawkey, destination_frame="", header=None, separator=None, column_names=None, column_types=None, na_strings=None): setup = h2o.parse_setup(rawkey, destination_frame, header, separator, 
column_names, column_types, na_strings) return self._parse_raw(setup) def _parse_raw(self, setup): p = {"destination_frame": None, "parse_type": None, "separator": None, "single_quotes": None, "check_header": None, "number_columns": None, "chunk_size": None, "delete_on_done": True, "blocking": False, "column_types": None, } if setup["column_names"]: p["column_names"] = None if setup["na_strings"]: p["na_strings"] = None p.update({k: v for k, v in viewitems(setup) if k in p}) p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']] H2OJob(h2o.api("POST /3/Parse", data=p), "Parse").poll() self._ex._cache._id = p["destination_frame"] self._ex._cache.fill() def filter_na_cols(self, frac=0.2): return ExprNode("filterNACols", self, frac)._eager_scalar() def columns_by_type(self, coltype="numeric"): assert_is_type(coltype, "numeric", "categorical", "string", "time", "uuid", "bad") assert_is_type(self, H2OFrame) return ExprNode("columnsByType", self, coltype)._eager_scalar() def __iter__(self): return (self[i] for i in range(self.ncol)) def __unicode__(self): if sys.gettrace() is None: if self._ex is None: return "This H2OFrame has been removed." table = self._frame(fill_cache=True)._ex._cache._tabulate("simple", False) nrows = "%d %s" % (self.nrow, "row" if self.nrow == 1 else "rows") ncols = "%d %s" % (self.ncol, "column" if self.ncol == 1 else "columns") return "%s\n\n[%s x %s]" % (table, nrows, ncols) return "" def __repr__(self): if sys.gettrace() is None: stk = traceback.extract_stack() if not ("IPython" in stk[-2][0] and "info" == stk[-2][2]): self.show() return "" def show(self, use_pandas=False): if self._ex is None: print("This H2OFrame has been removed.") return if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill() if H2ODisplay._in_ipy(): import IPython.display if use_pandas and can_use_pandas(): IPython.display.display(self.head().as_data_frame(fill_cache=True)) else: IPython.display.display_html(self._ex._cache._tabulate("html", False), raw=True) else: if use_pandas and can_use_pandas(): print(self.head().as_data_frame(fill_cache=True)) else: s = self.__unicode__() stk = traceback.extract_stack() if "IPython" in stk[-3][0]: s = "\n%s" % s try: print(s) except UnicodeEncodeError: print(s.encode("ascii", "replace")) def summary(self, return_data=False): if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill() if not return_data: if H2ODisplay._in_ipy(): import IPython.display IPython.display.display_html(self._ex._cache._tabulate("html", True), raw=True) else: print(self._ex._cache._tabulate("simple", True)) else: return self._ex._cache._data def describe(self, chunk_summary=False): res = h2o.api("GET /3/Frames/%s" % self.frame_id, data={"row_count": 10})["frames"][0] self._ex._cache._fill_data(res) print("Rows:{}".format(self.nrow)) print("Cols:{}".format(self.ncol)) if chunk_summary: res["chunk_summary"].show() res["distribution_summary"].show() print("\n") self.summary() def _frame(self, rows=10, fill_cache=False): self._ex._eager_frame() if fill_cache: self._ex._cache.fill(rows=rows) return self def head(self, rows=10, cols=200): assert_is_type(rows, int) assert_is_type(cols, int) nrows = min(self.nrows, rows) ncols = min(self.ncols, cols) newdt = self[:nrows, :ncols] return newdt._frame(rows=nrows, fill_cache=True) def tail(self, rows=10, cols=200): assert_is_type(rows, int) assert_is_type(cols, int) nrows = min(self.nrows, rows) ncols = min(self.ncols, cols) start_idx = self.nrows - nrows newdt = self[start_idx:start_idx + nrows, 
:ncols] return newdt._frame(rows=nrows, fill_cache=True) def logical_negation(self): return H2OFrame._expr(expr=ExprNode("not", self), cache=self._ex._cache) def _unop(self, op, rtype="real"): if self._is_frame: for cname, ctype in self.types.items(): if ctype not in {"int", "real", "bool"}: raise H2OValueError("Function %s cannot be applied to %s column '%s'" % (op, ctype, cname)) ret = H2OFrame._expr(expr=ExprNode(op, self), cache=self._ex._cache) ret._ex._cache._names = ["%s(%s)" % (op, name) for name in self._ex._cache._names] ret._ex._cache._types = {name: rtype for name in ret._ex._cache._names} return ret def __add__(self, rhs): return _binop(self, "+", rhs) def __sub__(self, rhs): return _binop(self, "-", rhs) def __mul__(self, rhs): return _binop(self, "*", rhs) def __div__(self, rhs): return _binop(self, "/", rhs) def __truediv__(self, rhs): return _binop(self, "/", rhs) def __floordiv__(self, rhs): return _binop(self, "intDiv", rhs) def __mod__(self, rhs): return _binop(self, "%", rhs) def __or__(self, rhs): return _binop(self, "|", rhs, rtype="bool") def __and__(self, rhs): return _binop(self, "&", rhs, rtype="bool") def __ge__(self, rhs): return _binop(self, ">=", rhs, rtype="bool") def __gt__(self, rhs): return _binop(self, ">", rhs, rtype="bool") def __le__(self, rhs): return _binop(self, "<=", rhs, rtype="bool") def __lt__(self, rhs): return _binop(self, "<", rhs, rtype="bool") def __eq__(self, rhs): if rhs is None: rhs = float("nan") return _binop(self, "==", rhs, rtype="bool") def __ne__(self, rhs): if rhs is None: rhs = float("nan") return _binop(self, "!=", rhs, rtype="bool") def __pow__(self, rhs): return _binop(self, "^", rhs) def __contains__(self, lhs): return all((t == self).any() for t in lhs) if _is_list(lhs) else (lhs == self).any() def __rmod__(self, lhs): return _binop(lhs, "%", self) def __radd__(self, lhs): return _binop(lhs, "+", self) def __rsub__(self, lhs): return _binop(lhs, "-", self) def __rand__(self, lhs): return _binop(lhs, "&", self, rtype="bool") def __ror__(self, lhs): return _binop(lhs, "|", self, rtype="bool") def __rtruediv__(self, lhs): return _binop(lhs, "/", self) def __rdiv__(self, lhs): return _binop(lhs, "/", self) def __rfloordiv__(self, lhs): return _binop(lhs, "intDiv", self, rtype="int") def __rmul__(self, lhs): return _binop(lhs, "*", self) def __rpow__(self, lhs): return _binop(lhs, "^", self) def __abs__(self): return self._unop("abs") def __invert__(self): return self._unop("!!", rtype="bool") def __nonzero__(self): if self.nrows > 1 or self.ncols > 1: raise H2OValueError( 'This operation is not supported on an H2OFrame. Try using parentheses. 
' 'Did you mean & (logical and), | (logical or), or ~ (logical not)?') else: return self.__len__() def __int__(self): return int(self.flatten()) def __float__(self): return float(self.flatten()) def flatten(self): if self.shape != (1, 1): raise H2OValueError("Not a 1x1 Frame") return ExprNode("flatten", self)._eager_scalar() def getrow(self): if self.nrows != 1: raise H2OValueError("This method can only be applied to single-row frames") return ExprNode("getrow", self)._eager_scalar() def mult(self, matrix): if self.ncols != matrix.nrows: raise H2OValueError("Matrix is not compatible for multiplication with the current frame") return H2OFrame._expr(expr=ExprNode("x", self, matrix)) def cos(self): return self._unop("cos") def sin(self): return self._unop("sin") def tan(self): return self._unop("tan") def acos(self): return self._unop("acos") def asin(self): return self._unop("asin") def atan(self): return self._unop("atan") def cosh(self): return self._unop("cosh") def sinh(self): return self._unop("sinh") def tanh(self): return self._unop("tanh") def acosh(self): return self._unop("acosh") def asinh(self): return self._unop("asinh") def atanh(self): return self._unop("atanh") def cospi(self): return self._unop("cospi") def sinpi(self): return self._unop("sinpi") def tanpi(self): return self._unop("tanpi") def abs(self): return self._unop("abs") def sign(self): return self._unop("sign", rtype="int") def sqrt(self): return self._unop("sqrt") def trunc(self): return self._unop("trunc", rtype="int") def ceil(self): return self._unop("ceiling", rtype="int") def floor(self): return self._unop("floor", rtype="int") def log(self): return self._unop("log") def log10(self): return self._unop("log10") def log1p(self): return self._unop("log1p") def log2(self): return self._unop("log2") def exp(self): return self._unop("exp") def expm1(self): return self._unop("expm1") def gamma(self): return self._unop("gamma") def lgamma(self): return self._unop("lgamma") def digamma(self): return self._unop("digamma") def trigamma(self): return self._unop("trigamma") @staticmethod def moment(year=None, month=None, day=None, hour=None, minute=None, second=None, msec=None, date=None, time=None): assert_is_type(date, None, datetime.date, numpy_datetime, pandas_timestamp) assert_is_type(time, None, datetime.time) assert_is_type(year, None, int, H2OFrame) assert_is_type(month, None, int, H2OFrame) assert_is_type(day, None, int, H2OFrame) assert_is_type(hour, None, int, H2OFrame) assert_is_type(minute, None, int, H2OFrame) assert_is_type(second, None, int, H2OFrame) assert_is_type(msec, None, int, H2OFrame) if time is not None: if hour is not None or minute is not None or second is not None or msec is not None: raise H2OValueError("Arguments hour, minute, second, msec cannot be used together with `time`.") hour = time.hour minute = time.minute second = time.second msec = time.microsecond // 1000 if date is not None: if is_type(date, pandas_timestamp): date = date.to_pydatetime() if is_type(date, numpy_datetime): date = date.astype("M8[ms]").astype("O") if year is not None or month is not None or day is not None: raise H2OValueError("Arguments year, month and day cannot be used together with `date`.") year = date.year month = date.month day = date.day if isinstance(date, datetime.datetime): if time is not None: raise H2OValueError("Argument `time` cannot be used together with `date` of datetime type.") if hour is not None or minute is not None or second is not None or msec is not None: raise H2OValueError("Arguments hour, 
minute, second, msec cannot be used together with `date` " "of datetime type.") hour = date.hour minute = date.minute second = date.second msec = date.microsecond // 1000 if year is None or month is None or day is None: raise H2OValueError("Either arguments (`year`, `month` and `day`) or the `date` are required.") if hour is None: hour = 0 if minute is None: minute = 0 if second is None: second = 0 if msec is None: msec = 0 local_vars = locals() res_nrows = None for n in ["year", "month", "day", "hour", "minute", "second", "msec"]: x = local_vars[n] if isinstance(x, H2OFrame): if x.ncols != 1: raise H2OValueError("Argument `%s` is a frame with more than 1 column" % n) if x.type(0) not in {"int", "real"}: raise H2OValueError("Column `%s` is not numeric (type = %s)" % (n, x.type(0))) if res_nrows is None: res_nrows = x.nrows if x.nrows == 0 or x.nrows != res_nrows: raise H2OValueError("Incompatible column `%s` having %d rows" % (n, x.nrows)) if res_nrows is None: res_nrows = 1 res = H2OFrame._expr(ExprNode("moment", year, month, day, hour, minute, second, msec)) res._ex._cache._names = ["name"] res._ex._cache._types = {"name": "time"} res._ex._cache._nrows = res_nrows res._ex._cache._ncols = 1 return res def unique(self): return H2OFrame._expr(expr=ExprNode("unique", self)) def levels(self): lol = H2OFrame._expr(expr=ExprNode("levels", self)).as_data_frame(False) lol.pop(0) lol = list(zip(*lol)) return [[ll for ll in l if ll != ''] for l in lol] def nlevels(self): levels = self.levels() return [len(l) for l in levels] if levels else 0 def set_level(self, level): return H2OFrame._expr(expr=ExprNode("setLevel", self, level), cache=self._ex._cache) def set_levels(self, levels): assert_is_type(levels, [str]) return H2OFrame._expr(expr=ExprNode("setDomain", self, False, levels), cache=self._ex._cache) def set_names(self, names): assert_is_type(names, [str]) assert_satisfies(names, len(names) == self.ncol) self._ex = ExprNode("colnames=", self, range(self.ncol), names) return self def set_name(self, col=None, name=None): assert_is_type(col, None, int, str) assert_is_type(name, str) ncols = self.ncols col_index = None if is_type(col, int): if not(-ncols <= col < ncols): raise H2OValueError("Index %d is out of bounds for a frame with %d columns" % (col, ncols)) col_index = (col + ncols) % ncols elif is_type(col, str): if col not in self.names: raise H2OValueError("Column %s doesn't exist in the frame." 
% col) col_index = self.names.index(col) # lookup the name else: assert col is None if ncols != 1: raise H2OValueError("The frame has %d columns; please specify which one to rename" % ncols) col_index = 0 if name != self.names[col_index] and name in self.types: raise H2OValueError("Column '%s' already exists in the frame" % name) oldname = self.names[col_index] old_cache = self._ex._cache self._ex = ExprNode("colnames=", self, col_index, name) # Update-in-place, but still lazy self._ex._cache.fill_from(old_cache) if self.names is None: self._frame()._ex._cache.fill() else: self._ex._cache._names = self.names[:col] + [name] + self.names[col + 1:] self._ex._cache._types[name] = self._ex._cache._types.pop(oldname) return def as_date(self, format): fr = H2OFrame._expr(expr=ExprNode("as.Date", self, format), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def cumsum(self, axis=0): return H2OFrame._expr(expr=ExprNode("cumsum", self, axis), cache=self._ex._cache) def cumprod(self, axis=0): return H2OFrame._expr(expr=ExprNode("cumprod", self, axis), cache=self._ex._cache) def cummin(self, axis=0): return H2OFrame._expr(expr=ExprNode("cummin", self, axis), cache=self._ex._cache) def cummax(self, axis=0): return H2OFrame._expr(expr=ExprNode("cummax", self, axis), cache=self._ex._cache) def prod(self, na_rm=False): return ExprNode("prod.na" if na_rm else "prod", self)._eager_scalar() def any(self): return bool(ExprNode("any", self)._eager_scalar()) def any_na_rm(self): return bool(ExprNode("any.na", self)._eager_scalar()) def all(self): return bool(ExprNode("all", self)._eager_scalar()) def isnumeric(self): return [bool(o) for o in ExprNode("is.numeric", self)._eager_scalar()] def isstring(self): return [bool(o) for o in ExprNode("is.character", self)._eager_scalar()] def isin(self, item): if is_type(item, list, tuple, set): return functools.reduce(H2OFrame.__or__, (self == i for i in item)) else: return self == item def kfold_column(self, n_folds=3, seed=-1): return H2OFrame._expr(expr=ExprNode("kfold_column", self, n_folds, seed))._frame() # want this to be eager! def modulo_kfold_column(self, n_folds=3): return H2OFrame._expr(expr=ExprNode("modulo_kfold_column", self, n_folds))._frame() # want this to be eager! def stratified_kfold_column(self, n_folds=3, seed=-1): return H2OFrame._expr( expr=ExprNode("stratified_kfold_column", self, n_folds, seed))._frame() # want this to be eager! def structure(self): df = self.as_data_frame(use_pandas=False) cn = df.pop(0) nr = self.nrow nc = self.ncol width = max([len(c) for c in cn]) isfactor = self.isfactor() numlevels = self.nlevels() lvls = self.levels() print("H2OFrame: '{}' \nDimensions: {} obs. 
of {} variables".format(self.frame_id, nr, nc)) for i in range(nc): print("$ {} {}: ".format(cn[i], ' ' * (width - max(0, len(cn[i])))), end=' ') if isfactor[i]: nl = numlevels[i] print("Factor w/ {} level(s) {} ".format(nl, '"' + '","'.join(lvls[i]) + '"'), end='\n') else: print("num {}".format(" ".join(it[0] if it else "nan" for it in h2o.as_list(self[:10, i], False)[1:]))) def as_data_frame(self, use_pandas=True, header=True): if can_use_pandas() and use_pandas: import pandas return pandas.read_csv(StringIO(self.get_frame_data()), low_memory=False) frame = [row for row in csv.reader(StringIO(self.get_frame_data()))] if not header: frame.pop(0) return frame def get_frame_data(self): return h2o.api("GET /3/DownloadDataset", data={"frame_id": self.frame_id, "hex_string": False}) def __getitem__(self, item): # Select columns based on a string, a list of strings, an int or a slice. # Note that the python column selector handles the case of negative # selections, or out-of-range selections - without having to compute # self._ncols in the front-end - which would force eager evaluation just to # range check in the front-end. new_ncols = -1 new_nrows = -1 new_names = None new_types = None fr = None flatten = False if isinstance(item, slice): item = normalize_slice(item, self.ncols) if is_type(item, str, int, list, slice): new_ncols, new_names, new_types, item = self._compute_ncol_update(item) new_nrows = self.nrow fr = H2OFrame._expr(expr=ExprNode("cols_py", self, item)) elif isinstance(item, (ExprNode, H2OFrame)): new_ncols = self.ncol new_names = self.names new_types = self.types new_nrows = -1 # have a "big" predicate column -- update cache later on... fr = H2OFrame._expr(expr=ExprNode("rows", self, item)) elif isinstance(item, tuple): rows, cols = item allrows = allcols = False if isinstance(cols, slice): cols = normalize_slice(cols, self.ncols) allcols = cols == slice(0, self.ncols, 1) if isinstance(rows, slice): rows = normalize_slice(rows, self.nrows) allrows = rows == slice(0, self.nrows, 1) if allrows and allcols: return self # fr[:,:] -> all rows and columns.. return self if allrows: new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols) new_nrows = self.nrow fr = H2OFrame._expr(expr=ExprNode("cols_py", self, cols)) # fr[:,cols] -> really just a column slice if allcols: new_ncols = self.ncols new_names = self.names new_types = self.types new_nrows, rows = self._compute_nrow_update(rows) fr = H2OFrame._expr(expr=ExprNode("rows", self, rows)) # fr[rows,:] -> really just a row slices if not allrows and not allcols: new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols) new_nrows, rows = self._compute_nrow_update(rows) fr = H2OFrame._expr(expr=ExprNode("rows", ExprNode("cols_py", self, cols), rows)) flatten = is_type(rows, int) and is_type(cols, str, int) else: raise ValueError("Unexpected __getitem__ selector: " + str(type(item)) + " " + str(item.__class__)) assert fr is not None # Pythonic: if the row & col selector turn into ints (or a single col # name), then extract the single element out of the Frame. Otherwise # return a Frame, EVEN IF the selectors are e.g. slices-of-1-value. 
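        # For example (hypothetical frame fr):
        #   fr[0, "age"]    -> flatten is True,  so a scalar value is returned
        #   fr[0:1, "age"]  -> flatten is False, so a 1x1 H2OFrame is returned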
if flatten: return fr.flatten() fr._ex._cache.ncols = new_ncols fr._ex._cache.nrows = new_nrows fr._ex._cache.names = new_names fr._ex._cache.types = new_types fr._is_frame = self._is_frame return fr def _compute_ncol_update(self, item): # computes new ncol, names, and types try: new_ncols = -1 if isinstance(item, list): new_ncols = len(item) if _is_str_list(item): new_types = {k: self.types[k] for k in item} new_names = item else: new_names = [self.names[i] for i in item] new_types = {name: self.types[name] for name in new_names} elif isinstance(item, slice): assert slice_is_normalized(item) new_names = self.names[item] new_types = {name: self.types[name] for name in new_names} elif is_type(item, str, int): new_ncols = 1 if is_type(item, str): new_names = [item] new_types = None if item not in self.types else {item: self.types[item]} else: new_names = [self.names[item]] new_types = {new_names[0]: self.types[new_names[0]]} else: raise ValueError("Unexpected type: " + str(type(item))) return (new_ncols, new_names, new_types, item) except: return (-1, None, None, item) def _compute_nrow_update(self, item): try: new_nrows = -1 if isinstance(item, list): new_nrows = len(item) elif isinstance(item, slice): assert slice_is_normalized(item) new_nrows = (item.stop - item.start + item.step - 1) // item.step elif isinstance(item, H2OFrame): new_nrows = -1 else: new_nrows = 1 return [new_nrows, item] except: return [-1, item] def __setitem__(self, item, value): # TODO: add far stronger type checks, so that we never run in a situation where the server has to # tell us that we requested an illegal operation. assert_is_type(item, str, int, tuple, list, H2OFrame) assert_is_type(value, None, numeric, str, H2OFrame) col_expr = None row_expr = None colname = None # When set, we are doing an append if is_type(item, str): # String column name, could be new or old if item in self.names: col_expr = self.names.index(item) # Update an existing column else: col_expr = self.ncols colname = item # New, append elif is_type(item, int): if not(-self.ncols <= item < self.ncols): raise H2OValueError("Incorrect column index: %d" % item) col_expr = item # Column by number if col_expr < 0: col_expr += self.ncols elif isinstance(item, tuple): # Both row and col specifiers # Need more type checks row_expr = item[0] col_expr = item[1] if is_type(col_expr, str): # Col by name if col_expr not in self.names: # Append colname = col_expr col_expr = self.ncol elif is_type(col_expr, int): if not(-self.ncols <= col_expr < self.ncols): raise H2OValueError("Incorrect column index: %d" % item) if col_expr < 0: col_expr += self.ncols elif isinstance(col_expr, slice): # Col by slice if col_expr.start is None and col_expr.stop is None: col_expr = slice(0, self.ncol) # Slice of all if isinstance(row_expr, slice): start = row_expr.start step = row_expr.step stop = row_expr.stop if start is None: start = 0 if stop is None: stop = self.nrows row_expr = slice(start, stop, step) elif isinstance(item, H2OFrame): row_expr = item # Row slicing elif isinstance(item, list): col_expr = item if value is None: value = float("nan") value_is_own_subframe = isinstance(value, H2OFrame) and self._is_frame_in_self(value) old_cache = self._ex._cache if colname is None: self._ex = ExprNode(":=", self, value, col_expr, row_expr) self._ex._cache.fill_from(old_cache) if isinstance(value, H2OFrame) and \ value._ex._cache.types_valid() and \ self._ex._cache.types_valid(): self._ex._cache._types.update(value._ex._cache.types) else: self._ex._cache.types = None else: 
self._ex = ExprNode("append", self, value, colname) self._ex._cache.fill_from(old_cache) self._ex._cache.names = self.names + [colname] self._ex._cache._ncols += 1 if self._ex._cache.types_valid() and isinstance(value, H2OFrame) and value._ex._cache.types_valid(): self._ex._cache._types[colname] = list(viewvalues(value._ex._cache.types))[0] else: self._ex._cache.types = None if value_is_own_subframe: value._ex = None # wipe out to keep ref counts correct def _is_frame_in_self(self, frame): if self._ex is frame._ex: return True if frame._ex._children is None: return False return any(self._is_expr_in_self(ch) for ch in frame._ex._children) def _is_expr_in_self(self, expr): if not isinstance(expr, ExprNode): return False if self._ex is expr: return True if expr._children is None: return False return any(self._is_expr_in_self(ch) for ch in expr._children) def drop(self, index, axis=1): if axis == 1: if not isinstance(index, list): #If input is a string, i.e., "C1": if is_type(index, str): #Check if index is an actual column(s) in the frame if index not in self.names: raise H2OValueError("Column(s) selected to drop are not in original frame: %r" % index) index = self.names.index(index) #If input is an int indicating a column index, i.e., 3: elif is_type(index, int): #Check if index is an actual column index in the frame if index > self.ncol: raise H2OValueError("Column index selected to drop is not part of the frame: %r" % index) if index < 0: raise H2OValueError("Column index selected to drop is not positive: %r" % index) fr = H2OFrame._expr(expr=ExprNode("cols", self, -(index + 1)), cache=self._ex._cache) fr._ex._cache.ncols -= 1 fr._ex._cache.names = self.names[:index] + self.names[index + 1:] fr._ex._cache.types = {name: self.types[name] for name in fr._ex._cache.names} return fr elif isinstance(index, list): #If input is an int array indicating a column index, i.e., [3] or [1,2,3]: if is_type(index, [int]): if max(index) > self.ncol: raise H2OValueError("Column index selected to drop is not part of the frame: %r" % index) if min(index) < 0: raise H2OValueError("Column index selected to drop is not positive: %r" % index) for i in range(len(index)): index[i] = -(index[i] + 1) #If index is a string array, i.e., ["C1", "C2"] elif is_type(index, [str]): #Check if index is an actual column(s) in the frame if not set(index).issubset(self.names): raise H2OValueError("Column(s) selected to drop are not in original frame: %r" % index) for i in range(len(index)): index[i] = -(self.names.index(index[i]) + 1) fr = H2OFrame._expr(expr=ExprNode("cols", self, index), cache=self._ex._cache) fr._ex._cache.ncols -= len(index) fr._ex._cache.names = [i for i in self.names if self.names.index(i) not in list(map(lambda x: abs(x) - 1, index))] fr._ex._cache.types = {name: fr.types[name] for name in fr._ex._cache.names} else: raise ValueError("Invalid column index types. Must either be a list of all int indexes, " "a string list of all column names, a single int index, or" "a single string for dropping columns.") return fr elif axis == 0: if is_type(index, [int]): #Check if index is an actual column index in the frame if max(index) > self.nrow: raise H2OValueError("Row index selected to drop is not part of the frame: %r" % index) if min(index) < 0: raise H2OValueError("Row index selected to drop is not positive: %r" % index) index = [-(x + 1) for x in index] fr = H2OFrame._expr(expr=ExprNode("rows", self, index), cache=self._ex._cache) fr._ex._cache.nrows -= len(index) else: raise ValueError("Invalid row indexes. 
Must be a list of int row indexes to drop from the H2OFrame.") return fr def pop(self, i): if is_type(i, str): i = self.names.index(i) col = H2OFrame._expr(expr=ExprNode("cols", self, i)) old_cache = self._ex._cache self._ex = ExprNode("cols", self, -(i + 1)) self._ex._cache.ncols -= 1 self._ex._cache.names = old_cache.names[:i] + old_cache.names[i + 1:] self._ex._cache.types = {name: old_cache.types[name] for name in self._ex._cache.names} self._ex._cache._data = None col._ex._cache.ncols = 1 col._ex._cache.names = [old_cache.names[i]] return col def quantile(self, prob=None, combine_method="interpolate", weights_column=None): if len(self) == 0: return self if prob is None: prob = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99] if weights_column is None: weights_column = "_" else: assert_is_type(weights_column, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == self.nrow)) if isinstance(weights_column, H2OFrame): merged = self.cbind(weights_column) weights_column = merged.names[-1] return H2OFrame._expr(expr=ExprNode("quantile", merged, prob, combine_method, weights_column)) return H2OFrame._expr(expr=ExprNode("quantile", self, prob, combine_method, weights_column)) def concat(self, frames, axis=1): if len(frames) == 0: raise ValueError("Input list of frames is empty! Nothing to concat.") if axis == 1: df = self.cbind(frames) else: df = self.rbind(frames) return df def cbind(self, data): assert_is_type(data, H2OFrame, numeric, [H2OFrame, numeric]) frames = [data] if not isinstance(data, list) else data new_cols = list(self.columns) new_types = dict(self.types) for frame in frames: if isinstance(frame, H2OFrame): if frame.nrow != self.nrow: raise H2OValueError("Cannot bind a dataframe with %d rows to a data frame with %d rows: " "the number of rows should match" % (frame.nrow, self.nrow)) new_cols += frame.columns new_types.update(frame.types) else: new_cols += [None] unique_cols = set(new_cols) fr = H2OFrame._expr(expr=ExprNode("cbind", self, *frames), cache=self._ex._cache) fr._ex._cache.ncols = len(new_cols) if len(new_cols) == len(unique_cols) and None not in unique_cols: fr._ex._cache.names = new_cols fr._ex._cache.types = new_types else: # Invalidate names and types since they contain duplicate / unknown names, and the server will choose those. 
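            # e.g. cbind-ing a frame that reuses one of this frame's column names, or a raw
            # numeric scalar (recorded above as a None placeholder), lands here.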
fr._ex._cache.names = None fr._ex._cache.types = None return fr def rbind(self, data): assert_is_type(data, H2OFrame, [H2OFrame]) frames = [data] if not isinstance(data, list) else data for frame in frames: if frame.ncol != self.ncol: raise H2OValueError("Cannot row-bind a dataframe with %d columns to a data frame with %d columns: " "the columns must match" % (frame.ncol, self.ncol)) if frame.columns != self.columns or frame.types != self.types: raise H2OValueError("Column names and types must match for rbind() to work") fr = H2OFrame._expr(expr=ExprNode("rbind", self, *frames), cache=self._ex._cache) fr._ex._cache.nrows = self.nrow + sum(frame.nrow for frame in frames) return fr def split_frame(self, ratios=None, destination_frames=None, seed=None): assert_is_type(ratios, [numeric], None) assert_is_type(destination_frames, [str], None) assert_is_type(seed, int, None) if ratios is None: ratios = [0.75] if not ratios: raise ValueError("Ratios array may not be empty") if destination_frames is not None: if len(ratios) + 1 != len(destination_frames): raise ValueError("The number of provided destination_frames must be one more " "than the number of provided ratios") num_slices = len(ratios) + 1 boundaries = [] last_boundary = 0 i = 0 while i < num_slices - 1: ratio = ratios[i] if ratio < 0: raise ValueError("Ratio must be greater than 0") boundary = last_boundary + ratio if boundary >= 1.0: raise ValueError("Ratios must add up to less than 1.0") boundaries.append(boundary) last_boundary = boundary i += 1 splits = [] tmp_runif = self.runif(seed) tmp_runif.frame_id = "%s_splitter" % _py_tmp_key(h2o.connection().session_id) i = 0 while i < num_slices: if i == 0: # lower_boundary is 0.0 upper_boundary = boundaries[i] tmp_slice = self[(tmp_runif <= upper_boundary), :] elif i == num_slices - 1: lower_boundary = boundaries[i - 1] # upper_boundary is 1.0 tmp_slice = self[(tmp_runif > lower_boundary), :] else: lower_boundary = boundaries[i - 1] upper_boundary = boundaries[i] tmp_slice = self[((tmp_runif > lower_boundary) & (tmp_runif <= upper_boundary)), :] if destination_frames is None: splits.append(tmp_slice) else: destination_frame_id = destination_frames[i] tmp_slice.frame_id = destination_frame_id splits.append(tmp_slice) i += 1 del tmp_runif return splits def group_by(self, by): assert_is_type(by, str, int, [str, int]) return GroupBy(self, by) def sort(self, by): assert_is_type(by, str, int, [str, int]) if type(by) != list: by = [by] for c in by: if self.type(c) not in ["enum","time","int"]: raise H2OValueError("Sort by column: " + str(c) + " not of enum, time, or int type") return H2OFrame._expr(expr=ExprNode("sort",self,by)) def fillna(self,method="forward",axis=0,maxlen=1): assert_is_type(axis, 0, 1) assert_is_type(method,str) assert_is_type(maxlen, int) return H2OFrame._expr(expr=ExprNode("h2o.fillna",self,method,axis,maxlen)) def impute(self, column=-1, method="mean", combine_method="interpolate", by=None, group_by_frame=None, values=None): if is_type(column, str): column = self.names.index(column) if is_type(by, str): by = self.names.index(by) if values is None: values = "_" else: assert len(values) == len(self.columns), "Length of values does not match length of columns" # convert string values to categorical num values values2 = [] for i in range(0,len(values)): if self.type(i) == "enum": try: values2.append(self.levels()[i].index(values[i])) except: raise H2OValueError("Impute value of: " + values[i] + " not found in existing levels of" " column: " + self.col_names[i]) else: 
                    values2.append(values[i])
            values = values2
        if group_by_frame is None: group_by_frame = "_"

        # This code below is needed to ensure the frame (self) exists on the server. Without it,
        # self._ex._cache.fill() fails with an assertion that ._id is None.
        # This code should be removed / reworked once we have a more consistent strategy of dealing with frames.
        self._ex._eager_frame()

        # Note: compare string values with "!=", not the identity test "is not".
        if by is not None or group_by_frame != "_":
            res = H2OFrame._expr(
                expr=ExprNode("h2o.impute", self, column, method, combine_method, by, group_by_frame,
                              values))._frame()
        else:
            res = ExprNode("h2o.impute", self, column, method, combine_method, by, group_by_frame,
                           values)._eager_scalar()
        self._ex._cache.flush()
        self._ex._cache.fill(10)
        return res

    def merge(self, other, all_x=False, all_y=False, by_x=None, by_y=None, method="auto"):
        if by_x is None and by_y is None:
            common_names = list(set(self.names) & set(other.names))
            if not common_names:
                raise H2OValueError("No columns in common to merge on!")
        if by_x is None:
            by_x = [self.names.index(c) for c in common_names]
        else:
            by_x = _getValidCols(by_x, self)
        if by_y is None:
            by_y = [other.names.index(c) for c in common_names]
        else:
            by_y = _getValidCols(by_y, other)
        return H2OFrame._expr(expr=ExprNode("merge", self, other, all_x, all_y, by_x, by_y, method))

    def relevel(self, y):
        return H2OFrame._expr(expr=ExprNode("relevel", self, quote(y)))

    def insert_missing_values(self, fraction=0.1, seed=None):
        kwargs = {}
        kwargs['dataset'] = self.frame_id  # Eager; forces eval now for following REST call
        kwargs['fraction'] = fraction
        if seed is not None: kwargs['seed'] = seed
        job = {}
        job['job'] = h2o.api("POST /3/MissingInserter", data=kwargs)
        H2OJob(job, job_type=("Insert Missing Values")).poll()
        self._ex._cache.flush()
        return self

    def min(self):
        return ExprNode("min", self)._eager_scalar()

    def max(self):
        return ExprNode("max", self)._eager_scalar()

    def sum(self, skipna=True, axis=0, **kwargs):
        assert_is_type(skipna, bool)
        assert_is_type(axis, 0, 1)
        # Deprecated since 2016-10-14
        if "na_rm" in kwargs:
            warnings.warn("Parameter na_rm is deprecated; use skipna instead", category=DeprecationWarning)
            na_rm = kwargs.pop("na_rm")
            assert_is_type(na_rm, bool)
            skipna = na_rm  # don't assign to skipna directly, to help with error reporting
        return_frame = get_config_value("general.allow_breaking_changes", False)
        if "return_frame" in kwargs:
            return_frame = kwargs.pop("return_frame")
            assert_is_type(return_frame, bool)
        if kwargs:
            raise H2OValueError("Unknown parameters %r" % list(kwargs))
        if return_frame:
            return H2OFrame._expr(ExprNode("sumaxis", self, skipna, axis))
        else:
            return ExprNode("sumNA" if skipna else "sum", self)._eager_scalar()

    def mean(self, skipna=True, axis=0, **kwargs):
        assert_is_type(skipna, bool)
        assert_is_type(axis, 0, 1)
        if "na_rm" in kwargs:
            warnings.warn("Parameter na_rm is deprecated; use skipna instead", category=DeprecationWarning)
            na_rm = kwargs.pop("na_rm")
            assert_is_type(na_rm, bool)
            skipna = na_rm
        # Determine whether to return a frame or a list
        return_frame = get_config_value("general.allow_breaking_changes", False)
        if "return_frame" in kwargs:
            return_frame = kwargs.pop("return_frame")
            assert_is_type(return_frame, bool)
        if kwargs:
            raise H2OValueError("Unknown parameters %r" % list(kwargs))
        new_frame = H2OFrame._expr(ExprNode("mean", self, skipna, axis))
        if return_frame:
            return new_frame
        else:
            return new_frame.getrow()

    def skewness(self, na_rm=False):
        return ExprNode("skewness", self, na_rm)._eager_scalar()

    def kurtosis(self, na_rm=False):
        return ExprNode("kurtosis", self,
na_rm)._eager_scalar() def nacnt(self): return ExprNode("naCnt", self)._eager_scalar() def median(self, na_rm=False): return ExprNode("median", self, na_rm)._eager_scalar() def var(self, y=None, na_rm=False, use=None): symmetric = False if y is None: y = self symmetric = True if use is None: use = "complete.obs" if na_rm else "everything" if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1): return ExprNode("var", self, y, use, symmetric)._eager_scalar() return H2OFrame._expr(expr=ExprNode("var", self, y, use, symmetric))._frame() def sd(self, na_rm=False): return ExprNode("sd", self, na_rm)._eager_scalar() def cor(self, y=None, na_rm=False, use=None): assert_is_type(y, H2OFrame, None) assert_is_type(na_rm, bool) assert_is_type(use, None, "everything", "all.obs", "complete.obs") if y is None: y = self if use is None: use = "complete.obs" if na_rm else "everything" if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1): return ExprNode("cor", self, y, use)._eager_scalar() return H2OFrame._expr(expr=ExprNode("cor", self, y, use))._frame() def distance(self, y, measure=None): assert_is_type(y, H2OFrame) if measure is None: measure = "l2" return H2OFrame._expr(expr=ExprNode("distance", self, y, measure))._frame() def strdistance(self, y, measure=None): assert_is_type(y, H2OFrame) assert_is_type(measure, Enum('lv', 'lcs', 'qgram', 'jaccard', 'jw', 'soundex')) return H2OFrame._expr(expr=ExprNode("strDistance", self, y, measure))._frame() def asfactor(self): for colname in self.names: t = self.types[colname] if t not in {"bool", "int", "string", "enum"}: raise H2OValueError("Only 'int' or 'string' are allowed for " "asfactor(), got %s:%s " % (colname, t)) fr = H2OFrame._expr(expr=ExprNode("as.factor", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {name: "enum" for name in self.types} else: raise H2OTypeError("Types are not available in result") return fr def isfactor(self): return [bool(o) for o in ExprNode("is.factor", self)._eager_scalar()] def anyfactor(self): return bool(ExprNode("any.factor", self)._eager_scalar()) def categories(self): if self.ncols != 1: raise H2OValueError("This operation only applies to a single factor column") if self.types[self.names[0]] != "enum": raise H2OValueError("Input is not a factor. 
This operation only applies to a single factor column") return self.levels()[0] def transpose(self): return H2OFrame._expr(expr=ExprNode("t", self)) def strsplit(self, pattern): fr = H2OFrame._expr(expr=ExprNode("strsplit", self, pattern)) fr._ex._cache.nrows = self.nrow return fr def tokenize(self, split): fr = H2OFrame._expr(expr=ExprNode("tokenize", self, split)) return fr def countmatches(self, pattern): assert_is_type(pattern, str, [str]) fr = H2OFrame._expr(expr=ExprNode("countmatches", self, pattern)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncols = self.ncol return fr def trim(self): fr = H2OFrame._expr(expr=ExprNode("trim", self)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def substring(self, start_index, end_index=None): fr = H2OFrame._expr(expr=ExprNode("substring", self, start_index, end_index)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def lstrip(self, set=" "): # work w/ None; parity with python lstrip if set is None: set = " " fr = H2OFrame._expr(expr=ExprNode("lstrip", self, set)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def rstrip(self, set=" "): # work w/ None; parity with python rstrip if set is None: set = " " fr = H2OFrame._expr(expr=ExprNode("rstrip", self, set)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def entropy(self): fr = H2OFrame._expr(expr=ExprNode("entropy", self)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def num_valid_substrings(self, path_to_words): assert_is_type(path_to_words, str) fr = H2OFrame._expr(expr=ExprNode("num_valid_substrings", self, path_to_words)) fr._ex._cache.nrows = self.nrow fr._ex._cache.ncol = self.ncol return fr def nchar(self): return H2OFrame._expr(expr=ExprNode("strlen", self)) def table(self, data2=None, dense=True): return H2OFrame._expr(expr=ExprNode("table", self, data2, dense)) if data2 is not None else H2OFrame._expr( expr=ExprNode("table", self, dense)) def hist(self, breaks="sturges", plot=True, **kwargs): server = kwargs.pop("server") if "server" in kwargs else False assert_is_type(breaks, int, [numeric], Enum("sturges", "rice", "sqrt", "doane", "fd", "scott")) assert_is_type(plot, bool) assert_is_type(server, bool) if kwargs: raise H2OValueError("Unknown parameters to hist(): %r" % kwargs) hist = H2OFrame._expr(expr=ExprNode("hist", self, breaks))._frame() if plot: try: import matplotlib if server: matplotlib.use("Agg", warn=False) import matplotlib.pyplot as plt except ImportError: print("ERROR: matplotlib is required to make the histogram plot. 
" "Set `plot` to False, if a plot is not desired.") return hist["widths"] = hist["breaks"].difflag1() # [2:] because we're removing the title and the first row (which consists of NaNs) lefts = [float(c[0]) for c in h2o.as_list(hist["breaks"], use_pandas=False)[2:]] widths = [float(c[0]) for c in h2o.as_list(hist["widths"], use_pandas=False)[2:]] counts = [float(c[0]) for c in h2o.as_list(hist["counts"], use_pandas=False)[2:]] plt.xlabel(self.names[0]) plt.ylabel("Frequency") plt.title("Histogram of %s" % self.names[0]) plt.bar(left=lefts, width=widths, height=counts, bottom=0) if not server: plt.show() else: hist["density"] = hist["counts"] / (hist["breaks"].difflag1() * hist["counts"].sum()) return hist def isax(self, num_words, max_cardinality, optimize_card=False, **kwargs): if num_words <= 0: raise H2OValueError("num_words must be greater than 0") if max_cardinality <= 0: raise H2OValueError("max_cardinality must be greater than 0") return H2OFrame._expr(expr=ExprNode("isax", self, num_words, max_cardinality, optimize_card)) def pivot(self, index, column, value): assert_is_type(index, str) assert_is_type(column, str) assert_is_type(value, str) col_names = self.names if index not in col_names: raise H2OValueError("Index not in H2OFrame") if column not in col_names: raise H2OValueError("Column not in H2OFrame") if value not in col_names: raise H2OValueError("Value column not in H2OFrame") if self.type(column) not in ["enum","time","int"]: raise H2OValueError("'column' argument is not type enum, time or int") if self.type(index) not in ["enum","time","int"]: raise H2OValueError("'index' argument is not type enum, time or int") return H2OFrame._expr(expr=ExprNode("pivot",self,index,column,value)) def topNBottomN(self, column=0, nPercent=10, grabTopN=-1): assert (nPercent >= 0) and (nPercent<=100.0), "nPercent must be between 0.0 and 100.0" assert round(nPercent*0.01*self.nrows)>0, "Increase nPercent. Current value will result in top 0 row." if isinstance(column, int): if (column < 0) or (column>=self.ncols): raise H2OValueError("Invalid column index H2OFrame") else: colIndex = column else: col_names = self.names if column not in col_names: raise H2OValueError("Column name not found H2OFrame") else: colIndex = col_names.index(column) if not(self[colIndex].isnumeric()): raise H2OValueError("Wrong column type! 
Selected column must be numeric.") return H2OFrame._expr(expr=ExprNode("topn", self, colIndex, nPercent, grabTopN)) def topN(self, column=0, nPercent=10): return self.topNBottomN(column, nPercent, 1) def bottomN(self, column=0, nPercent=10): return self.topNBottomN(column, nPercent, -1) def sub(self, pattern, replacement, ignore_case=False): return H2OFrame._expr(expr=ExprNode("replacefirst", self, pattern, replacement, ignore_case)) def gsub(self, pattern, replacement, ignore_case=False): return H2OFrame._expr(expr=ExprNode("replaceall", self, pattern, replacement, ignore_case)) def interaction(self, factors, pairwise, max_factors, min_occurrence, destination_frame=None): return h2o.interaction(data=self, factors=factors, pairwise=pairwise, max_factors=max_factors, min_occurrence=min_occurrence, destination_frame=destination_frame) def toupper(self): return H2OFrame._expr(expr=ExprNode("toupper", self), cache=self._ex._cache) def grep(self,pattern, ignore_case = False, invert = False, output_logical = False): return H2OFrame._expr(expr=ExprNode("grep", self, pattern, ignore_case, invert, output_logical)) def tolower(self): return H2OFrame._expr(expr=ExprNode("tolower", self), cache=self._ex._cache) def rep_len(self, length_out): return H2OFrame._expr(expr=ExprNode("rep_len", self, length_out)) def scale(self, center=True, scale=True): return H2OFrame._expr(expr=ExprNode("scale", self, center, scale), cache=self._ex._cache) def signif(self, digits=6): return H2OFrame._expr(expr=ExprNode("signif", self, digits), cache=self._ex._cache) def round(self, digits=0): return H2OFrame._expr(expr=ExprNode("round", self, digits), cache=self._ex._cache) def asnumeric(self): fr = H2OFrame._expr(expr=ExprNode("as.numeric", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "real" for k in fr._ex._cache.types.keys()} return fr def ascharacter(self): fr = H2OFrame._expr(expr=ExprNode("as.character", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "string" for k in fr._ex._cache.types.keys()} return fr def na_omit(self): fr = H2OFrame._expr(expr=ExprNode("na.omit", self), cache=self._ex._cache) fr._ex._cache.nrows = -1 return fr def difflag1(self): fr = H2OFrame._expr(expr=ExprNode("difflag1", self), cache=self._ex._cache) return fr def isna(self): fr = H2OFrame._expr(expr=ExprNode("is.na", self)) fr._ex._cache.nrows = self._ex._cache.nrows fr._ex._cache.ncols = self._ex._cache.ncols if self._ex._cache.names: fr._ex._cache.names = ["isNA(%s)" % n for n in self._ex._cache.names] fr._ex._cache.types = {"isNA(%s)" % n: "int" for n in self._ex._cache.names} return fr def year(self): fr = H2OFrame._expr(expr=ExprNode("year", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def month(self): fr = H2OFrame._expr(expr=ExprNode("month", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def week(self): fr = H2OFrame._expr(expr=ExprNode("week", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def day(self): fr = H2OFrame._expr(expr=ExprNode("day", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def dayOfWeek(self): fr = H2OFrame._expr(expr=ExprNode("dayOfWeek", self), 
cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def hour(self): fr = H2OFrame._expr(expr=ExprNode("hour", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def minute(self): fr = H2OFrame._expr(expr=ExprNode("minute", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def second(self): fr = H2OFrame._expr(expr=ExprNode("second", self), cache=self._ex._cache) if fr._ex._cache.types_valid(): fr._ex._cache.types = {k: "int" for k in self._ex._cache.types.keys()} return fr def runif(self, seed=None): fr = H2OFrame._expr(expr=ExprNode("h2o.runif", self, -1 if seed is None else seed)) fr._ex._cache.ncols = 1 fr._ex._cache.nrows = self.nrow return fr def stratified_split(self, test_frac=0.2, seed=-1): return H2OFrame._expr(expr=ExprNode('h2o.random_stratified_split', self, test_frac, seed)) def match(self, table, nomatch=0): return H2OFrame._expr(expr=ExprNode("match", self, table, nomatch, None)) def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3): assert_is_type(breaks, [numeric]) if self.ncols != 1: raise H2OValueError("Single-column frame is expected") if self.types[self.names[0]] not in {"int", "real"}: raise H2OValueError("A numeric column is expected") fr = H2OFrame._expr(expr=ExprNode("cut", self, breaks, labels, include_lowest, right, dig_lab), cache=self._ex._cache) fr._ex._cache.types = {k: "enum" for k in self.names} return fr def which(self): return H2OFrame._expr(expr=ExprNode("which", self)) def idxmax(self,skipna=True, axis=0): return H2OFrame._expr(expr=ExprNode("which.max", self, skipna, axis)) def idxmin(self,skipna=True, axis=0): return H2OFrame._expr(expr=ExprNode("which.min", self, skipna, axis)) def ifelse(self, yes, no): return H2OFrame._expr(expr=ExprNode("ifelse", self, yes, no)) def apply(self, fun=None, axis=0): from .astfun import _bytecode_decompile_lambda assert_is_type(axis, 0, 1) assert_is_type(fun, FunctionType) assert_satisfies(fun, fun.__name__ == "<lambda>") res = _bytecode_decompile_lambda(fun.__code__) return H2OFrame._expr(expr=ExprNode("apply", self, 1 + (axis == 0), *res)) @staticmethod def mktime(year=1970, month=0, day=0, hour=0, minute=0, second=0, msec=0): return H2OFrame._expr(ExprNode("mktime", year, month, day, hour, minute, second, msec)) @property def columns(self): return self.names @columns.setter def columns(self, value): self.set_names(value) @property def col_names(self): return self.names @col_names.setter def col_names(self, value): self.set_names(value) def __len__(self): return self.nrows @property def nrow(self): return self.nrows @property def ncol(self): return self.ncols @property def dim(self): return [self.nrow, self.ncol] @staticmethod def from_python(python_obj, destination_frame=None, header=0, separator=",", column_names=None, column_types=None, na_strings=None): return H2OFrame(python_obj, destination_frame, header, separator, column_names, column_types, na_strings) def ischaracter(self): return self.isstring() def _getValidCols(by_idx, fr): tmp = [] for i in by_idx: if type(i) == str: if i not in fr.names: raise H2OValueError("Column: " + i + " not in frame.") tmp.append(fr.names.index(i)) elif type(i) != int: raise H2OValueError("Join on column: " + i + " not of type int") else: tmp.append(i) return list(set(tmp)) def _binop(lhs, 
op, rhs, rtype=None): assert_is_type(lhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame) assert_is_type(rhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame) if isinstance(lhs, H2OFrame) and isinstance(rhs, H2OFrame) and lhs._is_frame and rhs._is_frame: lrows, lcols = lhs.shape rrows, rcols = rhs.shape compatible = ((lcols == rcols and lrows == rrows) or (lcols == 1 and lrows == rrows) or (lcols == 1 and lrows == 1) or (rcols == 1 and lrows == rrows) or (rcols == 1 and rrows == 1) or (lrows == 1 and lcols == rcols) or (rrows == 1 and lcols == rcols) ) if not compatible: raise H2OValueError("Attempting to operate on incompatible frames: (%d x %d) and (%d x %d)" % (lrows, lcols, rrows, rcols)) if is_type(lhs, pandas_timestamp, numpy_datetime, datetime.date): lhs = H2OFrame.moment(date=lhs) if is_type(rhs, pandas_timestamp, numpy_datetime, datetime.date): rhs = H2OFrame.moment(date=rhs) cache = lhs._ex._cache if isinstance(lhs, H2OFrame) else rhs._ex._cache res = H2OFrame._expr(expr=ExprNode(op, lhs, rhs), cache=cache) if rtype is not None and res._ex._cache._names is not None: res._ex._cache._types = {name: rtype for name in res._ex._cache._names} return res
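As a quick orientation to the H2OFrame methods defined above (rbind, split_frame, merge, apply, and friends), here is a minimal usage sketch of split_frame. It assumes an H2O cluster can be reached via h2o.init(), and the toy columns x and y are invented for illustration; treat it as a sketch, not part of the original file.

import h2o

h2o.init()  # assumption: a local H2O cluster can be started or attached to

# Build a tiny frame and split it roughly 80/20, as split_frame(ratios=[0.8]) above does.
fr = h2o.H2OFrame({"x": list(range(100)), "y": [i % 2 for i in range(100)]})
train, test = fr.split_frame(ratios=[0.8], seed=42)
print(train.nrow, test.nrow)  # approximately 80 and 20 rows; the split is randomized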
true
true
1c4375d0124e02a4e91d5f16efeedc52c37bb057
17,765
py
Python
sdk/python/pulumi_azure_native/sql/v20190601preview/workload_group.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/sql/v20190601preview/workload_group.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/sql/v20190601preview/workload_group.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities __all__ = ['WorkloadGroupArgs', 'WorkloadGroup'] @pulumi.input_type class WorkloadGroupArgs: def __init__(__self__, *, database_name: pulumi.Input[str], max_resource_percent: pulumi.Input[int], min_resource_percent: pulumi.Input[int], min_resource_percent_per_request: pulumi.Input[float], resource_group_name: pulumi.Input[str], server_name: pulumi.Input[str], importance: Optional[pulumi.Input[str]] = None, max_resource_percent_per_request: Optional[pulumi.Input[float]] = None, query_execution_timeout: Optional[pulumi.Input[int]] = None, workload_group_name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a WorkloadGroup resource. :param pulumi.Input[str] database_name: The name of the database. :param pulumi.Input[int] max_resource_percent: The workload group cap percentage resource. :param pulumi.Input[int] min_resource_percent: The workload group minimum percentage resource. :param pulumi.Input[float] min_resource_percent_per_request: The workload group request minimum grant percentage. :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :param pulumi.Input[str] server_name: The name of the server. :param pulumi.Input[str] importance: The workload group importance level. :param pulumi.Input[float] max_resource_percent_per_request: The workload group request maximum grant percentage. :param pulumi.Input[int] query_execution_timeout: The workload group query execution timeout. :param pulumi.Input[str] workload_group_name: The name of the workload group. """ pulumi.set(__self__, "database_name", database_name) pulumi.set(__self__, "max_resource_percent", max_resource_percent) pulumi.set(__self__, "min_resource_percent", min_resource_percent) pulumi.set(__self__, "min_resource_percent_per_request", min_resource_percent_per_request) pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "server_name", server_name) if importance is not None: pulumi.set(__self__, "importance", importance) if max_resource_percent_per_request is not None: pulumi.set(__self__, "max_resource_percent_per_request", max_resource_percent_per_request) if query_execution_timeout is not None: pulumi.set(__self__, "query_execution_timeout", query_execution_timeout) if workload_group_name is not None: pulumi.set(__self__, "workload_group_name", workload_group_name) @property @pulumi.getter(name="databaseName") def database_name(self) -> pulumi.Input[str]: """ The name of the database. """ return pulumi.get(self, "database_name") @database_name.setter def database_name(self, value: pulumi.Input[str]): pulumi.set(self, "database_name", value) @property @pulumi.getter(name="maxResourcePercent") def max_resource_percent(self) -> pulumi.Input[int]: """ The workload group cap percentage resource. 
""" return pulumi.get(self, "max_resource_percent") @max_resource_percent.setter def max_resource_percent(self, value: pulumi.Input[int]): pulumi.set(self, "max_resource_percent", value) @property @pulumi.getter(name="minResourcePercent") def min_resource_percent(self) -> pulumi.Input[int]: """ The workload group minimum percentage resource. """ return pulumi.get(self, "min_resource_percent") @min_resource_percent.setter def min_resource_percent(self, value: pulumi.Input[int]): pulumi.set(self, "min_resource_percent", value) @property @pulumi.getter(name="minResourcePercentPerRequest") def min_resource_percent_per_request(self) -> pulumi.Input[float]: """ The workload group request minimum grant percentage. """ return pulumi.get(self, "min_resource_percent_per_request") @min_resource_percent_per_request.setter def min_resource_percent_per_request(self, value: pulumi.Input[float]): pulumi.set(self, "min_resource_percent_per_request", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="serverName") def server_name(self) -> pulumi.Input[str]: """ The name of the server. """ return pulumi.get(self, "server_name") @server_name.setter def server_name(self, value: pulumi.Input[str]): pulumi.set(self, "server_name", value) @property @pulumi.getter def importance(self) -> Optional[pulumi.Input[str]]: """ The workload group importance level. """ return pulumi.get(self, "importance") @importance.setter def importance(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "importance", value) @property @pulumi.getter(name="maxResourcePercentPerRequest") def max_resource_percent_per_request(self) -> Optional[pulumi.Input[float]]: """ The workload group request maximum grant percentage. """ return pulumi.get(self, "max_resource_percent_per_request") @max_resource_percent_per_request.setter def max_resource_percent_per_request(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_resource_percent_per_request", value) @property @pulumi.getter(name="queryExecutionTimeout") def query_execution_timeout(self) -> Optional[pulumi.Input[int]]: """ The workload group query execution timeout. """ return pulumi.get(self, "query_execution_timeout") @query_execution_timeout.setter def query_execution_timeout(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "query_execution_timeout", value) @property @pulumi.getter(name="workloadGroupName") def workload_group_name(self) -> Optional[pulumi.Input[str]]: """ The name of the workload group. 
""" return pulumi.get(self, "workload_group_name") @workload_group_name.setter def workload_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "workload_group_name", value) class WorkloadGroup(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, database_name: Optional[pulumi.Input[str]] = None, importance: Optional[pulumi.Input[str]] = None, max_resource_percent: Optional[pulumi.Input[int]] = None, max_resource_percent_per_request: Optional[pulumi.Input[float]] = None, min_resource_percent: Optional[pulumi.Input[int]] = None, min_resource_percent_per_request: Optional[pulumi.Input[float]] = None, query_execution_timeout: Optional[pulumi.Input[int]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, server_name: Optional[pulumi.Input[str]] = None, workload_group_name: Optional[pulumi.Input[str]] = None, __props__=None): """ Workload group operations for a data warehouse :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] database_name: The name of the database. :param pulumi.Input[str] importance: The workload group importance level. :param pulumi.Input[int] max_resource_percent: The workload group cap percentage resource. :param pulumi.Input[float] max_resource_percent_per_request: The workload group request maximum grant percentage. :param pulumi.Input[int] min_resource_percent: The workload group minimum percentage resource. :param pulumi.Input[float] min_resource_percent_per_request: The workload group request minimum grant percentage. :param pulumi.Input[int] query_execution_timeout: The workload group query execution timeout. :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. :param pulumi.Input[str] server_name: The name of the server. :param pulumi.Input[str] workload_group_name: The name of the workload group. """ ... @overload def __init__(__self__, resource_name: str, args: WorkloadGroupArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Workload group operations for a data warehouse :param str resource_name: The name of the resource. :param WorkloadGroupArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkloadGroupArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, database_name: Optional[pulumi.Input[str]] = None, importance: Optional[pulumi.Input[str]] = None, max_resource_percent: Optional[pulumi.Input[int]] = None, max_resource_percent_per_request: Optional[pulumi.Input[float]] = None, min_resource_percent: Optional[pulumi.Input[int]] = None, min_resource_percent_per_request: Optional[pulumi.Input[float]] = None, query_execution_timeout: Optional[pulumi.Input[int]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, server_name: Optional[pulumi.Input[str]] = None, workload_group_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = WorkloadGroupArgs.__new__(WorkloadGroupArgs) if database_name is None and not opts.urn: raise TypeError("Missing required property 'database_name'") __props__.__dict__["database_name"] = database_name __props__.__dict__["importance"] = importance if max_resource_percent is None and not opts.urn: raise TypeError("Missing required property 'max_resource_percent'") __props__.__dict__["max_resource_percent"] = max_resource_percent __props__.__dict__["max_resource_percent_per_request"] = max_resource_percent_per_request if min_resource_percent is None and not opts.urn: raise TypeError("Missing required property 'min_resource_percent'") __props__.__dict__["min_resource_percent"] = min_resource_percent if min_resource_percent_per_request is None and not opts.urn: raise TypeError("Missing required property 'min_resource_percent_per_request'") __props__.__dict__["min_resource_percent_per_request"] = min_resource_percent_per_request __props__.__dict__["query_execution_timeout"] = query_execution_timeout if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name if server_name is None and not opts.urn: raise TypeError("Missing required property 'server_name'") __props__.__dict__["server_name"] = server_name __props__.__dict__["workload_group_name"] = workload_group_name __props__.__dict__["name"] = None __props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/v20190601preview:WorkloadGroup"), pulumi.Alias(type_="azure-native:sql:WorkloadGroup"), pulumi.Alias(type_="azure-nextgen:sql:WorkloadGroup"), pulumi.Alias(type_="azure-native:sql/v20200202preview:WorkloadGroup"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:WorkloadGroup"), pulumi.Alias(type_="azure-native:sql/v20200801preview:WorkloadGroup"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:WorkloadGroup"), pulumi.Alias(type_="azure-native:sql/v20201101preview:WorkloadGroup"), 
pulumi.Alias(type_="azure-nextgen:sql/v20201101preview:WorkloadGroup")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(WorkloadGroup, __self__).__init__( 'azure-native:sql/v20190601preview:WorkloadGroup', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'WorkloadGroup': """ Get an existing WorkloadGroup resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = WorkloadGroupArgs.__new__(WorkloadGroupArgs) __props__.__dict__["importance"] = None __props__.__dict__["max_resource_percent"] = None __props__.__dict__["max_resource_percent_per_request"] = None __props__.__dict__["min_resource_percent"] = None __props__.__dict__["min_resource_percent_per_request"] = None __props__.__dict__["name"] = None __props__.__dict__["query_execution_timeout"] = None __props__.__dict__["type"] = None return WorkloadGroup(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def importance(self) -> pulumi.Output[Optional[str]]: """ The workload group importance level. """ return pulumi.get(self, "importance") @property @pulumi.getter(name="maxResourcePercent") def max_resource_percent(self) -> pulumi.Output[int]: """ The workload group cap percentage resource. """ return pulumi.get(self, "max_resource_percent") @property @pulumi.getter(name="maxResourcePercentPerRequest") def max_resource_percent_per_request(self) -> pulumi.Output[Optional[float]]: """ The workload group request maximum grant percentage. """ return pulumi.get(self, "max_resource_percent_per_request") @property @pulumi.getter(name="minResourcePercent") def min_resource_percent(self) -> pulumi.Output[int]: """ The workload group minimum percentage resource. """ return pulumi.get(self, "min_resource_percent") @property @pulumi.getter(name="minResourcePercentPerRequest") def min_resource_percent_per_request(self) -> pulumi.Output[float]: """ The workload group request minimum grant percentage. """ return pulumi.get(self, "min_resource_percent_per_request") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="queryExecutionTimeout") def query_execution_timeout(self) -> pulumi.Output[Optional[int]]: """ The workload group query execution timeout. """ return pulumi.get(self, "query_execution_timeout") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. """ return pulumi.get(self, "type")
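A hedged usage sketch for the generated WorkloadGroup resource above: the module attribute path azure_native.sql.v20190601preview and every name and value below are assumptions for illustration, and the snippet is only meaningful inside a Pulumi program executed by the Pulumi CLI.

import pulumi_azure_native as azure_native

# Required inputs mirror WorkloadGroupArgs above; all identifiers are placeholders.
wg = azure_native.sql.v20190601preview.WorkloadGroup(
    "wgDefault",
    resource_group_name="example-rg",
    server_name="example-sqlserver",
    database_name="example-dw",
    workload_group_name="wgDefault",
    min_resource_percent=0,
    max_resource_percent=100,
    min_resource_percent_per_request=3.0,
)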
47.373333
663
0.677174
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities __all__ = ['WorkloadGroupArgs', 'WorkloadGroup'] @pulumi.input_type class WorkloadGroupArgs: def __init__(__self__, *, database_name: pulumi.Input[str], max_resource_percent: pulumi.Input[int], min_resource_percent: pulumi.Input[int], min_resource_percent_per_request: pulumi.Input[float], resource_group_name: pulumi.Input[str], server_name: pulumi.Input[str], importance: Optional[pulumi.Input[str]] = None, max_resource_percent_per_request: Optional[pulumi.Input[float]] = None, query_execution_timeout: Optional[pulumi.Input[int]] = None, workload_group_name: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "database_name", database_name) pulumi.set(__self__, "max_resource_percent", max_resource_percent) pulumi.set(__self__, "min_resource_percent", min_resource_percent) pulumi.set(__self__, "min_resource_percent_per_request", min_resource_percent_per_request) pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "server_name", server_name) if importance is not None: pulumi.set(__self__, "importance", importance) if max_resource_percent_per_request is not None: pulumi.set(__self__, "max_resource_percent_per_request", max_resource_percent_per_request) if query_execution_timeout is not None: pulumi.set(__self__, "query_execution_timeout", query_execution_timeout) if workload_group_name is not None: pulumi.set(__self__, "workload_group_name", workload_group_name) @property @pulumi.getter(name="databaseName") def database_name(self) -> pulumi.Input[str]: return pulumi.get(self, "database_name") @database_name.setter def database_name(self, value: pulumi.Input[str]): pulumi.set(self, "database_name", value) @property @pulumi.getter(name="maxResourcePercent") def max_resource_percent(self) -> pulumi.Input[int]: return pulumi.get(self, "max_resource_percent") @max_resource_percent.setter def max_resource_percent(self, value: pulumi.Input[int]): pulumi.set(self, "max_resource_percent", value) @property @pulumi.getter(name="minResourcePercent") def min_resource_percent(self) -> pulumi.Input[int]: return pulumi.get(self, "min_resource_percent") @min_resource_percent.setter def min_resource_percent(self, value: pulumi.Input[int]): pulumi.set(self, "min_resource_percent", value) @property @pulumi.getter(name="minResourcePercentPerRequest") def min_resource_percent_per_request(self) -> pulumi.Input[float]: return pulumi.get(self, "min_resource_percent_per_request") @min_resource_percent_per_request.setter def min_resource_percent_per_request(self, value: pulumi.Input[float]): pulumi.set(self, "min_resource_percent_per_request", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="serverName") def server_name(self) -> pulumi.Input[str]: return pulumi.get(self, "server_name") @server_name.setter def server_name(self, value: pulumi.Input[str]): pulumi.set(self, "server_name", value) @property @pulumi.getter def importance(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "importance") @importance.setter def importance(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "importance", value) @property 
@pulumi.getter(name="maxResourcePercentPerRequest") def max_resource_percent_per_request(self) -> Optional[pulumi.Input[float]]: return pulumi.get(self, "max_resource_percent_per_request") @max_resource_percent_per_request.setter def max_resource_percent_per_request(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "max_resource_percent_per_request", value) @property @pulumi.getter(name="queryExecutionTimeout") def query_execution_timeout(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "query_execution_timeout") @query_execution_timeout.setter def query_execution_timeout(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "query_execution_timeout", value) @property @pulumi.getter(name="workloadGroupName") def workload_group_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "workload_group_name") @workload_group_name.setter def workload_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "workload_group_name", value) class WorkloadGroup(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, database_name: Optional[pulumi.Input[str]] = None, importance: Optional[pulumi.Input[str]] = None, max_resource_percent: Optional[pulumi.Input[int]] = None, max_resource_percent_per_request: Optional[pulumi.Input[float]] = None, min_resource_percent: Optional[pulumi.Input[int]] = None, min_resource_percent_per_request: Optional[pulumi.Input[float]] = None, query_execution_timeout: Optional[pulumi.Input[int]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, server_name: Optional[pulumi.Input[str]] = None, workload_group_name: Optional[pulumi.Input[str]] = None, __props__=None): ... @overload def __init__(__self__, resource_name: str, args: WorkloadGroupArgs, opts: Optional[pulumi.ResourceOptions] = None): ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkloadGroupArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, database_name: Optional[pulumi.Input[str]] = None, importance: Optional[pulumi.Input[str]] = None, max_resource_percent: Optional[pulumi.Input[int]] = None, max_resource_percent_per_request: Optional[pulumi.Input[float]] = None, min_resource_percent: Optional[pulumi.Input[int]] = None, min_resource_percent_per_request: Optional[pulumi.Input[float]] = None, query_execution_timeout: Optional[pulumi.Input[int]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, server_name: Optional[pulumi.Input[str]] = None, workload_group_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = WorkloadGroupArgs.__new__(WorkloadGroupArgs) if database_name is None and not opts.urn: raise TypeError("Missing required property 'database_name'") __props__.__dict__["database_name"] = database_name __props__.__dict__["importance"] = importance if max_resource_percent is None and not opts.urn: raise TypeError("Missing required property 'max_resource_percent'") __props__.__dict__["max_resource_percent"] = max_resource_percent __props__.__dict__["max_resource_percent_per_request"] = max_resource_percent_per_request if min_resource_percent is None and not opts.urn: raise TypeError("Missing required property 'min_resource_percent'") __props__.__dict__["min_resource_percent"] = min_resource_percent if min_resource_percent_per_request is None and not opts.urn: raise TypeError("Missing required property 'min_resource_percent_per_request'") __props__.__dict__["min_resource_percent_per_request"] = min_resource_percent_per_request __props__.__dict__["query_execution_timeout"] = query_execution_timeout if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name if server_name is None and not opts.urn: raise TypeError("Missing required property 'server_name'") __props__.__dict__["server_name"] = server_name __props__.__dict__["workload_group_name"] = workload_group_name __props__.__dict__["name"] = None __props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/v20190601preview:WorkloadGroup"), pulumi.Alias(type_="azure-native:sql:WorkloadGroup"), pulumi.Alias(type_="azure-nextgen:sql:WorkloadGroup"), pulumi.Alias(type_="azure-native:sql/v20200202preview:WorkloadGroup"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:WorkloadGroup"), pulumi.Alias(type_="azure-native:sql/v20200801preview:WorkloadGroup"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:WorkloadGroup"), pulumi.Alias(type_="azure-native:sql/v20201101preview:WorkloadGroup"), 
pulumi.Alias(type_="azure-nextgen:sql/v20201101preview:WorkloadGroup")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(WorkloadGroup, __self__).__init__( 'azure-native:sql/v20190601preview:WorkloadGroup', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'WorkloadGroup': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = WorkloadGroupArgs.__new__(WorkloadGroupArgs) __props__.__dict__["importance"] = None __props__.__dict__["max_resource_percent"] = None __props__.__dict__["max_resource_percent_per_request"] = None __props__.__dict__["min_resource_percent"] = None __props__.__dict__["min_resource_percent_per_request"] = None __props__.__dict__["name"] = None __props__.__dict__["query_execution_timeout"] = None __props__.__dict__["type"] = None return WorkloadGroup(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def importance(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "importance") @property @pulumi.getter(name="maxResourcePercent") def max_resource_percent(self) -> pulumi.Output[int]: return pulumi.get(self, "max_resource_percent") @property @pulumi.getter(name="maxResourcePercentPerRequest") def max_resource_percent_per_request(self) -> pulumi.Output[Optional[float]]: return pulumi.get(self, "max_resource_percent_per_request") @property @pulumi.getter(name="minResourcePercent") def min_resource_percent(self) -> pulumi.Output[int]: return pulumi.get(self, "min_resource_percent") @property @pulumi.getter(name="minResourcePercentPerRequest") def min_resource_percent_per_request(self) -> pulumi.Output[float]: return pulumi.get(self, "min_resource_percent_per_request") @property @pulumi.getter def name(self) -> pulumi.Output[str]: return pulumi.get(self, "name") @property @pulumi.getter(name="queryExecutionTimeout") def query_execution_timeout(self) -> pulumi.Output[Optional[int]]: return pulumi.get(self, "query_execution_timeout") @property @pulumi.getter def type(self) -> pulumi.Output[str]: return pulumi.get(self, "type")
true
true
1c4376331213d71be83752613cbfb6ce878134b3
143
py
Python
problems/0025/compute.py
Dynortice/Project-Euler
99a0201b5d5f147eab77fc52d9db8995045cded0
[ "MIT" ]
null
null
null
problems/0025/compute.py
Dynortice/Project-Euler
99a0201b5d5f147eab77fc52d9db8995045cded0
[ "MIT" ]
null
null
null
problems/0025/compute.py
Dynortice/Project-Euler
99a0201b5d5f147eab77fc52d9db8995045cded0
[ "MIT" ]
null
null
null
from math import ceil, log, sqrt


def compute(n: int) -> int:
    return int(ceil((log(10) * (n - 1) + log(5) / 2) / log((1 + sqrt(5)) / 2)))
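The one-liner above inverts Binet's approximation F_k ≈ φ^k / √5: requiring at least n digits means F_k ≥ 10^(n-1), so k ≥ ((n-1)·ln 10 + ln 5 / 2) / ln φ, and the ceiling picks the first such index. A small self-check against a direct Fibonacci loop (the helper name first_index_with_n_digits is invented for illustration):

from math import ceil, log, sqrt

def compute(n: int) -> int:  # same closed form as the file above
    return int(ceil((log(10) * (n - 1) + log(5) / 2) / log((1 + sqrt(5)) / 2)))

def first_index_with_n_digits(n: int) -> int:
    a, b, k = 1, 1, 2  # F_1 = F_2 = 1
    while len(str(b)) < n:
        a, b, k = b, a + b, k + 1
    return k

assert all(compute(n) == first_index_with_n_digits(n) for n in range(1, 11))
print(compute(1000))  # Project Euler 25 asks for the first term with 1000 digits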
23.833333
79
0.538462
from math import ceil, log, sqrt


def compute(n: int) -> int:
    return int(ceil((log(10) * (n - 1) + log(5) / 2) / log((1 + sqrt(5)) / 2)))
true
true
1c43775eddde1322d48ee5b6171359f9460a3375
325
py
Python
dagology/tests/test_metrics.py
JamesClough/dagology
5421fd0ad439e70a61d0408eb1cacebaa403f671
[ "MIT" ]
5
2017-02-16T21:35:28.000Z
2020-08-09T07:33:30.000Z
dagology/tests/test_metrics.py
JamesClough/dagology
5421fd0ad439e70a61d0408eb1cacebaa403f671
[ "MIT" ]
null
null
null
dagology/tests/test_metrics.py
JamesClough/dagology
5421fd0ad439e70a61d0408eb1cacebaa403f671
[ "MIT" ]
3
2018-04-20T08:58:24.000Z
2020-04-11T02:25:56.000Z
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_in
from nose.tools import assert_raises
from nose.tools import assert_true

import networkx as nx
import numpy as np

import dagology as dag


class TestEucludea(object):
    """ Unit tests for interval function"""
    pass
21.666667
43
0.796923
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_in
from nose.tools import assert_raises
from nose.tools import assert_true

import networkx as nx
import numpy as np

import dagology as dag


class TestEucludea(object):
    pass
true
true
1c4377c3dc57f9c21495e90e367e31c935a9a01f
1,824
py
Python
code/hd_Preprocessing.py
snehil1703/Home-Depot-Search-Relevance-Data-Analysis
d036734f48ac35e608b792a3c68b97c97d51357f
[ "Apache-2.0" ]
null
null
null
code/hd_Preprocessing.py
snehil1703/Home-Depot-Search-Relevance-Data-Analysis
d036734f48ac35e608b792a3c68b97c97d51357f
[ "Apache-2.0" ]
null
null
null
code/hd_Preprocessing.py
snehil1703/Home-Depot-Search-Relevance-Data-Analysis
d036734f48ac35e608b792a3c68b97c97d51357f
[ "Apache-2.0" ]
null
null
null
# Importing required packages
import pandas
import numpy

#Importing required files
import hd_DataAnalysis as sd
import hd_Preprocessing as prep

# Extracting the features of the three main attributes
def feature_extraction(main_data):
    main_data['search_term'] = main_data['search_term'].map(lambda x: sd.preprocess_word(x))
    main_data['product_title'] = main_data['product_title'].map(lambda x: sd.preprocess_word(x))
    main_data['product_description'] = main_data['product_description'].map(lambda x: sd.preprocess_word(x))
    return main_data

# Create new features in the main data file
def add_attributes(main_data):
    # Integrating all the above three extracted features into a single column.
    main_data['prod_combined_info'] = main_data['search_term'] + "\t" + main_data['product_title'] + "\t" + main_data['product_description']
    main_data['length_of_search_query'] = main_data['search_term'].map(lambda x: len(x.split())).astype(numpy.int64)
    main_data['length_of_title'] = main_data['product_title'].map(lambda x: len(x.split())).astype(numpy.int64)
    main_data['length_of_description'] = main_data['product_description'].map(lambda x: len(x.split())).astype(numpy.int64)
    main_data['query_in_title'] = main_data['prod_combined_info'].map(lambda x: sd.freq_of_words(x.split('\t')[0], x.split('\t')[1], 0))
    main_data['query_in_description'] = main_data['prod_combined_info'].map(lambda x: sd.freq_of_words(x.split('\t')[0], x.split('\t')[2], 0))
    main_data['title_commonWord'] = main_data['prod_combined_info'].map(lambda x: sd.find_common_words(x.split('\t')[0], x.split('\t')[1]))
    main_data['description_commonWord'] = main_data['prod_combined_info'].map(lambda x: sd.find_common_words(x.split('\t')[0], x.split('\t')[2]))
    print("Step2: Feature Extraction & Integrated attributes")
    return main_data
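The functions above are plain pandas/numpy column mapping; below is a minimal sketch of the same token-count pattern on an invented toy frame (the example rows are assumptions; the column and feature names come from the file):

import pandas as pd
import numpy as np

toy = pd.DataFrame({
    "search_term": ["angle bracket", "deck screws 3 in"],
    "product_title": ["galvanized angle bracket", "exterior deck screws"],
})

# Same pattern as add_attributes: whitespace token count stored as an int64 feature.
toy["length_of_search_query"] = toy["search_term"].map(lambda x: len(x.split())).astype(np.int64)
toy["length_of_title"] = toy["product_title"].map(lambda x: len(x.split())).astype(np.int64)
print(toy)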
46.769231
142
0.750548
import pandas
import numpy

import hd_DataAnalysis as sd
import hd_Preprocessing as prep

def feature_extraction(main_data):
    main_data['search_term'] = main_data['search_term'].map(lambda x: sd.preprocess_word(x))
    main_data['product_title'] = main_data['product_title'].map(lambda x: sd.preprocess_word(x))
    main_data['product_description'] = main_data['product_description'].map(lambda x: sd.preprocess_word(x))
    return main_data

def add_attributes(main_data):
    main_data['prod_combined_info'] = main_data['search_term'] + "\t" + main_data['product_title'] + "\t" + main_data['product_description']
    main_data['length_of_search_query'] = main_data['search_term'].map(lambda x: len(x.split())).astype(numpy.int64)
    main_data['length_of_title'] = main_data['product_title'].map(lambda x: len(x.split())).astype(numpy.int64)
    main_data['length_of_description'] = main_data['product_description'].map(lambda x: len(x.split())).astype(numpy.int64)
    main_data['query_in_title'] = main_data['prod_combined_info'].map(lambda x: sd.freq_of_words(x.split('\t')[0], x.split('\t')[1], 0))
    main_data['query_in_description'] = main_data['prod_combined_info'].map(lambda x: sd.freq_of_words(x.split('\t')[0], x.split('\t')[2], 0))
    main_data['title_commonWord'] = main_data['prod_combined_info'].map(lambda x: sd.find_common_words(x.split('\t')[0], x.split('\t')[1]))
    main_data['description_commonWord'] = main_data['prod_combined_info'].map(lambda x: sd.find_common_words(x.split('\t')[0], x.split('\t')[2]))
    print("Step2: Feature Extraction & Integrated attributes")
    return main_data
true
true