text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""
hello
Scenario for Hello World module
:copyright: (c) 2013 by Openlabs Technologies & Consulting (P) Limited
:license: Modified BSD, see LICENSE for more details.
"""
import uuid
MODEL = 'hello.hello'
METHOD = 'create'
def generate():
return {
'args': [
[{
'name': str(uuid.uuid1()),
'greeting': str(uuid.uuid1()),
}],
],
}
|
{
"content_hash": "db0f794f145bddf2e170945557869d3a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 17.24,
"alnum_prop": 0.5150812064965197,
"repo_name": "openlabs/tryton-bench",
"id": "cbddcbfe4aba9250c9428199d0ec8e032476f30a",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/hello.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8048"
}
],
"symlink_target": ""
}
|
import json
import requests
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
class DingdingHook(HttpHook):
"""
This hook allows you send Dingding message using Dingding custom bot.
Get Dingding token from conn_id.password. And prefer set domain to
conn_id.host, if not will use default ``https://oapi.dingtalk.com``.
For more detail message in
`Dingding custom bot <https://open-doc.dingtalk.com/microapp/serverapi2/qf2nxq>`_
:param dingding_conn_id: The name of the Dingding connection to use
:type dingding_conn_id: str
:param message_type: Message type you want to send to Dingding, support five type so far
including text, link, markdown, actionCard, feedCard
:type message_type: str
:param message: The message send to Dingding chat group
:type message: str or dict
:param at_mobiles: Remind specific users with this message
:type at_mobiles: list[str]
:param at_all: Remind all people in group or not. If True, will overwrite ``at_mobiles``
:type at_all: bool
"""
def __init__(self,
dingding_conn_id='dingding_default',
message_type='text',
message=None,
at_mobiles=None,
at_all=False,
*args,
**kwargs
):
super().__init__(http_conn_id=dingding_conn_id, *args, **kwargs)
self.message_type = message_type
self.message = message
self.at_mobiles = at_mobiles
self.at_all = at_all
def _get_endpoint(self):
"""
Get Dingding endpoint for sending message.
"""
conn = self.get_connection(self.http_conn_id)
token = conn.password
if not token:
raise AirflowException('Dingding token is requests but get nothing, '
'check you conn_id configuration.')
return 'robot/send?access_token={}'.format(token)
def _build_message(self):
"""
Build different type of Dingding message
As most commonly used type, text message just need post message content
rather than a dict like ``{'content': 'message'}``
"""
if self.message_type in ['text', 'markdown']:
data = {
'msgtype': self.message_type,
self.message_type: {
'content': self.message
} if self.message_type == 'text' else self.message,
'at': {
'atMobiles': self.at_mobiles,
'isAtAll': self.at_all
}
}
else:
data = {
'msgtype': self.message_type,
self.message_type: self.message
}
return json.dumps(data)
def get_conn(self, headers=None):
"""
Overwrite HttpHook get_conn because just need base_url and headers and
not don't need generic params
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
"""
conn = self.get_connection(self.http_conn_id)
self.base_url = conn.host if conn.host else 'https://oapi.dingtalk.com'
session = requests.Session()
if headers:
session.headers.update(headers)
return session
def send(self):
"""
Send Dingding message
"""
support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard']
if self.message_type not in support_type:
raise ValueError('DingdingWebhookHook only support {} '
'so far, but receive {}'.format(support_type, self.message_type))
data = self._build_message()
self.log.info('Sending Dingding type %s message %s', self.message_type, data)
resp = self.run(endpoint=self._get_endpoint(),
data=data,
headers={'Content-Type': 'application/json'})
# Dingding success send message will with errcode equal to 0
if int(resp.json().get('errcode')) != 0:
raise AirflowException('Send Dingding message failed, receive error '
f'message {resp.text}')
self.log.info('Success Send Dingding message')
|
{
"content_hash": "f887a8a9fe97d2661a9b813d242a51fe",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 94,
"avg_line_length": 38,
"alnum_prop": 0.5782608695652174,
"repo_name": "wooga/airflow",
"id": "00e8b4e6f49684e90a7e155efbca412af65304e5",
"size": "5158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/dingding/hooks/dingding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
}
|
from _pytest.assertion.util import assertrepr_compare
from springfield import Entity
def pytest_assertrepr_compare(config, op, left, right):
"""
Provide field-by-field comparisons if someone
uses `assert a==b` to compare two entities.
"""
left_ent = isinstance(left, Entity)
right_ent = isinstance(right, Entity)
if not (left_ent or right_ent):
return None
if left_ent:
left = left.flatten()
if right_ent:
right = right.flatten()
return assertrepr_compare(config, op, left, right)
|
{
"content_hash": "dae9732a138ed968397a4616bbd734e3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 30.38888888888889,
"alnum_prop": 0.6691042047531993,
"repo_name": "six8/springfield",
"id": "9b01e04c1a45d086fd1496984f1a665e5ae5054e",
"size": "547",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/pytest_springfield/assertrepr_compare.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53442"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from datetime import date
from json import loads
from django.core.serializers import serialize
from django.test import TestCase
from django.utils.six import StringIO
from multigtfs.models import Feed, Frequency, Route, Service, Trip
from multigtfs.models.fields import Seconds
class FrequencyTest(TestCase):
def setUp(self):
self.feed = Feed.objects.create()
self.route = Route.objects.create(
feed=self.feed, route_id='R1', rtype=3)
self.service = Service.objects.create(
feed=self.feed, service_id='S1', start_date=date(2011, 4, 14),
end_date=date(2011, 12, 31))
self.trip = Trip.objects.create(
route=self.route, service=self.service, trip_id='STBA')
def test_string(self):
frequency = Frequency.objects.create(
trip=self.trip, start_time='6:00', end_time='22:00',
headway_secs=1800)
self.assertEqual(str(frequency), '%d-R1-STBA' % self.feed.id)
def test_import_frequencies_txt_minimal(self):
frequencies_txt = StringIO("""\
trip_id,start_time,end_time,headway_secs
STBA,6:00:00,22:00:00,1800
""")
Frequency.import_txt(frequencies_txt, self.feed)
frequency = Frequency.objects.get()
self.assertEqual(frequency.trip, self.trip)
self.assertEqual(frequency.start_time, Seconds.from_hms(hours=6))
self.assertEqual(frequency.end_time, Seconds.from_hms(hours=22))
self.assertEqual(frequency.headway_secs, 1800)
self.assertEqual(frequency.exact_times, '')
def test_import_frequencies_txt_duplicate(self):
frequencies_txt = StringIO("""\
trip_id,start_time,end_time,headway_secs
STBA,6:00:00,8:00:00,1800
STBA,6:00:00,10:00:00,1200
STBA,10:00:00,12:00:00,1500
""")
Frequency.import_txt(frequencies_txt, self.feed)
self.assertEqual(2, Frequency.objects.count())
freq1, freq2 = Frequency.objects.order_by('start_time')
self.assertEqual(freq1.trip, self.trip)
self.assertEqual(freq1.start_time, Seconds.from_hms(hours=6))
self.assertEqual(freq1.end_time, Seconds.from_hms(hours=8))
self.assertEqual(freq1.headway_secs, 1800)
self.assertEqual(freq2.trip, self.trip)
self.assertEqual(freq2.start_time, Seconds.from_hms(hours=10))
self.assertEqual(freq2.end_time, Seconds.from_hms(hours=12))
self.assertEqual(freq2.headway_secs, 1500)
def test_import_frequencies_txt_maximal(self):
frequencies_txt = StringIO("""\
trip_id,start_time,end_time,headway_secs,exact_times
STBA,6:00:00,23:30:35,1800,1
""")
Frequency.import_txt(frequencies_txt, self.feed)
frequency = Frequency.objects.get()
self.assertEqual(frequency.trip, self.trip)
self.assertEqual(frequency.start_time, Seconds.from_hms(hours=6))
self.assertEqual(frequency.end_time, Seconds.from_hms(23, 30, 35))
self.assertEqual(frequency.headway_secs, 1800)
self.assertEqual(frequency.exact_times, '1')
def test_import_frequencies_txt_omitted_with_rollover(self):
frequencies_txt = StringIO("""\
trip_id,start_time,end_time,headway_secs,exact_times
STBA,00:50:00,24:10:00,1800,
""")
Frequency.import_txt(frequencies_txt, self.feed)
frequency = Frequency.objects.get()
self.assertEqual(str(frequency.start_time), '00:50:00')
self.assertEqual(frequency.end_time, Seconds.from_hms(24, 10))
self.assertEqual(frequency.headway_secs, 1800)
self.assertEqual(frequency.exact_times, '')
def test_export_frequencies_txt_none(self):
frequencies_txt = Frequency.export_txt(self.feed)
self.assertEqual(frequencies_txt, None)
def test_export_frequencies_txt_minimal(self):
Frequency.objects.create(
trip=self.trip, start_time=Seconds.from_hms(hours=6),
end_time=Seconds.from_hms(hours=22), headway_secs=1800)
frequencies_txt = Frequency.export_txt(self.feed)
self.assertEqual(frequencies_txt, """\
trip_id,start_time,end_time,headway_secs
STBA,06:00:00,22:00:00,1800
""")
def test_export_frequencies_txt_maximal(self):
Frequency.objects.create(
trip=self.trip, start_time='05:00', end_time='25:00',
headway_secs=1800)
frequencies_txt = Frequency.export_txt(self.feed)
self.assertEqual(frequencies_txt, """\
trip_id,start_time,end_time,headway_secs
STBA,05:00:00,25:00:00,1800
""")
def test_serialize(self):
'''Test serialization of Frequency, which has a SecondsField'''
f = Frequency.objects.create(
trip=self.trip, start_time='05:00', end_time='25:00',
headway_secs=1800)
actual = loads(serialize('json', Frequency.objects.all()))
expected = [{
u"pk": f.id,
u"model": u"multigtfs.frequency",
u"fields": {
u"exact_times": u"",
u"extra_data": u"{}",
u"start_time": u"05:00:00",
u"headway_secs": 1800,
u"trip": self.trip.id,
u"end_time": u"25:00:00"}}]
self.maxDiff = None
self.assertEqual(expected, actual)
|
{
"content_hash": "8a1e9e954bf5bbf22e72f8d80c532af0",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 74,
"avg_line_length": 40.9609375,
"alnum_prop": 0.6517261110051498,
"repo_name": "inmagik/django-multi-gtfs",
"id": "4151ac331f9139702fddf345aaaa1d9632331f67",
"size": "5824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "multigtfs/tests/test_frequency.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "36570"
},
{
"name": "JavaScript",
"bytes": "1754"
},
{
"name": "Makefile",
"bytes": "1921"
},
{
"name": "Python",
"bytes": "659780"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.mantis_project import ProjectHelper
from fixture.james import JamesHelper
from fixture.mail import MailHelper
from fixture.signup import SignupHelper
from fixture.soap import SoapHelper
class Application:
def __init__(self, browser, config):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
#self.wd.implicitly_wait(5)
self.session = SessionHelper(self)
self.config=config
self.base_url = config['web']['baseUrl']
self.project = ProjectHelper(self)
self.james = JamesHelper(self)
self.signup = SignupHelper(self)
self.mail = MailHelper(self)
self.soap = SoapHelper(self)
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
|
{
"content_hash": "94c33b0f771a1db80d005ec9a09cbd37",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 29.69047619047619,
"alnum_prop": 0.6174819566960705,
"repo_name": "ainur-fa/python_training_mantis",
"id": "0f75826530dfce84f8f1c0f3abd7de65c93d7327",
"size": "1271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "387"
},
{
"name": "Python",
"bytes": "18934"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
from tasklog import views
urlpatterns = patterns('',
# Examples:
url(r'^$', views.home, name='home'),
)
|
{
"content_hash": "c9e9b323db723baef58fb7d84852f741",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 51,
"avg_line_length": 23.083333333333332,
"alnum_prop": 0.7111913357400722,
"repo_name": "lrsjohnson/time-log",
"id": "ffb4ab084cc7960994923ce9944154a2c526269d",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracker/tasklog/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7584"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from pip.req import parse_requirements
from pip.download import PipSession
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
pytest.main(self.test_args)
def read_requirements_txt():
fh = parse_requirements("requirements.txt",session=PipSession());
return [str(ir.req) for ir in fh]
setup(
name="composablepairingheap",
version="0.3.3",
author="Edgar Klerks",
author_email="edgar.klerks@gmail.com",
description="A datastructure, which can be composed with another datastructure to yield a hybrid between the two",
packages=find_packages('src'),
package_dir={'':'src'},
url="http://github.com/edgarklerks/composablepairingheap",
package_data={'':["src/*.md", "src/*.txt","*.txt","*.md"]},
tests_require=['pytest'],
test_suite="tests",
setup_requires=['pytest-runner'],
cmdclass={'test':PyTest},
install_requires=read_requirements_txt(),
)
|
{
"content_hash": "dd9e204cab5f92c675529e5cb6fd3b52",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 119,
"avg_line_length": 33.67567567567568,
"alnum_prop": 0.6725521669341894,
"repo_name": "edgarklerks/composablepairingheap",
"id": "79f6d5cf1b148e730eca8d27c977610624336aa8",
"size": "1246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17909"
}
],
"symlink_target": ""
}
|
"""
@author: David John Gagne (djgagne@ou.edu)
"""
from collections import OrderedDict
import numpy as np
from scipy.ndimage import find_objects
class EnhancedWatershed(object):
"""
The enhanced watershed performs image segmentation using a modified version of the traditional watershed technique.
It includes a size criteria and creates foothills around each object to keep them distinct. The object is used to
store the quantization and size parameters. It can be used to watershed multiple grids.
Attributes:
min_intensity (int): minimum pixel value for pixel to be part of a region
data_increment (int): quantization interval. Use 1 if you don't want to quantize
max_intensity (int): values greater than maxThresh are treated as the maximum threshold
size_threshold_pixels (int): clusters smaller than this threshold are ignored.
delta (int): maximum number of data increments the cluster is allowed to range over. Larger d results in clusters over larger scales.
"""
def __init__(self, min_intensity, data_increment, max_intensity, size_threshold_pixels, delta):
self.min_intensity = min_intensity
self.data_increment = data_increment
self.max_intensity = max_intensity
self.max_size = size_threshold_pixels
self.delta = delta
self.max_bin = int((self.max_intensity - self.min_intensity) / self.data_increment)
self.UNMARKED = -1
self.GLOBBED = -3
self.TOOSMALL = -4
def label(self, input_grid, only_objects=True):
"""
Labels input grid using enhanced watershed algorithm.
Args:
input_grid (numpy.ndarray): Grid to be labeled.
only_objects (bool): Only return object pixel values on final grid
Returns:
Array of labeled pixels
"""
pixels, q_data = self.quantize(input_grid)
centers = self.find_local_maxima(pixels, q_data)
marked = self.grow_centers(centers, q_data)
if only_objects:
marked = np.where(marked > 0, marked, 0)
return marked
@staticmethod
def size_filter(labeled_grid, min_size):
"""
Removes labeled objects that are smaller than min_size, and relabels the remaining objects.
Args:
labeled_grid: Grid that has been labeled
min_size: Minimium object size.
Returns:
Labeled array with re-numbered objects to account for those that have been removed
"""
out_grid = np.zeros(labeled_grid.shape, dtype=int)
slices = find_objects(labeled_grid)
j = 1
for i, s in enumerate(slices):
box = labeled_grid[s]
size = np.count_nonzero(box == i + 1)
if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:
out_grid[np.where(labeled_grid == i + 1)] = j
j += 1
return out_grid
def find_local_maxima(self, pixels, q_data):
"""
Finds the local maxima in the inputGrid and perform region growing to identify objects.
Args:
pixels: dictionary of quantized pixel values
q_data: 2D array representation of quantized input data
Returns:
array with labeled objects.
"""
centers = OrderedDict()
for p in pixels.keys():
centers[p] = []
marked = np.ones(q_data.shape, dtype=np.int32) * self.UNMARKED
MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_size)))
MAX_INFL = 2 * MIN_INFL
marked_so_far = []
# Find the maxima. These are high-values with enough clearance
# around them.
# Work from high to low bins. The pixels in the highest bin mark their
# neighborhoods first. If you did it from low to high the lowest maxima
# would mark their neighborhoods first and interfere with the identification of higher maxima.
for b in sorted(pixels.keys(), reverse=True):
# Square starts large with high intensity bins and gets smaller with low intensity bins.
infl_dist = MIN_INFL + int(np.round(float(b) / self.max_bin * (MAX_INFL - MIN_INFL)))
for p in pixels[b]:
if marked[p] == self.UNMARKED:
ok = False
del marked_so_far[:]
# Temporarily mark unmarked points in square around point (keep track of them in list
# marked_so_far).
# If none of the points in square were marked already from a higher intensity center,
# this counts as a new center and ok=True and points will remain marked.
# Otherwise ok=False and marked points that were previously unmarked will be unmarked.
for (i, j), v in np.ndenumerate(marked[p[0] - infl_dist:p[0] + infl_dist + 1,
p[1] - infl_dist:p[1] + infl_dist + 1]):
if v == self.UNMARKED:
ok = True
marked[i - infl_dist + p[0], j - infl_dist + p[1]] = b
marked_so_far.append((i - infl_dist + p[0], j - infl_dist + p[1]))
else:
# neighborhood already taken
ok = False
break
# ok if point and surrounding square were not marked already.
if ok:
# highest point in its neighborhood
centers[b].append(p)
else:
for m in marked_so_far:
marked[m] = self.UNMARKED
return centers
def grow_centers(self, centers, q_data):
"""
Once
Args:
centers:
q_data:
Returns:
"""
marked = np.ones(q_data.shape, dtype=np.int32) * self.UNMARKED
deferred_from_last = []
deferred_to_next = []
center_keys = np.array(list(centers.keys()))[::-1]
capture_index = 1
foothills = []
for diff in range(0, self.delta + 1):
# Work from high to low bins.
for b in center_keys:
bin_lower = b - diff
deferred_from_last[:] = deferred_to_next[:]
del deferred_to_next[:]
new_centers = len(centers[b])
old_centers = len(deferred_from_last)
tot_centers = new_centers + old_centers
for i in range(tot_centers):
# done this way to minimize memory overhead of maintaining two lists
if i < old_centers:
center = deferred_from_last[i]
else:
center = centers[b][i - old_centers]
if bin_lower < 0:
bin_lower = 0
if marked[center] == self.UNMARKED:
captured = self.set_maximum(q_data, marked, center, bin_lower, foothills, capture_index)
if not captured:
# decrement to lower value to see if it'll get big enough
deferred_to_next.append(center)
else:
capture_index += 1
# this is the last one for this bin
self.remove_foothills(q_data, marked, b, bin_lower, centers, foothills)
del deferred_from_last[:]
del deferred_to_next[:]
return marked
def set_maximum(self, q_data, marked, center, bin_lower, foothills, capture_index):
"""
Grow a region at a certain bin level and check if the region has reached the maximum size.
Args:
q_data: Quantized data array
marked: Array marking points that are objects
center: Coordinates of the center pixel of the region being grown
bin_lower: Intensity level of lower bin being evaluated
foothills: List of points that are associated with a center but fall outside the the size or
intensity criteria
capture_index:
Returns:
True if the object is finished growing and False if the object should be grown again at the next
threshold level.
"""
as_bin = [] # pixels to be included in peak
as_glob = [] # pixels to be globbed up as part of foothills
marked_so_far = [] # pixels that have already been marked
will_be_considered_again = False
as_bin.append(center)
center_data = q_data[center]
while len(as_bin) > 0:
p = as_bin.pop(-1) # remove and return last pixel in as_bin
if marked[p] != self.UNMARKED: # already processed
continue
marked[p] = capture_index
marked_so_far.append(p)
# check neighbors
for index, val in np.ndenumerate(marked[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2]):
# is neighbor part of peak or part of mountain?
if val == self.UNMARKED:
pixel = (index[0] - 1 + p[0], index[1] - 1 + p[1])
p_data = q_data[pixel]
if (not will_be_considered_again) and (p_data >= 0) and (p_data < center_data):
will_be_considered_again = True
if p_data >= bin_lower:
as_bin.append(pixel)
# Do not check that this is the closest: this way, a narrow channel of globbed pixels form
elif p_data >= 0:
as_glob.append(pixel)
if bin_lower == 0:
will_be_considered_again = False
big_enough = len(marked_so_far) >= self.max_size
if big_enough:
# remove lower values within region of influence
foothills.append((center, as_glob))
elif will_be_considered_again: # remove the check if you want to ignore regions smaller than max_size
for m in marked_so_far:
marked[m] = self.UNMARKED
del as_bin[:]
del as_glob[:]
del marked_so_far[:]
return big_enough or (not will_be_considered_again)
def remove_foothills(self, q_data, marked, bin_num, bin_lower, centers, foothills):
"""
Mark points determined to be foothills as globbed, so that they are not included in
future searches. Also searches neighboring points to foothill points to determine
if they should also be considered foothills.
Args:
q_data: Quantized data
marked: Marked
bin_num: Current bin being searched
bin_lower: Next bin being searched
centers: dictionary of local maxima considered to be object centers
foothills: List of foothill points being removed.
"""
hills = []
for foot in foothills:
center = foot[0]
hills[:] = foot[1][:]
# remove all foothills
while len(hills) > 0:
# mark this point
pt = hills.pop(-1)
marked[pt] = self.GLOBBED
for s_index, val in np.ndenumerate(marked[pt[0] - 1:pt[0] + 2, pt[1] - 1:pt[1] + 2]):
index = (s_index[0] - 1 + pt[0], s_index[1] - 1 + pt[1])
# is neighbor part of peak or part of mountain?
if val == self.UNMARKED:
# will let in even minor peaks
if (q_data[index] >= 0) and \
(q_data[index] < bin_lower) and \
((q_data[index] <= q_data[pt]) or self.is_closest(index, center, centers, bin_num)):
hills.append(index)
del foothills[:]
@staticmethod
def is_closest(point, center, centers, bin_num):
bin_thresh = int(bin_num / 2)
p_arr = np.array(point)
c_arr = np.array(center)
my_dist = np.sum(np.power(p_arr - c_arr, 2))
for o_bin in range(bin_thresh, len(centers.keys())):
for c in centers[o_bin]:
oc_arr = np.array(c)
if np.sum(np.power(p_arr - oc_arr, 2)) < my_dist:
return False
return True
def quantize(self, input_grid):
"""
Quantize a grid into discrete steps based on input parameters.
Args:
input_grid: 2-d array of values
Returns:
Dictionary of value pointing to pixel locations, and quantized 2-d array of data
"""
pixels = dict()
for i in range(self.max_bin + 1):
pixels[i] = []
data = (np.array(input_grid, dtype=np.int32) - self.min_intensity) // self.data_increment
data[data < 0] = -1
data[data > self.max_bin] = self.max_bin
good_points = np.where(data >= 0)
for g in np.arange(good_points[0].shape[0]):
pixels[data[(good_points[0][g], good_points[1][g])]].append((good_points[0][g], good_points[1][g]))
return pixels, data
@staticmethod
def is_valid(point, shape):
return np.all((np.array(point) >= 0) & (np.array(shape) - np.array(point) > 0))
def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0):
"""
Rescale your input data so that is ranges over integer values, which will perform better in the watershed.
Args:
data: 2D or 3D ndarray being rescaled
data_min: minimum value of input data for scaling purposes
data_max: maximum value of input data for scaling purposes
out_min: minimum value of scaled data
out_max: maximum value of scaled data
Returns:
Linearly scaled ndarray
"""
return (out_max - out_min) / (data_max - data_min) * (data - data_min) + out_min
|
{
"content_hash": "aa2de63dccb951cc4580e9908a652136",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 141,
"avg_line_length": 43.8411214953271,
"alnum_prop": 0.5507709798905706,
"repo_name": "djgagne/hagelslag",
"id": "c5622a2f59947787f49bcc1e86383fcdcacf334f",
"size": "14593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hagelslag/processing/EnhancedWatershedSegmenter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6617598"
},
{
"name": "Python",
"bytes": "933497"
},
{
"name": "Shell",
"bytes": "5545"
}
],
"symlink_target": ""
}
|
import StringIO
import hashlib
from math import floor
from PySide.QtGui import QPixmap
def transformToLists(*args):
params = []
for arg in args:
if arg is not None and not isinstance(arg, list):
params.append([arg])
else:
params.append(arg)
return params
def inv_duration(duracion_string):
l = duracion_string.split(":")
l.reverse()
i = 0
total = 0
for num in l:
num = int(num)
total += num*(60**i)
i+=1
return total
def secondsToDuration(seconds):
minutes = int(floor(seconds / 60))
seconds = seconds % 60
return "%d:%02d" % (minutes, seconds)
def numeroATexto(numero):
if numero < 1000:
return str(numero)
elif numero < 1000000:
return "{}k".format(numero / 1000)
else:
return "{}m".format(numero / 1000000)
def segundosAHora(time):
hor = (int(time / 3600))
minu = int((time - (hor * 3600)) / 60)
seg = time - ((hor * 3600) + (minu * 60))
if seg < 10:
seg = "0" + str(seg)
if hor == 0:
return str(minu) + ":" + str(trunc(seg))
else:
return str(hor) + ":" + str(minu) + ":" + str(trunc(seg))
def trunc(seg):
a = str(seg).split(".")
return a[0]
def getPixmap(raw_image_data):
pixmap = QPixmap()
pixmap.loadFromData(raw_image_data)
return pixmap
def contarTiempoCanciones(canciones):
tiempo = 0
for cancion in canciones:
tiempo += cancion.duracion
return tiempo
def segundosAHoras(segundos):
m, s = divmod(segundos, 60)
h, m = divmod(m, 60)
return "%02d:%02d" % (h, m)
def hasNone(list_dict_or_var):
if list_dict_or_var is None:
return True
if isinstance(list_dict_or_var, list):
if None in list_dict_or_var:
return True
if isinstance(list_dict_or_var, dict):
if None in list_dict_or_var.values():
return True
return False
def hashString(string):
return hashlib.sha256(string).hexdigest()
def obtenerImagenDesdeBinario(dato):
file_imagen = StringIO.StringIO(dato)
image_data = file_imagen.getvalue()
file_imagen.close()
if image_data == "None":
image_data = None
return image_data
def obtenerImagenDesdePath(path):
file_imagen = StringIO.StringIO(open(path,'rb').read())
image_data = file_imagen.getvalue()
file_imagen.close()
if image_data == "None":
image_data = None
return image_data
|
{
"content_hash": "188f693462901527d39811231b38bb0e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 59,
"avg_line_length": 21.94,
"alnum_prop": 0.6768459434822243,
"repo_name": "andimarafioti/intercomunicador",
"id": "89d94f6354f2946e0683ce7d573fd03d930d2b1d",
"size": "2218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32761"
},
{
"name": "Shell",
"bytes": "297"
}
],
"symlink_target": ""
}
|
import weakref
from . import attributes
from .. import util
from .. import exc as sa_exc
from . import util as orm_util
class IdentityMap(object):
def __init__(self):
self._dict = {}
self._modified = set()
self._wr = weakref.ref(self)
def keys(self):
return self._dict.keys()
def replace(self, state):
raise NotImplementedError()
def add(self, state):
raise NotImplementedError()
def _add_unpresent(self, state, key):
"""optional inlined form of add() which can assume item isn't present
in the map"""
self.add(state)
def update(self, dict):
raise NotImplementedError("IdentityMap uses add() to insert data")
def clear(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def _manage_incoming_state(self, state):
state._instance_dict = self._wr
if state.modified:
self._modified.add(state)
def _manage_removed_state(self, state):
del state._instance_dict
if state.modified:
self._modified.discard(state)
def _dirty_states(self):
return self._modified
def check_modified(self):
"""return True if any InstanceStates present have been marked
as 'modified'.
"""
return bool(self._modified)
def has_key(self, key):
return key in self
def popitem(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def pop(self, key, *args):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def setdefault(self, key, default=None):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __len__(self):
return len(self._dict)
def copy(self):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __delitem__(self, key):
raise NotImplementedError("IdentityMap uses remove() to remove data")
class WeakInstanceDict(IdentityMap):
def __getitem__(self, key):
state = self._dict[key]
o = state.obj()
if o is None:
raise KeyError(key)
return o
def __contains__(self, key):
try:
if key in self._dict:
state = self._dict[key]
o = state.obj()
else:
return False
except KeyError:
return False
else:
return o is not None
def contains_state(self, state):
if state.key in self._dict:
try:
return self._dict[state.key] is state
except KeyError:
return False
else:
return False
def replace(self, state):
if state.key in self._dict:
try:
existing = self._dict[state.key]
except KeyError:
# catch gc removed the key after we just checked for it
pass
else:
if existing is not state:
self._manage_removed_state(existing)
else:
return
self._dict[state.key] = state
self._manage_incoming_state(state)
def add(self, state):
key = state.key
# inline of self.__contains__
if key in self._dict:
try:
existing_state = self._dict[key]
except KeyError:
# catch gc removed the key after we just checked for it
pass
else:
if existing_state is not state:
o = existing_state.obj()
if o is not None:
raise sa_exc.InvalidRequestError(
"Can't attach instance "
"%s; another instance with key %s is already "
"present in this session." % (
orm_util.state_str(state), state.key))
else:
return False
self._dict[key] = state
self._manage_incoming_state(state)
return True
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
self._dict[key] = state
state._instance_dict = self._wr
def get(self, key, default=None):
if key not in self._dict:
return default
try:
state = self._dict[key]
except KeyError:
# catch gc removed the key after we just checked for it
return default
else:
o = state.obj()
if o is None:
return default
return o
def items(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append((state.key, value))
return result
def values(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append(value)
return result
def __iter__(self):
return iter(self.keys())
if util.py2k:
def iteritems(self):
return iter(self.items())
def itervalues(self):
return iter(self.values())
def all_states(self):
if util.py2k:
return self._dict.values()
else:
return list(self._dict.values())
def _fast_discard(self, state):
self._dict.pop(state.key, None)
def discard(self, state):
st = self._dict.pop(state.key, None)
if st:
assert st is state
self._manage_removed_state(state)
def safe_discard(self, state):
if state.key in self._dict:
try:
st = self._dict[state.key]
except KeyError:
# catch gc removed the key after we just checked for it
pass
else:
if st is state:
self._dict.pop(state.key, None)
self._manage_removed_state(state)
def prune(self):
return 0
class StrongInstanceDict(IdentityMap):
"""A 'strong-referencing' version of the identity map.
.. deprecated 1.1::
The strong
reference identity map is legacy. See the
recipe at :ref:`session_referencing_behavior` for
an event-based approach to maintaining strong identity
references.
"""
if util.py2k:
def itervalues(self):
return self._dict.itervalues()
def iteritems(self):
return self._dict.iteritems()
def __iter__(self):
return iter(self.dict_)
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def get(self, key, default=None):
return self._dict.get(key, default)
def values(self):
return self._dict.values()
def items(self):
return self._dict.items()
def all_states(self):
return [attributes.instance_state(o) for o in self.values()]
def contains_state(self, state):
return (
state.key in self and
attributes.instance_state(self[state.key]) is state)
def replace(self, state):
if state.key in self._dict:
existing = self._dict[state.key]
existing = attributes.instance_state(existing)
if existing is not state:
self._manage_removed_state(existing)
else:
return
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
def add(self, state):
if state.key in self:
if attributes.instance_state(self._dict[state.key]) is not state:
raise sa_exc.InvalidRequestError(
"Can't attach instance "
"%s; another instance with key %s is already "
"present in this session." % (
orm_util.state_str(state), state.key))
return False
else:
self._dict[state.key] = state.obj()
self._manage_incoming_state(state)
return True
def _add_unpresent(self, state, key):
# inlined form of add() called by loading.py
self._dict[key] = state.obj()
state._instance_dict = self._wr
def _fast_discard(self, state):
self._dict.pop(state.key, None)
def discard(self, state):
obj = self._dict.pop(state.key, None)
if obj is not None:
self._manage_removed_state(state)
st = attributes.instance_state(obj)
assert st is state
def safe_discard(self, state):
if state.key in self._dict:
obj = self._dict[state.key]
st = attributes.instance_state(obj)
if st is state:
self._dict.pop(state.key, None)
self._manage_removed_state(state)
def prune(self):
"""prune unreferenced, non-dirty states."""
ref_count = len(self)
dirty = [s.obj() for s in self.all_states() if s.modified]
# work around http://bugs.python.org/issue6149
keepers = weakref.WeakValueDictionary()
keepers.update(self)
self._dict.clear()
self._dict.update(keepers)
self.modified = bool(dirty)
return ref_count - len(self)
|
{
"content_hash": "afdccf4eb0dc13d9340ce34d490b4af9",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 77,
"avg_line_length": 28.617210682492583,
"alnum_prop": 0.5404396515968478,
"repo_name": "williamfeng323/py-web",
"id": "8f4304ad2633f70a2d31a5639cf1bf67a7c6c153",
"size": "9880",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "flask/lib/python3.6/site-packages/sqlalchemy/orm/identity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39957"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "6046"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Mako",
"bytes": "10018"
},
{
"name": "Python",
"bytes": "15554131"
},
{
"name": "Shell",
"bytes": "6007"
}
],
"symlink_target": ""
}
|
tests=[
("python","UnitTestDbConnect.py",{}),
("python","UnitTestDbUtils.py",{}),
("python","UnitTestDbResultSet.py",{}),
("python","StorageUtils.py",{}),
]
longTests=[
]
|
{
"content_hash": "c66d89b9d2bbce858ead792cb3ba6be9",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 41,
"avg_line_length": 16.90909090909091,
"alnum_prop": 0.5860215053763441,
"repo_name": "strets123/rdkit",
"id": "b2ffe5ee54bf41d2252049f11d4624a6e5212ea0",
"size": "187",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "rdkit/Dbase/test_list.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "203078"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7068170"
},
{
"name": "CMake",
"bytes": "584702"
},
{
"name": "CSS",
"bytes": "4742"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "65468"
},
{
"name": "Java",
"bytes": "248620"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "27271"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15431"
},
{
"name": "Objective-C",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3033212"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "8899"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49170"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import sys
import textwrap
import _pytest.assertion as plugin
import py
import pytest
from _pytest.assertion import util
from _pytest.assertion import truncate
PY3 = sys.version_info >= (3, 0)
@pytest.fixture
def mock_config():
class Config(object):
verbose = False
def getoption(self, name):
if name == 'verbose':
return self.verbose
raise KeyError('Not mocked out: %s' % name)
return Config()
class TestImportHookInstallation(object):
@pytest.mark.parametrize('initial_conftest', [True, False])
@pytest.mark.parametrize('mode', ['plain', 'rewrite'])
def test_conftest_assertion_rewrite(self, testdir, initial_conftest, mode):
"""Test that conftest files are using assertion rewrite on import.
(#1619)
"""
testdir.tmpdir.join('foo/tests').ensure(dir=1)
conftest_path = 'conftest.py' if initial_conftest else 'foo/conftest.py'
contents = {
conftest_path: """
import pytest
@pytest.fixture
def check_first():
def check(values, value):
assert values.pop(0) == value
return check
""",
'foo/tests/test_foo.py': """
def test(check_first):
check_first([10, 30], 30)
"""
}
testdir.makepyfile(**contents)
result = testdir.runpytest_subprocess('--assert=%s' % mode)
if mode == 'plain':
expected = 'E AssertionError'
elif mode == 'rewrite':
expected = '*assert 10 == 30*'
else:
assert 0
result.stdout.fnmatch_lines([expected])
def test_rewrite_assertions_pytester_plugin(self, testdir):
"""
Assertions in the pytester plugin must also benefit from assertion
rewriting (#1920).
"""
testdir.makepyfile("""
pytest_plugins = ['pytester']
def test_dummy_failure(testdir): # how meta!
testdir.makepyfile('def test(): assert 0')
r = testdir.inline_run()
r.assertoutcome(passed=1)
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines([
'*assert 1 == 0*',
])
@pytest.mark.parametrize('mode', ['plain', 'rewrite'])
def test_pytest_plugins_rewrite(self, testdir, mode):
contents = {
'conftest.py': """
pytest_plugins = ['ham']
""",
'ham.py': """
import pytest
@pytest.fixture
def check_first():
def check(values, value):
assert values.pop(0) == value
return check
""",
'test_foo.py': """
def test_foo(check_first):
check_first([10, 30], 30)
""",
}
testdir.makepyfile(**contents)
result = testdir.runpytest_subprocess('--assert=%s' % mode)
if mode == 'plain':
expected = 'E AssertionError'
elif mode == 'rewrite':
expected = '*assert 10 == 30*'
else:
assert 0
result.stdout.fnmatch_lines([expected])
@pytest.mark.parametrize('mode', ['str', 'list'])
def test_pytest_plugins_rewrite_module_names(self, testdir, mode):
"""Test that pluginmanager correct marks pytest_plugins variables
for assertion rewriting if they are defined as plain strings or
list of strings (#1888).
"""
plugins = '"ham"' if mode == 'str' else '["ham"]'
contents = {
'conftest.py': """
pytest_plugins = {plugins}
""".format(plugins=plugins),
'ham.py': """
import pytest
""",
'test_foo.py': """
def test_foo(pytestconfig):
assert 'ham' in pytestconfig.pluginmanager.rewrite_hook._must_rewrite
""",
}
testdir.makepyfile(**contents)
result = testdir.runpytest_subprocess('--assert=rewrite')
assert result.ret == 0
@pytest.mark.parametrize('mode', ['plain', 'rewrite'])
@pytest.mark.parametrize('plugin_state', ['development', 'installed'])
def test_installed_plugin_rewrite(self, testdir, mode, plugin_state):
# Make sure the hook is installed early enough so that plugins
# installed via setuptools are re-written.
testdir.tmpdir.join('hampkg').ensure(dir=1)
contents = {
'hampkg/__init__.py': """
import pytest
@pytest.fixture
def check_first2():
def check(values, value):
assert values.pop(0) == value
return check
""",
'spamplugin.py': """
import pytest
from hampkg import check_first2
@pytest.fixture
def check_first():
def check(values, value):
assert values.pop(0) == value
return check
""",
'mainwrapper.py': """
import pytest, pkg_resources
plugin_state = "{plugin_state}"
class DummyDistInfo(object):
project_name = 'spam'
version = '1.0'
def _get_metadata(self, name):
# 'RECORD' meta-data only available in installed plugins
if name == 'RECORD' and plugin_state == "installed":
return ['spamplugin.py,sha256=abc,123',
'hampkg/__init__.py,sha256=abc,123']
# 'SOURCES.txt' meta-data only available for plugins in development mode
elif name == 'SOURCES.txt' and plugin_state == "development":
return ['spamplugin.py',
'hampkg/__init__.py']
return []
class DummyEntryPoint(object):
name = 'spam'
module_name = 'spam.py'
attrs = ()
extras = None
dist = DummyDistInfo()
def load(self, require=True, *args, **kwargs):
import spamplugin
return spamplugin
def iter_entry_points(name):
yield DummyEntryPoint()
pkg_resources.iter_entry_points = iter_entry_points
pytest.main()
""".format(plugin_state=plugin_state),
'test_foo.py': """
def test(check_first):
check_first([10, 30], 30)
def test2(check_first2):
check_first([10, 30], 30)
""",
}
testdir.makepyfile(**contents)
result = testdir.run(sys.executable, 'mainwrapper.py', '-s', '--assert=%s' % mode)
if mode == 'plain':
expected = 'E AssertionError'
elif mode == 'rewrite':
expected = '*assert 10 == 30*'
else:
assert 0
result.stdout.fnmatch_lines([expected])
def test_rewrite_ast(self, testdir):
testdir.tmpdir.join('pkg').ensure(dir=1)
contents = {
'pkg/__init__.py': """
import pytest
pytest.register_assert_rewrite('pkg.helper')
""",
'pkg/helper.py': """
def tool():
a, b = 2, 3
assert a == b
""",
'pkg/plugin.py': """
import pytest, pkg.helper
@pytest.fixture
def tool():
return pkg.helper.tool
""",
'pkg/other.py': """
l = [3, 2]
def tool():
assert l.pop() == 3
""",
'conftest.py': """
pytest_plugins = ['pkg.plugin']
""",
'test_pkg.py': """
import pkg.other
def test_tool(tool):
tool()
def test_other():
pkg.other.tool()
""",
}
testdir.makepyfile(**contents)
result = testdir.runpytest_subprocess('--assert=rewrite')
result.stdout.fnmatch_lines(['>*assert a == b*',
'E*assert 2 == 3*',
'>*assert l.pop() == 3*',
'E*AssertionError'])
def test_register_assert_rewrite_checks_types(self):
with pytest.raises(TypeError):
pytest.register_assert_rewrite(['pytest_tests_internal_non_existing'])
pytest.register_assert_rewrite('pytest_tests_internal_non_existing',
'pytest_tests_internal_non_existing2')
class TestBinReprIntegration(object):
def test_pytest_assertrepr_compare_called(self, testdir):
testdir.makeconftest("""
import pytest
l = []
def pytest_assertrepr_compare(op, left, right):
l.append((op, left, right))
@pytest.fixture
def list(request):
return l
""")
testdir.makepyfile("""
def test_hello():
assert 0 == 1
def test_check(list):
assert list == [("==", 0, 1)]
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines([
"*test_hello*FAIL*",
"*test_check*PASS*",
])
def callequal(left, right, verbose=False):
config = mock_config()
config.verbose = verbose
return plugin.pytest_assertrepr_compare(config, '==', left, right)
class TestAssert_reprcompare(object):
def test_different_types(self):
assert callequal([0, 1], 'foo') is None
def test_summary(self):
summary = callequal([0, 1], [0, 2])[0]
assert len(summary) < 65
def test_text_diff(self):
diff = callequal('spam', 'eggs')[1:]
assert '- spam' in diff
assert '+ eggs' in diff
def test_text_skipping(self):
lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs')
assert 'Skipping' in lines[1]
for line in lines:
assert 'a'*50 not in line
def test_text_skipping_verbose(self):
lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs', verbose=True)
assert '- ' + 'a'*50 + 'spam' in lines
assert '+ ' + 'a'*50 + 'eggs' in lines
def test_multiline_text_diff(self):
left = 'foo\nspam\nbar'
right = 'foo\neggs\nbar'
diff = callequal(left, right)
assert '- spam' in diff
assert '+ eggs' in diff
def test_list(self):
expl = callequal([0, 1], [0, 2])
assert len(expl) > 1
@pytest.mark.parametrize(
['left', 'right', 'expected'], [
([0, 1], [0, 2], """
Full diff:
- [0, 1]
? ^
+ [0, 2]
? ^
"""),
({0: 1}, {0: 2}, """
Full diff:
- {0: 1}
? ^
+ {0: 2}
? ^
"""),
(set([0, 1]), set([0, 2]), """
Full diff:
- set([0, 1])
? ^
+ set([0, 2])
? ^
""" if not PY3 else """
Full diff:
- {0, 1}
? ^
+ {0, 2}
? ^
""")
]
)
def test_iterable_full_diff(self, left, right, expected):
"""Test the full diff assertion failure explanation.
When verbose is False, then just a -v notice to get the diff is rendered,
when verbose is True, then ndiff of the pprint is returned.
"""
expl = callequal(left, right, verbose=False)
assert expl[-1] == 'Use -v to get the full diff'
expl = '\n'.join(callequal(left, right, verbose=True))
assert expl.endswith(textwrap.dedent(expected).strip())
def test_list_different_lengths(self):
expl = callequal([0, 1], [0, 1, 2])
assert len(expl) > 1
expl = callequal([0, 1, 2], [0, 1])
assert len(expl) > 1
def test_dict(self):
expl = callequal({'a': 0}, {'a': 1})
assert len(expl) > 1
def test_dict_omitting(self):
lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1})
assert lines[1].startswith('Omitting 1 identical item')
assert 'Common items' not in lines
for line in lines[1:]:
assert 'b' not in line
def test_dict_omitting_with_verbosity_1(self):
""" Ensure differing items are visible for verbosity=1 (#1512) """
lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=1)
assert lines[1].startswith('Omitting 1 identical item')
assert lines[2].startswith('Differing items')
assert lines[3] == "{'a': 0} != {'a': 1}"
assert 'Common items' not in lines
def test_dict_omitting_with_verbosity_2(self):
lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=2)
assert lines[1].startswith('Common items:')
assert 'Omitting' not in lines[1]
assert lines[2] == "{'b': 1}"
def test_set(self):
expl = callequal(set([0, 1]), set([0, 2]))
assert len(expl) > 1
def test_frozenzet(self):
expl = callequal(frozenset([0, 1]), set([0, 2]))
assert len(expl) > 1
def test_Sequence(self):
col = py.builtin._tryimport(
"collections.abc",
"collections",
"sys")
if not hasattr(col, "MutableSequence"):
pytest.skip("cannot import MutableSequence")
MutableSequence = col.MutableSequence
class TestSequence(MutableSequence): # works with a Sequence subclass
def __init__(self, iterable):
self.elements = list(iterable)
def __getitem__(self, item):
return self.elements[item]
def __len__(self):
return len(self.elements)
def __setitem__(self, item, value):
pass
def __delitem__(self, item):
pass
def insert(self, item, index):
pass
expl = callequal(TestSequence([0, 1]), list([0, 2]))
assert len(expl) > 1
def test_list_tuples(self):
expl = callequal([], [(1,2)])
assert len(expl) > 1
expl = callequal([(1,2)], [])
assert len(expl) > 1
def test_list_bad_repr(self):
class A(object):
def __repr__(self):
raise ValueError(42)
expl = callequal([], [A()])
assert 'ValueError' in "".join(expl)
expl = callequal({}, {'1': A()})
assert 'faulty' in "".join(expl)
def test_one_repr_empty(self):
"""
the faulty empty string repr did trigger
a unbound local error in _diff_text
"""
class A(str):
def __repr__(self):
return ''
expl = callequal(A(), '')
assert not expl
def test_repr_no_exc(self):
expl = ' '.join(callequal('foo', 'bar'))
assert 'raised in repr()' not in expl
def test_unicode(self):
left = py.builtin._totext('£€', 'utf-8')
right = py.builtin._totext('£', 'utf-8')
expl = callequal(left, right)
assert expl[0] == py.builtin._totext("'£€' == '£'", 'utf-8')
assert expl[1] == py.builtin._totext('- £€', 'utf-8')
assert expl[2] == py.builtin._totext('+ £', 'utf-8')
def test_nonascii_text(self):
"""
:issue: 877
non ascii python2 str caused a UnicodeDecodeError
"""
class A(str):
def __repr__(self):
return '\xff'
expl = callequal(A(), '1')
assert expl
def test_format_nonascii_explanation(self):
assert util.format_explanation('λ')
def test_mojibake(self):
# issue 429
left = 'e'
right = '\xc3\xa9'
if not isinstance(left, py.builtin.bytes):
left = py.builtin.bytes(left, 'utf-8')
right = py.builtin.bytes(right, 'utf-8')
expl = callequal(left, right)
for line in expl:
assert isinstance(line, py.builtin.text)
msg = py.builtin._totext('\n').join(expl)
assert msg
class TestFormatExplanation(object):
def test_special_chars_full(self, testdir):
# Issue 453, for the bug this would raise IndexError
testdir.makepyfile("""
def test_foo():
assert '\\n}' == ''
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*",
])
def test_fmt_simple(self):
expl = 'assert foo'
assert util.format_explanation(expl) == 'assert foo'
def test_fmt_where(self):
expl = '\n'.join(['assert 1',
'{1 = foo',
'} == 2'])
res = '\n'.join(['assert 1 == 2',
' + where 1 = foo'])
assert util.format_explanation(expl) == res
def test_fmt_and(self):
expl = '\n'.join(['assert 1',
'{1 = foo',
'} == 2',
'{2 = bar',
'}'])
res = '\n'.join(['assert 1 == 2',
' + where 1 = foo',
' + and 2 = bar'])
assert util.format_explanation(expl) == res
def test_fmt_where_nested(self):
expl = '\n'.join(['assert 1',
'{1 = foo',
'{foo = bar',
'}',
'} == 2'])
res = '\n'.join(['assert 1 == 2',
' + where 1 = foo',
' + where foo = bar'])
assert util.format_explanation(expl) == res
def test_fmt_newline(self):
expl = '\n'.join(['assert "foo" == "bar"',
'~- foo',
'~+ bar'])
res = '\n'.join(['assert "foo" == "bar"',
' - foo',
' + bar'])
assert util.format_explanation(expl) == res
def test_fmt_newline_escaped(self):
expl = '\n'.join(['assert foo == bar',
'baz'])
res = 'assert foo == bar\\nbaz'
assert util.format_explanation(expl) == res
def test_fmt_newline_before_where(self):
expl = '\n'.join(['the assertion message here',
'>assert 1',
'{1 = foo',
'} == 2',
'{2 = bar',
'}'])
res = '\n'.join(['the assertion message here',
'assert 1 == 2',
' + where 1 = foo',
' + and 2 = bar'])
assert util.format_explanation(expl) == res
def test_fmt_multi_newline_before_where(self):
expl = '\n'.join(['the assertion',
'~message here',
'>assert 1',
'{1 = foo',
'} == 2',
'{2 = bar',
'}'])
res = '\n'.join(['the assertion',
' message here',
'assert 1 == 2',
' + where 1 = foo',
' + and 2 = bar'])
assert util.format_explanation(expl) == res
class TestTruncateExplanation(object):
""" Confirm assertion output is truncated as expected """
# The number of lines in the truncation explanation message. Used
# to calculate that results have the expected length.
LINES_IN_TRUNCATION_MSG = 2
def test_doesnt_truncate_when_input_is_empty_list(self):
expl = []
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert result == expl
def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self):
expl = ['a' * 100 for x in range(5)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80)
assert result == expl
def test_truncates_at_8_lines_when_given_list_of_empty_strings(self):
expl = ['' for x in range(50)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert result != expl
assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "43 lines hidden" in result[-1]
last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1]
assert last_line_before_trunc_msg.endswith("...")
def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self):
expl = ['a' for x in range(100)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80)
assert result != expl
assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "93 lines hidden" in result[-1]
last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1]
assert last_line_before_trunc_msg.endswith("...")
def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self):
expl = ['a' * 80 for x in range(16)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80)
assert result != expl
assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "9 lines hidden" in result[-1]
last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1]
assert last_line_before_trunc_msg.endswith("...")
def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self):
expl = ['a' * 250 for x in range(10)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=999)
assert result != expl
assert len(result) == 4 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "7 lines hidden" in result[-1]
last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1]
assert last_line_before_trunc_msg.endswith("...")
def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self):
expl = ['a' * 250 for x in range(1000)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert result != expl
assert len(result) == 1 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "1000 lines hidden" in result[-1]
last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1]
assert last_line_before_trunc_msg.endswith("...")
def test_full_output_truncated(self, monkeypatch, testdir):
""" Test against full runpytest() output. """
line_count = 7
line_len = 100
expected_truncated_lines = 2
testdir.makepyfile(r"""
def test_many_lines():
a = list([str(i)[0] * %d for i in range(%d)])
b = a[::2]
a = '\n'.join(map(str, a))
b = '\n'.join(map(str, b))
assert a == b
""" % (line_len, line_count))
monkeypatch.delenv('CI', raising=False)
result = testdir.runpytest()
# without -vv, truncate the message showing a few diff lines only
result.stdout.fnmatch_lines([
"*- 1*",
"*- 3*",
"*- 5*",
"*truncated (%d lines hidden)*use*-vv*" % expected_truncated_lines,
])
result = testdir.runpytest('-vv')
result.stdout.fnmatch_lines([
"* 6*",
])
monkeypatch.setenv('CI', '1')
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"* 6*",
])
def test_python25_compile_issue257(testdir):
testdir.makepyfile("""
def test_rewritten():
assert 1 == 2
# some comment
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines("""
*E*assert 1 == 2*
*1 failed*
""")
def test_rewritten(testdir):
testdir.makepyfile("""
def test_rewritten():
assert "@py_builtins" in globals()
""")
assert testdir.runpytest().ret == 0
def test_reprcompare_notin(mock_config):
detail = plugin.pytest_assertrepr_compare(
mock_config, 'not in', 'foo', 'aaafoobbb')[1:]
assert detail == ["'foo' is contained here:", ' aaafoobbb', '? +++']
def test_pytest_assertrepr_compare_integration(testdir):
testdir.makepyfile("""
def test_hello():
x = set(range(100))
y = x.copy()
y.remove(50)
assert x == y
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello():*",
"*assert x == y*",
"*E*Extra items*left*",
"*E*50*",
])
def test_sequence_comparison_uses_repr(testdir):
testdir.makepyfile("""
def test_hello():
x = set("hello x")
y = set("hello y")
assert x == y
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello():*",
"*assert x == y*",
"*E*Extra items*left*",
"*E*'x'*",
"*E*Extra items*right*",
"*E*'y'*",
])
def test_assertrepr_loaded_per_dir(testdir):
testdir.makepyfile(test_base=['def test_base(): assert 1 == 2'])
a = testdir.mkdir('a')
a_test = a.join('test_a.py')
a_test.write('def test_a(): assert 1 == 2')
a_conftest = a.join('conftest.py')
a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]')
b = testdir.mkdir('b')
b_test = b.join('test_b.py')
b_test.write('def test_b(): assert 1 == 2')
b_conftest = b.join('conftest.py')
b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]')
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'*def test_base():*',
'*E*assert 1 == 2*',
'*def test_a():*',
'*E*assert summary a*',
'*def test_b():*',
'*E*assert summary b*'])
def test_assertion_options(testdir):
testdir.makepyfile("""
def test_hello():
x = 3
assert x == 4
""")
result = testdir.runpytest()
assert "3 == 4" in result.stdout.str()
result = testdir.runpytest_subprocess("--assert=plain")
assert "3 == 4" not in result.stdout.str()
def test_triple_quoted_string_issue113(testdir):
testdir.makepyfile("""
def test_hello():
assert "" == '''
'''""")
result = testdir.runpytest("--fulltrace")
result.stdout.fnmatch_lines([
"*1 failed*",
])
assert 'SyntaxError' not in result.stdout.str()
def test_traceback_failure(testdir):
p1 = testdir.makepyfile("""
def g():
return 2
def f(x):
assert x == g()
def test_onefails():
f(3)
""")
result = testdir.runpytest(p1, "--tb=long")
result.stdout.fnmatch_lines([
"*test_traceback_failure.py F",
"====* FAILURES *====",
"____*____",
"",
" def test_onefails():",
"> f(3)",
"",
"*test_*.py:6: ",
"_ _ _ *",
#"",
" def f(x):",
"> assert x == g()",
"E assert 3 == 2",
"E + where 2 = g()",
"",
"*test_traceback_failure.py:4: AssertionError"
])
result = testdir.runpytest(p1) # "auto"
result.stdout.fnmatch_lines([
"*test_traceback_failure.py F",
"====* FAILURES *====",
"____*____",
"",
" def test_onefails():",
"> f(3)",
"",
"*test_*.py:6: ",
"",
" def f(x):",
"> assert x == g()",
"E assert 3 == 2",
"E + where 2 = g()",
"",
"*test_traceback_failure.py:4: AssertionError"
])
@pytest.mark.skipif(sys.version_info[:2] <= (3, 3), reason='Python 3.4+ shows chained exceptions on multiprocess')
def test_exception_handling_no_traceback(testdir):
"""
Handle chain exceptions in tasks submitted by the multiprocess module (#1984).
"""
p1 = testdir.makepyfile("""
from multiprocessing import Pool
def process_task(n):
assert n == 10
def multitask_job():
tasks = [1]
with Pool(processes=1) as pool:
pool.map(process_task, tasks)
def test_multitask_job():
multitask_job()
""")
result = testdir.runpytest(p1, "--tb=long")
result.stdout.fnmatch_lines([
"====* FAILURES *====",
"*multiprocessing.pool.RemoteTraceback:*",
"Traceback (most recent call last):",
"*assert n == 10",
"The above exception was the direct cause of the following exception:",
"> * multitask_job()",
])
@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" )
def test_warn_missing(testdir):
testdir.makepyfile("")
result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h")
result.stderr.fnmatch_lines([
"*WARNING*assert statements are not executed*",
])
result = testdir.run(sys.executable, "-OO", "-m", "pytest")
result.stderr.fnmatch_lines([
"*WARNING*assert statements are not executed*",
])
def test_recursion_source_decode(testdir):
testdir.makepyfile("""
def test_something():
pass
""")
testdir.makeini("""
[pytest]
python_files = *.py
""")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines("""
<Module*>
""")
def test_AssertionError_message(testdir):
testdir.makepyfile("""
def test_hello():
x,y = 1,2
assert 0, (x,y)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*def test_hello*
*assert 0, (x,y)*
*AssertionError: (1, 2)*
""")
@pytest.mark.skipif(PY3, reason='This bug does not exist on PY3')
def test_set_with_unsortable_elements():
# issue #718
class UnsortableKey(object):
def __init__(self, name):
self.name = name
def __lt__(self, other):
raise RuntimeError()
def __repr__(self):
return 'repr({0})'.format(self.name)
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
left_set = set(UnsortableKey(str(i)) for i in range(1, 3))
right_set = set(UnsortableKey(str(i)) for i in range(2, 4))
expl = callequal(left_set, right_set, verbose=True)
# skip first line because it contains the "construction" of the set, which does not have a guaranteed order
expl = expl[1:]
dedent = textwrap.dedent("""
Extra items in the left set:
repr(1)
Extra items in the right set:
repr(3)
Full diff (fallback to calling repr on each item):
- repr(1)
repr(2)
+ repr(3)
""").strip()
assert '\n'.join(expl) == dedent
def test_diff_newline_at_end(monkeypatch, testdir):
testdir.makepyfile(r"""
def test_diff():
assert 'asdf' == 'asdf\n'
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(r"""
*assert 'asdf' == 'asdf\n'
* - asdf
* + asdf
* ? +
""")
def test_assert_tuple_warning(testdir):
testdir.makepyfile("""
def test_tuple():
assert(False, 'you shall not pass')
""")
result = testdir.runpytest('-rw')
result.stdout.fnmatch_lines([
'*test_assert_tuple_warning.py:2',
'*assertion is always true*',
])
def test_assert_indirect_tuple_no_warning(testdir):
testdir.makepyfile("""
def test_tuple():
tpl = ('foo', 'bar')
assert tpl
""")
result = testdir.runpytest('-rw')
output = '\n'.join(result.stdout.lines)
assert 'WR1' not in output
def test_assert_with_unicode(monkeypatch, testdir):
testdir.makepyfile(u"""
# -*- coding: utf-8 -*-
def test_unicode():
assert u'유니코드' == u'Unicode'
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*AssertionError*'])
def test_raise_unprintable_assertion_error(testdir):
testdir.makepyfile(r"""
def test_raise_assertion_error():
raise AssertionError('\xff')
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([r"> raise AssertionError('\xff')", 'E AssertionError: *'])
def test_raise_assertion_error_raisin_repr(testdir):
testdir.makepyfile(u"""
class RaisingRepr(object):
def __repr__(self):
raise Exception()
def test_raising_repr():
raise AssertionError(RaisingRepr())
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['E AssertionError: <unprintable AssertionError object>'])
def test_issue_1944(testdir):
testdir.makepyfile("""
def f():
return
assert f() == 10
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 error*"])
assert "AttributeError: 'Module' object has no attribute '_obj'" not in result.stdout.str()
|
{
"content_hash": "a1cf844082a79dbfdb0510716d6b5c6d",
"timestamp": "",
"source": "github",
"line_count": 1030,
"max_line_length": 114,
"avg_line_length": 33.24563106796116,
"alnum_prop": 0.5027888911602372,
"repo_name": "flub/pytest",
"id": "c385f6aa100da6810d9b85ff45dbcaf024d590d7",
"size": "34288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_assertion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1424649"
}
],
"symlink_target": ""
}
|
"""
MozTrap root URLconf.
"""
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from moztrap.model import mtadmin
admin.site = mtadmin.MTAdminSite()
admin.autodiscover()
import session_csrf
session_csrf.monkeypatch()
urlpatterns = patterns(
"",
url(r"^$", "moztrap.view.views.home", name="home"),
# runtests ---------------------------------------------------------------
url(r"^runtests/", include("moztrap.view.runtests.urls")),
# users ------------------------------------------------------------------
url(r"^users/", include("moztrap.view.users.urls")),
# manage -----------------------------------------------------------------
url(r"^manage/", include("moztrap.view.manage.urls")),
# results ----------------------------------------------------------------
url(r"^results/", include("moztrap.view.results.urls")),
# admin ------------------------------------------------------------------
url(r"^admin/", include(admin.site.urls)),
# browserid --------------------------------------------------------------
url(r"^browserid/", include("moztrap.view.users.browserid_urls")),
# api --------------------------------------------------------------------
url(r"^api/", include("moztrap.view.api.urls")),
# open web apps-----------------------------------------------------------
url("^owa/", include("moztrap.view.owa.urls")),
# special /contribute.json endpoint --------------------------------------
url(r"^(?P<path>contribute\.json)$", "django.views.static.serve",
{'document_root': settings.BASE_PATH})
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{
"content_hash": "fa1f986e597a4a4918464803ad23be62",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 32.67272727272727,
"alnum_prop": 0.4557595993322204,
"repo_name": "mccarrmb/moztrap",
"id": "39407192e8575b0d4386234ea9457c40c50da1c4",
"size": "1797",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "moztrap/view/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "713098"
},
{
"name": "HTML",
"bytes": "1056025"
},
{
"name": "JavaScript",
"bytes": "270285"
},
{
"name": "Python",
"bytes": "2090049"
},
{
"name": "Ruby",
"bytes": "464"
},
{
"name": "Shell",
"bytes": "867"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/scout/camokit/shared_camokit_rori.iff"
result.attribute_template_id = -1
result.stfName("item_n","camokit_rori")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "868a3533aac9bf8ce6c1fd65a29c9409",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 23.384615384615383,
"alnum_prop": 0.694078947368421,
"repo_name": "anhstudios/swganh",
"id": "d7fec1f82a9fc2a965d98df131f95c11e8e16e42",
"size": "449",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/scout/camokit/shared_camokit_rori.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import praw
import time
# initializes reddit api stuff
user_agent = ("username getter 1.0 by /u/darkfire613 "
"https://github.com/darkfire613/username-getter")
r = praw.Reddit(user_agent = user_agent)
# load sublist
sublist = []
with open('sublist') as file:
for line in file:
sublist.append(line)
file.close()
# strip newlines
sublist = [x.strip() for x in sublist]
#load placeholder placeholders
placeholders = []
for x in sublist:
placeholders.append('1bu7ak')
# main loop
while True:
# subcount is an iterator for filling the placeholder
subcount = 0
f = open('userlist', 'a')
for subname in sublist:
subreddit = r.get_subreddit(subname)
print placeholders[subcount]
firstloop = True
for submission in subreddit.get_new(limit=50, place_holder=placeholders[subcount]):
if firstloop:
placeholders[subcount] = submission.id
firstloop = False
# workaround for inclusive placeholder. Fix later.
else:
username = submission.author
f.write(str(username) + '\n')
subcount += 1
f.close()
print placeholders
#break
time.sleep(600)
|
{
"content_hash": "fae45d2f9bf8893ebe78fe9ba7163997",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 91,
"avg_line_length": 27.022222222222222,
"alnum_prop": 0.6381578947368421,
"repo_name": "darkfire613/username-getter",
"id": "37f266fa2b14e0605ff0e4abe547a40362ea1578",
"size": "1315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2748"
}
],
"symlink_target": ""
}
|
import functools
from typing import Tuple
from acme.specs import EnvironmentSpec
from acme.tf import networks
import sonnet as snt
import tensorflow as tf
def make_value_func_bsuite(environment_spec: EnvironmentSpec,
value_layer_sizes: str = '50,50',
adversarial_layer_sizes: str = '50,50',
) -> Tuple[snt.Module, snt.Module]:
action_network = functools.partial(
tf.one_hot, depth=environment_spec.actions.num_values)
layer_sizes = list(map(int, value_layer_sizes.split(',')))
value_function = snt.Sequential([
networks.CriticMultiplexer(action_network=action_network),
snt.nets.MLP(layer_sizes, activate_final=True),
snt.Linear(1)])
layer_sizes = list(map(int, adversarial_layer_sizes.split(',')))
advsarial_function = snt.Sequential([
networks.CriticMultiplexer(action_network=action_network),
snt.nets.MLP(layer_sizes, activate_final=True),
snt.Linear(1)])
return value_function, advsarial_function
|
{
"content_hash": "11fbee3d0f3d434236991570fc940392",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 68,
"avg_line_length": 36.86206896551724,
"alnum_prop": 0.6576239476145931,
"repo_name": "liyuan9988/IVOPEwithACME",
"id": "7b3836229ef712176c40a856a2274ae2235060b7",
"size": "1155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ope/deep_gmm/nn_structure/bsuite_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125939"
}
],
"symlink_target": ""
}
|
import sys
for line in sys.stdin:
data = line.strip().split("GET ")
if len(data) > 1:
filename = data[1].split(" ")[0]
print "{0}\t".format(filename)
|
{
"content_hash": "a627fb759e7be510d22d5e0caa912485",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 40,
"avg_line_length": 22,
"alnum_prop": 0.5511363636363636,
"repo_name": "np1810/Hadoop_and_MapReduce",
"id": "8f39c569bef0fa1b2139304f21f88df9b1fdd61f",
"size": "346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "L3_Project/p2q1/mapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28113"
}
],
"symlink_target": ""
}
|
"""Custom implementation of multiprocessing.Pool with custom pickler.
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that uses a custom alternative to SimpleQueue.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Copyright: 2012, Olivier Grisel
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import sys
import threading
import atexit
import tempfile
import shutil
import warnings
from time import sleep
try:
WindowsError
except NameError:
WindowsError = type(None)
from pickle import whichmodule
try:
# Python 2 compat
from cPickle import loads
from cPickle import dumps
except ImportError:
from pickle import loads
from pickle import dumps
import copyreg
# Customizable pure Python pickler in Python 2
# customizable C-optimized pickler under Python 3.3+
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import load
from .numpy_pickle import dump
from .hashing import hash
from .backports import make_memmap
# Some system have a ramdisk mounted by default, we can use it instead of /tmp
# as the default folder to dump big arrays to share with subprocesses
SYSTEM_SHARED_MEM_FS = '/dev/shm'
# Folder and file permissions to chmod temporary files generated by the
# memmaping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any."""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
# a nor its descendants do not have a memmap base
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
def _get_temp_dir(pool_folder_name, temp_folder=None):
"""Get the full path to a subfolder inside the temporary folder.
Parameters
----------
pool_folder_name : str
Sub-folder name used for the serialization of a pool instance.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAMdisk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Returns
-------
pool_folder : str
full path to the temporary folder
use_shared_mem : bool
whether the temporary folder is written to tmpfs
"""
use_shared_mem = False
if temp_folder is None:
temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
if temp_folder is None:
if os.path.exists(SYSTEM_SHARED_MEM_FS):
try:
temp_folder = SYSTEM_SHARED_MEM_FS
pool_folder = os.path.join(temp_folder, pool_folder_name)
if not os.path.exists(pool_folder):
os.makedirs(pool_folder)
use_shared_mem = True
except IOError:
# Missing rights in the the /dev/shm partition,
# fallback to regular temp folder.
temp_folder = None
if temp_folder is None:
# Fallback to the default tmp folder, typically /tmp
temp_folder = tempfile.gettempdir()
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
pool_folder = os.path.join(temp_folder, pool_folder_name)
return pool_folder, use_shared_mem
def has_shareable_memory(a):
"""Return True if a is backed by some mmap buffer directly or not."""
return _get_backing_memmap(a) is not None
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
total_buffer_len):
"""Reconstruct an array view on a memory mapped file."""
if mode == 'w+':
# Do not zero the original data when unpickling
mode = 'r+'
if strides is None:
# Simple, contiguous memmap
return make_memmap(filename, dtype=dtype, shape=shape, mode=mode,
offset=offset, order=order)
else:
# For non-contiguous data, memmap the total enclosing buffer and then
# extract the non-contiguous view with the stride-tricks API
base = make_memmap(filename, dtype=dtype, shape=total_buffer_len,
mode=mode, offset=offset, order=order)
return as_strided(base, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
"""Pickling reduction for memmap backed arrays.
a is expected to be an instance of np.ndarray (or np.memmap)
m is expected to be an instance of np.memmap on the top of the ``base``
attribute ancestry of a. ``m.base`` should be the real python mmap object.
"""
# offset that comes from the striding differences between a and m
a_start, a_end = np.byte_bounds(a)
m_start = np.byte_bounds(m)[0]
offset = a_start - m_start
# offset from the backing memmap
offset += m.offset
if m.flags['F_CONTIGUOUS']:
order = 'F'
else:
# The backing memmap buffer is necessarily contiguous hence C if not
# Fortran
order = 'C'
if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
# If the array is a contiguous view, no need to pass the strides
strides = None
total_buffer_len = None
else:
# Compute the total number of items to map from which the strided
# view will be extracted.
strides = a.strides
total_buffer_len = (a_end - a_start) // a.itemsize
return (_strided_from_memmap,
(m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
total_buffer_len))
def reduce_memmap(a):
"""Pickle the descriptors of a memmap instance to reopen on same file."""
m = _get_backing_memmap(a)
if m is not None:
# m is a real mmap backed memmap instance, reduce a preserving striding
# information
return _reduce_memmap_backed(a, m)
else:
# This memmap instance is actually backed by a regular in-memory
# buffer: this can happen when using binary operators on numpy.memmap
# instances
return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
class ArrayMemmapReducer(object):
"""Reducer callable to dump large arrays to memmap files.
Parameters
----------
max_nbytes: int
Threshold to trigger memmaping of large arrays to files created
a folder.
temp_folder: str
Path of a folder where files for backing memmaped arrays are created.
mmap_mode: 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
verbose: int, optional, 0 by default
If verbose > 0, memmap creations are logged.
If verbose > 1, both memmap creations, reuse and array pickling are
logged.
prewarm: bool, optional, False by default.
Force a read on newly memmaped array to make sure that OS pre-cache it
memory. This can be useful to avoid concurrent disk access when the
same data array is passed to different worker processes.
"""
def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0,
context_id=None, prewarm=True):
self._max_nbytes = max_nbytes
self._temp_folder = temp_folder
self._mmap_mode = mmap_mode
self.verbose = int(verbose)
self._prewarm = prewarm
if context_id is not None:
warnings.warn('context_id is deprecated and ignored in joblib'
' 0.9.4 and will be removed in 0.11',
DeprecationWarning)
def __call__(self, a):
m = _get_backing_memmap(a)
if m is not None:
# a is already backed by a memmap file, let's reuse it directly
return _reduce_memmap_backed(a, m)
if (not a.dtype.hasobject
and self._max_nbytes is not None
and a.nbytes > self._max_nbytes):
# check that the folder exists (lazily create the pool temp folder
# if required)
try:
os.makedirs(self._temp_folder)
os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
# Find a unique, concurrent safe filename for writing the
# content of this array only once.
basename = "%d-%d-%s.pkl" % (
os.getpid(), id(threading.current_thread()), hash(a))
filename = os.path.join(self._temp_folder, basename)
# In case the same array with the same content is passed several
# times to the pool subprocess children, serialize it only once
# XXX: implement an explicit reference counting scheme to make it
# possible to delete temporary files as soon as the workers are
# done processing this data.
if not os.path.exists(filename):
if self.verbose > 0:
print("Memmaping (shape=%r, dtype=%s) to new file %s" % (
a.shape, a.dtype, filename))
for dumped_filename in dump(a, filename):
os.chmod(dumped_filename, FILE_PERMISSIONS)
if self._prewarm:
# Warm up the data to avoid concurrent disk access in
# multiple children processes
load(filename, mmap_mode=self._mmap_mode).max()
elif self.verbose > 1:
print("Memmaping (shape=%s, dtype=%s) to old file %s" % (
a.shape, a.dtype, filename))
# The worker process will use joblib.load to memmap the data
return (load, (filename, self._mmap_mode))
else:
# do not convert a into memmap, let pickler do its usual copy with
# the default system pickler
if self.verbose > 1:
print("Pickling array (shape=%r, dtype=%s)." % (
a.shape, a.dtype))
return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
"""Pickler that accepts custom reducers.
HIGHEST_PROTOCOL is selected by default as this pickler is used
to pickle ephemeral datastructures for interprocess communication
hence no backward compatibility is required.
`reducers` is expected to be a dictionary with key/values
being `(type, callable)` pairs where `callable` is a function that
give an instance of `type` will return a tuple `(constructor,
tuple_of_objects)` to rebuild an instance out of the pickled
`tuple_of_objects` as would return a `__reduce__` method. See the
standard library documentation on pickling for more details.
"""
# We override the pure Python pickler as its the only way to be able to
# customize the dispatch table without side effects in Python 2.7
# to 3.2. For Python 3.3+ leverage the new dispatch_table
# feature from http://bugs.python.org/issue14166 that makes it possible
# to use the C implementation of the Pickler which is faster.
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
Pickler.__init__(self, writer, protocol=protocol)
if reducers is None:
reducers = {}
if hasattr(Pickler, 'dispatch'):
# Make the dispatch registry an instance level attribute instead of
# a reference to the class dictionary under Python 2
self.dispatch = Pickler.dispatch.copy()
else:
# Under Python 3 initialize the dispatch table with a copy of the
# default registry
self.dispatch_table = copyreg.dispatch_table.copy()
for type, reduce_func in reducers.items():
self.register(type, reduce_func)
def register(self, type, reduce_func):
"""Attach a reducer function to a given type in the dispatch table."""
if hasattr(Pickler, 'dispatch'):
# Python 2 pickler dispatching is not explicitly customizable.
# Let us use a closure to workaround this limitation.
def dispatcher(self, obj):
reduced = reduce_func(obj)
self.save_reduce(obj=obj, *reduced)
self.dispatch[type] = dispatcher
else:
self.dispatch_table[type] = reduce_func
class CustomizablePicklingQueue(object):
"""Locked Pipe implementation that uses a customizable pickler.
This class is an alternative to the multiprocessing implementation
of SimpleQueue in order to make it possible to pass custom
pickling reducers, for instance to avoid memory copy when passing
memory mapped datastructures.
`reducers` is expected to be a dict with key / values being
`(type, callable)` pairs where `callable` is a function that, given an
instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
to rebuild an instance out of the pickled `tuple_of_objects` as would
return a `__reduce__` method.
See the standard library documentation on pickling for more details.
"""
def __init__(self, context, reducers=None):
self._reducers = reducers
self._reader, self._writer = context.Pipe(duplex=False)
self._rlock = context.Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = context.Lock()
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock,
self._reducers)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock,
self._reducers) = state
self._make_methods()
def empty(self):
return not self._reader.poll()
def _make_methods(self):
self._recv = recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._reducers:
def send(obj):
buffer = BytesIO()
CustomizablePickler(buffer, self._reducers).dump(obj)
self._writer.send_bytes(buffer.getvalue())
self._send = send
else:
self._send = send = self._writer.send
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self.put = send
else:
wlock_acquire, wlock_release = (
self._wlock.acquire, self._wlock.release)
def put(obj):
wlock_acquire()
try:
return send(obj)
finally:
wlock_release()
self.put = put
class PicklingPool(Pool):
"""Pool implementation with customizable pickling reducers.
This is useful to control how data is shipped between processes
and makes it possible to use shared memory without useless
copies induces by the default pickling methods of the original
objects passed as arguments to dispatch.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries with key/values being `(type, callable)` pairs where
`callable` is a function that, given an instance of `type`, will return a
tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
pickled `tuple_of_objects` as would return a `__reduce__` method.
See the standard library documentation about pickling for more details.
"""
def __init__(self, processes=None, forward_reducers=None,
backward_reducers=None, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
self._forward_reducers = forward_reducers
self._backward_reducers = backward_reducers
poolargs = dict(processes=processes)
poolargs.update(kwargs)
super(PicklingPool, self).__init__(**poolargs)
def _setup_queues(self):
context = getattr(self, '_ctx', mp)
self._inqueue = CustomizablePicklingQueue(context,
self._forward_reducers)
self._outqueue = CustomizablePicklingQueue(context,
self._backward_reducers)
self._quick_put = self._inqueue._send
self._quick_get = self._outqueue._recv
def delete_folder(folder_path):
"""Utility function to cleanup a temporary folder if still existing."""
try:
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
except WindowsError:
warnings.warn("Failed to clean temporary folder: %s" % folder_path)
class MemmapingPool(PicklingPool):
"""Process pool that shares large arrays to avoid memory copy.
This drop-in replacement for `multiprocessing.pool.Pool` makes
it possible to work efficiently with shared memory in a numpy
context.
Existing instances of numpy.memmap are preserved: the child
suprocesses will have access to the same shared memory in the
original mode except for the 'w+' mode that is automatically
transformed as 'r+' to avoid zeroing the original data upon
instantiation.
Furthermore large arrays from the parent process are automatically
dumped to a temporary folder on the filesystem such as child
processes to access their content via memmaping (file system
backed shared memory).
Note: it is important to call the terminate method to collect
the temporary folder used by the pool.
Parameters
----------
processes: int, optional
Number of worker processes running concurrently in the pool.
initializer: callable, optional
Callable executed on worker process creation.
initargs: tuple, optional
Arguments passed to the initializer callable.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
max_nbytes int or None, optional, 1e6 by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder.
Use None to disable memmaping of large arrays.
mmap_mode: {'r+', 'r', 'w+', 'c'}
Memmapping mode for numpy arrays passed to workers.
See 'max_nbytes' parameter documentation for more details.
forward_reducers: dictionary, optional
Reducers used to pickle objects passed from master to worker
processes: see below.
backward_reducers: dictionary, optional
Reducers used to pickle return values from workers back to the
master process.
verbose: int, optional
Make it possible to monitor how the communication of numpy arrays
with the subprocess is handled (pickling or memmaping)
prewarm: bool or str, optional, "auto" by default.
If True, force a read on newly memmaped array to make sure that OS pre-
cache it in memory. This can be useful to avoid concurrent disk access
when the same data array is passed to different worker processes.
If "auto" (by default), prewarm is set to True, unless the Linux shared
memory partition /dev/shm is available and used as temp_folder.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries with key/values being `(type, callable)` pairs where
`callable` is a function that give an instance of `type` will return
a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
of the pickled `tuple_of_objects` as would return a `__reduce__`
method. See the standard library documentation on pickling for more
details.
"""
def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
mmap_mode='r', forward_reducers=None, backward_reducers=None,
verbose=0, context_id=None, prewarm=False, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
if context_id is not None:
warnings.warn('context_id is deprecated and ignored in joblib'
' 0.9.4 and will be removed in 0.11',
DeprecationWarning)
# Prepare a sub-folder name for the serialization of this particular
# pool instance (do not create in advance to spare FS write access if
# no array is to be dumped):
pool_folder_name = "joblib_memmaping_pool_%d_%d" % (
os.getpid(), id(self))
pool_folder, use_shared_mem = _get_temp_dir(pool_folder_name,
temp_folder)
self._temp_folder = pool_folder
# Register the garbage collector at program exit in case caller forgets
# to call terminate explicitly: note we do not pass any reference to
# self to ensure that this callback won't prevent garbage collection of
# the pool instance and related file handler resources such as POSIX
# semaphores and pipes
pool_module_name = whichmodule(delete_folder, 'delete_folder')
def _cleanup():
# In some cases the Python runtime seems to set delete_folder to
# None just before exiting when accessing the delete_folder
# function from the closure namespace. So instead we reimport
# the delete_folder function explicitly.
# https://github.com/joblib/joblib/issues/328
# We cannot just use from 'joblib.pool import delete_folder'
# because joblib should only use relative imports to allow
# easy vendoring.
delete_folder = __import__(
pool_module_name, fromlist=['delete_folder']).delete_folder
delete_folder(pool_folder)
atexit.register(_cleanup)
if np is not None:
# Register smart numpy.ndarray reducers that detects memmap backed
# arrays and that is alse able to dump to memmap large in-memory
# arrays over the max_nbytes threshold
if prewarm == "auto":
prewarm = not use_shared_mem
forward_reduce_ndarray = ArrayMemmapReducer(
max_nbytes, pool_folder, mmap_mode, verbose,
prewarm=prewarm)
forward_reducers[np.ndarray] = forward_reduce_ndarray
forward_reducers[np.memmap] = reduce_memmap
# Communication from child process to the parent process always
# pickles in-memory numpy.ndarray without dumping them as memmap
# to avoid confusing the caller and make it tricky to collect the
# temporary folder
backward_reduce_ndarray = ArrayMemmapReducer(
None, pool_folder, mmap_mode, verbose)
backward_reducers[np.ndarray] = backward_reduce_ndarray
backward_reducers[np.memmap] = reduce_memmap
poolargs = dict(
processes=processes,
forward_reducers=forward_reducers,
backward_reducers=backward_reducers)
poolargs.update(kwargs)
super(MemmapingPool, self).__init__(**poolargs)
def terminate(self):
n_retries = 10
for i in range(n_retries):
try:
super(MemmapingPool, self).terminate()
break
except OSError as e:
if isinstance(e, WindowsError):
# Workaround occasional "[Error 5] Access is denied" issue
# when trying to terminate a process under windows.
sleep(0.1)
if i + 1 == n_retries:
warnings.warn("Failed to terminate worker processes in"
" multiprocessing pool: %r" % e)
delete_folder(self._temp_folder)
|
{
"content_hash": "7889b53c4bccdcf1ceba0036d95fba1c",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 79,
"avg_line_length": 40.44307692307692,
"alnum_prop": 0.6321895922093731,
"repo_name": "RTHMaK/RPGOne",
"id": "aca8db5a15af6e13773a3ac954d28d6cb87dd119",
"size": "26288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "joblib-master/joblib/pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Batchfile",
"bytes": "15029"
},
{
"name": "CSS",
"bytes": "41709"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Go",
"bytes": "287"
},
{
"name": "HTML",
"bytes": "633076"
},
{
"name": "JavaScript",
"bytes": "1128791"
},
{
"name": "Jupyter Notebook",
"bytes": "927247"
},
{
"name": "Makefile",
"bytes": "31756"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Matlab",
"bytes": "9454"
},
{
"name": "PHP",
"bytes": "708541"
},
{
"name": "PowerShell",
"bytes": "68503"
},
{
"name": "Python",
"bytes": "2278740"
},
{
"name": "Ruby",
"bytes": "1136"
},
{
"name": "Shell",
"bytes": "62555"
},
{
"name": "Smarty",
"bytes": "5752"
},
{
"name": "TeX",
"bytes": "34544"
}
],
"symlink_target": ""
}
|
def lum_contrast(clip, lum = 0, contrast=0, contrast_thr=127):
""" luminosity-contrast correction of a clip """
def fl_image(im):
im = 1.0*im # float conversion
corrected = im + lum + factor*(im-thr)
corrected[corrected < 0] = 0
corrected[corrected > 255] = 255
return corrected.astype('uint8')
return clip.fl_image(fl_image)
|
{
"content_hash": "4047e02975dff6b34310ed77e4fc7bc0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 35.27272727272727,
"alnum_prop": 0.5979381443298969,
"repo_name": "DevinGeo/moviepy",
"id": "992b420e71e0ca163b88f8de5528a6ea95fb7e84",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moviepy/video/fx/lum_contrast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "295"
},
{
"name": "Python",
"bytes": "220312"
},
{
"name": "Shell",
"bytes": "6748"
}
],
"symlink_target": ""
}
|
"""
Some fixture methods
"""
import os
import pytest
import autobreadcrumbs
class FixturesStorageParameters(object):
"""Mixin containing some basic settings"""
def __init__(self):
# Base fixture datas directory
self.tests_dir = 'project_test/tests'
self.fixtures_dir = 'data_fixtures'
self.tests_path = os.path.normpath(
os.path.join(
os.path.abspath(os.path.dirname(autobreadcrumbs.__file__)),
'..',
self.tests_dir,
)
)
self.fixtures_path = os.path.join(
self.tests_dir,
self.fixtures_dir
)
@pytest.fixture(scope='session')
def temp_builds_dir(tmpdir_factory):
"""Prepare a temporary build directory"""
fn = tmpdir_factory.mktemp('builds')
return fn
#@pytest.fixture(scope='module')
#def clean_registry():
#"""Perform a reset on registry then relaunch autodiscover"""
#from autobreadcrumbs.registry import breadcrumbs_registry
#print "Before clean:", breadcrumbs_registry.get_names()
#breadcrumbs_registry.reset()
#return breadcrumbs_registry
#@pytest.fixture(scope='function')
#def reboot_discovering():
#"""Perform a reset on registry then relaunch autodiscover"""
#from autobreadcrumbs.registry import breadcrumbs_registry
#from autobreadcrumbs.discover import autodiscover
#print "Before reboot:", breadcrumbs_registry.get_names()
#breadcrumbs_registry.reset()
#return autodiscover()
@pytest.fixture(scope="module")
def storageparameters():
"""Initialize and return parameters storage object (mostly paths) for
fixtures (scope at module level)"""
return FixturesStorageParameters()
|
{
"content_hash": "a66c3fb1be5e68fa5de63c5df5723706",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 28.37704918032787,
"alnum_prop": 0.6637781629116117,
"repo_name": "sveetch/autobreadcrumbs",
"id": "c9e312df867ce5f4c816ba6abcb09501b104912c",
"size": "1731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_test/tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "679"
},
{
"name": "Makefile",
"bytes": "1259"
},
{
"name": "Python",
"bytes": "43722"
}
],
"symlink_target": ""
}
|
"""
Python representations of the JSON Schema Test Suite tests.
"""
from functools import partial
from pathlib import Path
import json
import os
import re
import subprocess
import sys
import unittest
import attr
from jsonschema.validators import _VALIDATORS
import jsonschema
def _find_suite():
root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
if root is not None:
return Path(root)
root = Path(jsonschema.__file__).parent.parent / "json"
if not root.is_dir(): # pragma: no cover
raise ValueError(
(
"Can't find the JSON-Schema-Test-Suite directory. "
"Set the 'JSON_SCHEMA_TEST_SUITE' environment "
"variable or run the tests from alongside a checkout "
"of the suite."
),
)
return root
@attr.s(hash=True)
class Suite:
_root = attr.ib(default=attr.Factory(_find_suite))
def _remotes(self):
jsonschema_suite = self._root.joinpath("bin", "jsonschema_suite")
remotes = subprocess.check_output(
[sys.executable, str(jsonschema_suite), "remotes"],
)
return json.loads(remotes.decode("utf-8"))
def benchmark(self, runner): # pragma: no cover
for name, Validator in _VALIDATORS.items():
self.version(name=name).benchmark(
runner=runner,
Validator=Validator,
)
def version(self, name):
return Version(
name=name,
path=self._root.joinpath("tests", name),
remotes=self._remotes(),
)
@attr.s(hash=True)
class Version:
_path = attr.ib()
_remotes = attr.ib()
name = attr.ib()
def benchmark(self, runner, **kwargs): # pragma: no cover
for suite in self.tests():
for test in suite:
runner.bench_func(
test.fully_qualified_name,
partial(test.validate_ignoring_errors, **kwargs),
)
def tests(self):
return (
test
for child in self._path.glob("*.json")
for test in self._tests_in(
subject=child.name[:-5],
path=child,
)
)
def format_tests(self):
path = self._path.joinpath("optional", "format")
return (
test
for child in path.glob("*.json")
for test in self._tests_in(
subject=child.name[:-5],
path=child,
)
)
def optional_tests_of(self, name):
return self._tests_in(
subject=name,
path=self._path.joinpath("optional", name + ".json"),
)
def to_unittest_testcase(self, *suites, **kwargs):
name = kwargs.pop("name", "Test" + self.name.title().replace("-", ""))
methods = {
test.method_name: test.to_unittest_method(**kwargs)
for suite in suites
for tests in suite
for test in tests
}
cls = type(name, (unittest.TestCase,), methods)
try:
cls.__module__ = _someone_save_us_the_module_of_the_caller()
except Exception: # pragma: no cover
# We're doing crazy things, so if they go wrong, like a function
# behaving differently on some other interpreter, just make them
# not happen.
pass
return cls
def _tests_in(self, subject, path):
for each in json.loads(path.read_text(encoding="utf-8")):
yield (
_Test(
version=self,
subject=subject,
case_description=each["description"],
schema=each["schema"],
remotes=self._remotes,
**test,
) for test in each["tests"]
)
@attr.s(hash=True, repr=False)
class _Test:
version = attr.ib()
subject = attr.ib()
case_description = attr.ib()
description = attr.ib()
data = attr.ib()
schema = attr.ib(repr=False)
valid = attr.ib()
_remotes = attr.ib()
comment = attr.ib(default=None)
def __repr__(self): # pragma: no cover
return "<Test {}>".format(self.fully_qualified_name)
@property
def fully_qualified_name(self): # pragma: no cover
return " > ".join(
[
self.version.name,
self.subject,
self.case_description,
self.description,
],
)
@property
def method_name(self):
delimiters = r"[\W\- ]+"
return "test_{}_{}_{}".format(
re.sub(delimiters, "_", self.subject),
re.sub(delimiters, "_", self.case_description),
re.sub(delimiters, "_", self.description),
)
def to_unittest_method(self, skip=lambda test: None, **kwargs):
if self.valid:
def fn(this):
self.validate(**kwargs)
else:
def fn(this):
with this.assertRaises(jsonschema.ValidationError):
self.validate(**kwargs)
fn.__name__ = self.method_name
reason = skip(self)
if reason is None or os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
return fn
elif os.environ.get("JSON_SCHEMA_EXPECTED_FAILURES", "0") != "0":
return unittest.expectedFailure(fn)
else:
return unittest.skip(reason)(fn)
def validate(self, Validator, **kwargs):
Validator.check_schema(self.schema)
resolver = jsonschema.RefResolver.from_schema(
schema=self.schema,
store=self._remotes,
id_of=Validator.ID_OF,
)
# XXX: #693 asks to improve the public API for this, since yeah, it's
# bad. Figures that since it's hard for end-users, we experience
# the pain internally here too.
def prevent_network_access(uri):
raise RuntimeError(f"Tried to access the network: {uri}")
resolver.resolve_remote = prevent_network_access
validator = Validator(schema=self.schema, resolver=resolver, **kwargs)
if os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
breakpoint()
validator.validate(instance=self.data)
def validate_ignoring_errors(self, Validator): # pragma: no cover
try:
self.validate(Validator=Validator)
except jsonschema.ValidationError:
pass
def _someone_save_us_the_module_of_the_caller():
"""
The FQON of the module 2nd stack frames up from here.
This is intended to allow us to dynamically return test case classes that
are indistinguishable from being defined in the module that wants them.
Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run
the class that really is running.
Save us all, this is all so so so so so terrible.
"""
return sys._getframe(2).f_globals["__name__"]
|
{
"content_hash": "da76d1675f9401a8ea3caa42b0fd6771",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 78,
"avg_line_length": 29.261410788381742,
"alnum_prop": 0.5520419739081112,
"repo_name": "python-jsonschema/jsonschema",
"id": "f3129e045fc938d96ba43b227865f7177d14eda1",
"size": "7052",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jsonschema/tests/_suite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "314121"
}
],
"symlink_target": ""
}
|
import version_independent as ind
namespace = "http://www.dmg.org/PMML-4_0"
class PMML(ind.PMML):
"""Represents a <PMML> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PMML, self).__init__()
self.version = None
for key, value in attribs.items():
setattr(self, key, value)
self.AssociationModel = []
self.ClusteringModel = []
self.DataDictionary = []
self.Extension = []
self.GeneralRegressionModel = []
self.Header = []
self.MiningBuildTask = []
self.MiningModel = []
self.NaiveBayesModel = []
self.NeuralNetwork = []
self.RegressionModel = []
self.RuleSetModel = []
self.SequenceModel = []
self.SupportVectorMachineModel = []
self.TextModel = []
self.TimeSeriesModel = []
self.TransformationDictionary = []
self.TreeModel = []
def models(self):
return self.AssociationModel + self.ClusteringModel + self.GeneralRegressionModel + self.MiningModel + self.NaiveBayesModel + self.NeuralNetwork + self.RegressionModel + self.RuleSetModel + self.SequenceModel + self.SupportVectorMachineModel + self.TextModel + self.TimeSeriesModel + self.TreeModel
class ARIMA(ind.ARIMA):
"""Represents a <ARIMA> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ARIMA, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
class Aggregate(ind.Aggregate):
"""Represents a <Aggregate> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Aggregate, self).__init__()
self.field = None
self.function = None
self.groupField = None
self.sqlWhere = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Annotation(ind.Annotation):
"""Represents a <Annotation> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Annotation, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Anova(ind.Anova):
"""Represents a <Anova> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Anova, self).__init__()
self.target = None
for key, value in attribs.items():
setattr(self, key, value)
self.AnovaRow = []
self.Extension = []
class AnovaRow(ind.AnovaRow):
"""Represents a <AnovaRow> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(AnovaRow, self).__init__()
self.type = None
self.sumOfSquares = None
self.degreesOfFreedom = None
self.meanOfSquares = None
self.fValue = None
self.pValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class AntecedentSequence(ind.AntecedentSequence):
"""Represents a <AntecedentSequence> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(AntecedentSequence, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.SequenceReference = []
self.Time = []
class Application(ind.Application):
"""Represents a <Application> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Application, self).__init__()
self.name = None
self.version = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Apply(ind.Apply):
"""Represents a <Apply> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Apply, self).__init__()
self.function = None
for key, value in attribs.items():
setattr(self, key, value)
self.Aggregate = []
self.Apply = []
self.Constant = []
self.Discretize = []
self.Extension = []
self.FieldRef = []
self.MapValues = []
self.NormContinuous = []
self.NormDiscrete = []
class Array(ind.Array):
"""Represents a <Array> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Array, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
class AssociationModel(ind.AssociationModel):
"""Represents a <AssociationModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(AssociationModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.numberOfTransactions = None
self.maxNumberOfItemsPerTA = None
self.avgNumberOfItemsPerTA = None
self.minimumSupport = None
self.minimumConfidence = None
self.lengthLimit = None
self.numberOfItems = None
self.numberOfItemsets = None
self.numberOfRules = None
for key, value in attribs.items():
setattr(self, key, value)
self.AssociationRule = []
self.Extension = []
self.Item = []
self.Itemset = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelStats = []
self.Output = []
class AssociationRule(ind.AssociationRule):
"""Represents a <AssociationRule> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(AssociationRule, self).__init__()
self.antecedent = None
self.consequent = None
self.support = None
self.confidence = None
self.lift = None
self.id = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class BaseCumHazardTables(ind.BaseCumHazardTables):
"""Represents a <BaseCumHazardTables> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(BaseCumHazardTables, self).__init__()
self.maxTime = None
for key, value in attribs.items():
setattr(self, key, value)
self.BaselineCell = []
self.BaselineStratum = []
self.Extension = []
class BaselineCell(ind.BaselineCell):
"""Represents a <BaselineCell> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(BaselineCell, self).__init__()
self.time = None
self.cumHazard = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class BaselineStratum(ind.BaselineStratum):
"""Represents a <BaselineStratum> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(BaselineStratum, self).__init__()
self.value = None
self.label = None
self.maxTime = None
for key, value in attribs.items():
setattr(self, key, value)
self.BaselineCell = []
self.Extension = []
class BayesInput(ind.BayesInput):
"""Represents a <BayesInput> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(BayesInput, self).__init__()
self.fieldName = None
for key, value in attribs.items():
setattr(self, key, value)
self.DerivedField = []
self.Extension = []
self.PairCounts = []
class BayesInputs(ind.BayesInputs):
"""Represents a <BayesInputs> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(BayesInputs, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.BayesInput = []
self.Extension = []
class BayesOutput(ind.BayesOutput):
"""Represents a <BayesOutput> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(BayesOutput, self).__init__()
self.fieldName = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.TargetValueCounts = []
class BoundaryValueMeans(ind.BoundaryValueMeans):
"""Represents a <BoundaryValueMeans> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(BoundaryValueMeans, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class BoundaryValues(ind.BoundaryValues):
"""Represents a <BoundaryValues> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(BoundaryValues, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class CategoricalPredictor(ind.CategoricalPredictor):
"""Represents a <CategoricalPredictor> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(CategoricalPredictor, self).__init__()
self.name = None
self.value = None
self.coefficient = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class ChildParent(ind.ChildParent):
"""Represents a <ChildParent> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ChildParent, self).__init__()
self.childField = None
self.parentField = None
self.parentLevelField = None
self.isRecursive = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.InlineTable = []
self.TableLocator = []
class ClassLabels(ind.ClassLabels):
"""Represents a <ClassLabels> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ClassLabels, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class Cluster(ind.Cluster):
"""Represents a <Cluster> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Cluster, self).__init__()
self.name = None
self.size = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Covariances = []
self.Extension = []
self.KohonenMap = []
self.Partition = []
class ClusteringField(ind.ClusteringField):
"""Represents a <ClusteringField> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ClusteringField, self).__init__()
self.field = None
self.isCenterField = None
self.fieldWeight = None
self.similarityScale = None
self.compareFunction = None
for key, value in attribs.items():
setattr(self, key, value)
self.Comparisons = []
self.Extension = []
class ClusteringModel(ind.ClusteringModel):
"""Represents a <ClusteringModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ClusteringModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.modelClass = None
self.numberOfClusters = None
for key, value in attribs.items():
"""Represents a <= None
self.numberOfClusters = None
for key, value in attribs.items> tag in v4.0 and provides methods to convert to PFA."""
setattr(self, key, value)
self.Cluster = []
self.ClusteringField = []
self.ComparisonMeasure = []
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.MissingValueWeights = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
class ClusteringModelQuality(ind.ClusteringModelQuality):
"""Represents a <ClusteringModelQuality> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ClusteringModelQuality, self).__init__()
self.dataName = None
self.SSE = None
self.SSB = None
for key, value in attribs.items():
setattr(self, key, value)
class Coefficient(ind.Coefficient):
"""Represents a <Coefficient> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Coefficient, self).__init__()
self.value = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Coefficients(ind.Coefficients):
"""Represents a <Coefficients> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Coefficients, self).__init__()
self.numberOfCoefficients = None
self.absoluteValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.Coefficient = []
self.Extension = []
class ComparisonMeasure(ind.ComparisonMeasure):
"""Represents a <ComparisonMeasure> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ComparisonMeasure, self).__init__()
self.kind = None
self.compareFunction = None
self.minimum = None
self.maximum = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.binarySimilarity = []
self.chebychev = []
self.cityBlock = []
self.euclidean = []
self.jaccard = []
self.minkowski = []
self.simpleMatching = []
self.squaredEuclidean = []
self.tanimoto = []
class Comparisons(ind.Comparisons):
"""Represents a <Comparisons> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Comparisons, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Matrix = []
class CompoundPredicate(ind.CompoundPredicate):
"""Represents a <CompoundPredicate> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(CompoundPredicate, self).__init__()
self.booleanOperator = None
for key, value in attribs.items():
setattr(self, key, value)
self.CompoundPredicate = []
self.Extension = []
self.AlwaysFalse = []
self.SimplePredicate = []
self.SimpleSetPredicate = []
self.AlwaysTrue = []
class CompoundRule(ind.CompoundRule):
"""Represents a <CompoundRule> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(CompoundRule, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.CompoundPredicate = []
self.CompoundRule = []
self.Extension = []
self.AlwaysFalse = []
self.SimplePredicate = []
self.SimpleRule = []
self.SimpleSetPredicate = []
self.AlwaysTrue = []
class Con(ind.Con):
"""Represents a <Con> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Con, self).__init__()
self.isfrom = None
self.weight = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class ConfusionMatrix(ind.ConfusionMatrix):
"""Represents a <ConfusionMatrix> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ConfusionMatrix, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.ClassLabels = []
self.Extension = []
self.Matrix = []
class ConsequentSequence(ind.ConsequentSequence):
"""Represents a <ConsequentSequence> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ConsequentSequence, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.SequenceReference = []
self.Time = []
class Constant(ind.Constant):
"""Represents a <Constant> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Constant, self).__init__()
self.dataType = None
for key, value in attribs.items():
setattr(self, key, value)
class Constraints(ind.Constraints):
"""Represents a <Constraints> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Constraints, self).__init__()
self.minimumNumberOfItems = None
self.maximumNumberOfItems = None
self.minimumNumberOfAntecedentItems = None
self.maximumNumberOfAntecedentItems = None
self.minimumNumberOfConsequentItems = None
self.maximumNumberOfConsequentItems = None
self.minimumSupport = None
self.minimumConfidence = None
self.minimumLift = None
self.minimumTotalSequenceTime = None
self.maximumTotalSequenceTime = None
self.minimumItemsetSeparationTime = None
self.maximumItemsetSeparationTime = None
self.minimumAntConsSeparationTime = None
self.maximumAntConsSeparationTime = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class ContStats(ind.ContStats):
"""Represents a <ContStats> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ContStats, self).__init__()
self.totalValuesSum = None
self.totalSquaresSum = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
self.Interval = []
class CorrelationFields(ind.CorrelationFields):
"""Represents a <CorrelationFields> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(CorrelationFields, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class CorrelationMethods(ind.CorrelationMethods):
"""Represents a <CorrelationMethods> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(CorrelationMethods, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Matrix = []
class CorrelationValues(ind.CorrelationValues):
"""Represents a <CorrelationValues> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(CorrelationValues, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Matrix = []
class Correlations(ind.Correlations):
"""Represents a <Correlations> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Correlations, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.CorrelationFields = []
self.CorrelationMethods = []
self.CorrelationValues = []
self.Extension = []
class Counts(ind.Counts):
"""Represents a <Counts> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Counts, self).__init__()
self.totalFreq = None
self.missingFreq = None
self.invalidFreq = None
self.cardinality = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Covariances(ind.Covariances):
"""Represents a <Covariances> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Covariances, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Matrix = []
class CovariateList(ind.CovariateList):
"""Represents a <CovariateList> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(CovariateList, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Predictor = []
class DataDictionary(ind.DataDictionary):
"""Represents a <DataDictionary> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(DataDictionary, self).__init__()
self.numberOfFields = None
for key, value in attribs.items():
setattr(self, key, value)
self.DataField = []
self.Extension = []
self.Taxonomy = []
class DataField(ind.DataField):
"""Represents a <DataField> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(DataField, self).__init__()
self.name = None
self.displayName = None
self.optype = None
self.dataType = None
self.taxonomy = None
self.isCyclic = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Interval = []
self.Value = []
class DecisionTree(ind.DecisionTree):
"""Represents a <DecisionTree> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(DecisionTree, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.missingValueStrategy = None
self.missingValuePenalty = None
self.noTrueChildStrategy = None
self.splitCharacteristic = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LocalTransformations = []
self.ModelStats = []
self.Node = []
self.Output = []
self.ResultField = []
self.Targets = []
class DefineFunction(ind.DefineFunction):
"""Represents a <DefineFunction> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(DefineFunction, self).__init__()
self.name = None
self.optype = None
self.dataType = None
for key, value in attribs.items():
setattr(self, key, value)
self.Aggregate = []
self.Apply = []
self.Constant = []
self.Discretize = []
self.Extension = []
self.FieldRef = []
self.MapValues = []
self.NormContinuous = []
self.NormDiscrete = []
self.ParameterField = []
class Delimiter(ind.Delimiter):
"""Represents a <Delimiter> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Delimiter, self).__init__()
self.delimiter = None
self.gap = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class DerivedField(ind.DerivedField):
"""Represents a <DerivedField> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(DerivedField, self).__init__()
self.name = None
self.displayName = None
self.optype = None
self.dataType = None
for key, value in attribs.items():
setattr(self, key, value)
self.Aggregate = []
self.Apply = []
self.Constant = []
self.Discretize = []
self.Extension = []
self.FieldRef = []
self.MapValues = []
self.NormContinuous = []
self.NormDiscrete = []
self.Value = []
class DiscrStats(ind.DiscrStats):
"""Represents a <DiscrStats> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(DiscrStats, self).__init__()
self.modalValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class Discretize(ind.Discretize):
"""Represents a <Discretize> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Discretize, self).__init__()
self.field = None
self.mapMissingTo = None
self.defaultValue = None
self.dataType = None
for key, value in attribs.items():
setattr(self, key, value)
self.DiscretizeBin = []
self.Extension = []
class DiscretizeBin(ind.DiscretizeBin):
"""Represents a <DiscretizeBin> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(DiscretizeBin, self).__init__()
self.binValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Interval = []
class DocumentTermMatrix(ind.DocumentTermMatrix):
"""Represents a <DocumentTermMatrix> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(DocumentTermMatrix, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Matrix = []
class EventValues(ind.EventValues):
"""Represents a <EventValues> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(EventValues, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Interval = []
self.Value = []
class ExponentialSmoothing(ind.ExponentialSmoothing):
"""Represents a <ExponentialSmoothing> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ExponentialSmoothing, self).__init__()
self.RMSE = None
self.transformation = None
for key, value in attribs.items():
setattr(self, key, value)
self.Level = []
self.Seasonality_ExpoSmooth = []
self.TimeValue = []
self.Trend = []
class Extension(ind.Extension):
"""Represents a <Extension> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Extension, self).__init__()
self.extender = None
self.name = None
self.value = None
for key, value in attribs.items():
setattr(self, key, value)
class FactorList(ind.FactorList):
"""Represents a <FactorList> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(FactorList, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Predictor = []
class AlwaysFalse(ind.AlwaysFalse):
"""Represents a <AlwaysFalse> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(AlwaysFalse, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
@property
def tag(self):
return "False"
class FieldColumnPair(ind.FieldColumnPair):
"""Represents a <FieldColumnPair> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(FieldColumnPair, self).__init__()
self.field = None
self.column = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class FieldRef(ind.FieldRef):
"""Represents a <FieldRef> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(FieldRef, self).__init__()
self.field = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class GeneralRegressionModel(ind.GeneralRegressionModel):
"""Represents a <GeneralRegressionModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(GeneralRegressionModel, self).__init__()
self.targetVariableName = None
self.modelType = None
self.modelName = None
self.functionName = None
self.algorithmName = None
self.targetReferenceCategory = None
self.cumulativeLink = None
self.linkFunction = None
self.linkParameter = None
self.trialsVariable = None
self.trialsValue = None
self.distribution = None
self.distParameter = None
self.offsetVariable = None
self.offsetValue = None
self.modelDF = None
self.endTimeVariable = None
self.startTimeVariable = None
self.subjectIDVariable = None
self.statusVariable = None
self.baselineStrataVariable = None
for key, value in attribs.items():
setattr(self, key, value)
self.BaseCumHazardTables = []
self.CovariateList = []
self.EventValues = []
self.Extension = []
self.FactorList = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
self.PCovMatrix = []
self.PPMatrix = []
self.ParamMatrix = []
self.ParameterList = []
self.Targets = []
class Header(ind.Header):
"""Represents a <Header> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Header, self).__init__()
self.copyright = None
self.description = None
for key, value in attribs.items():
setattr(self, key, value)
self.Annotation = []
self.Application = []
self.Extension = []
self.Timestamp = []
class INT_Entries(ind.INT_Entries):
"""Represents a <INT_Entries> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(INT_Entries, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
@property
def tag(self):
return "INT-Entries"
class INT_SparseArray(ind.INT_SparseArray):
"""Represents a <INT_SparseArray> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(INT_SparseArray, self).__init__()
self.n = None
self.defaultValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.INT_Entries = []
self.Indices = []
@property
def tag(self):
return "INT-SparseArray"
class Indices(ind.Indices):
"""Represents a <Indices> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Indices, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
class InlineTable(ind.InlineTable):
"""Represents a <InlineTable> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(InlineTable, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.row = []
class Interval(ind.Interval):
"""Represents a <Interval> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Interval, self).__init__()
self.closure = None
self.leftMargin = None
self.rightMargin = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Item(ind.Item):
"""Represents a <Item> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Item, self).__init__()
self.id = None
self.value = None
self.mappedValue = None
self.weight = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class ItemRef(ind.ItemRef):
"""Represents a <ItemRef> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ItemRef, self).__init__()
self.itemRef = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Itemset(ind.Itemset):
"""Represents a <Itemset> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Itemset, self).__init__()
self.id = None
self.support = None
self.numberOfItems = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.ItemRef = []
class KohonenMap(ind.KohonenMap):
"""Represents a <KohonenMap> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(KohonenMap, self).__init__()
self.coord1 = None
self.coord2 = None
self.coord3 = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Level(ind.Level):
"""Represents a <Level> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Level, self).__init__()
self.alpha = None
self.smoothedValue = None
self.quadraticSmoothedValue = None
self.cubicSmoothedValue = None
for key, value in attribs.items():
setattr(self, key, value)
class LiftData(ind.LiftData):
"""Represents a <LiftData> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(LiftData, self).__init__()
self.targetFieldValue = None
self.targetFieldDisplayValue = None
self.rankingQuality = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.ModelLiftGraph = []
self.OptimumLiftGraph = []
self.RandomLiftGraph = []
class LiftGraph(ind.LiftGraph):
"""Represents a <LiftGraph> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(LiftGraph, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.BoundaryValueMeans = []
self.BoundaryValues = []
self.Extension = []
self.XCoordinates = []
self.YCoordinates = []
class LinearKernelType(ind.LinearKernelType):
"""Represents a <LinearKernelType> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(LinearKernelType, self).__init__()
self.description = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class LinearNorm(ind.LinearNorm):
"""Represents a <LinearNorm> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(LinearNorm, self).__init__()
self.orig = None
self.norm = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class LocalTransformations(ind.LocalTransformations):
"""Represents a <LocalTransformations> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(LocalTransformations, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.DerivedField = []
self.Extension = []
class MapValues(ind.MapValues):
"""Represents a <MapValues> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(MapValues, self).__init__()
self.mapMissingTo = None
self.defaultValue = None
self.outputColumn = None
self.dataType = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.FieldColumnPair = []
self.InlineTable = []
self.TableLocator = []
class MatCell(ind.MatCell):
"""Represents a <MatCell> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(MatCell, self).__init__()
self.row = None
self.col = None
for key, value in attribs.items():
setattr(self, key, value)
class Matrix(ind.Matrix):
"""Represents a <Matrix> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Matrix, self).__init__()
self.kind = None
self.nbRows = None
self.nbCols = None
self.diagDefault = None
self.offDiagDefault = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.MatCell = []
class MiningBuildTask(ind.MiningBuildTask):
"""Represents a <MiningBuildTask> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(MiningBuildTask, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class MiningField(ind.MiningField):
"""Represents a <MiningField> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(MiningField, self).__init__()
self.name = None
self.usageType = None
self.optype = None
self.importance = None
self.outliers = None
self.lowValue = None
self.highValue = None
self.missingValueReplacement = None
self.missingValueTreatment = None
self.invalidValueTreatment = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class MiningModel(ind.MiningModel):
"""Represents a <MiningModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(MiningModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
for key, value in attribs.items():
setattr(self, key, value)
self.DecisionTree = []
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
self.Regression = []
self.Segmentation = []
self.Targets = []
class MiningSchema(ind.MiningSchema):
"""Represents a <MiningSchema> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(MiningSchema, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.MiningField = []
class MissingValueWeights(ind.MissingValueWeights):
"""Represents a <MissingValueWeights> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(MissingValueWeights, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class ModelExplanation(ind.ModelExplanation):
"""Represents a <ModelExplanation> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ModelExplanation, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.ClusteringModelQuality = []
self.Correlations = []
self.Extension = []
self.PredictiveModelQuality = []
class ModelLiftGraph(ind.ModelLiftGraph):
"""Represents a <ModelLiftGraph> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ModelLiftGraph, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LiftGraph = []
class ModelStats(ind.ModelStats):
"""Represents a <ModelStats> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ModelStats, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.UnivariateStats = []
class ModelVerification(ind.ModelVerification):
"""Represents a <ModelVerification> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ModelVerification, self).__init__()
self.recordCount = None
self.fieldCount = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.InlineTable = []
self.VerificationFields = []
class NaiveBayesModel(ind.NaiveBayesModel):
"""Represents a <NaiveBayesModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NaiveBayesModel, self).__init__()
self.modelName = None
self.threshold = None
self.functionName = None
self.algorithmName = None
for key, value in attribs.items():
setattr(self, key, value)
self.BayesInputs = []
self.BayesOutput = []
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
self.Targets = []
class NeuralInput(ind.NeuralInput):
"""Represents a <NeuralInput> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NeuralInput, self).__init__()
self.id = None
for key, value in attribs.items():
setattr(self, key, value)
self.DerivedField = []
self.Extension = []
class NeuralInputs(ind.NeuralInputs):
"""Represents a <NeuralInputs> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NeuralInputs, self).__init__()
self.numberOfInputs = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.NeuralInput = []
class NeuralLayer(ind.NeuralLayer):
"""Represents a <NeuralLayer> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NeuralLayer, self).__init__()
self.numberOfNeurons = None
self.activationFunction = None
self.threshold = None
self.width = None
self.altitude = None
self.normalizationMethod = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Neuron = []
class NeuralNetwork(ind.NeuralNetwork):
"""Represents a <NeuralNetwork> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NeuralNetwork, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.activationFunction = None
self.normalizationMethod = None
self.threshold = None
self.width = None
self.altitude = None
self.numberOfLayers = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.NeuralInputs = []
self.NeuralLayer = []
self.NeuralOutputs = []
self.Output = []
self.Targets = []
class NeuralOutput(ind.NeuralOutput):
"""Represents a <NeuralOutput> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NeuralOutput, self).__init__()
self.outputNeuron = None
for key, value in attribs.items():
setattr(self, key, value)
self.DerivedField = []
self.Extension = []
class NeuralOutputs(ind.NeuralOutputs):
"""Represents a <NeuralOutputs> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NeuralOutputs, self).__init__()
self.numberOfOutputs = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.NeuralOutput = []
class Neuron(ind.Neuron):
"""Represents a <Neuron> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Neuron, self).__init__()
self.id = None
self.bias = None
self.width = None
self.altitude = None
for key, value in attribs.items():
setattr(self, key, value)
self.Con = []
self.Extension = []
class Node(ind.Node):
"""Represents a <Node> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Node, self).__init__()
self.id = None
self.score = None
self.recordCount = None
self.defaultChild = None
for key, value in attribs.items():
setattr(self, key, value)
self.CompoundPredicate = []
self.DecisionTree = []
self.Extension = []
self.AlwaysFalse = []
self.Node = []
self.Partition = []
self.Regression = []
self.ScoreDistribution = []
self.SimplePredicate = []
self.SimpleSetPredicate = []
self.AlwaysTrue = []
class NormContinuous(ind.NormContinuous):
"""Represents a <NormContinuous> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NormContinuous, self).__init__()
self.mapMissingTo = None
self.field = None
self.outliers = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LinearNorm = []
class NormDiscrete(ind.NormDiscrete):
"""Represents a <NormDiscrete> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NormDiscrete, self).__init__()
self.field = None
self.method = None
self.value = None
self.mapMissingTo = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class NumericInfo(ind.NumericInfo):
"""Represents a <NumericInfo> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NumericInfo, self).__init__()
self.minimum = None
self.maximum = None
self.mean = None
self.standardDeviation = None
self.median = None
self.interQuartileRange = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Quantile = []
class NumericPredictor(ind.NumericPredictor):
"""Represents a <NumericPredictor> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(NumericPredictor, self).__init__()
self.name = None
self.exponent = None
self.coefficient = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class OptimumLiftGraph(ind.OptimumLiftGraph):
"""Represents a <OptimumLiftGraph> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(OptimumLiftGraph, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LiftGraph = []
class Output(ind.Output):
"""Represents a <Output> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Output, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.OutputField = []
class OutputField(ind.OutputField):
"""Represents a <OutputField> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(OutputField, self).__init__()
self.name = None
self.displayName = None
self.optype = None
self.dataType = None
self.targetField = None
self.feature = None
self.value = None
self.ruleFeature = None
self.algorithm = None
self.rank = None
self.rankBasis = None
self.rankOrder = None
self.isMultiValued = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class PCell(ind.PCell):
"""Represents a <PCell> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PCell, self).__init__()
self.targetCategory = None
self.parameterName = None
self.beta = None
self.df = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class PCovCell(ind.PCovCell):
"""Represents a <PCovCell> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PCovCell, self).__init__()
self.pRow = None
self.pCol = None
self.tRow = None
self.tCol = None
self.value = None
self.targetCategory = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class PCovMatrix(ind.PCovMatrix):
"""Represents a <PCovMatrix> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PCovMatrix, self).__init__()
self.type = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.PCovCell = []
class PPCell(ind.PPCell):
"""Represents a <PPCell> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PPCell, self).__init__()
self.value = None
self.predictorName = None
self.parameterName = None
self.targetCategory = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class PPMatrix(ind.PPMatrix):
"""Represents a <PPMatrix> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PPMatrix, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.PPCell = []
class PairCounts(ind.PairCounts):
"""Represents a <PairCounts> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PairCounts, self).__init__()
self.value = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.TargetValueCounts = []
class ParamMatrix(ind.ParamMatrix):
"""Represents a <ParamMatrix> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ParamMatrix, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.PCell = []
class Parameter(ind.Parameter):
"""Represents a <Parameter> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Parameter, self).__init__()
self.name = None
self.label = None
self.referencePoint = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class ParameterField(ind.ParameterField):
"""Represents a <ParameterField> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ParameterField, self).__init__()
self.name = None
self.optype = None
self.dataType = None
for key, value in attribs.items():
setattr(self, key, value)
class ParameterList(ind.ParameterList):
"""Represents a <ParameterList> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ParameterList, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Parameter = []
class Partition(ind.Partition):
"""Represents a <Partition> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Partition, self).__init__()
self.name = None
self.size = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.PartitionFieldStats = []
class PartitionFieldStats(ind.PartitionFieldStats):
"""Represents a <PartitionFieldStats> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PartitionFieldStats, self).__init__()
self.field = None
self.weighted = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Counts = []
self.Extension = []
self.NumericInfo = []
class PolynomialKernelType(ind.PolynomialKernelType):
"""Represents a <PolynomialKernelType> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PolynomialKernelType, self).__init__()
self.description = None
self.gamma = None
self.coef0 = None
self.degree = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class PredictiveModelQuality(ind.PredictiveModelQuality):
"""Represents a <PredictiveModelQuality> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PredictiveModelQuality, self).__init__()
self.targetField = None
self.dataName = None
self.dataUsage = None
self.meanError = None
self.meanAbsoluteError = None
self.meanSquaredError = None
self.r_squared = None
for key, value in attribs.items():
setattr(self, key, value)
self.ConfusionMatrix = []
self.Extension = []
self.LiftData = []
self.ROC = []
class Predictor(ind.Predictor):
"""Represents a <Predictor> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Predictor, self).__init__()
self.name = None
self.contrastMatrixType = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Matrix = []
class PredictorTerm(ind.PredictorTerm):
"""Represents a <PredictorTerm> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(PredictorTerm, self).__init__()
self.coefficient = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.FieldRef = []
class Quantile(ind.Quantile):
"""Represents a <Quantile> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Quantile, self).__init__()
self.quantileLimit = None
self.quantileValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class REAL_Entries(ind.REAL_Entries):
"""Represents a <REAL_Entries> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(REAL_Entries, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
@property
def tag(self):
return "REAL-Entries"
class REAL_SparseArray(ind.REAL_SparseArray):
"""Represents a <REAL_SparseArray> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(REAL_SparseArray, self).__init__()
self.n = None
self.defaultValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.Indices = []
self.REAL_Entries = []
@property
def tag(self):
return "REAL-SparseArray"
class ROC(ind.ROC):
"""Represents a <ROC> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ROC, self).__init__()
self.positiveTargetFieldValue = None
self.positiveTargetFieldDisplayValue = None
self.negativeTargetFieldValue = None
self.negativeTargetFieldDisplayValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.ROCGraph = []
class ROCGraph(ind.ROCGraph):
"""Represents a <ROCGraph> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ROCGraph, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.BoundaryValues = []
self.Extension = []
self.XCoordinates = []
self.YCoordinates = []
class RadialBasisKernelType(ind.RadialBasisKernelType):
"""Represents a <RadialBasisKernelType> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(RadialBasisKernelType, self).__init__()
self.description = None
self.gamma = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class RandomLiftGraph(ind.RandomLiftGraph):
"""Represents a <RandomLiftGraph> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(RandomLiftGraph, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LiftGraph = []
class Regression(ind.Regression):
"""Represents a <Regression> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Regression, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.normalizationMethod = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LocalTransformations = []
self.ModelStats = []
self.Output = []
self.RegressionTable = []
self.ResultField = []
self.Targets = []
class RegressionModel(ind.RegressionModel):
"""Represents a <RegressionModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(RegressionModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.modelType = None
self.targetFieldName = None
self.normalizationMethod = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
self.RegressionTable = []
self.Targets = []
class RegressionTable(ind.RegressionTable):
"""Represents a <RegressionTable> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(RegressionTable, self).__init__()
self.intercept = None
self.targetCategory = None
for key, value in attribs.items():
setattr(self, key, value)
self.CategoricalPredictor = []
self.Extension = []
self.NumericPredictor = []
self.PredictorTerm = []
class ResultField(ind.ResultField):
"""Represents a <ResultField> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ResultField, self).__init__()
self.name = None
self.displayName = None
self.optype = None
self.dataType = None
self.feature = None
self.value = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class RuleSelectionMethod(ind.RuleSelectionMethod):
"""Represents a <RuleSelectionMethod> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(RuleSelectionMethod, self).__init__()
self.criterion = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class RuleSet(ind.RuleSet):
"""Represents a <RuleSet> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(RuleSet, self).__init__()
self.recordCount = None
self.nbCorrect = None
self.defaultScore = None
self.defaultConfidence = None
for key, value in attribs.items():
setattr(self, key, value)
self.CompoundRule = []
self.Extension = []
self.RuleSelectionMethod = []
self.ScoreDistribution = []
self.SimpleRule = []
class RuleSetModel(ind.RuleSetModel):
"""Represents a <RuleSetModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(RuleSetModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
self.RuleSet = []
self.Targets = []
class ScoreDistribution(ind.ScoreDistribution):
"""Represents a <ScoreDistribution> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(ScoreDistribution, self).__init__()
self.value = None
self.recordCount = None
self.confidence = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class SeasonalTrendDecomposition(ind.SeasonalTrendDecomposition):
"""Represents a <SeasonalTrendDecomposition> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SeasonalTrendDecomposition, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
class Seasonality_ExpoSmooth(ind.Seasonality_ExpoSmooth):
"""Represents a <Seasonality_ExpoSmooth> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Seasonality_ExpoSmooth, self).__init__()
self.type = None
self.period = None
self.unit = None
self.phase = None
self.delta = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
class Segment(ind.Segment):
"""Represents a <Segment> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Segment, self).__init__()
self.id = None
self.weight = None
for key, value in attribs.items():
setattr(self, key, value)
self.ClusteringModel = []
self.CompoundPredicate = []
self.Extension = []
self.AlwaysFalse = []
self.GeneralRegressionModel = []
self.NaiveBayesModel = []
self.NeuralNetwork = []
self.RegressionModel = []
self.RuleSetModel = []
self.SimplePredicate = []
self.SimpleSetPredicate = []
self.SupportVectorMachineModel = []
self.TreeModel = []
self.AlwaysTrue = []
class Segmentation(ind.Segmentation):
"""Represents a <Segmentation> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Segmentation, self).__init__()
self.multipleModelMethod = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LocalTransformations = []
self.Segment = []
class Sequence(ind.Sequence):
"""Represents a <Sequence> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Sequence, self).__init__()
self.id = None
self.numberOfSets = None
self.occurrence = None
self.support = None
for key, value in attribs.items():
setattr(self, key, value)
self.Delimiter = []
self.Extension = []
self.SetReference = []
self.Time = []
class SequenceModel(ind.SequenceModel):
"""Represents a <SequenceModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SequenceModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.numberOfTransactions = None
self.maxNumberOfItemsPerTransaction = None
self.avgNumberOfItemsPerTransaction = None
self.numberOfTransactionGroups = None
self.maxNumberOfTAsPerTAGroup = None
self.avgNumberOfTAsPerTAGroup = None
for key, value in attribs.items():
setattr(self, key, value)
self.Constraints = []
self.Extension = []
self.Item = []
self.Itemset = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelStats = []
self.Sequence = []
self.SequenceRule = []
self.SetPredicate = []
class SequenceReference(ind.SequenceReference):
"""Represents a <SequenceReference> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SequenceReference, self).__init__()
self.seqId = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class SequenceRule(ind.SequenceRule):
"""Represents a <SequenceRule> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SequenceRule, self).__init__()
self.id = None
self.numberOfSets = None
self.occurrence = None
self.support = None
self.confidence = None
self.lift = None
for key, value in attribs.items():
setattr(self, key, value)
self.AntecedentSequence = []
self.ConsequentSequence = []
self.Delimiter = []
self.Extension = []
self.Time = []
class SetPredicate(ind.SetPredicate):
"""Represents a <SetPredicate> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SetPredicate, self).__init__()
self.id = None
self.field = None
self.operator = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class SetReference(ind.SetReference):
"""Represents a <SetReference> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SetReference, self).__init__()
self.setId = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class SigmoidKernelType(ind.SigmoidKernelType):
"""Represents a <SigmoidKernelType> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SigmoidKernelType, self).__init__()
self.description = None
self.gamma = None
self.coef0 = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class SimplePredicate(ind.SimplePredicate):
"""Represents a <SimplePredicate> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SimplePredicate, self).__init__()
self.field = None
self.operator = None
self.value = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class SimpleRule(ind.SimpleRule):
"""Represents a <SimpleRule> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SimpleRule, self).__init__()
self.id = None
self.score = None
self.recordCount = None
self.nbCorrect = None
self.confidence = None
self.weight = None
for key, value in attribs.items():
setattr(self, key, value)
self.CompoundPredicate = []
self.Extension = []
self.AlwaysFalse = []
self.ScoreDistribution = []
self.SimplePredicate = []
self.SimpleSetPredicate = []
self.AlwaysTrue = []
class SimpleSetPredicate(ind.SimpleSetPredicate):
"""Represents a <SimpleSetPredicate> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SimpleSetPredicate, self).__init__()
self.field = None
self.booleanOperator = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class SpectralAnalysis(ind.SpectralAnalysis):
"""Represents a <SpectralAnalysis> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SpectralAnalysis, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
class SupportVector(ind.SupportVector):
"""Represents a <SupportVector> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SupportVector, self).__init__()
self.vectorId = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class SupportVectorMachine(ind.SupportVectorMachine):
"""Represents a <SupportVectorMachine> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SupportVectorMachine, self).__init__()
self.targetCategory = None
self.alternateTargetCategory = None
self.threshold = None
for key, value in attribs.items():
setattr(self, key, value)
self.Coefficients = []
self.Extension = []
self.SupportVectors = []
class SupportVectorMachineModel(ind.SupportVectorMachineModel):
"""Represents a <SupportVectorMachineModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SupportVectorMachineModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.threshold = None
self.svmRepresentation = None
self.classificationMethod = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LinearKernelType = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
self.PolynomialKernelType = []
self.RadialBasisKernelType = []
self.SigmoidKernelType = []
self.SupportVectorMachine = []
self.Targets = []
self.VectorDictionary = []
class SupportVectors(ind.SupportVectors):
"""Represents a <SupportVectors> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(SupportVectors, self).__init__()
self.numberOfSupportVectors = None
self.numberOfAttributes = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.SupportVector = []
class TableLocator(ind.TableLocator):
"""Represents a <TableLocator> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TableLocator, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Target(ind.Target):
"""Represents a <Target> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Target, self).__init__()
self.field = None
self.optype = None
self.castInteger = None
self.min = None
self.max = None
self.rescaleConstant = None
self.rescaleFactor = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.TargetValue = []
class TargetValue(ind.TargetValue):
"""Represents a <TargetValue> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TargetValue, self).__init__()
self.value = None
self.displayValue = None
self.priorProbability = None
self.defaultValue = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Partition = []
class TargetValueCount(ind.TargetValueCount):
"""Represents a <TargetValueCount> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TargetValueCount, self).__init__()
self.value = None
self.count = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class TargetValueCounts(ind.TargetValueCounts):
"""Represents a <TargetValueCounts> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TargetValueCounts, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.TargetValueCount = []
class Targets(ind.Targets):
"""Represents a <Targets> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Targets, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.Target = []
class Taxonomy(ind.Taxonomy):
"""Represents a <Taxonomy> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Taxonomy, self).__init__()
self.name = None
for key, value in attribs.items():
setattr(self, key, value)
self.ChildParent = []
self.Extension = []
class TextCorpus(ind.TextCorpus):
"""Represents a <TextCorpus> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TextCorpus, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.TextDocument = []
class TextDictionary(ind.TextDictionary):
"""Represents a <TextDictionary> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TextDictionary, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
self.Taxonomy = []
class TextDocument(ind.TextDocument):
"""Represents a <TextDocument> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TextDocument, self).__init__()
self.id = None
self.name = None
self.length = None
self.file = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class TextModel(ind.TextModel):
"""Represents a <TextModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TextModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.numberOfTerms = None
self.numberOfDocuments = None
for key, value in attribs.items():
setattr(self, key, value)
self.DocumentTermMatrix = []
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
self.Targets = []
self.TextCorpus = []
self.TextDictionary = []
self.TextModelNormalization = []
self.TextModelSimiliarity = []
class TextModelNormalization(ind.TextModelNormalization):
"""Represents a <TextModelNormalization> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TextModelNormalization, self).__init__()
self.localTermWeights = None
self.globalTermWeights = None
self.documentNormalization = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class TextModelSimiliarity(ind.TextModelSimiliarity):
"""Represents a <TextModelSimiliarity> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TextModelSimiliarity, self).__init__()
self.similarityType = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class Time(ind.Time):
"""Represents a <Time> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Time, self).__init__()
self.min = None
self.max = None
self.mean = None
self.standardDeviation = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class TimeAnchor(ind.TimeAnchor):
"""Represents a <TimeAnchor> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TimeAnchor, self).__init__()
self.type = None
self.offset = None
self.stepsize = None
self.displayName = None
for key, value in attribs.items():
setattr(self, key, value)
self.TimeCycle = []
self.TimeException = []
class TimeCycle(ind.TimeCycle):
"""Represents a <TimeCycle> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TimeCycle, self).__init__()
self.length = None
self.type = None
self.displayName = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
class TimeException(ind.TimeException):
"""Represents a <TimeException> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TimeException, self).__init__()
self.type = None
self.count = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
class TimeSeries(ind.TimeSeries):
"""Represents a <TimeSeries> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TimeSeries, self).__init__()
self.usage = None
self.startTime = None
self.endTime = None
self.interpolationMethod = None
for key, value in attribs.items():
setattr(self, key, value)
self.TimeAnchor = []
self.TimeException = []
self.TimeValue = []
class TimeSeriesModel(ind.TimeSeriesModel):
"""Represents a <TimeSeriesModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TimeSeriesModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.bestFit = None
for key, value in attribs.items():
setattr(self, key, value)
self.ARIMA = []
self.ExponentialSmoothing = []
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelStats = []
self.ModelVerification = []
self.Output = []
self.SeasonalTrendDecomposition = []
self.SpectralAnalysis = []
self.TimeSeries = []
class TimeValue(ind.TimeValue):
"""Represents a <TimeValue> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TimeValue, self).__init__()
self.index = None
self.time = None
self.value = None
self.standardError = None
for key, value in attribs.items():
setattr(self, key, value)
self.Timestamp = []
class Timestamp(ind.Timestamp):
"""Represents a <Timestamp> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Timestamp, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class TransformationDictionary(ind.TransformationDictionary):
"""Represents a <TransformationDictionary> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TransformationDictionary, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.DefineFunction = []
self.DerivedField = []
self.Extension = []
class TreeModel(ind.TreeModel):
"""Represents a <TreeModel> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(TreeModel, self).__init__()
self.modelName = None
self.functionName = None
self.algorithmName = None
self.missingValueStrategy = None
self.missingValuePenalty = None
self.noTrueChildStrategy = None
self.splitCharacteristic = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.LocalTransformations = []
self.MiningSchema = []
self.ModelExplanation = []
self.ModelStats = []
self.ModelVerification = []
self.Node = []
self.Output = []
self.Targets = []
class Trend(ind.Trend):
"""Represents a <Trend> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Trend, self).__init__()
self.trend = None
self.gamma = None
self.phi = None
self.smoothedValue = None
for key, value in attribs.items():
setattr(self, key, value)
class AlwaysTrue(ind.AlwaysTrue):
"""Represents a <AlwaysTrue> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(AlwaysTrue, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
@property
def tag(self):
return "True"
class UnivariateStats(ind.UnivariateStats):
"""Represents a <UnivariateStats> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(UnivariateStats, self).__init__()
self.field = None
self.weighted = None
for key, value in attribs.items():
setattr(self, key, value)
self.Anova = []
self.ContStats = []
self.Counts = []
self.DiscrStats = []
self.Extension = []
self.NumericInfo = []
class Value(ind.Value):
"""Represents a <Value> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(Value, self).__init__()
self.value = None
self.displayValue = None
self.property = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class VectorDictionary(ind.VectorDictionary):
"""Represents a <VectorDictionary> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(VectorDictionary, self).__init__()
self.numberOfVectors = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.VectorFields = []
self.VectorInstance = []
class VectorFields(ind.VectorFields):
"""Represents a <VectorFields> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(VectorFields, self).__init__()
self.numberOfFields = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.FieldRef = []
class VectorInstance(ind.VectorInstance):
"""Represents a <VectorInstance> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(VectorInstance, self).__init__()
self.id = None
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
self.REAL_SparseArray = []
class VerificationField(ind.VerificationField):
"""Represents a <VerificationField> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(VerificationField, self).__init__()
self.field = None
self.column = None
self.precision = None
self.zeroThreshold = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class VerificationFields(ind.VerificationFields):
"""Represents a <VerificationFields> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(VerificationFields, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
self.VerificationField = []
class XCoordinates(ind.XCoordinates):
"""Represents a <XCoordinates> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(XCoordinates, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class YCoordinates(ind.YCoordinates):
"""Represents a <YCoordinates> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(YCoordinates, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Array = []
self.Extension = []
class binarySimilarity(ind.binarySimilarity):
"""Represents a <binarySimilarity> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(binarySimilarity, self).__init__()
self.c00_parameter = None
self.c01_parameter = None
self.c10_parameter = None
self.c11_parameter = None
self.d00_parameter = None
self.d01_parameter = None
self.d10_parameter = None
self.d11_parameter = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class chebychev(ind.chebychev):
"""Represents a <chebychev> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(chebychev, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class cityBlock(ind.cityBlock):
"""Represents a <cityBlock> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(cityBlock, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class euclidean(ind.euclidean):
"""Represents a <euclidean> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(euclidean, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class jaccard(ind.jaccard):
"""Represents a <jaccard> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(jaccard, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class minkowski(ind.minkowski):
"""Represents a <minkowski> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(minkowski, self).__init__()
self.p_parameter = None
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class row(ind.row):
"""Represents a <row> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(row, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
class simpleMatching(ind.simpleMatching):
"""Represents a <simpleMatching> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(simpleMatching, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class squaredEuclidean(ind.squaredEuclidean):
"""Represents a <squaredEuclidean> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(squaredEuclidean, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
class tanimoto(ind.tanimoto):
"""Represents a <tanimoto> tag in v4.0 and provides methods to convert to PFA."""
def __init__(self, attribs):
super(tanimoto, self).__init__()
for key, value in attribs.items():
setattr(self, key, value)
self.Extension = []
tagToClass = {
"ARIMA": ARIMA,
"Aggregate": Aggregate,
"Annotation": Annotation,
"Anova": Anova,
"AnovaRow": AnovaRow,
"AntecedentSequence": AntecedentSequence,
"Application": Application,
"Apply": Apply,
"Array": Array,
"AssociationModel": AssociationModel,
"AssociationRule": AssociationRule,
"BaseCumHazardTables": BaseCumHazardTables,
"BaselineCell": BaselineCell,
"BaselineStratum": BaselineStratum,
"BayesInput": BayesInput,
"BayesInputs": BayesInputs,
"BayesOutput": BayesOutput,
"BoundaryValueMeans": BoundaryValueMeans,
"BoundaryValues": BoundaryValues,
"CategoricalPredictor": CategoricalPredictor,
"ChildParent": ChildParent,
"ClassLabels": ClassLabels,
"Cluster": Cluster,
"ClusteringField": ClusteringField,
"ClusteringModel": ClusteringModel,
"ClusteringModelQuality": ClusteringModelQuality,
"Coefficient": Coefficient,
"Coefficients": Coefficients,
"ComparisonMeasure": ComparisonMeasure,
"Comparisons": Comparisons,
"CompoundPredicate": CompoundPredicate,
"CompoundRule": CompoundRule,
"Con": Con,
"ConfusionMatrix": ConfusionMatrix,
"ConsequentSequence": ConsequentSequence,
"Constant": Constant,
"Constraints": Constraints,
"ContStats": ContStats,
"CorrelationFields": CorrelationFields,
"CorrelationMethods": CorrelationMethods,
"CorrelationValues": CorrelationValues,
"Correlations": Correlations,
"Counts": Counts,
"Covariances": Covariances,
"CovariateList": CovariateList,
"DataDictionary": DataDictionary,
"DataField": DataField,
"DecisionTree": DecisionTree,
"DefineFunction": DefineFunction,
"Delimiter": Delimiter,
"DerivedField": DerivedField,
"DiscrStats": DiscrStats,
"Discretize": Discretize,
"DiscretizeBin": DiscretizeBin,
"DocumentTermMatrix": DocumentTermMatrix,
"EventValues": EventValues,
"ExponentialSmoothing": ExponentialSmoothing,
"Extension": Extension,
"FactorList": FactorList,
"False": AlwaysFalse,
"FieldColumnPair": FieldColumnPair,
"FieldRef": FieldRef,
"GeneralRegressionModel": GeneralRegressionModel,
"Header": Header,
"INT-Entries": INT_Entries,
"INT-SparseArray": INT_SparseArray,
"Indices": Indices,
"InlineTable": InlineTable,
"Interval": Interval,
"Item": Item,
"ItemRef": ItemRef,
"Itemset": Itemset,
"KohonenMap": KohonenMap,
"Level": Level,
"LiftData": LiftData,
"LiftGraph": LiftGraph,
"LinearKernelType": LinearKernelType,
"LinearNorm": LinearNorm,
"LocalTransformations": LocalTransformations,
"MapValues": MapValues,
"MatCell": MatCell,
"Matrix": Matrix,
"MiningBuildTask": MiningBuildTask,
"MiningField": MiningField,
"MiningModel": MiningModel,
"MiningSchema": MiningSchema,
"MissingValueWeights": MissingValueWeights,
"ModelExplanation": ModelExplanation,
"ModelLiftGraph": ModelLiftGraph,
"ModelStats": ModelStats,
"ModelVerification": ModelVerification,
"NaiveBayesModel": NaiveBayesModel,
"NeuralInput": NeuralInput,
"NeuralInputs": NeuralInputs,
"NeuralLayer": NeuralLayer,
"NeuralNetwork": NeuralNetwork,
"NeuralOutput": NeuralOutput,
"NeuralOutputs": NeuralOutputs,
"Neuron": Neuron,
"Node": Node,
"NormContinuous": NormContinuous,
"NormDiscrete": NormDiscrete,
"NumericInfo": NumericInfo,
"NumericPredictor": NumericPredictor,
"OptimumLiftGraph": OptimumLiftGraph,
"Output": Output,
"OutputField": OutputField,
"PCell": PCell,
"PCovCell": PCovCell,
"PCovMatrix": PCovMatrix,
"PMML": PMML,
"PPCell": PPCell,
"PPMatrix": PPMatrix,
"PairCounts": PairCounts,
"ParamMatrix": ParamMatrix,
"Parameter": Parameter,
"ParameterField": ParameterField,
"ParameterList": ParameterList,
"Partition": Partition,
"PartitionFieldStats": PartitionFieldStats,
"PolynomialKernelType": PolynomialKernelType,
"PredictiveModelQuality": PredictiveModelQuality,
"Predictor": Predictor,
"PredictorTerm": PredictorTerm,
"Quantile": Quantile,
"REAL-Entries": REAL_Entries,
"REAL-SparseArray": REAL_SparseArray,
"ROC": ROC,
"ROCGraph": ROCGraph,
"RadialBasisKernelType": RadialBasisKernelType,
"RandomLiftGraph": RandomLiftGraph,
"Regression": Regression,
"RegressionModel": RegressionModel,
"RegressionTable": RegressionTable,
"ResultField": ResultField,
"RuleSelectionMethod": RuleSelectionMethod,
"RuleSet": RuleSet,
"RuleSetModel": RuleSetModel,
"ScoreDistribution": ScoreDistribution,
"SeasonalTrendDecomposition": SeasonalTrendDecomposition,
"Seasonality_ExpoSmooth": Seasonality_ExpoSmooth,
"Segment": Segment,
"Segmentation": Segmentation,
"Sequence": Sequence,
"SequenceModel": SequenceModel,
"SequenceReference": SequenceReference,
"SequenceRule": SequenceRule,
"SetPredicate": SetPredicate,
"SetReference": SetReference,
"SigmoidKernelType": SigmoidKernelType,
"SimplePredicate": SimplePredicate,
"SimpleRule": SimpleRule,
"SimpleSetPredicate": SimpleSetPredicate,
"SpectralAnalysis": SpectralAnalysis,
"SupportVector": SupportVector,
"SupportVectorMachine": SupportVectorMachine,
"SupportVectorMachineModel": SupportVectorMachineModel,
"SupportVectors": SupportVectors,
"TableLocator": TableLocator,
"Target": Target,
"TargetValue": TargetValue,
"TargetValueCount": TargetValueCount,
"TargetValueCounts": TargetValueCounts,
"Targets": Targets,
"Taxonomy": Taxonomy,
"TextCorpus": TextCorpus,
"TextDictionary": TextDictionary,
"TextDocument": TextDocument,
"TextModel": TextModel,
"TextModelNormalization": TextModelNormalization,
"TextModelSimiliarity": TextModelSimiliarity,
"Time": Time,
"TimeAnchor": TimeAnchor,
"TimeCycle": TimeCycle,
"TimeException": TimeException,
"TimeSeries": TimeSeries,
"TimeSeriesModel": TimeSeriesModel,
"TimeValue": TimeValue,
"Timestamp": Timestamp,
"TransformationDictionary": TransformationDictionary,
"TreeModel": TreeModel,
"Trend": Trend,
"True": AlwaysTrue,
"UnivariateStats": UnivariateStats,
"Value": Value,
"VectorDictionary": VectorDictionary,
"VectorFields": VectorFields,
"VectorInstance": VectorInstance,
"VerificationField": VerificationField,
"VerificationFields": VerificationFields,
"XCoordinates": XCoordinates,
"YCoordinates": YCoordinates,
"binarySimilarity": binarySimilarity,
"chebychev": chebychev,
"cityBlock": cityBlock,
"euclidean": euclidean,
"jaccard": jaccard,
"minkowski": minkowski,
"row": row,
"simpleMatching": simpleMatching,
"squaredEuclidean": squaredEuclidean,
"tanimoto": tanimoto,
}
|
{
"content_hash": "9f8b36ba645a0c009defc406eacff0c4",
"timestamp": "",
"source": "github",
"line_count": 2715,
"max_line_length": 306,
"avg_line_length": 36.9171270718232,
"alnum_prop": 0.5968971365858525,
"repo_name": "opendatagroup/hadrian",
"id": "39ff0a050b29575a93865d2727cdbc86c09500d0",
"size": "101006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "titus/titus/pmml/version_4_0.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5094"
},
{
"name": "HTML",
"bytes": "43099"
},
{
"name": "JavaScript",
"bytes": "3843"
},
{
"name": "Makefile",
"bytes": "5560"
},
{
"name": "Python",
"bytes": "2949893"
},
{
"name": "R",
"bytes": "327452"
},
{
"name": "Scala",
"bytes": "2772816"
}
],
"symlink_target": ""
}
|
import dataclasses
import json # type: ignore
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.transport.requests import AuthorizedSession # type: ignore
from google.protobuf import json_format
import grpc # type: ignore
from requests import __version__ as requests_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
from .base import RegionAutoscalersTransport
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class RegionAutoscalersRestInterceptor:
"""Interceptor for RegionAutoscalers.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the RegionAutoscalersRestTransport.
.. code-block:: python
class MyCustomRegionAutoscalersInterceptor(RegionAutoscalersRestInterceptor):
def pre_delete(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete(response):
logging.log(f"Received response: {response}")
def pre_get(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get(response):
logging.log(f"Received response: {response}")
def pre_insert(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_insert(response):
logging.log(f"Received response: {response}")
def pre_list(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list(response):
logging.log(f"Received response: {response}")
def pre_patch(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_patch(response):
logging.log(f"Received response: {response}")
def pre_update(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update(response):
logging.log(f"Received response: {response}")
transport = RegionAutoscalersRestTransport(interceptor=MyCustomRegionAutoscalersInterceptor())
client = RegionAutoscalersClient(transport=transport)
"""
def pre_delete(
self,
request: compute.DeleteRegionAutoscalerRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.DeleteRegionAutoscalerRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionAutoscalers server.
"""
return request, metadata
def post_delete(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for delete
Override in a subclass to manipulate the response
after it is returned by the RegionAutoscalers server but before
it is returned to user code.
"""
return response
def pre_get(
self,
request: compute.GetRegionAutoscalerRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.GetRegionAutoscalerRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionAutoscalers server.
"""
return request, metadata
def post_get(self, response: compute.Autoscaler) -> compute.Autoscaler:
"""Post-rpc interceptor for get
Override in a subclass to manipulate the response
after it is returned by the RegionAutoscalers server but before
it is returned to user code.
"""
return response
def pre_insert(
self,
request: compute.InsertRegionAutoscalerRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.InsertRegionAutoscalerRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for insert
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionAutoscalers server.
"""
return request, metadata
def post_insert(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for insert
Override in a subclass to manipulate the response
after it is returned by the RegionAutoscalers server but before
it is returned to user code.
"""
return response
def pre_list(
self,
request: compute.ListRegionAutoscalersRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.ListRegionAutoscalersRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionAutoscalers server.
"""
return request, metadata
def post_list(
self, response: compute.RegionAutoscalerList
) -> compute.RegionAutoscalerList:
"""Post-rpc interceptor for list
Override in a subclass to manipulate the response
after it is returned by the RegionAutoscalers server but before
it is returned to user code.
"""
return response
def pre_patch(
self,
request: compute.PatchRegionAutoscalerRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.PatchRegionAutoscalerRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for patch
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionAutoscalers server.
"""
return request, metadata
def post_patch(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for patch
Override in a subclass to manipulate the response
after it is returned by the RegionAutoscalers server but before
it is returned to user code.
"""
return response
def pre_update(
self,
request: compute.UpdateRegionAutoscalerRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.UpdateRegionAutoscalerRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for update
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionAutoscalers server.
"""
return request, metadata
def post_update(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for update
Override in a subclass to manipulate the response
after it is returned by the RegionAutoscalers server but before
it is returned to user code.
"""
return response
@dataclasses.dataclass
class RegionAutoscalersRestStub:
_session: AuthorizedSession
_host: str
_interceptor: RegionAutoscalersRestInterceptor
class RegionAutoscalersRestTransport(RegionAutoscalersTransport):
"""REST backend transport for RegionAutoscalers.
The RegionAutoscalers API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
NOTE: This REST transport functionality is currently in a beta
state (preview). We welcome your feedback via an issue in this
library's source repository. Thank you!
"""
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[RegionAutoscalersRestInterceptor] = None,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
NOTE: This REST transport functionality is currently in a beta
state (preview). We welcome your feedback via a GitHub issue in
this library's repository. Thank you!
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or RegionAutoscalersRestInterceptor()
self._prep_wrapped_messages(client_info)
class _Delete(RegionAutoscalersRestStub):
def __hash__(self):
return hash("Delete")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.DeleteRegionAutoscalerRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteRegionAutoscalerRequest):
The request object. A request message for
RegionAutoscalers.Delete. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/compute/v1/projects/{project}/regions/{region}/autoscalers/{autoscaler}",
},
]
request, metadata = self._interceptor.pre_delete(request, metadata)
pb_request = compute.DeleteRegionAutoscalerRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_delete(resp)
return resp
class _Get(RegionAutoscalersRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetRegionAutoscalerRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Autoscaler:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetRegionAutoscalerRequest):
The request object. A request message for
RegionAutoscalers.Get. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Autoscaler:
Represents an Autoscaler resource. Google Compute Engine
has two Autoscaler resources: \*
`Zonal </compute/docs/reference/rest/v1/autoscalers>`__
\*
`Regional </compute/docs/reference/rest/v1/regionAutoscalers>`__
Use autoscalers to automatically add or delete instances
from a managed instance group according to your defined
autoscaling policy. For more information, read
Autoscaling Groups of Instances. For zonal managed
instance groups resource, use the autoscaler resource.
For regional managed instance groups, use the
regionAutoscalers resource.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/autoscalers/{autoscaler}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
pb_request = compute.GetRegionAutoscalerRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Autoscaler()
pb_resp = compute.Autoscaler.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get(resp)
return resp
class _Insert(RegionAutoscalersRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertRegionAutoscalerRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertRegionAutoscalerRequest):
The request object. A request message for
RegionAutoscalers.Insert. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/autoscalers",
"body": "autoscaler_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
pb_request = compute.InsertRegionAutoscalerRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_insert(resp)
return resp
class _List(RegionAutoscalersRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListRegionAutoscalersRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.RegionAutoscalerList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListRegionAutoscalersRequest):
The request object. A request message for
RegionAutoscalers.List. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.RegionAutoscalerList:
Contains a list of autoscalers.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/autoscalers",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
pb_request = compute.ListRegionAutoscalersRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.RegionAutoscalerList()
pb_resp = compute.RegionAutoscalerList.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list(resp)
return resp
class _Patch(RegionAutoscalersRestStub):
def __hash__(self):
return hash("Patch")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.PatchRegionAutoscalerRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the patch method over HTTP.
Args:
request (~.compute.PatchRegionAutoscalerRequest):
The request object. A request message for
RegionAutoscalers.Patch. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/compute/v1/projects/{project}/regions/{region}/autoscalers",
"body": "autoscaler_resource",
},
]
request, metadata = self._interceptor.pre_patch(request, metadata)
pb_request = compute.PatchRegionAutoscalerRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_patch(resp)
return resp
class _Update(RegionAutoscalersRestStub):
def __hash__(self):
return hash("Update")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.UpdateRegionAutoscalerRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the update method over HTTP.
Args:
request (~.compute.UpdateRegionAutoscalerRequest):
The request object. A request message for
RegionAutoscalers.Update. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "put",
"uri": "/compute/v1/projects/{project}/regions/{region}/autoscalers",
"body": "autoscaler_resource",
},
]
request, metadata = self._interceptor.pre_update(request, metadata)
pb_request = compute.UpdateRegionAutoscalerRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_update(resp)
return resp
@property
def delete(
self,
) -> Callable[[compute.DeleteRegionAutoscalerRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Delete(self._session, self._host, self._interceptor) # type: ignore
@property
def get(self) -> Callable[[compute.GetRegionAutoscalerRequest], compute.Autoscaler]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Get(self._session, self._host, self._interceptor) # type: ignore
@property
def insert(
self,
) -> Callable[[compute.InsertRegionAutoscalerRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Insert(self._session, self._host, self._interceptor) # type: ignore
@property
def list(
self,
) -> Callable[[compute.ListRegionAutoscalersRequest], compute.RegionAutoscalerList]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._List(self._session, self._host, self._interceptor) # type: ignore
@property
def patch(
self,
) -> Callable[[compute.PatchRegionAutoscalerRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Patch(self._session, self._host, self._interceptor) # type: ignore
@property
def update(
self,
) -> Callable[[compute.UpdateRegionAutoscalerRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Update(self._session, self._host, self._interceptor) # type: ignore
@property
def kind(self) -> str:
return "rest"
def close(self):
self._session.close()
__all__ = ("RegionAutoscalersRestTransport",)
|
{
"content_hash": "ea46d8406b240f36ae668321db84cede",
"timestamp": "",
"source": "github",
"line_count": 1005,
"max_line_length": 105,
"avg_line_length": 40.43482587064677,
"alnum_prop": 0.5849595196495805,
"repo_name": "googleapis/python-compute",
"id": "406ed7aa63c5eb47a1319ac61009fc054189eb5f",
"size": "41238",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/compute_v1/services/region_autoscalers/transports/rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
"""
Construct a new CaptioningRNN instance.
Inputs:
- word_to_idx: A dictionary giving the vocabulary. It contains V entries,
and maps each string to a unique integer in the range [0, V).
- input_dim: Dimension D of input image feature vectors.
- wordvec_dim: Dimension W of word vectors.
- hidden_dim: Dimension H for the hidden state of the RNN.
- cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
- dtype: numpy datatype to use; use float32 for training and float64 for
numeric gradient checking.
"""
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
# Initialize CNN -> hidden state projection parameters
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
for k, v in self.params.iteritems():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
# You'll need this
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
N, D = features.shape
_, T = captions.shape
# Forward pass
h_init = np.dot(features, W_proj) + b_proj
w2vec_out, w2vec_cache = word_embedding_forward(captions_in, W_embed)
if self.cell_type == 'rnn':
h_out, rnn_cache = rnn_forward(w2vec_out, h_init, Wx, Wh, b)
elif self.cell_type == 'lstm':
h_out, rnn_cache = lstm_forward(w2vec_out, h_init, Wx, Wh, b)
else:
raise ValueError("Invalid type of RNN.")
taffine_out, taffine_cache = \
temporal_affine_forward(h_out, W_vocab, b_vocab)
loss, dtaffine = temporal_softmax_loss(
taffine_out, captions_out, mask, verbose=False)
# Backward pass
grads = {}
drnn, grads['W_vocab'], grads['b_vocab'] = \
temporal_affine_backward(dtaffine, taffine_cache)
if self.cell_type == 'rnn':
dw2vec, dh_init, grads['Wx'], grads['Wh'], grads['b'] = \
rnn_backward(drnn, rnn_cache)
elif self.cell_type == 'lstm':
dw2vec, dh_init, grads['Wx'], grads['Wh'], grads['b'] = \
lstm_backward(drnn, rnn_cache)
else:
raise ValueError("Invalid type of RNN.")
grads['W_embed'] = word_embedding_backward(dw2vec, w2vec_cache)
grads['W_proj'], grads['b_proj'] = np.dot(features.T, dh_init), np.sum(dh_init, axis=0)
return loss, grads
def sample(self, features, max_length=30):
"""
Run a test-time forward pass for the model, sampling captions for input
feature vectors.
At each timestep, we embed the current word, pass it and the previous hidden
state to the RNN to get the next hidden state, use the hidden state to get
scores for all vocab words, and choose the word with the highest score as
the next word. The initial hidden state is computed by applying an affine
transform to the input image features, and the initial word is the <START>
token.
For LSTMs you will also have to keep track of the cell state; in that case
the initial cell state should be zero.
Inputs:
- features: Array of input image features of shape (N, D).
- max_length: Maximum length T of generated captions.
Returns:
- captions: Array of shape (N, max_length) giving sampled captions,
where each element is an integer in the range [0, V). The first element
of captions should be the first sampled word, not the <START> token.
"""
N, _ = features.shape
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
h_prev = np.dot(features, W_proj) + b_proj
if self.cell_type == 'lstm':
c_prev = np.zeros_like(h_prev)
x = np.ones((N,), dtype=np.int32)
x *= self._start
captions[:, 0] = x
for i in np.arange(max_length - 1):
x = W_embed[x]
if self.cell_type == 'rnn':
h_next, _ = rnn_step_forward(x, h_prev, Wx, Wh, b)
h_prev = h_next
elif self.cell_type == 'lstm':
h_next, c_next, _ = lstm_step_forward(x, h_prev, c_prev, Wx, Wh, b)
h_prev = h_next
c_prev = c_next
else:
raise ValueError("Invalid type of RNN.")
words, _ = \
temporal_affine_forward(h_next[:, np.newaxis, :], W_vocab, b_vocab)
x = np.argmax(np.squeeze(words), axis=1)
captions[:, i + 1] = x
return captions
|
{
"content_hash": "9110f637f1129f379600b36c62cb9999",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 91,
"avg_line_length": 37.175925925925924,
"alnum_prop": 0.6344956413449564,
"repo_name": "5hubh4m/CS231n",
"id": "c0dfa782b9e0f36d468bf4819ec63b91ea4857ba",
"size": "8030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Assignment3/cs231n/classifiers/rnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9191840"
},
{
"name": "Python",
"bytes": "214685"
},
{
"name": "Shell",
"bytes": "540"
}
],
"symlink_target": ""
}
|
from windmill.authoring import WindmillTestClient
def setup_module(module):
module.client = WindmillTestClient(__name__)
def testPass():
client.click(id=u'story')
client.type(text=u'asdfaasdf', id=u'story')
client.click(id=u'flavor')
client.select(option=u'Strawberry', id=u'flavor')
client.click(xpath=u"/html/body/form[@id='frmfrm']/select[@id='flavor']/option[2]")
client.click(xpath=u"/html/body/form[@id='frmfrm']/p/input[1]")
client.asserts.assertValue(validator=u'', id=u'junkfield')
def testFail():
client.asserts.assertValue(validator=u'', id=u'junkfieldasdf')
|
{
"content_hash": "8be42834e5ee3602cf243a6ae7732e5b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 87,
"avg_line_length": 38.25,
"alnum_prop": 0.6993464052287581,
"repo_name": "windmill/windmill",
"id": "4bba593ed7829f518a5cf07b403d3676f73a6f99",
"size": "661",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/framework_tests/files/djangoproject/main/windmilltests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "76128"
},
{
"name": "CSS",
"bytes": "113500"
},
{
"name": "HTML",
"bytes": "226277"
},
{
"name": "JavaScript",
"bytes": "1065858"
},
{
"name": "Makefile",
"bytes": "2367"
},
{
"name": "PHP",
"bytes": "4708"
},
{
"name": "Python",
"bytes": "575202"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
from abc import abstractmethod
class Observer():
"""
Implements Observer entity from Observer pattern.
An Observer can subscribe to an Observable to receive all future notifications.
"""
def __init__(self, name="observer"):
"""
Creates Observer.
An Observer is uniquely defined by its name.vDefault name is 'observer'.
"""
if not isinstance(name, str):
raise TypeError("Expected string for name")
self.name = name
@abstractmethod
def update(self, data):
"""
Automatically called each time Observer receives a notification.
A notification updates data value
"""
pass
def __str__(self):
"""
Simply returns Observer name
"""
return self.name
class Observable():
"""
Implements Observable entity from Observer pattern.
The Observable owns information of interest for one or more other entities.
Observers subscribe to Observable to receive further notifications.
Each time data is set, all Observer are notified.
"""
def __init__(self):
"""
Creates Observable.
No subscriber is registered yet.
"""
self.data = None
self.observers = []
def subscribe(self, observer):
"""
Adds observer to notification list.
"""
if not isinstance(observer, Observer):
raise TypeError("Subscriber must be Observer")
if (observer in self.observers):
raise ValueError("Observer already subscribed")
self.observers.append(observer)
def unsubscribe(self, observer):
"""
Removes observer from notification list.
"""
if not isinstance(observer, Observer):
raise TypeError("Unsubscriber must be Observer")
if not (observer in self.observers):
raise ValueError("Observer not subscribed")
self.observers.remove(observer)
def notify(self, data):
"""
Sends notification data to all subscribed observer
"""
for observer in self.observers:
observer.update(data)
|
{
"content_hash": "b959f1db73d666e0b50c00c5d8038324",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 83,
"avg_line_length": 27.341772151898734,
"alnum_prop": 0.6148148148148148,
"repo_name": "ssls/beetle-agent",
"id": "0074a85a3f0baffa4b55720b2800be72278c3e9b",
"size": "2178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/modules/common/observer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14909"
},
{
"name": "Mako",
"bytes": "1637"
},
{
"name": "Python",
"bytes": "234058"
}
],
"symlink_target": ""
}
|
from dynamic_rest.viewsets import DynamicModelViewSet
from restapi.models import (
CasProtein,
Locus,
LocusSpacerRepeat,
Organism,
OrganismCasProtein,
OrganismSelfSpacer,
Repeat,
Spacer
)
from restapi.serializers import (
CasProteinSerializer,
LSRSerializer,
LocusSerializer,
OCSerializer,
OSSSerializer,
OrganismSerializer,
SpacerSerializer,
RepeatSerializer
)
class SpacerViewSet(DynamicModelViewSet):
"""
API endpoint that allows spacers to be viewed or edited.
"""
queryset = Spacer.objects.all()
serializer_class = SpacerSerializer
class RepeatViewSet(DynamicModelViewSet):
"""
API endpoint that allows repeats to be viewed or edited.
"""
queryset = Repeat.objects.all()
serializer_class = RepeatSerializer
class OrganismViewSet(DynamicModelViewSet):
"""
API endpoint that allows organisms to be viewed or edited.
"""
queryset = Organism.objects.all()
serializer_class = OrganismSerializer
class LocusViewSet(DynamicModelViewSet):
"""
API endpoint that allows loci to be viewed or edited.
"""
queryset = Locus.objects.all()
serializer_class = LocusSerializer
class CasProteinViewSet(DynamicModelViewSet):
"""
API endpoint that allows cas proteins to be viewed or edited.
"""
queryset = CasProtein.objects.all()
serializer_class = CasProteinSerializer
class OCViewSet(DynamicModelViewSet):
"""
API endpoint that allows organism cas pairs to be viewed or edited.
"""
queryset = OrganismCasProtein.objects.all()
serializer_class = OCSerializer
class OSSViewSet(DynamicModelViewSet):
"""
API endpoint that allows organism self targeting spacer pairs to be
viewed or edited.
"""
queryset = OrganismSelfSpacer.objects.all()
serializer_class = OSSSerializer
class LSRViewSet(DynamicModelViewSet):
"""
API endpoint that allows locus spacer repeat pairs to be viewed or
edited.
"""
queryset = LocusSpacerRepeat.objects.all()
serializer_class = LSRSerializer
|
{
"content_hash": "b3917ae9afc2899e41e744f740f7e4eb",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 71,
"avg_line_length": 23.897727272727273,
"alnum_prop": 0.7113647170708511,
"repo_name": "goyalsid/phageParser",
"id": "c360734acf1239ea9668b726dd0670f97b736323",
"size": "2103",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "restapi/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10191"
},
{
"name": "Makefile",
"bytes": "1902"
},
{
"name": "Perl",
"bytes": "4413"
},
{
"name": "Python",
"bytes": "56781"
}
],
"symlink_target": ""
}
|
import base64
import unittest
import mock
IMAGE_CONTENT = b'/9j/4QNURXhpZgAASUkq'
IMAGE_SOURCE = 'gs://some/image.jpg'
PROJECT = 'PROJECT'
B64_IMAGE_CONTENT = base64.b64encode(IMAGE_CONTENT).decode('ascii')
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class TestClient(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.vision.client import Client
return Client
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
creds = _make_credentials()
client = self._make_one(project=PROJECT, credentials=creds)
self.assertEqual(client.project, PROJECT)
def test_face_annotation(self):
from google.cloud.vision.feature import Feature, FeatureTypes
from unit_tests._fixtures import FACE_DETECTION_RESPONSE
RETURNED = FACE_DETECTION_RESPONSE
REQUEST = {
"requests": [
{
"image": {
"content": B64_IMAGE_CONTENT
},
"features": [
{
"maxResults": 3,
"type": "FACE_DETECTION"
}
]
}
]
}
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
features = [Feature(feature_type=FeatureTypes.FACE_DETECTION,
max_results=3)]
image = client.image(content=IMAGE_CONTENT)
response = client.annotate(image, features)
self.assertEqual(REQUEST,
client._connection._requested[0]['data'])
self.assertTrue('faceAnnotations' in response)
def test_image_with_client_gcs_source(self):
from google.cloud.vision.image import Image
credentials = _make_credentials()
client = self._make_one(project=PROJECT,
credentials=credentials)
gcs_image = client.image(source_uri=IMAGE_SOURCE)
self.assertIsInstance(gcs_image, Image)
self.assertEqual(gcs_image.source, IMAGE_SOURCE)
def test_image_with_client_raw_content(self):
from google.cloud.vision.image import Image
credentials = _make_credentials()
client = self._make_one(project=PROJECT,
credentials=credentials)
raw_image = client.image(content=IMAGE_CONTENT)
self.assertIsInstance(raw_image, Image)
self.assertEqual(raw_image.content, B64_IMAGE_CONTENT)
def test_image_with_client_filename(self):
from mock import mock_open
from mock import patch
from google.cloud.vision.image import Image
credentials = _make_credentials()
client = self._make_one(project=PROJECT,
credentials=credentials)
with patch('google.cloud.vision.image.open',
mock_open(read_data=IMAGE_CONTENT)) as m:
file_image = client.image(filename='my_image.jpg')
m.assert_called_once_with('my_image.jpg', 'rb')
self.assertIsInstance(file_image, Image)
self.assertEqual(file_image.content, B64_IMAGE_CONTENT)
def test_multiple_detection_from_content(self):
import copy
from google.cloud.vision.feature import Feature
from google.cloud.vision.feature import FeatureTypes
from unit_tests._fixtures import LABEL_DETECTION_RESPONSE
from unit_tests._fixtures import LOGO_DETECTION_RESPONSE
returned = copy.deepcopy(LABEL_DETECTION_RESPONSE)
logos = copy.deepcopy(LOGO_DETECTION_RESPONSE['responses'][0])
returned['responses'][0]['logoAnnotations'] = logos['logoAnnotations']
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(returned)
limit = 2
label_feature = Feature(FeatureTypes.LABEL_DETECTION, limit)
logo_feature = Feature(FeatureTypes.LOGO_DETECTION, limit)
features = [label_feature, logo_feature]
image = client.image(content=IMAGE_CONTENT)
items = image.detect(features)
self.assertEqual(len(items.logos), 2)
self.assertEqual(len(items.labels), 3)
first_logo = items.logos[0]
second_logo = items.logos[1]
self.assertEqual(first_logo.description, 'Brand1')
self.assertEqual(first_logo.score, 0.63192177)
self.assertEqual(second_logo.description, 'Brand2')
self.assertEqual(second_logo.score, 0.5492993)
first_label = items.labels[0]
second_label = items.labels[1]
third_label = items.labels[2]
self.assertEqual(first_label.description, 'automobile')
self.assertEqual(first_label.score, 0.9776855)
self.assertEqual(second_label.description, 'vehicle')
self.assertEqual(second_label.score, 0.947987)
self.assertEqual(third_label.description, 'truck')
self.assertEqual(third_label.score, 0.88429511)
requested = client._connection._requested
requests = requested[0]['data']['requests']
image_request = requests[0]
label_request = image_request['features'][0]
logo_request = image_request['features'][1]
self.assertEqual(B64_IMAGE_CONTENT,
image_request['image']['content'])
self.assertEqual(label_request['maxResults'], 2)
self.assertEqual(label_request['type'], 'LABEL_DETECTION')
self.assertEqual(logo_request['maxResults'], 2)
self.assertEqual(logo_request['type'], 'LOGO_DETECTION')
def test_face_detection_from_source(self):
from google.cloud.vision.face import Face
from unit_tests._fixtures import FACE_DETECTION_RESPONSE
RETURNED = FACE_DETECTION_RESPONSE
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(source_uri=IMAGE_SOURCE)
faces = image.detect_faces(limit=3)
self.assertEqual(5, len(faces))
self.assertIsInstance(faces[0], Face)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(IMAGE_SOURCE,
image_request['image']['source']['gcs_image_uri'])
self.assertEqual(3, image_request['features'][0]['maxResults'])
def test_face_detection_from_content(self):
from google.cloud.vision.face import Face
from unit_tests._fixtures import FACE_DETECTION_RESPONSE
RETURNED = FACE_DETECTION_RESPONSE
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(content=IMAGE_CONTENT)
faces = image.detect_faces(limit=5)
self.assertEqual(5, len(faces))
self.assertIsInstance(faces[0], Face)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(B64_IMAGE_CONTENT,
image_request['image']['content'])
self.assertEqual(5, image_request['features'][0]['maxResults'])
def test_face_detection_from_content_no_results(self):
RETURNED = {
'responses': [{}]
}
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(content=IMAGE_CONTENT)
faces = image.detect_faces(limit=5)
self.assertEqual(faces, ())
self.assertEqual(len(faces), 0)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(B64_IMAGE_CONTENT,
image_request['image']['content'])
self.assertEqual(5, image_request['features'][0]['maxResults'])
def test_label_detection_from_source(self):
from google.cloud.vision.entity import EntityAnnotation
from unit_tests._fixtures import (
LABEL_DETECTION_RESPONSE as RETURNED)
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(source_uri=IMAGE_SOURCE)
labels = image.detect_labels(limit=3)
self.assertEqual(3, len(labels))
self.assertIsInstance(labels[0], EntityAnnotation)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(IMAGE_SOURCE,
image_request['image']['source']['gcs_image_uri'])
self.assertEqual(3, image_request['features'][0]['maxResults'])
self.assertEqual('automobile', labels[0].description)
self.assertEqual('vehicle', labels[1].description)
self.assertEqual('/m/0k4j', labels[0].mid)
self.assertEqual('/m/07yv9', labels[1].mid)
def test_label_detection_no_results(self):
RETURNED = {
'responses': [{}]
}
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(content=IMAGE_CONTENT)
labels = image.detect_labels()
self.assertEqual(labels, ())
self.assertEqual(len(labels), 0)
def test_landmark_detection_from_source(self):
from google.cloud.vision.entity import EntityAnnotation
from unit_tests._fixtures import (
LANDMARK_DETECTION_RESPONSE as RETURNED)
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(source_uri=IMAGE_SOURCE)
landmarks = image.detect_landmarks(limit=3)
self.assertEqual(2, len(landmarks))
self.assertIsInstance(landmarks[0], EntityAnnotation)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(IMAGE_SOURCE,
image_request['image']['source']['gcs_image_uri'])
self.assertEqual(3, image_request['features'][0]['maxResults'])
self.assertEqual(48.861013, landmarks[0].locations[0].latitude)
self.assertEqual(2.335818, landmarks[0].locations[0].longitude)
self.assertEqual('/m/04gdr', landmarks[0].mid)
self.assertEqual('/m/094llg', landmarks[1].mid)
def test_landmark_detection_from_content(self):
from google.cloud.vision.entity import EntityAnnotation
from unit_tests._fixtures import (
LANDMARK_DETECTION_RESPONSE as RETURNED)
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(content=IMAGE_CONTENT)
landmarks = image.detect_landmarks(limit=5)
self.assertEqual(2, len(landmarks))
self.assertIsInstance(landmarks[0], EntityAnnotation)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(B64_IMAGE_CONTENT,
image_request['image']['content'])
self.assertEqual(5, image_request['features'][0]['maxResults'])
def test_landmark_detection_no_results(self):
RETURNED = {
'responses': [{}]
}
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(content=IMAGE_CONTENT)
landmarks = image.detect_landmarks()
self.assertEqual(landmarks, ())
self.assertEqual(len(landmarks), 0)
def test_logo_detection_from_source(self):
from google.cloud.vision.entity import EntityAnnotation
from unit_tests._fixtures import LOGO_DETECTION_RESPONSE
RETURNED = LOGO_DETECTION_RESPONSE
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(source_uri=IMAGE_SOURCE)
logos = image.detect_logos(limit=3)
self.assertEqual(2, len(logos))
self.assertIsInstance(logos[0], EntityAnnotation)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(IMAGE_SOURCE,
image_request['image']['source']['gcs_image_uri'])
self.assertEqual(3, image_request['features'][0]['maxResults'])
def test_logo_detection_from_content(self):
from google.cloud.vision.entity import EntityAnnotation
from unit_tests._fixtures import LOGO_DETECTION_RESPONSE
RETURNED = LOGO_DETECTION_RESPONSE
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(content=IMAGE_CONTENT)
logos = image.detect_logos(limit=5)
self.assertEqual(2, len(logos))
self.assertIsInstance(logos[0], EntityAnnotation)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(B64_IMAGE_CONTENT,
image_request['image']['content'])
self.assertEqual(5, image_request['features'][0]['maxResults'])
def test_text_detection_from_source(self):
from google.cloud.vision.entity import EntityAnnotation
from unit_tests._fixtures import (
TEXT_DETECTION_RESPONSE as RETURNED)
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(source_uri=IMAGE_SOURCE)
text = image.detect_text(limit=3)
self.assertEqual(3, len(text))
self.assertIsInstance(text[0], EntityAnnotation)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(IMAGE_SOURCE,
image_request['image']['source']['gcs_image_uri'])
self.assertEqual(3, image_request['features'][0]['maxResults'])
self.assertEqual('en', text[0].locale)
self.assertEqual('Google CloudPlatform\n', text[0].description)
self.assertEqual('Google', text[1].description)
self.assertEqual(694, text[0].bounds.vertices[0].y_coordinate)
def test_safe_search_detection_from_source(self):
from google.cloud.vision.likelihood import Likelihood
from google.cloud.vision.safe import SafeSearchAnnotation
from unit_tests._fixtures import SAFE_SEARCH_DETECTION_RESPONSE
RETURNED = SAFE_SEARCH_DETECTION_RESPONSE
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(source_uri=IMAGE_SOURCE)
safe_search = image.detect_safe_search()[0]
self.assertIsInstance(safe_search, SafeSearchAnnotation)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(IMAGE_SOURCE,
image_request['image']['source']['gcs_image_uri'])
self.assertEqual(safe_search.adult, Likelihood.VERY_UNLIKELY)
self.assertEqual(safe_search.spoof, Likelihood.UNLIKELY)
self.assertEqual(safe_search.medical, Likelihood.POSSIBLE)
self.assertEqual(safe_search.violence, Likelihood.VERY_UNLIKELY)
def test_safe_search_no_results(self):
RETURNED = {
'responses': [{}]
}
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(content=IMAGE_CONTENT)
safe_search = image.detect_safe_search()
self.assertEqual(safe_search, ())
self.assertEqual(len(safe_search), 0)
def test_image_properties_detection_from_source(self):
from google.cloud.vision.color import ImagePropertiesAnnotation
from unit_tests._fixtures import IMAGE_PROPERTIES_RESPONSE
RETURNED = IMAGE_PROPERTIES_RESPONSE
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(source_uri=IMAGE_SOURCE)
image_properties = image.detect_properties()[0]
self.assertIsInstance(image_properties, ImagePropertiesAnnotation)
image_request = client._connection._requested[0]['data']['requests'][0]
self.assertEqual(IMAGE_SOURCE,
image_request['image']['source']['gcs_image_uri'])
self.assertEqual(0.42258179, image_properties.colors[0].score)
self.assertEqual(0.025376344,
image_properties.colors[0].pixel_fraction)
self.assertEqual(253, image_properties.colors[0].color.red)
self.assertEqual(203, image_properties.colors[0].color.green)
self.assertEqual(65, image_properties.colors[0].color.blue)
self.assertEqual(0.0, image_properties.colors[0].color.alpha)
def test_image_properties_no_results(self):
RETURNED = {
'responses': [{}]
}
credentials = _make_credentials()
client = self._make_one(project=PROJECT, credentials=credentials)
client._connection = _Connection(RETURNED)
image = client.image(content=IMAGE_CONTENT)
image_properties = image.detect_properties()
self.assertEqual(image_properties, ())
self.assertEqual(len(image_properties), 0)
class TestVisionRequest(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.vision.client import VisionRequest
return VisionRequest
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_make_vision_request(self):
from google.cloud.vision.feature import Feature, FeatureTypes
feature = Feature(feature_type=FeatureTypes.FACE_DETECTION,
max_results=3)
vision_request = self._make_one(IMAGE_CONTENT, feature)
self.assertEqual(IMAGE_CONTENT, vision_request.image)
self.assertEqual(FeatureTypes.FACE_DETECTION,
vision_request.features[0].feature_type)
def test_make_vision_request_with_bad_feature(self):
with self.assertRaises(TypeError):
self._make_one(IMAGE_CONTENT, 'nonsensefeature')
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
import json
json.dumps(kw.get('data', '')) # Simulate JSON encoding.
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
|
{
"content_hash": "c22775e412be694fbe78607eff68a69a",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 79,
"avg_line_length": 42.77242888402626,
"alnum_prop": 0.6410190822121041,
"repo_name": "Fkawala/gcloud-python",
"id": "af6e23a6b01a0c97b40acdf8f4a91fa833d0f73b",
"size": "20123",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vision/unit_tests/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "89702"
},
{
"name": "Python",
"bytes": "3403274"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
from libsaas import http, parsers
from libsaas.services import base
from .resource import BasecampResource
class AttachmentResource(BasecampResource):
path = 'attachments'
@base.apimethod
def get(self, page=None):
"""
Fetch all resources.
:var page: the page that will be return.
If not indicated, first one is returned.
:vartype page: int
"""
params = base.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class Attachments(AttachmentResource):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class GlobalAttachments(AttachmentResource):
pass
|
{
"content_hash": "ba2c2b594f066037a0498841f1dcbc4e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 61,
"avg_line_length": 24,
"alnum_prop": 0.6612903225806451,
"repo_name": "80vs90/libsaas",
"id": "c69f53c2f8cc3f772b5980ddcec9b588d552b652",
"size": "744",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "libsaas/services/basecamp/attachments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "954562"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import abc
import collections
import threading
import time
from supriya.tools import osctools
from supriya.tools.systemtools.SupriyaValueObject import SupriyaValueObject
class Request(SupriyaValueObject):
### CLASS VARIABLES ###
__slots__ = (
'_condition',
'_response',
)
_prototype = None
### INITIALIZER ###
def __init__(
self,
):
self._condition = threading.Condition()
self._response = None
### PRIVATE METHODS ###
def _coerce_completion_message_input(self, message):
if message is None:
return message
elif isinstance(message, (osctools.OscMessage, osctools.OscBundle)):
return message
elif isinstance(message, Request):
return message.to_osc_message()
elif isinstance(message, collections.Sequence):
return osctools.OscMessage(*message)
raise ValueError(message)
def _coerce_completion_message_output(self, contents):
if self.completion_message is not None:
completion_message = self.completion_message.to_datagram()
completion_message = bytearray(completion_message)
contents.append(completion_message)
### PUBLIC METHODS ###
def communicate(
self,
message=None,
server=None,
sync=True,
timeout=1.0,
):
from supriya.tools import servertools
server = server or servertools.Server.get_default_server()
assert isinstance(server, servertools.Server)
assert server.is_running
message = message or self.to_osc_message()
if not sync or self.response_specification is None:
server.send_message(message)
return None
start_time = time.time()
timed_out = False
with self.condition:
with server.response_dispatcher.lock:
callback = self.response_callback
server.register_response_callback(callback)
server.send_message(message)
while self.response is None:
self.condition.wait(timeout)
current_time = time.time()
delta_time = current_time - start_time
if timeout <= delta_time:
timed_out = True
break
if timed_out:
print('TIMED OUT:', repr(self))
return None
return self._response
@abc.abstractmethod
def to_osc_message(self):
raise NotImplementedError
### PUBLIC PROPERTIES ###
@property
def condition(self):
return self._condition
@property
def response(self):
return self._response
@response.setter
def response(self, response):
from supriya.tools import responsetools
assert isinstance(response, responsetools.Response)
with self.condition:
self._response = response
self.condition.notify()
@property
def response_callback(self):
from supriya.tools import requesttools
return requesttools.RequestCallback(
is_one_shot=True,
request=self,
response_specification=self.response_specification,
)
@property
def response_specification(self):
return None
|
{
"content_hash": "e768ce89e17bd5589e37397fd44415a3",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 76,
"avg_line_length": 28.94871794871795,
"alnum_prop": 0.604074402125775,
"repo_name": "andrewyoung1991/supriya",
"id": "7331be235b36def625c5759078dc41426c7ec126",
"size": "3413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/requesttools/Request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
from django.core.management import call_command
from tests.test_case import AppTestCase
from wagtailstreamforms.models import Form, FormSubmission
class Tests(AppTestCase):
fixtures = ["test"]
def test_command(self):
form = Form.objects.get(pk=1)
to_keep = FormSubmission.objects.create(form=form, form_data={})
to_delete = FormSubmission.objects.create(form=form, form_data={})
to_delete.submit_time = to_delete.submit_time - timedelta(days=2)
to_delete.save()
call_command("prunesubmissions", 1)
FormSubmission.objects.get(pk=to_keep.pk)
with self.assertRaises(FormSubmission.DoesNotExist):
FormSubmission.objects.get(pk=to_delete.pk)
|
{
"content_hash": "4cca3b3599e9153138fb9bf4ae4582db",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.7,
"repo_name": "AccentDesign/wagtailstreamforms",
"id": "71bcb5296a3739d9e37fc034c60c318a915da6a2",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/management/test_prunesubmissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "690"
},
{
"name": "HTML",
"bytes": "14735"
},
{
"name": "JavaScript",
"bytes": "213"
},
{
"name": "Makefile",
"bytes": "438"
},
{
"name": "Python",
"bytes": "189375"
},
{
"name": "SCSS",
"bytes": "2257"
},
{
"name": "Shell",
"bytes": "559"
}
],
"symlink_target": ""
}
|
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from scanner import make_scanner
def _import_c_scanstring():
try:
raise ImportError # because assumes simplejson in path
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
"""Subclass of ValueError with the following additional properties:
msg: The unformatted error message
doc: The JSON document being parsed
pos: The start index of doc where parsing failed
end: The end index of doc where parsing failed (may be None)
lineno: The line corresponding to pos
colno: The column corresponding to pos
endlineno: The line corresponding to end (may be None)
endcolno: The column corresponding to end (may be None)
"""
def __init__(self, msg, doc, pos, end=None):
ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
self.msg = msg
self.doc = doc
self.pos = pos
self.end = end
self.lineno, self.colno = linecol(doc, pos)
if end is not None:
self.endlineno, self.endcolno = linecol(doc, end)
else:
self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
#fmt = '{0}: line {1} column {2} (char {3})'
#return fmt.format(msg, lineno, colno, pos)
fmt = '%s: line %d column %d (char %d)'
return fmt % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
#fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
#return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
# Content is contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at" % (terminator,)
#msg = "Invalid control character {0!r} at".format(terminator)
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\escape: " + repr(esc)
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise JSONDecodeError(msg, s, end)
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise JSONDecodeError(msg, s, end)
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise JSONDecodeError(msg, s, end)
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
object_pairs_hook, memo=None,
_w=WHITESPACE.match, _ws=WHITESPACE_STR):
# Backwards compatibility
if memo is None:
memo = {}
memo_get = memo.setdefault
pairs = []
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end + 1
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end)
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
key = memo_get(key, key)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise JSONDecodeError("Expecting : delimiter", s, end)
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
pairs.append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end - 1)
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end - 1)
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end)
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True,
object_pairs_hook=None):
"""
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
*strict* controls the parser's behavior when it encounters an
invalid control character in a string. The default setting of
``True`` means that unescaped control characters are parse errors, if
``False`` then control characters will be allowed in strings.
"""
self.encoding = encoding
self.object_hook = object_hook
self.object_pairs_hook = object_pairs_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.memo = {}
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise JSONDecodeError("Extra data", s, end, len(s))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise JSONDecodeError("No JSON object could be decoded", s, idx)
return obj, end
|
{
"content_hash": "8170ab879c6c108f0ec3205314a1534e",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 78,
"avg_line_length": 35.93617021276596,
"alnum_prop": 0.5448983619498717,
"repo_name": "Jai-Chaudhary/termite-data-server",
"id": "0f7d5f1c122ba8a723ba88d45e3759bc4db03d48",
"size": "15201",
"binary": false,
"copies": "49",
"ref": "refs/heads/master",
"path": "web2py/gluon/contrib/simplejson/decoder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from pandas_ml.misc.patsy_wraps import transform_with_patsy # noqa
|
{
"content_hash": "303d7d8342698146ab0c79464e778fde",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 69,
"avg_line_length": 36.5,
"alnum_prop": 0.7397260273972602,
"repo_name": "sinhrks/pandas-ml",
"id": "b2a63263ad878bd118ddb9ab9a703da42510725d",
"size": "96",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pandas_ml/misc/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "431477"
},
{
"name": "Shell",
"bytes": "903"
}
],
"symlink_target": ""
}
|
import os
import platform
import subprocess
from sys import exit
FILE_CREAT = 'start_gn_nexmo'
def pip_install(osx=''):
if osx == 'red':
subprocess.call(['sudo easy_install pip'],shell=True)
subprocess.call(['sudo pip install django==1.8.5'],shell=True)
subprocess.call(['sudo pip install nexmo'],shell=True)
subprocess.call(['sudo pip install httplib2'],shell=True)
def create_startup(osx=''):
global FILE_CREAT
cwd = os.getcwd()
file_content = '#!/bin/bash' + '\n'
file_content +='\n### BEGIN INIT INFO'
file_content +='\n# Provides: start_gn_nexmo'
file_content +='\n# Required-Start: $local_fs $network'
file_content +='\n# Required-Stop: $local_fs'
file_content +='\n# Default-Start: 2 3 4 5'
file_content +='\n# Default-Stop: 0 1 6'
file_content +='\n# Short-Description: g-notifier'
file_content +='\n# Description: nexmo startup script'
file_content += '\n# chkconfig: 2345 20 80' + '\n'
file_content +='\n### END INIT INFO'
file_content += '\n. /etc/rc.d/init.d/functions' + '\n'
file_content += '\npython {0}/manage.py runserver 0.0.0.0:9033 --insecure &'.format(cwd)
file_content += '\nexit 0'
with open("/etc/init.d/"+FILE_CREAT,"wb") as f:
f.write(file_content)
f.close()
os.chdir('/etc/init.d')
subprocess.call(['sudo chmod +x '+FILE_CREAT],shell=True)
subprocess.call(['sudo /sbin/chkconfig --add '+FILE_CREAT],shell=True)
subprocess.call(['sudo /sbin/chkconfig '+str(FILE_CREAT)+' on'],shell=True)
if osx == 'ubuntu':
subprocess.call(['sudo update-rc.d '+str(FILE_CREAT)+' defaults'],shell=True)
return True
def install(cmd,osx=''):
linux_cmd = "sudo {0} ".format(cmd)
subprocess.call([linux_cmd + ' update -y '],shell=True)
subprocess.call([linux_cmd + ' install -y python-pip '],shell=True)
pip_install(osx)
create_startup(osx)
if __name__ == '__main__':
FAIL = '\033[91m'
ENDC = '\033[0m'
OKGREEN = '\033[92m'
get_current = os.getcwd()
distro = platform.linux_distribution()[0].lower()
if os.geteuid() != 0:
print FAIL + "ERROR: This program need 'sudo'" + ENDC
exit(1)
if distro in ['debian','ubuntu']:
install('apt-get',distro)
print OKGREEN + "Google Cloud with Nexmo SMS installed successfully." + ENDC
subprocess.call(['python {0}/manage.py runserver 0.0.0.0:9033 --insecure &'.format(get_current)],shell=True)
|
{
"content_hash": "09246b0f78fd7e04782bcbfa317e381a",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 110,
"avg_line_length": 39.23880597014925,
"alnum_prop": 0.5850133130467858,
"repo_name": "AdvaiyaLabs/Google-Cloud-with-Nexmo-SMS",
"id": "af929fab5e45b8b89cdef991e262538cfdfc02b9",
"size": "2651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "install.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1380"
},
{
"name": "HTML",
"bytes": "7692"
},
{
"name": "Python",
"bytes": "11824"
}
],
"symlink_target": ""
}
|
import copy
import itertools
import re
import operator
from datetime import datetime, timedelta, date
from collections import defaultdict
import numpy as np
from numpy import percentile as _quantile
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull, _NS_DTYPE,
_TD_DTYPE, ABCSeries, is_list_like,
_infer_dtype_from_scalar, is_null_slice,
is_dtype_equal, is_null_datelike_scalar,
_maybe_promote, is_timedelta64_dtype,
is_datetime64_dtype, is_datetimetz, is_sparse,
array_equivalent, _is_na_compat,
_maybe_convert_string_to_object,
_maybe_convert_scalar,
is_categorical, is_datetimelike_v_numeric,
is_numeric_v_string_like, is_extension_type)
import pandas.core.algorithms as algos
from pandas.types.api import DatetimeTZDtype
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
from pandas.tseries.index import DatetimeIndex
from pandas.formats.printing import pprint_thing
import pandas.core.common as com
import pandas.types.concat as _concat
import pandas.core.missing as missing
import pandas.core.convert as convert
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_box_to_block_values = True
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d, placement '
'implies %d' % (len(self.values),
len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def external_values(self, dtype=None):
""" return an outside world format, currently just the ndarray """
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format
"""
return self.values
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
this is often overriden to handle to_dense like operations
"""
if com.is_object_dtype(dtype):
return self.values.astype(object)
return self.values
def to_dense(self):
return self.values.view()
def to_object_block(self, mgr):
""" return myself as an object block """
values = self.get_values(dtype=object)
return self.make_block(values, klass=ObjectBlock)
@property
def _na_value(self):
return np.nan
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return self.dtype
def make_block(self, values, placement=None, ndim=None, **kwargs):
"""
Create a new block, with type inference propagate any values that are
not specified
"""
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(values, placement=placement, ndim=ndim, **kwargs)
def make_block_same_class(self, values, placement=None, fastpath=True,
**kwargs):
""" Wrap given values in a block of same type as self. """
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (name, len(self), self.dtype)
else:
shape = ' x '.join([pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (name, pprint_thing(
self.mgr_locs.indexer), shape, self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items, mgr=None):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(values=self.get_values().T,
placement=self.mgr_locs, shape=shape,
labels=labels, ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = algos.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return self.make_block(new_values, fastpath=True)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, mgr=None, **kwargs):
""" apply the function to my values; return a block if we are not
one
"""
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
original_value = value
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim - 1) > limit] = False
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
values, _, value, _ = self._try_coerce_args(self.values, value)
blocks = self.putmask(mask, value, inplace=inplace)
blocks = [b.make_block(values=self._try_coerce_result(b.values))
for b in blocks]
return self._maybe_downcast(blocks, downcast)
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# we cannot coerce the underlying object, so
# make an ObjectBlock
return self.to_object_block(mgr=mgr).fillna(original_value,
limit=limit,
inplace=inplace,
downcast=False)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None, mgr=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return self.make_block(nv, fastpath=True)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
# TODO: This either should be completed or removed
dtype = dtypes.get(item, self._downcast_dtype) # noqa
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(self.make_block(nv, fastpath=True, placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None,
**kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, mgr=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return self.make_block(Categorical(self.values, **kwargs))
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if issubclass(dtype.type,
(compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.values
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(values.ravel(), dtype, copy=True)
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, dtype=dtype,
klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
return self.copy() if copy else self
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type, we may have
roundtripped thru object in the mean-time
"""
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, False, other, False
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='nan', quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
if deep:
values = values.copy()
return self.make_block_same_class(values)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility.
"""
original_to_replace = to_replace
mask = isnull(self.values)
# try to replace, if we raise an error, convert to ObjectBlock and
# retry
try:
values, _, to_replace, _ = self._try_coerce_args(self.values,
to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
blocks = self.putmask(mask, value, inplace=inplace)
if convert:
blocks = [b.convert(by_item=True, numeric=False,
copy=not inplace) for b in blocks]
return blocks
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
return self.to_object_block(mgr=mgr).replace(
to_replace=original_to_replace, value=value, inplace=inplace,
filter=filter, regex=regex, convert=convert)
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs['inplace'] else self.copy()
def setitem(self, indexer, value, mgr=None):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, _, value, _ = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([lib.isscalar(idx) for idx in indexer])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0
for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could
# be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (len(arr_value.shape) and
arr_value.shape[0] == values.shape[0] and
np.prod(arr_value.shape) == np.prod(values.shape)):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
value.dtype):
dtype = value.dtype
elif lib.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values), fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except ValueError:
raise
except TypeError:
# cast to the passed dtype if possible
# otherwise raise the original error
try:
# e.g. we are uint32 and our value is uint64
# this is for compat with older numpies
block = self.make_block(transf(values.astype(value.dtype)))
return block.setitem(indexer=indexer, value=value, mgr=mgr)
except:
pass
raise
except Exception:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
if hasattr(new, 'reindex_axis'):
new = new.values
if hasattr(mask, 'reindex_axis'):
mask = mask.values
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
if transpose:
new_values = new_values.T
new = self._try_cast(new)
# If the default repeat behavior in np.putmask would go in the
# wrong direction, then explictly repeat and reshape new instead
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(
new, new_values.shape[-1]).reshape(self.shape)
new = new.astype(new_values.dtype)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
if isinstance(new, np.ndarray):
n = np.squeeze(new[i % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = self.make_block(values=nv[np.newaxis],
placement=[ref_loc], fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(self.make_block(values=nv, fastpath=True))
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None, values=None,
inplace=False, limit=None, limit_direction='forward',
fill_value=None, coerce=False, downcast=None, mgr=None,
**kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = missing.clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m, axis=axis,
inplace=inplace, limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast, mgr=mgr)
# try an interp method
try:
m = missing.clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m, index=index, values=values,
axis=axis, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value, inplace=inplace,
downcast=downcast, mgr=mgr, **kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None, mgr=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = self._try_operate(values)
values = missing.interpolate_2d(values, method=method, axis=axis,
limit=limit, fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block(values, klass=self.__class__, fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
limit_direction='forward', inplace=False, downcast=None,
mgr=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to missing.interpolate_1d
return missing.interpolate_1d(index, x, method=method, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block(interp_values, klass=self.__class__,
fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.bb
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if self.is_sparse:
values = self.get_values()
if fill_tuple is None:
fill_value = self.fill_value
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n, axis=1, mgr=None):
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values, fastpath=True)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods),
axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values, fastpath=True)]
def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
try_cast : try casting the results to the input type
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
# this is a broadcast error heree
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, values_mask, other, other_mask = self._try_coerce_args(
transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
# avoid numpy warning of comparisons again None
if other is None:
result = not func.__name__ == 'eq'
# avoid numpy warning of elementwise comparisons to object
elif is_numeric_v_string_like(values, other):
result = False
else:
result = func(values, other)
# mask if needed
if isinstance(values_mask, np.ndarray) and values_mask.any():
result = result.astype('float64', copy=False)
result[values_mask] = np.nan
if other_mask is True:
result = result.astype('float64', copy=False)
result[:] = np.nan
elif isinstance(other_mask, np.ndarray) and other_mask.any():
result = result.astype('float64', copy=False)
result[other_mask.ravel()] = np.nan
return self._try_coerce_result(result)
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s' %
(repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values' %
repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [self.make_block(result, fastpath=True, )]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False, axis=0, transpose=False, mgr=None):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a new block(s), the result of the func
"""
values = self.values
if transpose:
values = values.T
if hasattr(other, 'reindex_axis'):
other = other.values
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# If the default broadcasting would go in the wrong direction, then
# explictly reshape other instead
if getattr(other, 'ndim', 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1, )))
if not hasattr(cond, 'shape'):
raise ValueError("where must have a condition that is ndarray "
"like")
other = _maybe_convert_string_to_object(other)
other = _maybe_convert_scalar(other)
# our where function
def func(cond, values, other):
if cond.ravel().all():
return values
values, values_mask, other, other_mask = self._try_coerce_args(
values, other)
try:
return self._try_coerce_result(expressions.where(
cond, values, other, raise_on_error=True))
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if transpose:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return self.make_block(result)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(result.take(m.nonzero()[0],
axis=axis))
result_blocks.append(
self.make_block(r.T, placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
def quantile(self, qs, mgr=None, **kwargs):
"""
compute the quantiles of the
Parameters
----------
qs : a scalar or list of the quantiles to be computed
"""
values = self.get_values()
values, mask, _, _ = self._try_coerce_args(values, values)
if not lib.isscalar(mask) and mask.any():
values = values[~mask]
if len(values) == 0:
if com.is_list_like(qs):
result = np.array([self.fill_value])
else:
result = self._na_value
elif com.is_list_like(qs):
values = [_quantile(values, x * 100, **kwargs) for x in qs]
result = np.array(values)
else:
result = _quantile(values, qs * 100, **kwargs)
return self._try_coerce_result(result)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
@property
def shape(self):
if self.ndim == 1:
return (len(self.values)),
return (len(self.mgr_locs), len(self.values))
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values, _, new, _ = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
new_values[mask] = new
new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)]
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return (issubclass(tipo, (np.floating, np.integer)) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
return (isinstance(element, (float, int, np.float_, np.int_)) and
not isinstance(element, (bool, np.bool_, datetime, timedelta,
np.datetime64, np.timedelta64)))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None,
decimal='.', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type,
(np.floating, np.integer, np.complexfloating))
return (isinstance(element,
(float, int, complex, np.float_, np.int_)) and
not isinstance(bool, np.bool_))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class DatetimeLikeBlockMixin(object):
@property
def _na_value(self):
return tslib.NaT
@property
def fill_value(self):
return tslib.iNaT
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def get_values(self, dtype=None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
if com.is_object_dtype(dtype):
return lib.map_infer(self.values.ravel(),
self._box_func).reshape(self.values.shape)
return self.values
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def _box_func(self):
return lambda x: tslib.Timedelta(x, unit='ns')
def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as seconds
if not isinstance(value, np.timedelta64) and com.is_integer(value):
value = Timedelta(value, unit='s')
return super(TimeDeltaBlock, self).fillna(value, **kwargs)
def _try_coerce_args(self, values, other):
"""
Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, Timedelta):
other_mask = isnull(other)
other = other.value
elif isinstance(other, np.timedelta64):
other_mask = isnull(other)
other = other.view('i8')
elif isinstance(other, timedelta):
other = Timedelta(other).value
elif isinstance(other, np.ndarray):
other_mask = isnull(other)
other = other.astype('i8', copy=False).view('i8')
else:
# scalar
other = Timedelta(other)
other_mask = isnull(other)
other = other.value
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, (np.integer, np.float)):
result = self._box_func(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
# FIXME:
# should use the formats.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, mgr=None):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex, mgr=mgr)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False, placement=None,
**kwargs):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath,
placement=placement, **kwargs)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
# TODO: Refactor when convert_objects is removed since there will be 1 path
def convert(self, *args, **kwargs):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
if args:
raise NotImplementedError
by_item = True if 'by_item' not in kwargs else kwargs['by_item']
new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta']
new_style = False
for kw in new_inputs:
new_style |= kw in kwargs
if new_style:
fn = convert._soft_convert_objects
fn_inputs = new_inputs
else:
fn = convert._possibly_convert_objects
fn_inputs = ['convert_dates', 'convert_numeric',
'convert_timedeltas']
fn_inputs += ['copy']
fn_kwargs = {}
for key in fn_inputs:
if key in kwargs:
fn_kwargs[key] = kwargs[key]
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = fn(values.ravel(), **fn_kwargs).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values, ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = fn(
self.values.ravel(), **fn_kwargs).reshape(self.values.shape)
blocks.append(make_block(values, ndim=self.ndim,
placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape), dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False)
for b in blocks])
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or
is_extension_type(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and com.is_re(to_replace):
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, regex=True,
convert=convert, mgr=mgr)
elif not (either_list or regex):
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, value, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, convert=convert,
regex=regex, mgr=mgr)
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
mgr=mgr)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null if it's null it gets returned
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(by_item=True, numeric=False)
return block
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement, fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True,
placement=placement, **kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return self.copy() if copy else self
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
# GH12564: CategoricalBlock is 1-dim only
# while returned results could be any dim
if ((not com.is_categorical_dtype(result)) and
isinstance(result, np.ndarray)):
result = _block_shape(result, ndim=self.ndim)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = self._try_coerce_result(values.fillna(value=value,
limit=limit))
return [self.make_block(values=values)]
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(
values=values.fillna(fill_value=fill_value, method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0, mgr=None):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.bb
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
# but are passed the axis depending on the calling routing
# if its REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, mgr=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return self.make_block(values)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement, fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values, fastpath=True,
placement=placement, **kwargs)
def _astype(self, dtype, mgr=None, **kwargs):
"""
these automatically copy, so copy=True has no effect
raise on an except if raise == True
"""
# if we are passed a datetime64[ns, tz]
if com.is_datetime64tz_dtype(dtype):
dtype = DatetimeTZDtype(dtype)
values = self.values
if getattr(values, 'tz', None) is None:
values = DatetimeIndex(values).tz_localize('UTC')
values = values.tz_convert(dtype.tz)
return self.make_block(values)
# delegate
return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_coerce_args(self, values, other):
"""
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, (datetime, np.datetime64, date)):
other = self._box_func(other)
if getattr(other, 'tz') is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a "
"naive Block")
other_mask = isnull(other)
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
try:
other = np.asarray(other)
other_mask = isnull(other)
other = other.astype('i8', copy=False).view('i8')
except ValueError:
# coercion issues
# let higher levels handle
raise TypeError
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = self._box_func(result)
return result
@property
def _box_func(self):
return tslib.Timestamp
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[..., slicer]
from pandas.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(
values.view('i8').ravel(), tz=getattr(self.values, 'tz', None),
format=format, na_rep=na_rep).reshape(values.shape)
return np.atleast_2d(result)
def should_store(self, value):
return (issubclass(value.dtype.type, np.datetime64) and
not is_datetimetz(value))
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
_holder = DatetimeIndex
is_datetimetz = True
def __init__(self, values, placement, ndim=2, **kwargs):
if not isinstance(values, self._holder):
values = self._holder(values)
dtype = kwargs.pop('dtype', None)
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = DatetimeTZDtype.construct_from_string(dtype)
values = values.tz_localize('UTC').tz_convert(dtype.tz)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
super(DatetimeTZBlock, self).__init__(values, placement=placement,
ndim=ndim, **kwargs)
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
if deep:
values = values.copy(deep=True)
return self.make_block_same_class(values)
def external_values(self):
""" we internally represent the data as a DatetimeIndex, but for
external compat with ndarray, export as a ndarray of Timestamps
"""
return self.values.astype('datetime64[ns]').values
def get_values(self, dtype=None):
# return object dtype as Timestamps with the zones
if com.is_object_dtype(dtype):
f = lambda x: lib.Timestamp(x, tz=self.values.tz)
return lib.map_infer(
self.values.ravel(), f).reshape(self.values.shape)
return self.values
def to_object_block(self, mgr):
"""
return myself as an object block
Since we keep the DTI as a 1-d object, this is different
depends on BlockManager's ndim
"""
values = self.get_values(dtype=object)
kwargs = {}
if mgr.ndim > 1:
values = _block_shape(values, ndim=mgr.ndim)
kwargs['ndim'] = mgr.ndim
kwargs['placement'] = [0]
return self.make_block(values, klass=ObjectBlock, **kwargs)
def replace(self, *args, **kwargs):
# if we are forced to ObjectBlock, then don't coerce (to UTC)
kwargs['convert'] = False
return super(DatetimeTZBlock, self).replace(*args, **kwargs)
def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
return self.values[slicer]
def _try_coerce_args(self, values, other):
"""
localize and return i8 for the values
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.tz_localize(None).asi8
other_mask = False
if isinstance(other, ABCSeries):
other = self._holder(other)
other_mask = isnull(other)
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.tz_localize(None).asi8
other_mask = isnull(other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = lib.Timestamp(other)
tz = getattr(other, 'tz', None)
# test we can have an equal time zone
if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
other_mask = isnull(other)
other = other.tz_localize(None).value
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = lib.Timestamp(result).tz_localize(self.values.tz)
if isinstance(result, np.ndarray):
result = self._holder(result).tz_localize(self.values.tz)
return result
@property
def _box_func(self):
return lambda x: tslib.Timestamp(x, tz=self.dtype.tz)
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
# think about moving this to the DatetimeIndex. This is a non-freq
# (number of periods) shift ###
N = len(self)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
# move to UTC & take
new_values = self.values.tz_localize(None).asi8.take(indexer)
if periods > 0:
new_values[:periods] = tslib.iNaT
else:
new_values[periods:] = tslib.iNaT
new_values = DatetimeIndex(new_values, tz=self.values.tz)
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_box_to_block_values = False
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
# return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
def to_dense(self):
return self.values.to_dense().view()
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True, mgr=None):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement, sparse_index=None,
kind=None, dtype=None, fill_value=None,
copy=False, fastpath=True, **kwargs):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None and not isinstance(values, SparseArray):
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return self.make_block(np.empty(values.shape, dtype=dtype),
placement,
fastpath=True)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return self.make_block(new_values, fastpath=fastpath,
placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = missing.interpolate_2d(self.values.to_dense(), method, axis,
limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = values.fillna(value, downcast=downcast)
return [self.make_block_same_class(values=values,
placement=self.mgr_locs)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None, dtype=None,
fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
if hasattr(values, 'tz'):
klass = DatetimeTZBlock
else:
klass = DatetimeBlock
elif is_datetimetz(values):
klass = DatetimeTZBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
elif klass is DatetimeTZBlock and not is_datetimetz(values):
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement, dtype=dtype)
return klass(values, ndim=ndim, fastpath=fastpath, placement=placement)
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, Panel, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple "
"items")
else:
if self.ndim != block.ndim:
raise AssertionError('Number of Block dimensions (%d) '
'must equal number of axes (%d)' %
(block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [_ensure_index(a)
for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' %
(old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4 and
'0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(
len(self.items), tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False,
consolidate=True, raw=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager
integrity check
consolidate: boolean, default True. Join together blocks having same
dtype
raw: boolean, default False. Return the raw returned results
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == 'where':
align_copy = True
if kwargs.get('align', True):
align_keys = ['other', 'cond']
else:
align_keys = ['cond']
elif f == 'putmask':
align_copy = False
if kwargs.get('align', True):
align_keys = ['new', 'mask']
else:
align_keys = ['mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k])
for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
kwargs['mgr'] = self
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if raw:
if self._is_single_block:
return result_blocks[0]
return result_blocks
elif len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def isnull(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def quantile(self, **kwargs):
return self.apply('quantile', raw=True, **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False,
mgr=None):
""" do a list replace """
if mgr is None:
mgr = self
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
# its possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace, regex=regex,
mgr=mgr)
new_rb = _extend_blocks(result, new_rb)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
Return True if more than one block with the same dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array
for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array,
axis=0, allow_fill=False)
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True, mgr=None):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean o rstring, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d' %
axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals,
placement=block.mgr_locs,
klass=block.__class__,
fastpath=True, )]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
get a cross sectional for a given location in the
items ; handle dups
return the result, is *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isnull(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isnull(self.items)]
# allow a single nan location indexer
if not lib.isscalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isnull(item):
raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0,
allow_dups=True)
def iget(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible
Otherwise return as a ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or not block._box_to_block_values or values.ndim != 1:
return values
# fastpath shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager(
[block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1, fastpath=True)],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x) for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
values = blk.values
# FIXME: this may return non-upcasted types?
if values.ndim == 1:
return values[full_loc[1]]
full_loc[0] = self._blklocs[full_loc[0]]
return values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_extension_type = is_extension_type(value)
# categorical/spares/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value, ndim=self.ndim,
placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(new_index, method=method,
limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
allow_dups : bool
pandas-indexer with -1's only.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(indexer,
fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=(
fill_value if fill_value is not None else blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value, )
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = algos.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(placement=mgr_locs,
fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(blklocs[mgr_locs.indexer],
axis=0, new_mgr_locs=mgr_locs,
fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64')
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype='int64'))
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False) for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError('Number of dimensions must agree '
'got %d and %d' % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock)
for block, oblock in zip(self_blocks, other_blocks))
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError("cannot create SingleBlockManager with more "
"than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block, placement=slice(0, len(axis)), ndim=1,
fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
fill_value = np.nan
new_values = algos.take_1d(values, indexer, fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = missing.interpolate_2d(new_values,
method=method,
limit=limit,
fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._block.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
def external_values(self):
return self._block.external_values()
def internal_values(self):
return self._block.internal_values()
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(), copy=False)
@property
def asobject(self):
"""
return a object dtype array. datetime/timedelta like values are boxed
to Timestamp/Timedelta instances.
"""
return self._block.get_values(dtype=object)
@property
def itemsize(self):
return self._block.values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
if block_shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
passed, implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement
# is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
datetime_tz_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if is_sparse(v):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if is_datetimetz(v):
datetime_tz_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif is_datetimetz(v):
datetime_tz_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2**63 - 1).any():
object_items.append((i, k, v))
continue
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _multi_blockify(complex_items)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(datetime_tz_items):
dttz_blocks = [make_block(array,
klass=DatetimeTZBlock,
fastpath=True,
placement=[i], )
for i, _, array in datetime_tz_items]
blocks.extend(dttz_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True,
placement=[i])
for i, _, array in cat_items]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not len(blocks):
return None
counts = defaultdict(list)
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_dt64_tz = len(counts[DatetimeTZBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
have_cat = len(counts[CategoricalBlock]) > 0
# TODO: have_sparse is not used
have_sparse = len(counts[SparseBlock]) > 0 # noqa
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_dt64_tz or have_td64 or have_cat
if (have_object or
(have_bool and
(have_numeric or have_dt64 or have_dt64_tz or have_td64)) or
(have_numeric and has_non_numeric) or have_cat or have_dt64 or
have_dt64_tz or have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if len(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
# return 1 bigger on the itemsize if unsinged
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values, fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, givin the result """
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1, ) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
# numpy deprecation warning to have i8 vs integer comparisions
if is_datetimelike_v_numeric(a, b):
result = False
# numpy deprecation warning if comparing numeric vs string-like
elif is_numeric_v_string_like(a, b):
result = False
else:
result = op(a, b)
if lib.isscalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return result
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from pandas.core.internals import make_block
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.get_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenamer),
_transform_index(right, rrenamer))
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
# see if we are only masking values that if putted
# will work in the current dtype
try:
nn = n[m]
# make sure that we have a nullable type
# if we have nulls
if not _is_na_compat(v, nn[0]):
raise ValueError
nn_at = nn.astype(v.dtype)
# avoid invalid dtype comparisons
if not is_numeric_v_string_like(nn, nn_at):
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.astype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plan = combine_concat_plans(
[get_mgr_concatenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers], concat_axis)
blocks = [make_block(concatenate_join_units(join_units, concat_axis,
copy=copy),
placement=placement)
for placement, join_units in concat_plan]
return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
upcast_classes = defaultdict(list)
null_upcast_classes = defaultdict(list)
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif com.is_datetimetz(dtype):
upcast_cls = 'datetimetz'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
null_upcast_classes[upcast_cls].append(dtype)
else:
upcast_classes[upcast_cls].append(dtype)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetimetz' in upcast_classes:
dtype = upcast_classes['datetimetz']
return dtype[0], tslib.iNaT
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype determination in get_concat_dtype")
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy and concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape , save for item axis which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
((ax0_indexer is None and
blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
class JoinUnit(object):
def __init__(self, block, shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return '%s(%r, %s)' % (self.__class__.__name__, self.block,
self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return com._get_dtype(com._maybe_promote(self.block.dtype,
self.block.fill_value)[0])
return self._dtype
@cache_readonly
def is_null(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check but a small fraction of values to see if
# a block is NOT null, chunks should help in such cases. 1000 value
# was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
elif self.block.is_sparse:
# fill_value is not NaN and have holes
if not values._null_fill_value and values.sp_index.ngaps > 0:
return False
values_flat = values.ravel(order='K')
else:
values_flat = values.ravel(order='K')
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isnull(values_flat[i:i + chunk_len]).all():
return False
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_null:
if getattr(self.block, 'is_object', False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order='K')
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, 'is_datetimetz', False):
pass
elif getattr(self.block, 'is_categorical', False):
pass
elif getattr(self.block, 'is_sparse', False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
length)
elif (isinstance(slice_or_indexer, np.ndarray) and
slice_or_indexer.dtype == np.bool_):
return 'mask', slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return 'fancy', indexer, len(indexer)
|
{
"content_hash": "eaa6c736f6d8a29e3287081eb42e3650",
"timestamp": "",
"source": "github",
"line_count": 4969,
"max_line_length": 79,
"avg_line_length": 34.072851680418594,
"alnum_prop": 0.5421185059182082,
"repo_name": "BigDataforYou/movie_recommendation_workshop_1",
"id": "abfc5c989056ee03ce5ca181cca1f3161f610b9e",
"size": "169308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/core/internals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "738713"
},
{
"name": "C++",
"bytes": "169366"
},
{
"name": "CSS",
"bytes": "14786"
},
{
"name": "Fortran",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "1408733"
},
{
"name": "JavaScript",
"bytes": "13700"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "19755294"
},
{
"name": "Shell",
"bytes": "3276"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
"""
Abstract base class upon which all front-ends are built.
"""
class IFrontEnd:
def __init__(self):
pass
def go(self):
print "OVERLOAD ME!"
pass
def submit(self, input, user):
"Submits a line of input for Howie to process, from a particular user."
# must delay this import until now to prevent circular references
import howie.core
return howie.core.submit(input, user)
def display(self, output, user):
"Displays output for the specified user."
pass
|
{
"content_hash": "fe06dfa84c3864b5a89b6ced36259b1d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 24.90909090909091,
"alnum_prop": 0.6204379562043796,
"repo_name": "mpetyx/pychatbot",
"id": "c633ff688a99fe26f8e11cf0ad8efa7036e3faef",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AIML/howie-src-0.6.0/howie/frontends/frontend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "99757"
},
{
"name": "C++",
"bytes": "1736027"
},
{
"name": "CSS",
"bytes": "287248"
},
{
"name": "D",
"bytes": "5487330"
},
{
"name": "Java",
"bytes": "4140"
},
{
"name": "JavaScript",
"bytes": "8460"
},
{
"name": "Objective-C",
"bytes": "39"
},
{
"name": "PHP",
"bytes": "4179"
},
{
"name": "Perl",
"bytes": "40530"
},
{
"name": "Python",
"bytes": "943590"
},
{
"name": "Shell",
"bytes": "175258"
},
{
"name": "TeX",
"bytes": "234627"
},
{
"name": "XSLT",
"bytes": "4027675"
}
],
"symlink_target": ""
}
|
import os
import sys
# ------------------------------------------------------------------------
#
# #TODO #FIXME Airflow 2.0
#
# Old import machinary below.
#
# This is deprecated but should be kept until Airflow 2.0
# for compatibility.
#
# ------------------------------------------------------------------------
# Imports the hooks dynamically while keeping the package API clean,
# abstracting the underlying modules
_hooks = {
'base_hook': ['BaseHook'],
'hive_hooks': [
'HiveCliHook',
'HiveMetastoreHook',
'HiveServer2Hook',
],
'hdfs_hook': ['HDFSHook'],
'webhdfs_hook': ['WebHDFSHook'],
'pig_hook': ['PigCliHook'],
'mysql_hook': ['MySqlHook'],
'postgres_hook': ['PostgresHook'],
'presto_hook': ['PrestoHook'],
'samba_hook': ['SambaHook'],
'sqlite_hook': ['SqliteHook'],
'S3_hook': ['S3Hook'],
'zendesk_hook': ['ZendeskHook'],
'http_hook': ['HttpHook'],
'druid_hook': [
'DruidHook',
'DruidDbApiHook',
],
'jdbc_hook': ['JdbcHook'],
'dbapi_hook': ['DbApiHook'],
'mssql_hook': ['MsSqlHook'],
'oracle_hook': ['OracleHook'],
'slack_hook': ['SlackHook'],
}
if not os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
from airflow.utils.helpers import AirflowImporter
airflow_importer = AirflowImporter(sys.modules[__name__], _hooks)
def _integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import hooks_modules
for hooks_module in hooks_modules:
sys.modules[hooks_module.__name__] = hooks_module
globals()[hooks_module._name] = hooks_module
##########################################################
# TODO FIXME Remove in Airflow 2.0
if not os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
from zope.deprecation import deprecated
for _hook in hooks_module._objects:
hook_name = _hook.__name__
globals()[hook_name] = _hook
deprecated(
hook_name,
"Importing plugin hook '{i}' directly from "
"'airflow.hooks' has been deprecated. Please "
"import from 'airflow.hooks.[plugin_module]' "
"instead. Support for direct imports will be dropped "
"entirely in Airflow 2.0.".format(i=hook_name))
|
{
"content_hash": "aee023735722868518d3130699dd61a4",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 31.69736842105263,
"alnum_prop": 0.5350767953507679,
"repo_name": "gtoonstra/airflow",
"id": "38a7dcfebed2cdd0a2b5a6d24bf609ef502c93be",
"size": "3222",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/hooks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68968"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "280689"
},
{
"name": "JavaScript",
"bytes": "1385403"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "4340511"
},
{
"name": "Shell",
"bytes": "47989"
}
],
"symlink_target": ""
}
|
import scrapy
from scrapy.selector import Selector
from XboxBC.items import MetacriticXbox360Item
import re
class MetacriticXbox360(scrapy.Spider):
name = "MetacriticXbox360"
allowed_domains = ["metacritic.com"]
start_urls = (
'http://www.metacritic.com/browse/games/score/metascore/all/xbox360/all?hardware=all&page=0',
)
def parse(self, response):
numberOfPages = int(response.xpath('//*[@id="main"]/div[1]/div[2]/div/div[2]/ul/li[10]/a/text()').extract()[0])
for j in range(0,numberOfPages):
next_page = 'http://www.metacritic.com/browse/games/score/metascore/all/xbox360/all?hardware=all&page=' + str(j)
yield scrapy.Request(next_page, callback=self.metacriticX360Find)
def metacriticX360Find(self, response):
rows_in_big_table = response.xpath('//*[@id="main"]/div[1]/div[1]/div[2]/div[3]/div/div/div')
print(rows_in_big_table.extract())
for i, onerow in enumerate(rows_in_big_table):
metacriticGameItem = MetacriticXbox360Item()
gameName = onerow.xpath('div[3]/a/text()').extract()[0].strip()
reviewScorePro = onerow.xpath('div[2]/div/text()').extract()[0].strip()
reviewScoreUser = onerow.xpath('div[4]/span[2]/text()').extract()[0].strip()
if reviewScoreUser== 'tbd':
reviewScoreUser = ''
metacriticGameItem['gameName'] = gameName
metacriticGameItem['reviewScorePro'] = reviewScorePro
metacriticGameItem['reviewScoreUser'] = reviewScoreUser
yield metacriticGameItem
|
{
"content_hash": "a0a9a3eea733727d6441e779357003ff",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 124,
"avg_line_length": 51.90625,
"alnum_prop": 0.6219145093317279,
"repo_name": "NickTalavera/Xbox-One-Backwards-Compatibility-Predictions",
"id": "1e1efe60e8f3a5e3965adbc753b651d78f20cc12",
"size": "1661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scraping/XboxBC/spiders/MetacriticXbox360.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2091"
},
{
"name": "Python",
"bytes": "27729"
},
{
"name": "R",
"bytes": "99423"
},
{
"name": "Shell",
"bytes": "1256"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('maps', '0002_auto_20170202_0028'),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=32)),
('adv', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='albums', to='maps.Adventure')),
('advMap', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='maps.Map')),
],
),
migrations.CreateModel(
name='Picture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('caption', models.CharField(max_length=512)),
('filename', models.CharField(max_length=12)),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='maps.Album')),
],
),
]
|
{
"content_hash": "de503e92a44c1b76fc36372708dc3e0c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 132,
"avg_line_length": 38.8125,
"alnum_prop": 0.5829307568438004,
"repo_name": "agilman/django_maps2",
"id": "b87270b1cac747baf0bd40d979970b9979e1649f",
"size": "1313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maps/migrations/0003_album_picture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9422"
},
{
"name": "HTML",
"bytes": "67213"
},
{
"name": "JavaScript",
"bytes": "76192"
},
{
"name": "Python",
"bytes": "59299"
},
{
"name": "Shell",
"bytes": "1071"
}
],
"symlink_target": ""
}
|
import pytest
from cactusbot.api import CactusAPI
from cactusbot.handlers import CommandHandler
from cactusbot.packets import MessagePacket
command_handler = CommandHandler(
"TestChannel", CactusAPI("test_token", "test_password"))
def verify(message, expected, *args, **kwargs):
"""Verify target substitutions."""
actual = command_handler._inject(
MessagePacket(
*message if isinstance(message, list) else (message,)),
*args, **kwargs
).text
assert actual == expected
@pytest.mark.asyncio
async def test_on_message():
assert (await command_handler.on_message(
MessagePacket("!cactus")
)).text == "Ohai! I'm CactusBot! 🌵"
def test_inject_argn():
verify(
"Let's raid %ARG1%!",
"Let's raid GreatStreamer!",
"raid", "GreatStreamer"
)
verify(
"Let's raid %ARG1%! #%ARG2%",
"Let's raid GreatStreamer! #ChannelRaid",
"raid", "GreatStreamer", "ChannelRaid"
)
verify(
"Let's raid %ARG1%!",
"Not enough arguments!",
"raid"
)
verify(
"This is the !%ARG0% command.",
"This is the !test command.",
"test", "arg1", "arg2"
)
verify(
"%ARG1|upper% IS AMAZING!",
"SALAD IS AMAZING!",
"amazing", "salad", "taco"
)
verify(
"If you reverse %ARG1%, you get %ARG1|reverse%!",
"If you reverse potato, you get otatop!",
"reverse", "potato"
)
verify(
["Let's raid %ARG1%! ", ("url", "beam.pro/%ARG1|tag%")],
"Let's raid @Streamer! beam.pro/Streamer",
"raid", "@Streamer"
)
def test_inject_args():
verify(
"Have some %ARGS%!",
"Have some hamster-powered floofle waffles!",
"gift", *"hamster-powered floofle waffles".split()
)
verify(
"Have some %ARGS%.",
"Not enough arguments!",
"give"
)
def test_inject_user():
verify(
"Ohai, %USER%!",
"Ohai, SomeUser!",
"ohai", username="SomeUser"
)
verify(
"Ohai, %USER%!",
"Ohai, %USER%!",
"ohai"
)
def test_inject_count():
pass
def test_inject_channel():
verify(
"Welcome to %CHANNEL%'s stream!",
"Welcome to GreatStreamer's stream!",
"welcome", channel="GreatStreamer"
)
verify(
"Welcome to %CHANNEL%'s stream!",
"Welcome to %CHANNEL%'s stream!",
"welcome"
)
|
{
"content_hash": "f892f1f25ecc43cc6ae8967c18c37d0c",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 67,
"avg_line_length": 20.899159663865547,
"alnum_prop": 0.548854041013269,
"repo_name": "CactusDev/CactusBot",
"id": "6b2483248d0ee89882ebde89de09e57c78ba1a0d",
"size": "2490",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/handlers/test_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153245"
}
],
"symlink_target": ""
}
|
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, bytes
from pygments.lexers import agile
from pygments.lexers import web
from pygments.lexers import _phpbuiltins
from pygments.lexers import compiled
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + LEXERS.keys()
_lexer_cache = {}
def _load_lexers(module_name):
"""
Load a lexer (and all others in the module too).
"""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""
Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all know lexers.
"""
for item in LEXERS.itervalues():
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""
Lookup a lexer class by name. Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in LEXERS.itervalues():
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def get_lexer_by_name(_alias, **options):
"""
Get a lexer by an alias.
"""
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.itervalues():
if _alias in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
def get_lexer_for_filename(_fn, code=None, **options):
"""
Get a lexer for a filename. If multiple lexers match the filename
pattern, use ``analyze_text()`` to figure out which one is more
appropriate.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in LEXERS.itervalues():
for filename in filenames:
if fnmatch.fnmatch(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if fnmatch.fnmatch(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = code.decode('latin1')
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus
return bonus
if matches:
matches.sort(key=get_rating)
#print "Possible lexers, after sort:", matches
return matches[-1][0](**options)
raise ClassNotFound('no lexer for filename %r found' % _fn)
def get_lexer_for_mimetype(_mime, **options):
"""
Get a lexer for a mimetype.
"""
for modname, name, _, _, mimetypes in LEXERS.itervalues():
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses():
"""
Return an iterator over all lexer classes.
"""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
for lexer in find_plugin_lexers():
yield lexer
def guess_lexer_for_filename(_fn, _text, **options):
"""
Lookup all lexers that handle those filenames primary (``filenames``)
or secondary (``alias_filenames``). Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = None
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if fnmatch.fnmatch(fn, filename):
matching_lexers.add(lexer)
primary = lexer
for filename in lexer.alias_filenames:
if fnmatch.fnmatch(fn, filename):
matching_lexers.add(lexer)
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
result.sort()
if not result[-1][0] and primary is not None:
return primary(**options)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""
Guess a lexer by strong distinctions in the text (eg, shebang).
"""
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules['pygments.lexers']
newmod = _automodule('pygments.lexers')
newmod.__dict__.update(oldmod.__dict__)
sys.modules['pygments.lexers'] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|
{
"content_hash": "4e7a63a4229f3c82eaebe3f732fe1a52",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 78,
"avg_line_length": 32.46120689655172,
"alnum_prop": 0.6113397955118842,
"repo_name": "sergioska/EvernoteSyntaxHighlight",
"id": "20f9c01c6c536ee43e293c47545768fe2ae35973",
"size": "7555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygments/lexers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "27195"
},
{
"name": "Python",
"bytes": "3346084"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import typing
from ..core import Add, Basic, Integer, Lambda, Mul, Pow, Symbol
from ..core.mul import _keep_coeff
from ..core.relational import Relational
from ..core.sympify import sympify
from ..utilities import default_sort_key
from .precedence import precedence
from .str import StrPrinter
class AssignmentError(Exception):
"""Raised if an assignment variable for a loop is missing."""
class Assignment(Relational):
"""
Represents variable assignment for code generation.
Parameters
==========
lhs : Expr
Diofant object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
rhs : Expr
Diofant object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> Assignment(x, y)
x := y
>>> Assignment(x, 0)
x := 0
>>> A = MatrixSymbol('A', 1, 3)
>>> mat = Matrix([x, y, z]).T
>>> Assignment(A, mat)
A := Matrix([[x, y, z]])
>>> Assignment(A[0, 1], x)
A[0, 1] := x
"""
rel_op = ':='
def __new__(cls, lhs, rhs=0, **assumptions): # pylint: disable=signature-differs
from ..matrices.expressions.matexpr import MatrixElement, MatrixSymbol
from ..tensor import Indexed
lhs = sympify(lhs, strict=True)
rhs = sympify(rhs, strict=True)
# Tuple of things that can be on the lhs of an assignment
assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed)
if not isinstance(lhs, assignable):
raise TypeError('Cannot assign to lhs of type %s.' % type(lhs))
# Indexed types implement shape, but don't define it until later. This
# causes issues in assignment validation. For now, matrices are defined
# as anything with a shape that is not an Indexed
lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed)
rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed)
# If lhs and rhs have same structure, then this assignment is ok
if lhs_is_mat:
if not rhs_is_mat:
raise ValueError('Cannot assign a scalar to a matrix.')
if lhs.shape != rhs.shape:
raise ValueError("Dimensions of lhs and rhs don't align.")
elif rhs_is_mat and not lhs_is_mat:
raise ValueError('Cannot assign a matrix to a scalar.')
return Relational.__new__(cls, lhs, rhs, **assumptions)
class CodePrinter(StrPrinter):
"""The base class for code-printing subclasses."""
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
_default_settings: dict[str, typing.Any] = {
'order': None,
'full_prec': 'auto',
'error_on_reserved': False,
'reserved_word_suffix': '_'
}
def __init__(self, settings=None):
super().__init__(settings=settings)
self.reserved_words = set()
def doprint(self, expr, assign_to=None):
"""
Print the expression as code.
Parameters
==========
expr : Expression
The expression to be printed.
assign_to : Symbol, MatrixSymbol, or string (optional)
If provided, the printed code will set the expression to a
variable with name ``assign_to``.
"""
from ..matrices import MatrixSymbol
if isinstance(assign_to, str):
if expr.is_Matrix:
assign_to = MatrixSymbol(assign_to, *expr.shape)
else:
assign_to = Symbol(assign_to)
elif not isinstance(assign_to, (Basic, type(None))):
raise TypeError(f'{type(self).__name__} cannot assign to object of type {type(assign_to)}')
if assign_to:
expr = Assignment(assign_to, expr)
else:
# non-strict sympify is not enough b/c it errors on iterables
expr = sympify(expr)
# keep a set of expressions that are not strictly translatable to Code
# and number constants that must be declared and initialized
self._not_supported = set() # pylint: disable=attribute-defined-outside-init
self._number_symbols = set() # pylint: disable=attribute-defined-outside-init
lines = self._print(expr).splitlines()
# format the output
if self._settings['human']:
frontlines = []
if len(self._not_supported) > 0:
frontlines.append(self._get_comment(
f'Not supported in {self.language}:'))
for expr in sorted(self._not_supported, key=str):
frontlines.append(self._get_comment(type(expr).__name__))
for name, value in sorted(self._number_symbols, key=str):
frontlines.append(self._declare_number_const(name, value))
lines = frontlines + lines
lines = self._format_code(lines)
result = '\n'.join(lines)
else:
lines = self._format_code(lines)
result = (self._number_symbols, self._not_supported,
'\n'.join(lines))
del self._not_supported
del self._number_symbols
return result
def _doprint_loops(self, expr, assign_to=None):
# Here we print an expression that contains Indexed objects, they
# correspond to arrays in the generated code. The low-level implementation
# involves looping over array elements and possibly storing results in temporary
# variables or accumulate it in the assign_to object.
assert self._settings['contract']
from ..tensor import get_contraction_structure
# Setup loops over non-dummy indices -- all terms need these
indices = self._get_expression_indices(expr, assign_to)
# Setup loops over dummy indices -- each term needs separate treatment
dummies = get_contraction_structure(expr)
openloop, closeloop = self._get_loop_opening_ending(indices)
# terms with no summations first
if None in dummies:
text = StrPrinter.doprint(self, Add(*dummies[None]))
else:
# If all terms have summations we must initialize array to Zero
text = StrPrinter.doprint(self, 0)
# skip redundant assignments (where lhs == rhs)
lhs_printed = self._print(assign_to)
lines = []
if text != lhs_printed:
lines.extend(openloop)
assert assign_to is not None
text = self._get_statement(f'{lhs_printed} = {text}')
lines.append(text)
lines.extend(closeloop)
# then terms with summations
for d, dv in dummies.items():
if isinstance(d, tuple):
indices = self._sort_optimized(d, expr)
openloop_d, closeloop_d = self._get_loop_opening_ending(
indices)
for term in dv:
if term in dummies and not ([list(f) for f in dummies[term]]
== [[None] for f in dummies[term]]):
# If one factor in the term has it's own internal
# contractions, those must be computed first.
# (temporary variables?)
raise NotImplementedError(
'FIXME: no support for contractions in factor yet')
# We need the lhs expression as an accumulator for
# the loops, i.e
#
# for (int d=0; d < dim; d++){
# lhs[] = lhs[] + term[][d]
# } ^.................. the accumulator
#
# We check if the expression already contains the
# lhs, and raise an exception if it does, as that
# syntax is currently undefined. FIXME: What would be
# a good interpretation?
assert assign_to is not None
assert not term.has(assign_to)
lines.extend(openloop)
lines.extend(openloop_d)
text = '%s = %s' % (lhs_printed,
StrPrinter.doprint(self,
assign_to + term))
lines.append(self._get_statement(text))
lines.extend(closeloop_d)
lines.extend(closeloop)
return '\n'.join(lines)
def _get_expression_indices(self, expr, assign_to):
from ..tensor import get_indices
rinds, _ = get_indices(expr)
linds, _ = get_indices(assign_to)
# support broadcast of scalar
if linds and not rinds:
rinds = linds
if rinds != linds:
raise ValueError(f'lhs indices must match non-dummy rhs indices in {expr}')
return self._sort_optimized(rinds, assign_to)
def _sort_optimized(self, indices, expr):
from ..tensor import Indexed
if not indices:
return []
# determine optimized loop order by giving a score to each index
# the index with the highest score are put in the innermost loop.
score_table = {}
for i in indices:
score_table[i] = 0
arrays = expr.atoms(Indexed)
for arr in arrays:
for p, ind in enumerate(arr.indices):
try:
score_table[ind] += self._rate_index_position(p)
except KeyError:
pass
return sorted(indices, key=lambda x: score_table[x])
def _rate_index_position(self, p):
"""Function to calculate score based on position among indices.
This method is used to sort loops in an optimized order, see
CodePrinter._sort_optimized().
"""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _get_statement(self, codestring):
"""Formats a codestring with the proper line ending."""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _get_comment(self, text):
"""Formats a text string as a comment."""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _declare_number_const(self, name, value):
"""Declare a numeric constant at the top of a function."""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _format_code(self, lines):
"""Take in a list of lines of code, and format them accordingly.
This may include indenting, wrapping long lines, etc...
"""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _get_loop_opening_ending(self, indices):
"""Returns a tuple (open_lines, close_lines) containing lists
of codelines
"""
raise NotImplementedError('This function must be implemented by '
'subclass of CodePrinter.')
def _print_Assignment(self, expr):
from ..functions import Piecewise
from ..matrices import MatrixSymbol
from ..tensor.indexed import IndexedBase
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
elif isinstance(lhs, MatrixSymbol):
# Here we form an Assignment for each element in the array,
# printing each one.
lines = []
for (i, j) in self._traverse_matrix_indices(lhs):
temp = Assignment(lhs[i, j], rhs[i, j])
code0 = self._print(temp)
lines.append(code0)
return '\n'.join(lines)
elif self._settings['contract'] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement(f'{lhs_code} = {rhs_code}')
def _print_Symbol(self, expr):
name = super()._print_Symbol(expr)
if name in self.reserved_words:
if self._settings['error_on_reserved']:
msg = ('This expression includes the symbol "{}" which is a '
'reserved keyword in this language.')
raise ValueError(msg.format(name))
return name + self._settings['reserved_word_suffix']
else:
return name
def _print_Function(self, expr):
if expr.func.__name__ in self.known_functions:
cond_func = self.known_functions[expr.func.__name__]
func = None
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func in cond_func:
if cond(*expr.args):
break
else:
return self._print_not_supported(expr)
try:
return func(*[self.parenthesize(item, 0) for item in expr.args])
except TypeError:
return '%s(%s)' % (func, self.stringify(expr.args, ', '))
elif hasattr(expr, '_imp_') and isinstance(expr._imp_, Lambda):
# inlined function
return self._print(expr._imp_(*expr.args))
else:
return self._print_not_supported(expr)
_print_MinMaxBase = _print_Function
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr,
self._print(expr.evalf(self._settings['precision']))))
return str(expr)
def _print_Dummy(self, expr):
# dummies must be printed as unique symbols
return f'{expr.name}_{expr.dummy_index:d}' # Dummy
def _print_Catalan(self, expr):
return self._print_NumberSymbol(expr)
def _print_EulerGamma(self, expr):
return self._print_NumberSymbol(expr)
def _print_GoldenRatio(self, expr):
return self._print_NumberSymbol(expr)
def _print_Exp1(self, expr):
return self._print_NumberSymbol(expr)
def _print_Pi(self, expr):
return self._print_NumberSymbol(expr)
def _print_And(self, expr):
PREC = precedence(expr)
return (' %s ' % self._operators['and']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Or(self, expr):
PREC = precedence(expr)
return (' %s ' % self._operators['or']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Xor(self, expr):
if self._operators.get('xor') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (' %s ' % self._operators['xor']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Equivalent(self, expr):
if self._operators.get('equivalent') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (' %s ' % self._operators['equivalent']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Mul(self, expr):
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = '-'
else:
sign = ''
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order != 'none':
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
else:
a.append(item)
a = a or [Integer(1)]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
if len(b) == 0:
return sign + '*'.join(a_str)
elif len(b) == 1:
return sign + '*'.join(a_str) + '/' + b_str[0]
else:
return sign + '*'.join(a_str) + '/(%s)' % '*'.join(b_str)
def _print_not_supported(self, expr):
self._not_supported.add(expr)
return self.emptyPrinter(expr)
# The following can not be simply translated into C or Fortran
_print_Basic = _print_not_supported
_print_ComplexInfinity = _print_not_supported
_print_Derivative = _print_not_supported
_print_dict = _print_not_supported
_print_ExprCondPair = _print_not_supported
_print_GeometryEntity = _print_not_supported
_print_Infinity = _print_not_supported
_print_Integral = _print_not_supported
_print_Interval = _print_not_supported
_print_Limit = _print_not_supported
_print_list = _print_not_supported
_print_MatrixBase = _print_not_supported
_print_NaN = _print_not_supported
_print_NegativeInfinity = _print_not_supported
_print_Normal = _print_not_supported
_print_Order = _print_not_supported
_print_RootOf = _print_not_supported
_print_RootsOf = _print_not_supported
_print_RootSum = _print_not_supported
_print_tuple = _print_not_supported
_print_Wild = _print_not_supported
_print_WildFunction = _print_not_supported
|
{
"content_hash": "f9f974c2b7d6254fc07d72554a48c13b",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 103,
"avg_line_length": 38.253937007874015,
"alnum_prop": 0.561210312355272,
"repo_name": "diofant/diofant",
"id": "206a24dfb15f78fbd3ac7750af7a1904dffa0cb4",
"size": "19433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diofant/printing/codeprinter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9063539"
}
],
"symlink_target": ""
}
|
"""Unit tests for the utility functions used by the placement API."""
import fixtures
from oslo_middleware import request_id
import webob
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement import util
from nova import objects
from nova import test
from nova.tests import uuidsentinel
class TestCheckAccept(test.NoDBTestCase):
"""Confirm behavior of util.check_accept."""
@staticmethod
@util.check_accept('application/json', 'application/vnd.openstack')
def handler(req):
"""Fake handler to test decorator."""
return True
def test_fail_no_match(self):
req = webob.Request.blank('/')
req.accept = 'text/plain'
error = self.assertRaises(webob.exc.HTTPNotAcceptable,
self.handler, req)
self.assertEqual(
'Only application/json, application/vnd.openstack is provided',
str(error))
def test_fail_complex_no_match(self):
req = webob.Request.blank('/')
req.accept = 'text/html;q=0.9,text/plain,application/vnd.aws;q=0.8'
error = self.assertRaises(webob.exc.HTTPNotAcceptable,
self.handler, req)
self.assertEqual(
'Only application/json, application/vnd.openstack is provided',
str(error))
def test_success_no_accept(self):
req = webob.Request.blank('/')
self.assertTrue(self.handler(req))
def test_success_simple_match(self):
req = webob.Request.blank('/')
req.accept = 'application/json'
self.assertTrue(self.handler(req))
def test_success_complex_any_match(self):
req = webob.Request.blank('/')
req.accept = 'application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
self.assertTrue(self.handler(req))
def test_success_complex_lower_quality_match(self):
req = webob.Request.blank('/')
req.accept = 'application/xml;q=0.9,application/vnd.openstack;q=0.8'
self.assertTrue(self.handler(req))
class TestExtractJSON(test.NoDBTestCase):
# Although the intent of this test class is not to test that
# schemas work, we may as well use a real one to ensure that
# behaviors are what we expect.
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"uuid": {"type": "string", "format": "uuid"}
},
"required": ["name"],
"additionalProperties": False
}
def test_not_json(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'I am a string',
self.schema)
self.assertIn('Malformed JSON', str(error))
def test_malformed_json(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"my bytes got left behind":}',
self.schema)
self.assertIn('Malformed JSON', str(error))
def test_schema_mismatch(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"a": "b"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_type_invalid(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": 1}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_format_checker(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": "hello", "uuid": "not a uuid"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_no_additional_properties(self):
error = self.assertRaises(webob.exc.HTTPBadRequest,
util.extract_json,
'{"name": "hello", "cow": "moo"}',
self.schema)
self.assertIn('JSON does not validate', str(error))
def test_valid(self):
data = util.extract_json(
'{"name": "cow", '
'"uuid": "%s"}' % uuidsentinel.rp_uuid,
self.schema)
self.assertEqual('cow', data['name'])
self.assertEqual(uuidsentinel.rp_uuid, data['uuid'])
class TestJSONErrorFormatter(test.NoDBTestCase):
def setUp(self):
super(TestJSONErrorFormatter, self).setUp()
self.environ = {}
# TODO(jaypipes): Remove this when we get more than a single version
# in the placement API. The fact that we only had a single version was
# masking a bug in the utils code.
_versions = [
'1.0',
'1.1',
]
mod_str = 'nova.api.openstack.placement.microversion.VERSIONS'
self.useFixture(fixtures.MonkeyPatch(mod_str, _versions))
def test_status_to_int_code(self):
body = ''
status = '404 Not Found'
title = ''
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual(404, result['errors'][0]['status'])
def test_strip_body_tags(self):
body = '<h1>Big Error!</h1>'
status = '400 Bad Request'
title = ''
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual('Big Error!', result['errors'][0]['detail'])
def test_request_id_presence(self):
body = ''
status = '400 Bad Request'
title = ''
# no request id in environ, none in error
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('request_id', result['errors'][0])
# request id in environ, request id in error
self.environ[request_id.ENV_REQUEST_ID] = 'stub-id'
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual('stub-id', result['errors'][0]['request_id'])
def test_microversion_406_handling(self):
body = ''
status = '400 Bad Request'
title = ''
# Not a 406, no version info required.
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('max_version', result['errors'][0])
self.assertNotIn('min_version', result['errors'][0])
# A 406 but not because of microversions (microversion
# parsing was successful), no version info
# required.
status = '406 Not Acceptable'
version_obj = microversion.parse_version_string('2.3')
self.environ[microversion.MICROVERSION_ENVIRON] = version_obj
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertNotIn('max_version', result['errors'][0])
self.assertNotIn('min_version', result['errors'][0])
# Microversion parsing failed, status is 406, send version info.
del self.environ[microversion.MICROVERSION_ENVIRON]
result = util.json_error_formatter(
body, status, title, self.environ)
self.assertEqual(microversion.max_version_string(),
result['errors'][0]['max_version'])
self.assertEqual(microversion.min_version_string(),
result['errors'][0]['min_version'])
class TestRequireContent(test.NoDBTestCase):
"""Confirm behavior of util.require_accept."""
@staticmethod
@util.require_content('application/json')
def handler(req):
"""Fake handler to test decorator."""
return True
def test_fail_no_content_type(self):
req = webob.Request.blank('/')
error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
self.handler, req)
self.assertEqual(
'The media type None is not supported, use application/json',
str(error))
def test_fail_wrong_content_type(self):
req = webob.Request.blank('/')
req.content_type = 'text/plain'
error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
self.handler, req)
self.assertEqual(
'The media type text/plain is not supported, use application/json',
str(error))
def test_success_content_type(self):
req = webob.Request.blank('/')
req.content_type = 'application/json'
self.assertTrue(self.handler(req))
class TestPlacementURLs(test.NoDBTestCase):
def setUp(self):
super(TestPlacementURLs, self).setUp()
self.resource_provider = objects.ResourceProvider(
name=uuidsentinel.rp_name,
uuid=uuidsentinel.rp_uuid)
self.resource_class = objects.ResourceClass(
name='CUSTOM_BAREMETAL_GOLD',
id=1000)
def test_resource_provider_url(self):
environ = {}
expected_url = '/resource_providers/%s' % uuidsentinel.rp_uuid
self.assertEqual(expected_url, util.resource_provider_url(
environ, self.resource_provider))
def test_resource_provider_url_prefix(self):
# SCRIPT_NAME represents the mount point of a WSGI
# application when it is hosted at a path/prefix.
environ = {'SCRIPT_NAME': '/placement'}
expected_url = ('/placement/resource_providers/%s'
% uuidsentinel.rp_uuid)
self.assertEqual(expected_url, util.resource_provider_url(
environ, self.resource_provider))
def test_inventories_url(self):
environ = {}
expected_url = ('/resource_providers/%s/inventories'
% uuidsentinel.rp_uuid)
self.assertEqual(expected_url, util.inventory_url(
environ, self.resource_provider))
def test_inventory_url(self):
resource_class = 'DISK_GB'
environ = {}
expected_url = ('/resource_providers/%s/inventories/%s'
% (uuidsentinel.rp_uuid, resource_class))
self.assertEqual(expected_url, util.inventory_url(
environ, self.resource_provider, resource_class))
def test_resource_class_url(self):
environ = {}
expected_url = '/resource_classes/CUSTOM_BAREMETAL_GOLD'
self.assertEqual(expected_url, util.resource_class_url(
environ, self.resource_class))
def test_resource_class_url_prefix(self):
# SCRIPT_NAME represents the mount point of a WSGI
# application when it is hosted at a path/prefix.
environ = {'SCRIPT_NAME': '/placement'}
expected_url = '/placement/resource_classes/CUSTOM_BAREMETAL_GOLD'
self.assertEqual(expected_url, util.resource_class_url(
environ, self.resource_class))
|
{
"content_hash": "a23a9855ea156142cdca4ff8ed6ee978",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 79,
"avg_line_length": 37.08637873754153,
"alnum_prop": 0.582907820478366,
"repo_name": "rajalokan/nova",
"id": "48e9ad0750e710a2c00497792df8c3098f797882",
"size": "11737",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/placement/test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
}
|
def load(provider, name, **kargs):
try:
if provider == 'ec2':
from ec2 import Provider
return Provider(name, **kargs)
print 'Error: no such provider: %s' % provider
return False
except BaseException, e:
print e
|
{
"content_hash": "5acc8b5052757a21353036240b18eb0e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 48,
"avg_line_length": 22.8,
"alnum_prop": 0.6754385964912281,
"repo_name": "tiwilliam/ecluster",
"id": "fe27198509ef968dadf644da9d18e82400335a5d",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecluster/provider/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11982"
},
{
"name": "Shell",
"bytes": "305"
}
],
"symlink_target": ""
}
|
"""
test_examples
~~~~~~~~~~~~~
:copyright: 2015 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'examples'))
import math_expr
class TestMath(object):
"""
Tests for the `examples/math_expr.py` module.
"""
def test_addition(self):
assert math_expr.evaluate('1 + 1') == 2
def test_subtraction(self):
assert math_expr.evaluate('1 - 1') == 0
def test_multiplication(self):
assert math_expr.evaluate('2 * 2') == 4
def test_division(self):
assert math_expr.evaluate('4 / 2') == 2
def test_parentheses(self):
assert math_expr.evaluate('(1 + 1) * 2') == 4
def test_multiplication_before_addition(self):
assert math_expr.evaluate('1 + 1 * 2') == 3
def test_multiplication_before_subtraction(self):
assert math_expr.evaluate('2 - 1 * 2') == 0
def test_division_before_addition(self):
assert math_expr.evaluate('2 + 4 / 2') == 4
def test_division_before_subtraction(self):
assert math_expr.evaluate('2 - 4 / 2') == 0
|
{
"content_hash": "dbecb940c21e18fe690067fffb5a3fc9",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 71,
"avg_line_length": 26.204545454545453,
"alnum_prop": 0.603642671292281,
"repo_name": "DasIch/pratt",
"id": "84424edbfed3205f70eae5929af6883ff155aeed",
"size": "1172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_examples.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26721"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
from pom_handlers import DependencyInfo
from pom_utils import PomUtils
class PomProperties(object):
def safe_property_name(self, property_name):
"""Replace characters that aren't safe for bash variables with an underscore"""
return re.sub(r'\W', '_', property_name)
def write_properties(self, pom_file_path, output_stream, rootdir=None):
di = DependencyInfo(pom_file_path, rootdir)
for property_name, value in di.properties.iteritems():
output_stream.write('{0}="{1}"\n'.format(self.safe_property_name(property_name), value))
# Print out some other things. These are useful for script/pants_kochiku_build_wrapper
output_stream.write('project_artifactId="{0}"\n'.format(di.artifactId))
output_stream.write('project_groupId="{0}"\n'.format(di.groupId))
def usage():
print "usage: {0} [args] ".format(sys.argv[0])
print "Prints all the properties defined in a pom.xml in bash variable syntax."
print ""
print "-?,-h Show this message"
PomUtils.common_usage()
sys.exit(1)
def main():
arguments = PomUtils.parse_common_args(sys.argv[1:])
flags = set(arg for arg in arguments if arg.startswith('-'))
for f in flags:
if f == '-h' or f == '-?':
usage()
return
else:
print ("Unknown flag {0}".format(f))
usage()
return
path_args = list(set(arguments) - flags)
if len(path_args) != 1 :
print("Expected a single project path that contains a pom.xml file.")
usage()
pom_file_path = os.path.join(os.path.realpath(path_args[0]), 'pom.xml')
if not os.path.exists(pom_file_path):
print ("Couldn't find {0}".format(pom_file_path))
usage()
PomProperties().write_properties(pom_file_path, sys.stdout)
if __name__ == '__main__':
main()
|
{
"content_hash": "8e141110d135306fc110ac9c5bce6f51",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 94,
"avg_line_length": 28.078125,
"alnum_prop": 0.6633277685030606,
"repo_name": "ericzundel/mvn2pants",
"id": "985f028786f944f60474c128a48808e47d6c1539",
"size": "1966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/squarepants/pom_properties.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "283"
},
{
"name": "Python",
"bytes": "641401"
},
{
"name": "Shell",
"bytes": "240"
}
],
"symlink_target": ""
}
|
from sympy.polys import Poly, LexPoly, roots, \
SymbolsError, PolynomialError
from sympy.polys.algorithms import poly_groebner
from sympy.utilities import any, all
def solve_poly_system(system, *symbols):
"""Solves a system of polynomial equations.
Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
set F = { f_1, f_2, ..., f_n } of polynomial equations, using
Groebner basis approach. For now only zero-dimensional systems
are supported, which means F can have at most a finite number
of solutions.
The algorithm works by the fact that, supposing G is the basis
of F with respect to an elimination order (here lexicographic
order is used), G and F generate the same ideal, they have the
same set of solutions. By the elimination property, if G is a
reduced, zero-dimensional Groebner basis, then there exists an
univariate polynomial in G (in its last variable). This can be
solved by computing its roots. Substituting all computed roots
for the last (eliminated) variable in other elements of G, new
polynomial system is generated. Applying the above procedure
recursively, a finite number of solutions can be found.
The ability of finding all solutions by this procedure depends
on the root finding algorithms. If no solutions were found, it
means only that roots() failed, but the system is solvable. To
overcome this difficulty use numerical algorithms instead.
>>> from sympy import solve_poly_system
>>> from sympy.abc import x, y
>>> solve_poly_system([x*y - 2*y, 2*y**2 - x**2], x, y)
[(0, 0), (2, -2**(1/2)), (2, 2**(1/2))]
For more information on the implemented algorithm refer to:
[1] B. Buchberger, Groebner Bases: A Short Introduction for
Systems Theorists, In: R. Moreno-Diaz, B. Buchberger,
J.L. Freire, Proceedings of EUROCAST'01, February, 2001
[2] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and
Algorithms, Springer, Second Edition, 1997, pp. 112
"""
def is_univariate(f):
"""Returns True if 'f' is univariate in its last variable. """
for monom in f.iter_monoms():
if any(exp > 0 for exp in monom[:-1]):
return False
return True
def solve_reduced_system(system, entry=False):
"""Recursively solves reduced polynomial systems. """
basis = poly_groebner(system)
if len(basis) == 1 and basis[0].is_one:
if not entry:
return []
else:
return None
univariate = filter(is_univariate, basis)
if len(univariate) == 1:
f = univariate.pop()
else:
raise PolynomialError("Not a zero-dimensional system")
zeros = roots(Poly(f, f.symbols[-1])).keys()
if not zeros:
return []
if len(basis) == 1:
return [ [zero] for zero in zeros ]
solutions = []
for zero in zeros:
new_system = []
for poly in basis[:-1]:
eq = poly.evaluate((poly.symbols[-1], zero))
if not eq.is_zero:
new_system.append(eq)
for solution in solve_reduced_system(new_system):
solutions.append(solution + [zero])
return solutions
if hasattr(system, "__iter__"):
system = list(system)
else:
raise TypeError("Expected iterable container, got %s" % system)
f = system.pop(0)
if not isinstance(f, Poly):
f = LexPoly(f, *symbols)
else:
if not symbols:
f = LexPoly(f)
else:
raise SymbolsError("Redundant symbols were given")
head, tail = f.unify_with(system)
solutions = solve_reduced_system([head] + tail, True)
if solutions is None:
return None
else:
return sorted(tuple(s) for s in solutions)
|
{
"content_hash": "6c2bd9b79902a2aa26a59367ff6f46a4",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 71,
"avg_line_length": 33.78991596638655,
"alnum_prop": 0.6023377269335987,
"repo_name": "fperez/sympy",
"id": "78fe757a12b4e4c7f117175ff6a4cf879869727c",
"size": "4021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/solvers/polysys.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7547665"
},
{
"name": "Scheme",
"bytes": "125"
}
],
"symlink_target": ""
}
|
import getopt
import socket
import sys
from coapthon.client.helperclient import HelperClient
from coapthon.utils import parse_uri
__author__ = 'Giacomo Tanganelli'
client = None
def usage(): # pragma: no cover
print "Command:\tcoapclient.py -o -p [-P]"
print "Options:"
print "\t-o, --operation=\tGET|PUT|POST|DELETE|DISCOVER|OBSERVE"
print "\t-p, --path=\t\t\tPath of the request"
print "\t-P, --payload=\t\tPayload of the request"
print "\t-f, --payload-file=\t\tFile with payload of the request"
def client_callback(response):
print "Callback"
def client_callback_observe(response): # pragma: no cover
global client
print "Callback_observe"
check = True
while check:
chosen = raw_input("Stop observing? [y/N]: ")
if chosen != "" and not (chosen == "n" or chosen == "N" or chosen == "y" or chosen == "Y"):
print "Unrecognized choose."
continue
elif chosen == "y" or chosen == "Y":
while True:
rst = raw_input("Send RST message? [Y/n]: ")
if rst != "" and not (rst == "n" or rst == "N" or rst == "y" or rst == "Y"):
print "Unrecognized choose."
continue
elif rst == "" or rst == "y" or rst == "Y":
client.cancel_observing(response, True)
else:
client.cancel_observing(response, False)
check = False
break
else:
break
def main(): # pragma: no cover
global client
op = None
path = None
payload = None
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:p:P:f:", ["help", "operation=", "path=", "payload=",
"payload_file="])
except getopt.GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o in ("-o", "--operation"):
op = a
elif o in ("-p", "--path"):
path = a
elif o in ("-P", "--payload"):
payload = a
elif o in ("-f", "--payload-file"):
with open(a, 'r') as f:
payload = f.read()
elif o in ("-h", "--help"):
usage()
sys.exit()
else:
usage()
sys.exit(2)
if op is None:
print "Operation must be specified"
usage()
sys.exit(2)
if path is None:
print "Path must be specified"
usage()
sys.exit(2)
if not path.startswith("coap://"):
print "Path must be conform to coap://host[:port]/path"
usage()
sys.exit(2)
host, port, path = parse_uri(path)
try:
tmp = socket.gethostbyname(host)
host = tmp
except socket.gaierror:
pass
client = HelperClient(server=(host, port))
if op == "GET":
if path is None:
print "Path cannot be empty for a GET request"
usage()
sys.exit(2)
response = client.get(path)
print response.pretty_print()
client.stop()
elif op == "OBSERVE":
if path is None:
print "Path cannot be empty for a GET request"
usage()
sys.exit(2)
client.observe(path, client_callback_observe)
elif op == "DELETE":
if path is None:
print "Path cannot be empty for a DELETE request"
usage()
sys.exit(2)
response = client.delete(path)
print response.pretty_print()
client.stop()
elif op == "POST":
if path is None:
print "Path cannot be empty for a POST request"
usage()
sys.exit(2)
if payload is None:
print "Payload cannot be empty for a POST request"
usage()
sys.exit(2)
response = client.post(path, payload)
print response.pretty_print()
client.stop()
elif op == "PUT":
if path is None:
print "Path cannot be empty for a PUT request"
usage()
sys.exit(2)
if payload is None:
print "Payload cannot be empty for a PUT request"
usage()
sys.exit(2)
response = client.put(path, payload)
print response.pretty_print()
client.stop()
elif op == "DISCOVER":
response = client.discover()
print response.pretty_print()
client.stop()
else:
print "Operation not recognized"
usage()
sys.exit(2)
if __name__ == '__main__': # pragma: no cover
main()
|
{
"content_hash": "e4b697e59128894f1f1a3f70840c2f73",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 105,
"avg_line_length": 29.67080745341615,
"alnum_prop": 0.5128741888214361,
"repo_name": "mcfreis/CoAPthon",
"id": "a903a1ce27fb1d312d129eb75aab16c94d14be65",
"size": "4799",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "coapclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "541744"
}
],
"symlink_target": ""
}
|
from __future__ import division
import ast
import _ast
try:
# Python 3+
NameConstant = _ast.NameConstant
except AttributeError:
# Python 2.7
NameConstant = _ast.Name
class NotSafeExpression(Exception):
pass
class UnsafeNode(Exception):
pass
class Evaler(object):
ALLOWED_NODES = {
_ast.Module,
# math
_ast.Add,
_ast.UAdd,
_ast.Sub,
_ast.USub,
_ast.Mult,
_ast.Div,
_ast.FloorDiv,
_ast.Pow,
_ast.Mod,
# binary math
_ast.LShift,
_ast.RShift,
_ast.BitAnd,
_ast.BitOr,
_ast.BitXor,
_ast.Invert,
# conditions
_ast.Not,
_ast.IfExp,
# base expressions
_ast.Expr,
_ast.BinOp,
_ast.UnaryOp,
# comparisons
_ast.Compare,
_ast.Eq,
_ast.NotEq,
_ast.Lt,
_ast.LtE,
_ast.Gt,
_ast.GtE,
_ast.Is,
_ast.IsNot,
_ast.In,
_ast.NotIn,
# structures
_ast.Tuple,
_ast.List,
_ast.Dict,
# system
_ast.Num,
_ast.Str,
_ast.Name,
NameConstant, # True, False, None in Python 3+
_ast.Load,
_ast.Call, # visit_Call makes the rest
}
def __init__(self, safe_funcs=None):
if safe_funcs is None:
safe_funcs = []
# to preserve ordering. OrderedDict is overkill here, I think
self.safe_func_names = [func.__name__ for func in safe_funcs]
self.checker = Evaler.IsExprSafe(self)
self.safe_funcs = {func.__name__: func for func in safe_funcs}
self.boolean_builtins = {"True": True, "False": False}
def eval(self, expr, variables=None):
unsafe = self.expr_is_unsafe(expr)
if not unsafe:
return self.raw_eval(expr, variables)
else:
raise NotSafeExpression(expr, unsafe)
def __str__(self):
return "Evaler([%s])" % ", ".join(self.safe_func_names)
def get_allowed_nodes(self):
return self.ALLOWED_NODES
def expr_is_unsafe(self, expr):
ast_tree = ast.parse(expr)
try:
self.checker.visit(ast_tree)
return None
except UnsafeNode as e:
return e
def raw_eval(self, expr, variables=None):
locals = dict(self.boolean_builtins)
if variables is not None:
locals.update(variables)
return eval(expr, {'__builtins__': self.safe_funcs}, locals)
class IsExprSafe(ast.NodeVisitor):
def __init__(self, evaler):
self.evaler = evaler
self.safe_func_names = set(evaler.safe_func_names)
ast.NodeVisitor.__init__(self)
def visit_Module(self, node):
self.generic_visit(node)
return True
def visit_Call(self, node):
func = node.func
if "id" in func.__dict__:
if func.id not in self.safe_func_names:
raise UnsafeNode(ast.dump(node))
else:
raise UnsafeNode(ast.dump(node))
self.generic_visit(node)
def visit_Compare(self, node):
operands = node.comparators + [node.left]
for op in operands:
self.generic_visit(op)
def visit_BoolOp(self, node):
for val in node.values:
self.generic_visit(val)
def generic_visit(self, node):
if type(node) not in self.evaler.get_allowed_nodes():
raise UnsafeNode(ast.dump(node))
ast.NodeVisitor.generic_visit(self, node)
|
{
"content_hash": "63c5249d77b0a74fb9eae688815145ae",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 70,
"avg_line_length": 24.43046357615894,
"alnum_prop": 0.5264299268094335,
"repo_name": "utter-step/exleval",
"id": "341bf0d3de2f84090193877a393b3ca231b0049c",
"size": "3689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/exleval/evaler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9091"
}
],
"symlink_target": ""
}
|
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from teacher.models import Teacher_loan
class Teacher_loanView(object):
model = Teacher_loan
def get_template_names(self):
"""Nest templates within teacher_loan directory."""
tpl = super(Teacher_loanView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'teacher_loan'
#self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
self.template_name = tpl[:8]+'teacher_loan/'+tpl[8:]
return [self.template_name]
class Teacher_loanDateView(Teacher_loanView):
date_field = 'timestamp'
month_format = '%m'
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanBaseListView(Teacher_loanView):
paginate_by = 10
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanArchiveIndexView(
Teacher_loanDateView, Teacher_loanBaseListView, ArchiveIndexView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanCreateView(Teacher_loanView, CreateView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanDateDetailView(Teacher_loanDateView, DateDetailView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanDayArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, DayArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanDeleteView(Teacher_loanView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanDetailView(Teacher_loanView, DetailView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanListView(Teacher_loanBaseListView, ListView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanMonthArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, MonthArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanTodayArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, TodayArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanUpdateView(Teacher_loanView, UpdateView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanWeekArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, WeekArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
class Teacher_loanYearArchiveView(
Teacher_loanDateView, Teacher_loanBaseListView, YearArchiveView):
make_object_list = True
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('teacher_teacher_loan_list')
|
{
"content_hash": "89ba5d3b9ce20b007d0353d2dbf57441",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 74,
"avg_line_length": 28.937062937062937,
"alnum_prop": 0.6897051715804736,
"repo_name": "tnemisteam/cdf-steps",
"id": "648c23b71a85421c97f9af1ceb5f4f247031c2a3",
"size": "4138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teacher/views/teacher_loan_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "766660"
},
{
"name": "HTML",
"bytes": "1419093"
},
{
"name": "JavaScript",
"bytes": "3631641"
},
{
"name": "PHP",
"bytes": "5016"
},
{
"name": "Python",
"bytes": "921129"
}
],
"symlink_target": ""
}
|
import os
import sys
import json
import time
import random
import urllib
import hashlib
from datetime import datetime
try:
from urllib.parse import quote, quote_plus, urlparse
except:
from urllib import quote, quote_plus
from urlparse import urlparse
from qingcloud.conn.auth import QSSignatureAuthHandler
from qingcloud.conn.connection import HttpConnection, HTTPRequest
from .bucket import Bucket
from .exception import get_response_error
class Zone(object):
DEFAULT = ""
PEK3A = "pek3a"
class VirtualHostStyleFormat(object):
def build_host(self, server, bucket=""):
if bucket:
return "%s.%s" % (bucket, server)
else:
return server
def build_auth_path(self, bucket="", key=""):
path = "/"
if bucket:
path += bucket
if key:
path += "/%s" % key
return path
def build_path_base(self, bucket="", key=""):
path = "/"
if key:
path += quote(key)
return path
class QSConnection(HttpConnection):
""" Public connection to qingstor
"""
def __init__(self, qy_access_key_id=None, qy_secret_access_key=None,
host="qingstor.com", port=443, protocol="https",
style_format_class=VirtualHostStyleFormat,
retry_time=3, timeout=900, debug=False):
"""
@param qy_access_key_id - the access key id
@param qy_secret_access_key - the secret access key
@param host - the host to make the connection to
@param port - the port to use when connect to host
@param protocol - the protocol to access to server, "http" or "https"
@param retry_time - the retry_time when message send fail
@param timeout - blocking operations will timeout after that many seconds
@param debug - debug mode
"""
# Set default host
host = host
# Set user agent
self.user_agent = "QingStor SDK Python"
# Set retry times
self.retry_time = retry_time
self.style_format = style_format_class()
super(QSConnection, self).__init__(
qy_access_key_id, qy_secret_access_key, host, port, protocol,
None, None, timeout, debug)
if qy_access_key_id and qy_secret_access_key:
self._auth_handler = QSSignatureAuthHandler(host, qy_access_key_id,
qy_secret_access_key)
else:
self._auth_handler = None
def get_all_buckets(self, zone=""):
if zone:
headers = {"Location": zone}
else:
headers = {}
response = self.make_request("GET", headers=headers)
if response.status == 200:
return json.loads(response.read())
else:
err = get_response_error(response)
raise err
def create_bucket(self, bucket, zone=Zone.DEFAULT):
""" Create a new bucket.
Keyword arguments:
bucket - The name of the bucket
zone - The zone at which bucket and its objects will locate.
(Default: follow the service-side rule)
"""
headers = {"Location": zone}
response = self.make_request("PUT", bucket, headers=headers)
if response.status in [200, 201]:
return Bucket(self, bucket)
else:
raise get_response_error(response)
def get_bucket(self, bucket, validate=True):
""" Retrieve a bucket by name.
Keyword arguments:
bucket - The name of the bucket
validate - If ``True``, the function will try to verify the bucket exists
on the service-side. (Default: ``True``)
"""
if not validate:
return Bucket(self, bucket)
response = self.make_request("HEAD", bucket)
if response.status == 200:
return Bucket(self, bucket)
elif response.status == 401:
err = get_response_error(response)
err.code = "invalid_access_key_id"
err.message = "Request not authenticated, Access Key ID is either " \
"missing or invalid."
raise err
elif response.status == 403:
err = get_response_error(response)
err.code = "permission_denied"
err.message = "You don't have enough permission to accomplish " \
"this request."
raise err
elif response.status == 404:
err = get_response_error(response)
err.code = "bucket_not_exists"
err.message = "The bucket you are accessing doesn't exist."
raise err
else:
err = get_response_error(response)
raise err
def _get_content_length(self, body):
thelen = 0
try:
thelen = str(len(body))
except TypeError:
# If this is a file-like object, try to fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
pass
return thelen
def _get_body_checksum(self, data):
if hasattr(data, "read"):
# Calculate the MD5 by reading the whole file content
# This is evil, need to refactor later
md5 = hashlib.md5()
blocksize = 1024 * 4
datablock = data.read(blocksize)
while datablock:
if sys.version > "3" and isinstance(datablock, str):
datablock = datablock.encode()
md5.update(datablock)
datablock = data.read(blocksize)
token = md5.hexdigest()
data.seek(0)
else:
if sys.version > "3" and isinstance(data, str):
data = data.encode()
token = hashlib.md5(data).hexdigest()
return token
def _build_params(self, params):
params_str = ""
params = params or {}
for key, value in params.items():
if params_str:
params_str += "&"
params_str += "%s" % quote_plus(key)
if value is not None:
params_str += "=%s" % quote_plus(value)
return params_str
def _urlparse(self, url):
parts = urlparse(url)
return parts.hostname, parts.path or "/", parts.query
def build_http_request(self, method, path, params, auth_path,
headers, host, data):
if isinstance(params, str):
path = "%s?%s" % (path, params)
else:
suffix = self._build_params(params)
path = "%s?%s" % (path, suffix) if suffix else path
req = HTTPRequest(method, self.protocol, headers, host, self.port,
path, params, auth_path, data)
return req
def make_request(self, method, bucket="", key="", headers=None,
data="", params=None, num_retries=3):
""" Make request
"""
host = self.style_format.build_host(self.host, bucket)
path = self.style_format.build_path_base(bucket, key)
auth_path = self.style_format.build_auth_path(bucket, key)
# Build request headers
if not headers:
headers = {}
if "Host" not in headers:
headers["Host"] = host
if "Date" not in headers:
headers["Date"] = datetime.utcnow().strftime("%a, %d %b %Y %X GMT")
if "Content-Length" not in headers:
headers["Content-Length"] = self._get_content_length(data)
if data and "Content-MD5" not in headers:
headers["Content-MD5"] = self._get_body_checksum(data)
if "User-Agent" not in headers:
headers["User-Agent"] = self.user_agent
retry_time = 0
while retry_time < self.retry_time:
next_sleep = random.random() * (2 ** retry_time)
try:
response = self.send(method, path, params, headers, host,
auth_path, data)
if response.status == 307:
location = response.getheader("location")
host, path, params = self._urlparse(location)
headers["Host"] = host
# Seek to the start if this is a file-like object
if hasattr(data, "read") and hasattr(data, "seek"):
data.seek(0)
elif response.status in (500, 502, 503):
time.sleep(next_sleep)
else:
if response.length == 0:
response.close()
return response
except Exception:
if retry_time >= self.retry_time - 1:
raise
retry_time += 1
|
{
"content_hash": "afcbd35fab43de02937b679927d4f661",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 81,
"avg_line_length": 34.552123552123554,
"alnum_prop": 0.543859649122807,
"repo_name": "Xuanwo/qingcloud-sdk-python",
"id": "89c61b5119330aad4244232d4b083bb8c3f01a84",
"size": "9774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qingcloud/qingstor/connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "339667"
}
],
"symlink_target": ""
}
|
import re
# from https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
def snake_case(string):
''' Takes a string that represents for example a class name and returns
the snake case version of it. It is used for model-to-key conversion '''
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def camelCase(string):
''' Takes a string that represents the redis key version of a model name
and returns its camel case version. It is used for key-to-model
conversion. '''
return ''.join(s[0].upper() + s[1:] for s in string.split('_'))
def parse_embed(embed_array):
if not embed_array:
return []
fields = {}
for item in embed_array:
pieces = item.split('.', maxsplit=1)
if pieces[0] not in fields:
fields[pieces[0]] = None
if len(pieces) == 2:
if fields[pieces[0]] is None:
fields[pieces[0]] = []
fields[pieces[0]].append(pieces[1])
return sorted(map(
lambda item: list(item),
fields.items()
), key=lambda x: x[0])
|
{
"content_hash": "cddb88c309b49425aac74a0c781fa98e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 109,
"avg_line_length": 28.78048780487805,
"alnum_prop": 0.5949152542372881,
"repo_name": "getfleety/coralillo",
"id": "d78bf6192fe22fdbef0564bbe25baa70deadc77e",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coralillo/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "2851"
},
{
"name": "Makefile",
"bytes": "239"
},
{
"name": "Python",
"bytes": "88550"
}
],
"symlink_target": ""
}
|
class LivingRoom(object):
light = False
def set_light(self, light):
self.light = light
def get_light(self):
return self.light
|
{
"content_hash": "b13fa2197cc38a79956c75d0f07cb623",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 31,
"avg_line_length": 19.5,
"alnum_prop": 0.6089743589743589,
"repo_name": "metaodi/hostages",
"id": "3383cf73c11ddbbe0051c09f6d1d6e40399a87c8",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hostages/livingroom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8425"
}
],
"symlink_target": ""
}
|
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class BuildconfigurationsetsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def add_configuration(self, id, **kwargs):
"""
Adds a configuration to the Specified Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_configuration(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param BuildConfigurationRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_configuration_with_http_info(id, **kwargs)
else:
(data) = self.add_configuration_with_http_info(id, **kwargs)
return data
def add_configuration_with_http_info(self, id, **kwargs):
"""
Adds a configuration to the Specified Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_configuration_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param BuildConfigurationRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_configuration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `add_configuration`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}/build-configurations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def build(self, id, **kwargs):
"""
Builds the Configurations for the Specified Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.build(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param str callback_url: Optional Callback URL
:param bool temporary_build: Is it a temporary build or a standard build?
:param bool force_rebuild: DEPRECATED: Use RebuildMode.
:param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds.
:param str rebuild_mode: Rebuild Modes: FORCE: always rebuild all the configurations in the set; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated;
:return: BuildConfigSetRecordSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.build_with_http_info(id, **kwargs)
else:
(data) = self.build_with_http_info(id, **kwargs)
return data
def build_with_http_info(self, id, **kwargs):
"""
Builds the Configurations for the Specified Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.build_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param str callback_url: Optional Callback URL
:param bool temporary_build: Is it a temporary build or a standard build?
:param bool force_rebuild: DEPRECATED: Use RebuildMode.
:param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds.
:param str rebuild_mode: Rebuild Modes: FORCE: always rebuild all the configurations in the set; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated;
:return: BuildConfigSetRecordSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'callback_url', 'temporary_build', 'force_rebuild', 'timestamp_alignment', 'rebuild_mode']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method build" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `build`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'callback_url' in params:
query_params.append(('callbackUrl', params['callback_url']))
if 'temporary_build' in params:
query_params.append(('temporaryBuild', params['temporary_build']))
if 'force_rebuild' in params:
query_params.append(('forceRebuild', params['force_rebuild']))
if 'timestamp_alignment' in params:
query_params.append(('timestampAlignment', params['timestamp_alignment']))
if 'rebuild_mode' in params:
query_params.append(('rebuildMode', params['rebuild_mode']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}/build', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigSetRecordSingleton',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def build_versioned(self, id, **kwargs):
"""
Builds the configurations for the Specified Set with an option to specify exact revision of a BC
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.build_versioned(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param str callback_url: Optional Callback URL
:param bool temporary_build: Is it a temporary build or a standard build?
:param bool force_rebuild: DEPRECATED: Use RebuildMode.
:param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds.
:param BuildConfigurationSetWithAuditedBCsRest body:
:param str rebuild_mode: Rebuild Modes: FORCE: always rebuild all the configurations in the set; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated;
:return: BuildConfigSetRecordSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.build_versioned_with_http_info(id, **kwargs)
else:
(data) = self.build_versioned_with_http_info(id, **kwargs)
return data
def build_versioned_with_http_info(self, id, **kwargs):
"""
Builds the configurations for the Specified Set with an option to specify exact revision of a BC
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.build_versioned_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param str callback_url: Optional Callback URL
:param bool temporary_build: Is it a temporary build or a standard build?
:param bool force_rebuild: DEPRECATED: Use RebuildMode.
:param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds.
:param BuildConfigurationSetWithAuditedBCsRest body:
:param str rebuild_mode: Rebuild Modes: FORCE: always rebuild all the configurations in the set; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated;
:return: BuildConfigSetRecordSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'callback_url', 'temporary_build', 'force_rebuild', 'timestamp_alignment', 'body', 'rebuild_mode']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method build_versioned" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `build_versioned`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'callback_url' in params:
query_params.append(('callbackUrl', params['callback_url']))
if 'temporary_build' in params:
query_params.append(('temporaryBuild', params['temporary_build']))
if 'force_rebuild' in params:
query_params.append(('forceRebuild', params['force_rebuild']))
if 'timestamp_alignment' in params:
query_params.append(('timestampAlignment', params['timestamp_alignment']))
if 'rebuild_mode' in params:
query_params.append(('rebuildMode', params['rebuild_mode']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}/build-versioned', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigSetRecordSingleton',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_new(self, **kwargs):
"""
Creates a new Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_new(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param BuildConfigurationSetRest body:
:return: BuildConfigurationSetSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_new_with_http_info(**kwargs)
else:
(data) = self.create_new_with_http_info(**kwargs)
return data
def create_new_with_http_info(self, **kwargs):
"""
Creates a new Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_new_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param BuildConfigurationSetRest body:
:return: BuildConfigurationSetSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_new" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigurationSetSingleton',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_specific(self, id, **kwargs):
"""
Removes a specific Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_specific_with_http_info(id, **kwargs)
else:
(data) = self.delete_specific_with_http_info(id, **kwargs)
return data
def delete_specific_with_http_info(self, id, **kwargs):
"""
Removes a specific Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_specific_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_specific" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_specific`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all(self, **kwargs):
"""
Gets all Build Configuration Sets
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildConfigurationSetPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_with_http_info(**kwargs)
else:
(data) = self.get_all_with_http_info(**kwargs)
return data
def get_all_with_http_info(self, **kwargs):
"""
Gets all Build Configuration Sets
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildConfigurationSetPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_index', 'page_size', 'sort', 'q']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'q' in params:
query_params.append(('q', params['q']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigurationSetPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_build_config_set_records(self, id, **kwargs):
"""
Get all build config set execution records associated with this build config set, returns empty list if none are found
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_build_config_set_records(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build config set id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildConfigurationSetRecordPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_build_config_set_records_with_http_info(id, **kwargs)
else:
(data) = self.get_all_build_config_set_records_with_http_info(id, **kwargs)
return data
def get_all_build_config_set_records_with_http_info(self, id, **kwargs):
"""
Get all build config set execution records associated with this build config set, returns empty list if none are found
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_build_config_set_records_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build config set id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildConfigurationSetRecordPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page_index', 'page_size', 'sort', 'q']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_build_config_set_records" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_all_build_config_set_records`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'q' in params:
query_params.append(('q', params['q']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}/build-config-set-records', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigurationSetRecordPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_build_records(self, id, **kwargs):
"""
Gets all build records associated with the contained build configurations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_build_records(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration set id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildRecordPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_build_records_with_http_info(id, **kwargs)
else:
(data) = self.get_build_records_with_http_info(id, **kwargs)
return data
def get_build_records_with_http_info(self, id, **kwargs):
"""
Gets all build records associated with the contained build configurations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_build_records_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration set id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildRecordPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page_index', 'page_size', 'sort', 'q']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_build_records" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_build_records`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'q' in params:
query_params.append(('q', params['q']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}/build-records', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildRecordPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_configurations(self, id, **kwargs):
"""
Gets the Configurations for the Specified Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configurations(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_configurations_with_http_info(id, **kwargs)
else:
(data) = self.get_configurations_with_http_info(id, **kwargs)
return data
def get_configurations_with_http_info(self, id, **kwargs):
"""
Gets the Configurations for the Specified Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configurations_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:param str q: RSQL Query
:return: BuildConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page_index', 'page_size', 'sort', 'q']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_configurations" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_configurations`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'page_index' in params:
query_params.append(('pageIndex', params['page_index']))
if 'page_size' in params:
query_params.append(('pageSize', params['page_size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'q' in params:
query_params.append(('q', params['q']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}/build-configurations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigurationPage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_specific(self, id, **kwargs):
"""
Gets a specific Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: BuildConfigurationSetSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_specific_with_http_info(id, **kwargs)
else:
(data) = self.get_specific_with_http_info(id, **kwargs)
return data
def get_specific_with_http_info(self, id, **kwargs):
"""
Gets a specific Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_specific_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:return: BuildConfigurationSetSingleton
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_specific" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_specific`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='BuildConfigurationSetSingleton',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_configuration(self, id, config_id, **kwargs):
"""
Removes a configuration from the specified config set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_configuration(id, config_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration set id (required)
:param int config_id: Build configuration id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.remove_configuration_with_http_info(id, config_id, **kwargs)
else:
(data) = self.remove_configuration_with_http_info(id, config_id, **kwargs)
return data
def remove_configuration_with_http_info(self, id, config_id, **kwargs):
"""
Removes a configuration from the specified config set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_configuration_with_http_info(id, config_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration set id (required)
:param int config_id: Build configuration id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'config_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_configuration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `remove_configuration`")
# verify the required parameter 'config_id' is set
if ('config_id' not in params) or (params['config_id'] is None):
raise ValueError("Missing the required parameter `config_id` when calling `remove_configuration`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'config_id' in params:
path_params['configId'] = params['config_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}/build-configurations/{configId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update(self, id, **kwargs):
"""
Updates an existing Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param BuildConfigurationSetRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_with_http_info(id, **kwargs)
else:
(data) = self.update_with_http_info(id, **kwargs)
return data
def update_with_http_info(self, id, **kwargs):
"""
Updates an existing Build Configuration Set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set id (required)
:param BuildConfigurationSetRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_configurations(self, id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_configurations(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set Id (required)
:param list[BuildConfigurationRest] body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_configurations_with_http_info(id, **kwargs)
else:
(data) = self.update_configurations_with_http_info(id, **kwargs)
return data
def update_configurations_with_http_info(self, id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_configurations_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration Set Id (required)
:param list[BuildConfigurationRest] body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_configurations" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_configurations`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/build-configuration-sets/{id}/build-configurations', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
{
"content_hash": "acc558786336749b08e046aa459549ec",
"timestamp": "",
"source": "github",
"line_count": 1529,
"max_line_length": 268,
"avg_line_length": 42.00457815565729,
"alnum_prop": 0.5467497080576099,
"repo_name": "project-ncl/pnc-cli",
"id": "99ea3f26196827574c6495e63d54019abf9839bc",
"size": "64242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pnc_cli/swagger_client/apis/buildconfigurationsets_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "70367"
},
{
"name": "Python",
"bytes": "1865278"
},
{
"name": "Shell",
"bytes": "2479"
}
],
"symlink_target": ""
}
|
"""Facilities for displaying status messages with job progress.
"""
from googlecloudsdk.core import log
STATUS_REPORTING_NONE = 'none'
STATUS_REPORTING_CHANGES = 'changes'
STATUS_REPORTING_PERIODIC = 'periodic'
class ProgressReporter(object):
"""Wraps an object implementing the AbstractWaitPrinter interface.
The object wrapped depends on command-line arguments.
"""
def __init__(self, status_reporting_mode):
"""Initialize this ProgressReporter based on command-line arguments.
Args:
status_reporting_mode: the frequency with which the status of a job being
waited for is to be reported, one of STATUS_REPORTING_NONE,
STATUS_REPORTING_CHANGES, or STATUS_REPORTING_PERIODIC
"""
if status_reporting_mode == STATUS_REPORTING_NONE:
self._wait_printer = QuietWaitPrinter()
elif status_reporting_mode == STATUS_REPORTING_CHANGES:
self._wait_printer = TransitionWaitPrinter()
else: # status_reporting_mode == STATUS_REPORTING_PERIODIC
self._wait_printer = VerboseWaitPrinter()
def Print(self, job_id, wait_time, status):
"""Prints status for the current job we are waiting on.
Args:
job_id: the identifier for this job.
wait_time: the number of seconds we have been waiting so far.
status: the status of the job we are waiting for.
"""
self._wait_printer.Print(job_id, wait_time, status)
def Done(self):
"""Waiting is done and no more Print calls will be made.
"""
self._wait_printer.Done()
class AbstractWaitPrinter(object):
"""Base class that defines the AbstractWaitPrinter interface."""
print_on_done = False
def Print(self, job_id, wait_time, status):
"""Prints status for the current job we are waiting on.
Args:
job_id: the identifier for this job.
wait_time: the number of seconds we have been waiting so far.
status: the status of the job we are waiting for.
"""
raise NotImplementedError('Subclass must implement Print')
def Done(self):
"""Waiting is done and no more Print calls will be made.
This function should handle the case of Print not being called.
"""
if self.print_on_done:
log.status.Print()
class QuietWaitPrinter(AbstractWaitPrinter):
"""An AbstractWaitPrinter that prints nothing."""
def Print(self, unused_job_id, unused_wait_time, unused_status):
"""Prints status for the current job we are waiting on.
Args:
unused_job_id: the identifier for this job.
unused_wait_time: the number of seconds we have been waiting so far.
unused_status: the status of the job we are waiting for.
"""
pass
class VerboseWaitPrinter(AbstractWaitPrinter):
"""An AbstractWaitPrinter that prints every update."""
def Print(self, job_id, wait_time, status):
"""Prints status for the current job we are waiting on.
Args:
job_id: the identifier for this job.
wait_time: the number of seconds we have been waiting so far.
status: the status of the job we are waiting for.
"""
self.print_on_done = True
log.status.write(
'\rWaiting on {job} ... ({seconds}s) Current status: {status:<7}'
.format(job=job_id, seconds=int(wait_time + 0.5), status=status))
log.status.flush()
class TransitionWaitPrinter(VerboseWaitPrinter):
"""A AbstractWaitPrinter that only prints status change updates."""
_previous_status = None
def Print(self, job_id, wait_time, status):
"""Prints status for the current job we are waiting on.
Args:
job_id: the identifier for this job.
wait_time: the number of seconds we have been waiting so far.
status: the status of the job we are waiting for.
"""
if status != self._previous_status:
self._previous_status = status
super(TransitionWaitPrinter, self).Print(
job_id, wait_time, status)
|
{
"content_hash": "09c96af0d003a23d186f8d542d739011",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 31.827868852459016,
"alnum_prop": 0.6886428019572496,
"repo_name": "wemanuel/smry",
"id": "9811927c862fbdcf135ba2aa75ed91ee933a2b62",
"size": "3934",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/bigquery/lib/job_progress.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
from . import bar
def progressbar(iterator, min_value=0, max_value=None,
widgets=None, prefix=None, suffix=None, **kwargs):
progressbar = bar.ProgressBar(
min_value=min_value, max_value=max_value,
widgets=widgets, prefix=prefix, suffix=suffix, **kwargs)
for result in progressbar(iterator):
yield result
|
{
"content_hash": "c01962f48997b13719f5be41de99f89a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 66,
"avg_line_length": 32.27272727272727,
"alnum_prop": 0.6619718309859155,
"repo_name": "WoLpH/python-progressbar",
"id": "f882a5a23a93d52cc276d59bda00872481b65b06",
"size": "355",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "progressbar/shortcuts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "160879"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import subprocess
import sys
import os
import json
from util import appendline, get_ip_address
if __name__ == "__main__":
path = os.path.dirname(os.path.realpath(__file__))
config = json.load(open(path+'/cluster-config.json'));
for node in config['nodes']:
files = subprocess.check_output(["ssh", "cloud-user@"+node['ip'], 'ls /home/cloud-user']).split('\n')
if 'StreamBench' not in files:
p = subprocess.Popen('ssh cloud-user@'+node['ip']+' "git clone https://github.com/wangyangjun/StreamBench.git"', shell=True)
else:
p = subprocess.Popen('ssh cloud-user@'+node['ip']+' "cd /home/cloud-user/StreamBench;git checkout .;git pull;"', shell=True)
|
{
"content_hash": "94450cf4b770c6a570c5c9acbfe66017",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 127,
"avg_line_length": 41.23529411764706,
"alnum_prop": 0.6861626248216833,
"repo_name": "wangyangjun/StreamBench",
"id": "d6ca0382a7b7cff4298d6a754c2cde4747340163",
"size": "715",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "script/pull-updates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11303"
},
{
"name": "C",
"bytes": "634224"
},
{
"name": "C++",
"bytes": "956151"
},
{
"name": "CSS",
"bytes": "38165"
},
{
"name": "HTML",
"bytes": "2867853"
},
{
"name": "Java",
"bytes": "4090113"
},
{
"name": "JavaScript",
"bytes": "257143"
},
{
"name": "M4",
"bytes": "77455"
},
{
"name": "Makefile",
"bytes": "140277"
},
{
"name": "Mako",
"bytes": "13678"
},
{
"name": "Perl",
"bytes": "66982"
},
{
"name": "Perl 6",
"bytes": "231886"
},
{
"name": "Python",
"bytes": "210673"
},
{
"name": "Roff",
"bytes": "2240683"
},
{
"name": "Shell",
"bytes": "434957"
},
{
"name": "XS",
"bytes": "132994"
},
{
"name": "XSLT",
"bytes": "7359"
}
],
"symlink_target": ""
}
|
__author__ = 'max'
from unittest import TestCase
from mock import patch
from mockriak import create_mock_riak_client
from drow import models
from drow.errors import InvalidConfig
def decoder(value):
pass
def encoder(value):
pass
def creation_validator(data):
pass
def storage_validator(data):
pass
def resolver(riak_object):
pass
content_type = 'application/x.content'
class TestModelInitialization(TestCase):
@patch.object(models, 'settings')
def test_config_switches(self, settings):
settings.RIAK_CLIENT = create_mock_riak_client()
client = settings.RIAK_CLIENT
bucket = client.bucket()
class MyModel(models.Model):
class Meta:
content_type = content_type
decoder = decoder
encoder = encoder
index = 'test_index'
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
resolver = resolver
creation_validator = creation_validator
storage_validator = storage_validator
self.assertEqual(MyModel.objects._state.bucket, bucket)
client.bucket_type.assert_called_once_with('test_type')
client.bucket_type.return_value.bucket.assert_called_once_with(
'test_bucket')
self.assertEqual(MyModel._meta.content_type, content_type)
self.assertEqual(MyModel._meta.index, 'test_index')
self.assertEqual(MyModel._meta.creation_validator, creation_validator)
self.assertEqual(MyModel._meta.storage_validator, storage_validator)
bucket.set_decoder.assert_called_once_with(content_type, decoder)
bucket.set_encoder.assert_called_once_with(content_type, encoder)
self.assertEqual(bucket.resolver, resolver)
@patch.object(models, 'settings')
def test_predefined_queryset(self, settings):
from drow.queryset import QuerySet
settings.RIAK_CLIENT = create_mock_riak_client()
queryset = QuerySet()
class MyModel(models.Model):
class Meta:
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
objects = queryset
self.assertIs(MyModel.objects, queryset)
@patch.object(models, 'settings')
def test_decoder_required_for_custom_content_type(self, settings):
with self.assertRaises(InvalidConfig):
class MyModel(models.Model):
class Meta:
content_type = content_type
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
encoder = encoder
@patch.object(models, 'settings')
def test_encoder_required_for_custom_content_type(self, settings):
with self.assertRaises(InvalidConfig):
class MyModel(models.Model):
class Meta:
content_type = content_type
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
decoder = decoder
@patch.object(models, 'settings')
def test_bucket_name_required(self, settings):
with self.assertRaises(InvalidConfig):
class MyModel(models.Model):
class Meta:
bucket_type_name = 'test_type'
@patch.object(models, 'settings')
def test_bucket_type_required(self, settings):
with self.assertRaises(InvalidConfig):
class MyModel(models.Model):
class Meta:
bucket_name = 'test_bucket'
@patch.object(models, 'settings')
def test_callable_restriction(self, settings):
with self.assertRaises(InvalidConfig):
class MyModel(models.Model):
class Meta:
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
creation_validator = 'test'
with self.assertRaises(InvalidConfig):
class MyModel(models.Model): # noqa
class Meta:
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
storage_validator = 'test'
with self.assertRaises(InvalidConfig):
class MyModel(models.Model): # noqa
class Meta:
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
get_bucket = 'test'
# Make sure the base declaration works fine, so we're not detecting
# some other cause for an InvalidConfig error
class MyModel(models.Model): # noqa
class Meta:
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
@patch.object(models, 'settings')
def test_repr(self, settings):
settings.RIAK_CLIENT = create_mock_riak_client()
class MyModel(models.Model):
class Meta:
content_type = content_type
decoder = decoder
encoder = encoder
index = 'test_index'
bucket_name = 'test_bucket'
bucket_type_name = 'test_type'
resolver = resolver
creation_validator = creation_validator
storage_validator = storage_validator
# Make sure none of these raises an exception. Reprs should be able
# to handle unicode and any repr errors should not cause a program
# to crash
repr(MyModel.objects.get('this_is_a_repor'))
repr(MyModel.objects.get(u'\xee'))
repr(MyModel.objects.get(u'\xee'.encode('utf-8')))
repr(MyModel.objects.get(u'\xee'.encode('utf-16')))
@patch.object(models, 'settings')
def test_empty_metaclass(self, settings):
with self.assertRaises(InvalidConfig):
class MyModel(models.Model):
pass
|
{
"content_hash": "15f1b73906f5f5b5f02be612d49af21d",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 78,
"avg_line_length": 34.55232558139535,
"alnum_prop": 0.585562847046946,
"repo_name": "Sendhub/drow",
"id": "0c3166ecb2ecc9261a4535bb56ced17a5ecfe4d3",
"size": "5943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drow/tests/test_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52520"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
}
|
import time
_hello_resp = '''\
<html>
<head>
<title>Hello {name}</title>
</head>
<body>
<h1>Hello {name}!</h1>
</body>
</html>'''
def hello_world(environ, start_response):
start_response('200 OK', [ ('Content-type','text/html')])
params = environ['params']
resp = _hello_resp.format(name=params.get('name'))
yield resp.encode('utf-8')
_localtime_resp = '''\
<?xml version="1.0"?>
<time>
<year>{t.tm_year}</year>
<month>{t.tm_mon}</month>
<day>{t.tm_mday}</day>
<hour>{t.tm_hour}</hour>
<minute>{t.tm_min}</minute>
<second>{t.tm_sec}</second>
</time>'''
def localtime(environ, start_response):
start_response('200 OK', [ ('Content-type', 'application/xml') ])
resp = _localtime_resp.format(t=time.localtime())
yield resp.encode('utf-8')
if __name__ == '__main__':
from resty import PathDispatcher
from wsgiref.simple_server import make_server
# Create the dispatcher and register functions
dispatcher = PathDispatcher()
dispatcher.register('GET', '/hello', hello_world)
dispatcher.register('GET', '/localtime', localtime)
# Launch a basic server
httpd = make_server('', 8080, dispatcher)
print('Serving on port 8080...')
httpd.serve_forever()
|
{
"content_hash": "002849dc5882a41fc31fdbae9316d57b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 69,
"avg_line_length": 26.595744680851062,
"alnum_prop": 0.624,
"repo_name": "SysCompass/compass-adapters",
"id": "75b24d05f6f285d74d7f4554acd33ca758928959",
"size": "1250",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "chef/cookbooks/python/src/11/creating_a_simple_rest_based_interface/example1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21125"
},
{
"name": "CSS",
"bytes": "111630"
},
{
"name": "Perl",
"bytes": "848"
},
{
"name": "Python",
"bytes": "208453"
},
{
"name": "Ruby",
"bytes": "1406351"
},
{
"name": "Shell",
"bytes": "5072"
}
],
"symlink_target": ""
}
|
import fiona
import os
import shutil
import tempfile
import unittest
from fiona.ogrext import calc_gdal_version_num, get_gdal_version_num
"""
OGR 54bit handling: https://trac.osgeo.org/gdal/wiki/rfc31_ogr_64
Shapefile: OFTInteger fields are created by default with a width of 9
characters, so to be unambiguously read as OFTInteger (and if specifying
integer that require 10 or 11 characters. the field is dynamically extended
like managed since a few versions). OFTInteger64 fields are created by default
with a width of 18 digits, so to be unambiguously read as OFTInteger64, and
extented to 19 or 20 if needed. Integer fields of width between 10 and 18
will be read as OFTInteger64. Above they will be treated as OFTReal. In
previous GDAL versions, Integer fields were created with a default with of 10,
and thus will be now read as OFTInteger64. An open option, DETECT_TYPE=YES, can
be specified so as OGR does a full scan of the DBF file to see if integer
fields of size 10 or 11 hold 32 bit or 64 bit values and adjust the type
accordingly (and same for integer fields of size 19 or 20, in case of overflow
of 64 bit integer, OFTReal is chosen)
"""
class TestBigInt(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def testCreateBigIntSchema(self):
name = os.path.join(self.tempdir, 'output1.shp')
a_bigint = 10 ** 18 - 1
fieldname = 'abigint'
kwargs = {
'driver': 'ESRI Shapefile',
'crs': 'EPSG:4326',
'schema': {
'geometry': 'Point',
'properties': [(fieldname, 'int:10')]}}
if get_gdal_version_num() < calc_gdal_version_num(2, 0, 0):
with self.assertRaises(OverflowError):
with fiona.open(name, 'w', **kwargs) as dst:
rec = {}
rec['geometry'] = {'type': 'Point', 'coordinates': (0, 0)}
rec['properties'] = {fieldname: a_bigint}
dst.write(rec)
else:
with fiona.open(name, 'w', **kwargs) as dst:
rec = {}
rec['geometry'] = {'type': 'Point', 'coordinates': (0, 0)}
rec['properties'] = {fieldname: a_bigint}
dst.write(rec)
with fiona.open(name) as src:
if get_gdal_version_num() >= calc_gdal_version_num(2, 0, 0):
first = next(src)
self.assertEqual(first['properties'][fieldname], a_bigint)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
{
"content_hash": "7c9a681e67bfa098469f0c1194cf7376",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 38.69565217391305,
"alnum_prop": 0.6183520599250937,
"repo_name": "perrygeo/Fiona",
"id": "35f4eaa66fc5144d7c4b503d6b778781584898de",
"size": "2670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bigint.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "333612"
},
{
"name": "Shell",
"bytes": "2574"
}
],
"symlink_target": ""
}
|
"""Support for Salda Smarty XP/XV Ventilation Unit Sensors."""
import datetime as dt
import logging
from homeassistant.core import callback
from homeassistant.const import (
TEMP_CELSIUS,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
)
import homeassistant.util.dt as dt_util
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from . import DOMAIN, SIGNAL_UPDATE_SMARTY
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Smarty Sensor Platform."""
smarty = hass.data[DOMAIN]["api"]
name = hass.data[DOMAIN]["name"]
sensors = [
SupplyAirTemperatureSensor(name, smarty),
ExtractAirTemperatureSensor(name, smarty),
OutdoorAirTemperatureSensor(name, smarty),
SupplyFanSpeedSensor(name, smarty),
ExtractFanSpeedSensor(name, smarty),
FilterDaysLeftSensor(name, smarty),
]
async_add_entities(sensors, True)
class SmartySensor(Entity):
"""Representation of a Smarty Sensor."""
def __init__(
self, name: str, device_class: str, smarty, unit_of_measurement: str = ""
):
"""Initialize the entity."""
self._name = name
self._state = None
self._sensor_type = device_class
self._unit_of_measurement = unit_of_measurement
self._smarty = smarty
@property
def should_poll(self) -> bool:
"""Do not poll."""
return False
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._sensor_type
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
async def async_added_to_hass(self):
"""Call to update."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_SMARTY, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
class SupplyAirTemperatureSensor(SmartySensor):
"""Supply Air Temperature Sensor."""
def __init__(self, name, smarty):
"""Supply Air Temperature Init."""
super().__init__(
name="{} Supply Air Temperature".format(name),
device_class=DEVICE_CLASS_TEMPERATURE,
unit_of_measurement=TEMP_CELSIUS,
smarty=smarty,
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.supply_air_temperature
class ExtractAirTemperatureSensor(SmartySensor):
"""Extract Air Temperature Sensor."""
def __init__(self, name, smarty):
"""Supply Air Temperature Init."""
super().__init__(
name="{} Extract Air Temperature".format(name),
device_class=DEVICE_CLASS_TEMPERATURE,
unit_of_measurement=TEMP_CELSIUS,
smarty=smarty,
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.extract_air_temperature
class OutdoorAirTemperatureSensor(SmartySensor):
"""Extract Air Temperature Sensor."""
def __init__(self, name, smarty):
"""Outdoor Air Temperature Init."""
super().__init__(
name="{} Outdoor Air Temperature".format(name),
device_class=DEVICE_CLASS_TEMPERATURE,
unit_of_measurement=TEMP_CELSIUS,
smarty=smarty,
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.outdoor_air_temperature
class SupplyFanSpeedSensor(SmartySensor):
"""Supply Fan Speed RPM."""
def __init__(self, name, smarty):
"""Supply Fan Speed RPM Init."""
super().__init__(
name="{} Supply Fan Speed".format(name),
device_class=None,
unit_of_measurement=None,
smarty=smarty,
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.supply_fan_speed
class ExtractFanSpeedSensor(SmartySensor):
"""Extract Fan Speed RPM."""
def __init__(self, name, smarty):
"""Extract Fan Speed RPM Init."""
super().__init__(
name="{} Extract Fan Speed".format(name),
device_class=None,
unit_of_measurement=None,
smarty=smarty,
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.extract_fan_speed
class FilterDaysLeftSensor(SmartySensor):
"""Filter Days Left."""
def __init__(self, name, smarty):
"""Filter Days Left Init."""
super().__init__(
name="{} Filter Days Left".format(name),
device_class=DEVICE_CLASS_TIMESTAMP,
unit_of_measurement=None,
smarty=smarty,
)
self._days_left = 91
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
days_left = self._smarty.filter_timer
if days_left is not None and days_left != self._days_left:
self._state = dt_util.now() + dt.timedelta(days=days_left)
self._days_left = days_left
|
{
"content_hash": "00e703ce323b6f51cc38cffaa6a5d051",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 88,
"avg_line_length": 29.778350515463917,
"alnum_prop": 0.6025618833304484,
"repo_name": "fbradyirl/home-assistant",
"id": "16d910beeb5b576e947e2ce4b4da927adb5056e3",
"size": "5777",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/smarty/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
from flask import Flask, render_template, request
from tenseflow.database import db_session
from tenseflow.models import Answer
from tenseflow import change_tense
app = Flask(__name__)
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
@app.route('/', methods=['GET', 'POST'])
def start():
return render_template('form.html', text_in='', text_out='', tense='past')
@app.route('/result/', methods=['GET', 'POST'])
def result():
print(list(request.form.iteritems()))
if 'input_text' in request.form:
text_in = request.form['input_text']
tense = request.form['tense']
else:
text_in = ''
tense = ''
try:
text_out = change_tense(text_in, request.form['tense'])
except:
text_out = 'ERROR!!!!!!'
if 'correction' in request.form:
correction = request.form['correction']
else:
correction = ''
if 'errortick' in request.form:
incorrect = request.form['errortick']
else:
incorrect = False
db_session.add(Answer(text_in, tense, text_out, incorrect=incorrect, correction=correction))
db_session.commit()
return render_template('form.html', text_in=text_in, text_out=text_out, tense=request.form['tense'])
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
{
"content_hash": "f5cd9c5b4cf90440134ab9cac0296ca8",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 104,
"avg_line_length": 25.576923076923077,
"alnum_prop": 0.630827067669173,
"repo_name": "bendichter/tenseflow",
"id": "751a23eead228cf5557c93756e690aa055cc485b",
"size": "1330",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3337"
},
{
"name": "JavaScript",
"bytes": "96"
},
{
"name": "Jupyter Notebook",
"bytes": "75383"
},
{
"name": "Python",
"bytes": "10938"
}
],
"symlink_target": ""
}
|
import pkg_resources
__VERSION__ = pkg_resources.require('omise')[0].version
|
{
"content_hash": "3176986cae124ca77537019b18bce6c9",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 55,
"avg_line_length": 38.5,
"alnum_prop": 0.7402597402597403,
"repo_name": "omise/omise-python",
"id": "3c3e4835519935541579fb93e78cdc6ee3d6a41a",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "omise/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "654"
},
{
"name": "Python",
"bytes": "271278"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
import os.path
try:
import unittest2 as unittest
except ImportError:
import unittest
from fabric.api import *
from fabric.state import connections
import fabtools
from fabtools import require
def version():
"""
Get the vagrant version as a tuple
"""
with settings(hide('running')):
res = local('vagrant --version', capture=True)
ver = res.split()[2]
return tuple(map(int, ver.split('.')))
def halt_and_destroy():
"""
Halt and destoy virtual machine
"""
with lcd(os.path.dirname(__file__)):
if os.path.exists(os.path.join(env['lcwd'], 'Vagrantfile')):
local('vagrant halt')
if version() >= (0, 9, 99):
local('vagrant destroy -f')
else:
local('vagrant destroy')
def base_boxes():
"""
Get the list of vagrant base boxes to use
The default is to get the list of all base boxes.
This can be overridden with the FABTOOLS_TEST_BOXES environment variable.
"""
boxes = os.environ.get('FABTOOLS_TEST_BOXES')
if boxes is not None:
return boxes.split()
else:
with settings(warn_only=True):
res = local('vagrant box list', capture=True)
if res.failed:
return []
else:
return res.splitlines()
class VagrantTestSuite(unittest.BaseTestSuite):
"""
Test suite with vagrant support
"""
def __init__(self, base_boxes):
self.base_boxes = base_boxes
self.current_box = None
unittest.BaseTestSuite.__init__(self)
def addTest(self, test):
test._suite = self
unittest.BaseTestSuite.addTest(self, test)
def run(self, result):
"""
Run the test suite on all the virtual machines
"""
for base_box in self.base_boxes:
# Start a virtual machine using this base box
self.current_box = base_box
self.start_box()
# Clear fabric connection cache
with self.settings():
if env.host_string in connections:
del connections[env.host_string]
# Make sure the vagrant user can sudo to any user
with self.settings():
require.sudoer('vagrant')
# Make sure the package index is up to date
with self.settings():
fabtools.deb.update_index()
# Run the test suite
unittest.BaseTestSuite.run(self, result)
# Stop the virtual machine and clean up
self.stop_box()
def start_box(self):
"""
Spin up a new vagrant box
"""
with lcd(os.path.dirname(__file__)):
if not os.path.exists('Vagrantfile') \
or not os.environ.get('FABTOOLS_TEST_NODESTROY'):
# Create a fresh vagrant config file
local('rm -f Vagrantfile')
local('vagrant init %s' % self.current_box)
# Clean up
halt_and_destroy()
# Spin up the box
# (retry as it sometimes fails for no good reason)
local('vagrant up || vagrant up')
def ssh_config(self):
"""
Get SSH connection parameters for the current box
"""
with lcd(os.path.dirname(__file__)):
if version() >= (0, 9, 0):
command = 'ssh-config'
else:
command = 'ssh_config'
with settings(hide('running')):
output = local('vagrant %s' % command, capture=True)
config = {}
for line in output.splitlines()[1:]:
key, value = line.strip().split(' ', 2)
config[key] = value
return config
def stop_box(self):
"""
Spin down the vagrant box
"""
if not os.environ.get('FABTOOLS_TEST_NODESTROY'):
halt_and_destroy()
with lcd(os.path.dirname(__file__)):
local('rm -f Vagrantfile')
self.current_box = None
def settings(self, *args, **kwargs):
"""
Return a Fabric context manager with the right host settings
"""
config = self.ssh_config()
user = config['User']
hostname = config['HostName']
port = config['Port']
kwargs['host_string'] = "%s@%s:%s" % (user, hostname, port)
kwargs['user'] = user
kwargs['key_filename'] = config['IdentityFile']
kwargs['disable_known_hosts'] = True
return settings(*args, **kwargs)
class VagrantTestCase(unittest.TestCase):
"""
Test case with vagrant support
"""
def __init__(self, name, callable):
super(VagrantTestCase, self).__init__()
self._name = name
self._callable = callable
def run(self, result=None):
"""
Run the test case within a Fabric context manager
"""
with self._suite.settings():
unittest.TestCase.run(self, result)
def runTest(self):
self._callable()
|
{
"content_hash": "a9fdb412b453bb2bbdcf8b7333d32b52",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 77,
"avg_line_length": 27.18617021276596,
"alnum_prop": 0.5494032478966934,
"repo_name": "pahaz/fabtools",
"id": "9c1765c789bb34c794f6400d268a459c9a2fc541",
"size": "5111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabtools/tests/vagrant.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "140349"
}
],
"symlink_target": ""
}
|
import json
import sys
sys.path.append('..')
from spytify import Spotify
from spytify import AuthorizationCode, ClientCredentials
def load_config():
with open('config.json') as config_file:
return json.loads(config_file.read())
def create_spotify():
config = load_config()
auth = ClientCredentials(config['client_id'], config['client_secret'])
return Spotify(auth=auth)
print(load_config())
|
{
"content_hash": "594787729cb95184d70a5d2189fbc1d7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 74,
"avg_line_length": 21.1,
"alnum_prop": 0.7109004739336493,
"repo_name": "cauebs/spytify",
"id": "6011ec81f2344926c49213db1cd7302c72a04080",
"size": "422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20235"
}
],
"symlink_target": ""
}
|
"""A WSGI server implementation using a shared thread pool."""
import collections
import errno
import httplib
import logging
import select
import socket
import threading
import time
import google
from cherrypy import wsgiserver
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import thread_executor
_HAS_POLL = hasattr(select, 'poll')
# TODO: the only reason we need to timeout is to pick up added or remove
# descriptors. But AFAICT, we only add descriptors at startup and remove them at
# shutdown so for the bulk of the run, the timeout is useless and just simply
# wastes CPU. For startup, if we wait to start the thread until after all
# WSGI servers are created, we are good (although may need to be careful in the
# runtime instances depending on when servers are created relative to the
# sandbox being enabled). For shutdown, more research is needed (one idea is
# simply not remove descriptors as the process is about to exit).
_READINESS_TIMEOUT_SECONDS = 1
_SECONDS_TO_MILLISECONDS = 1000
# Due to reports of failure to find a consistent port, trying a higher value
# to see if that reduces the problem sufficiently. If it doesn't we can try
# increasing it (on my circa 2010 desktop, it takes about 1/2 second per 1024
# tries) but it would probably be better to either figure out a better
# algorithm or make it possible for code to work with inconsistent ports.
_PORT_0_RETRIES = 2048
class BindError(errors.Error):
"""The server failed to bind its address."""
_THREAD_POOL = thread_executor.ThreadExecutor()
class _SharedCherryPyThreadPool(object):
"""A mimic of wsgiserver.ThreadPool that delegates to a shared thread pool."""
def __init__(self):
self._condition = threading.Condition()
self._connections = set() # Protected by self._condition.
def stop(self, timeout=5):
_THREAD_POOL.submit(self._stop, timeout)
def _stop(self, timeout):
timeout_time = time.time() + timeout
with self._condition:
while self._connections and time.time() < timeout_time:
self._condition.wait(timeout_time - time.time())
for connection in self._connections:
self._shutdown_connection(connection)
@staticmethod
def _shutdown_connection(connection):
if not connection.rfile.closed:
connection.socket.shutdown(socket.SHUT_RD)
def put(self, obj):
with self._condition:
self._connections.add(obj)
_THREAD_POOL.submit(self._handle, obj)
def _handle(self, obj):
try:
obj.communicate()
finally:
obj.close()
with self._condition:
self._connections.remove(obj)
self._condition.notify()
class SelectThread(object):
"""A thread that selects on sockets and calls corresponding callbacks."""
def __init__(self):
self._lock = threading.Lock()
# self._file_descriptors is a frozenset and
# self._file_descriptor_to_callback is never mutated so they can be
# snapshotted by the select thread without needing to copy.
self._file_descriptors = frozenset()
self._file_descriptor_to_callback = {}
self._select_thread = threading.Thread(target=self._loop_forever)
self._select_thread.daemon = True
def start(self):
self._select_thread.start()
def add_socket(self, s, callback):
"""Add a new socket to watch.
Args:
s: A socket to select on.
callback: A callable with no args to be called when s is ready for a read.
"""
with self._lock:
self._file_descriptors = self._file_descriptors.union([s.fileno()])
new_file_descriptor_to_callback = self._file_descriptor_to_callback.copy()
new_file_descriptor_to_callback[s.fileno()] = callback
self._file_descriptor_to_callback = new_file_descriptor_to_callback
def remove_socket(self, s):
"""Remove a watched socket."""
with self._lock:
self._file_descriptors = self._file_descriptors.difference([s.fileno()])
new_file_descriptor_to_callback = self._file_descriptor_to_callback.copy()
del new_file_descriptor_to_callback[s.fileno()]
self._file_descriptor_to_callback = new_file_descriptor_to_callback
def _loop_forever(self):
while True:
self._select()
def _select(self):
with self._lock:
fds = self._file_descriptors
fd_to_callback = self._file_descriptor_to_callback
if fds:
if _HAS_POLL:
# With 100 file descriptors, it is approximately 5x slower to
# recreate and reinitialize the Poll object on every call to _select
# rather reuse one. But the absolute cost of contruction,
# initialization and calling poll(0) is ~25us so code simplicity
# wins.
poll = select.poll()
for fd in fds:
poll.register(fd, select.POLLIN)
ready_file_descriptors = [fd for fd, _ in poll.poll(
_READINESS_TIMEOUT_SECONDS * _SECONDS_TO_MILLISECONDS)]
else:
ready_file_descriptors, _, _ = select.select(fds, [], [],
_READINESS_TIMEOUT_SECONDS)
for fd in ready_file_descriptors:
fd_to_callback[fd]()
else:
# select([], [], [], 1) is not supported on Windows.
time.sleep(_READINESS_TIMEOUT_SECONDS)
_SELECT_THREAD = SelectThread()
_SELECT_THREAD.start()
class _SingleAddressWsgiServer(wsgiserver.CherryPyWSGIServer):
"""A WSGI server that uses a shared SelectThread and thread pool."""
def __init__(self, host, app):
"""Constructs a _SingleAddressWsgiServer.
Args:
host: A (hostname, port) tuple containing the hostname and port to bind.
The port can be 0 to allow any port.
app: A WSGI app to handle requests.
"""
super(_SingleAddressWsgiServer, self).__init__(host, self)
self._lock = threading.Lock()
self._app = app # Protected by _lock.
self._error = None # Protected by _lock.
self.requests = _SharedCherryPyThreadPool()
self.software = http_runtime_constants.SERVER_SOFTWARE
# Some servers, especially the API server, may receive many simultaneous
# requests so set the listen() backlog to something high to reduce the
# likelihood of refused connections.
self.request_queue_size = 100
def start(self):
"""Starts the _SingleAddressWsgiServer.
This is a modified version of the base class implementation. Changes:
- Removed unused functionality (Unix domain socket and SSL support).
- Raises BindError instead of socket.error.
- Uses _SharedCherryPyThreadPool instead of wsgiserver.ThreadPool.
- Calls _SELECT_THREAD.add_socket instead of looping forever.
Raises:
BindError: The address could not be bound.
"""
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in host:
info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, '',
self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, '', self.bind_addr)]
self.socket = None
for res in info:
af, socktype, proto, _, _ = res
try:
self.bind(af, socktype, proto)
except socket.error as socket_error:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise BindError('Unable to bind %s:%s' % self.bind_addr, socket_error)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
self.ready = True
self._start_time = time.time()
_SELECT_THREAD.add_socket(self.socket, self.tick)
def quit(self):
"""Quits the _SingleAddressWsgiServer."""
_SELECT_THREAD.remove_socket(self.socket)
self.requests.stop(timeout=1)
@property
def port(self):
"""Returns the port that the server is bound to."""
return self.socket.getsockname()[1]
def set_app(self, app):
"""Sets the PEP-333 app to use to serve requests."""
with self._lock:
self._app = app
def set_error(self, error):
"""Sets the HTTP status code to serve for all requests."""
with self._lock:
self._error = error
self._app = None
def __call__(self, environ, start_response):
with self._lock:
app = self._app
error = self._error
if app:
return app(environ, start_response)
else:
start_response('%d %s' % (error, httplib.responses[error]), [])
return []
class WsgiServer(object):
def __init__(self, host, app):
"""Constructs a WsgiServer.
Args:
host: A (hostname, port) tuple containing the hostname and port to bind.
The port can be 0 to allow any port.
app: A WSGI app to handle requests.
"""
self.bind_addr = host
self._app = app
self._servers = []
def start(self):
"""Starts the WsgiServer.
This starts multiple _SingleAddressWsgiServers to bind the address in all
address families.
Raises:
BindError: The address could not be bound.
"""
host, port = self.bind_addr
try:
addrinfo = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
sockaddrs = [addr[-1] for addr in addrinfo]
host_ports = [sockaddr[:2] for sockaddr in sockaddrs]
# Remove duplicate addresses caused by bad hosts file. Retain the
# order to minimize behavior change (and so we don't have to tweak
# unit tests to deal with different order).
host_ports = list(collections.OrderedDict.fromkeys(host_ports))
except socket.gaierror:
host_ports = [self.bind_addr]
if port != 0:
self._start_all_fixed_port(host_ports)
else:
for _ in range(_PORT_0_RETRIES):
if self._start_all_dynamic_port(host_ports):
break
else:
raise BindError('Unable to find a consistent port for %s' % host)
def _start_all_fixed_port(self, host_ports):
"""Starts a server for each specified address with a fixed port.
Does the work of actually trying to create a _SingleAddressWsgiServer for
each specified address.
Args:
host_ports: An iterable of host, port tuples.
Raises:
BindError: The address could not be bound.
"""
for host, port in host_ports:
assert port != 0
server = _SingleAddressWsgiServer((host, port), self._app)
try:
server.start()
except BindError as bind_error:
# TODO: I'm not sure about the behavior of quietly ignoring an
# EADDRINUSE as long as the bind succeeds on at least one interface. I
# think we should either:
# - Fail (just like we do now when bind fails on every interface).
# - Retry on next highest port.
logging.debug('Failed to bind "%s:%s": %s', host, port, bind_error)
continue
else:
self._servers.append(server)
if not self._servers:
raise BindError('Unable to bind %s:%s' % self.bind_addr)
def _start_all_dynamic_port(self, host_ports):
"""Starts a server for each specified address with a dynamic port.
Does the work of actually trying to create a _SingleAddressWsgiServer for
each specified address.
Args:
host_ports: An iterable of host, port tuples.
Returns:
The list of all servers (also saved as self._servers). A non empty list
indicates success while an empty list indicates failure.
"""
port = 0
for host, _ in host_ports:
server = _SingleAddressWsgiServer((host, port), self._app)
try:
server.start()
if port == 0:
port = server.port
except BindError as bind_error:
if bind_error[1][0] == errno.EADDRINUSE:
# The port picked at random for first interface was not available
# on one of the other interfaces. Forget them and try again.
for server in self._servers:
server.quit()
self._servers = []
break
else:
# Ignore the interface if we get an error other than EADDRINUSE.
logging.debug('Failed to bind "%s:%s": %s', host, port, bind_error)
continue
else:
self._servers.append(server)
return self._servers
def quit(self):
"""Quits the WsgiServer."""
for server in self._servers:
server.quit()
@property
def host(self):
"""Returns the host that the server is bound to."""
return self._servers[0].socket.getsockname()[0]
@property
def port(self):
"""Returns the port that the server is bound to."""
return self._servers[0].socket.getsockname()[1]
def set_app(self, app):
"""Sets the PEP-333 app to use to serve requests."""
self._app = app
for server in self._servers:
server.set_app(app)
def set_error(self, error):
"""Sets the HTTP status code to serve for all requests."""
self._error = error
self._app = None
for server in self._servers:
server.set_error(error)
@property
def ready(self):
return all(server.ready for server in self._servers)
|
{
"content_hash": "0a6ca806daddd98c84025c814cc64dcc",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 80,
"avg_line_length": 33.44887780548628,
"alnum_prop": 0.6566018042197868,
"repo_name": "yencarnacion/jaikuengine",
"id": "14fd3f83e422551146932cfc0e1cd9ebc51cd6b3",
"size": "14014",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": ".google_appengine/google/appengine/tools/devappserver2/wsgi_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "407860"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "CSS",
"bytes": "330328"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "JavaScript",
"bytes": "751903"
},
{
"name": "PHP",
"bytes": "1808240"
},
{
"name": "Python",
"bytes": "50134630"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "39632"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import numpy.testing as npt
from skbio.util import get_data_path
def test_get_data_path():
fn = 'parrot'
path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(path, 'data', fn)
data_path_2 = get_data_path(fn)
npt.assert_string_equal(data_path_2, data_path)
if __name__ == '__main__':
import nose
nose.runmodule()
|
{
"content_hash": "427393d1921e08f4798970c5d6bc0d72",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 29.51851851851852,
"alnum_prop": 0.5508155583437893,
"repo_name": "JWDebelius/scikit-bio",
"id": "e0b6eddfb325e9b252a71742af54294631ef90cc",
"size": "819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "skbio/util/tests/test_testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from homebrain import Agent, Dispatcher, Event
from websocket_server import WebsocketServer
import threading
import logging
import json
class WebSocket(Agent):
autostart = True
def __init__(self):
super(WebSocket, self).__init__()
self.target = self.identifier
self.server = WebsocketServer(5601, "0.0.0.0")
self.wsThread = threading.Thread(target=self.server.run_forever)
self.clients = self.server.clients
self.subscriptions = []
@self.server.set_fn_new_client
def new_client(client, server):
client["subscriptions"] = []
@self.server.set_fn_client_left
def client_left(client, server):
logging.info("Client(%d) disconnected" % client['id'])
@self.server.set_fn_message_received
def message_received(client, server, msg):
if msg:
try:
event = json.loads(msg)
event_type = event["type"]
event_data = event["data"]
if event_type == "subscribe":
self._subscribe(client, event_data)
elif event_type == "unsubscribe":
self._unsubscribe(client, msg)
else:
Dispatcher().put_event(Event(type=event_type, data=event_data))
except json.decoder.JSONDecodeError as e:
pass
def run(self):
self.wsThread.start()
self._listener()
def _listener(self):
while self.wsThread.isAlive():
event = self.next_event()
event_type = event["type"]
event_data = event["data"]
for client in self.clients:
if event_type in client["subscriptions"]:
self.server.send_message(client, json.dumps(event))
def _subscribe(self, client, event_data):
client["subscriptions"].append(str(event_data))
if not str(event_data) in self.subscriptions:
self.subscriptions.append(str(event_data))
Dispatcher().bind(self, event_data)
logging.info("Client subscribed to " + event_data)
def _unsubscribe(self, client, event_data):
if str(event_data) in client["subscriptions"]:
client["subscriptions"].remove(event_data)
othersubscriber = False
for otherclient in self.clients:
if str(event_data) in otherclient["subscriptions"]:
othersubscriber = True
if not othersubscriber:
self.subscriptions.remove(str(event_data))
logging.info("Client unsubscribed from " + event_data)
else:
# Client wasn't found in subscribers list
pass
|
{
"content_hash": "f624867c7b95b9da3820ed1c598ca4af",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 87,
"avg_line_length": 36.80263157894737,
"alnum_prop": 0.5652484805148373,
"repo_name": "Homebrain/Homebrain",
"id": "85e8b458cf356cb36f4ff37c9574732025034d89",
"size": "2798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homebrain/agents/websocket/websocket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "656"
},
{
"name": "HTML",
"bytes": "10116"
},
{
"name": "JavaScript",
"bytes": "6026"
},
{
"name": "Python",
"bytes": "54990"
},
{
"name": "Shell",
"bytes": "732"
}
],
"symlink_target": ""
}
|
import lxml.html
from mock import Mock
from preggy import expect
from holmes.config import Config
from holmes.reviewer import Reviewer
from holmes.validators.anchor_without_any_text import (
AnchorWithoutAnyTextValidator
)
from holmes.utils import _
from tests.unit.base import ValidatorTestCase
from tests.fixtures import PageFactory
class TestAnchorWithoutAnyTextValidator(ValidatorTestCase):
def test_validate_anchor_without_any_text(self):
config = Config()
page = PageFactory.create()
reviewer = Reviewer(
api_url='http://localhost:2368',
page_uuid=page.uuid,
page_url=page.url,
page_score=0.0,
config=config,
validators=[]
)
content = '<html><a href="http://globo.com"></a><a href="http://globo.com/index.html">teste</a></html>'
result = {
'url': page.url,
'status': 200,
'content': content,
'html': lxml.html.fromstring(content)
}
reviewer.responses[page.url] = result
reviewer.get_response = Mock(return_value=result)
link = Mock()
link.text_content = Mock(return_value='')
link.findall = Mock(return_value='')
link.get.return_value = 'http://globo.com'
validator = AnchorWithoutAnyTextValidator(reviewer)
validator.add_fact = Mock()
validator.add_violation = Mock()
validator.review.data = {'page.all_links': [link]}
validator.validate()
validator.add_violation.assert_called_once_with(
key='empty.anchors',
value=['http://globo.com'],
points=20)
def test_can_get_violation_definitions(self):
reviewer = Mock()
validator = AnchorWithoutAnyTextValidator(reviewer)
definitions = validator.get_violation_definitions()
expect(definitions).to_length(1)
expect('empty.anchors' in definitions).to_be_true()
links = ['http://globo.com']
expect(validator.get_empty_anchors_parsed_value(links)).to_equal(
'<a href="http://globo.com" target="_blank">#0</a>'
)
|
{
"content_hash": "e8d6b0a974f6370c174e99a3ef8aad01",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 111,
"avg_line_length": 31.420289855072465,
"alnum_prop": 0.6166974169741697,
"repo_name": "holmes-app/holmes-api",
"id": "333b03a54bf13cc793169a80c1eddb04ff1710d8",
"size": "2211",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/validators/test_anchor_without_any_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "212454"
},
{
"name": "Makefile",
"bytes": "11334"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "809395"
}
],
"symlink_target": ""
}
|
import datetime
import sys
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
class CertificateBodyDescription(_serialization.Model):
"""The JSON-serialized X509 Certificate.
:ivar certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem
file content.
:vartype certificate: str
"""
_attribute_map = {
"certificate": {"key": "certificate", "type": "str"},
}
def __init__(self, *, certificate: Optional[str] = None, **kwargs):
"""
:keyword certificate: base-64 representation of the X509 leaf certificate .cer file or just
.pem file content.
:paramtype certificate: str
"""
super().__init__(**kwargs)
self.certificate = certificate
class CertificateDescription(_serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate.
:vartype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.CertificateProperties
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"etag": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"properties": {"key": "properties", "type": "CertificateProperties"},
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"etag": {"key": "etag", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, *, properties: Optional["_models.CertificateProperties"] = None, **kwargs):
"""
:keyword properties: The description of an X509 CA Certificate.
:paramtype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.CertificateProperties
"""
super().__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CertificateListDescription(_serialization.Model):
"""The JSON-serialized array of Certificate objects.
:ivar value: The array of Certificate objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.CertificateDescription]
"""
_attribute_map = {
"value": {"key": "value", "type": "[CertificateDescription]"},
}
def __init__(self, *, value: Optional[List["_models.CertificateDescription"]] = None, **kwargs):
"""
:keyword value: The array of Certificate objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.CertificateDescription]
"""
super().__init__(**kwargs)
self.value = value
class CertificateProperties(_serialization.Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
"subject": {"readonly": True},
"expiry": {"readonly": True},
"thumbprint": {"readonly": True},
"is_verified": {"readonly": True},
"created": {"readonly": True},
"updated": {"readonly": True},
}
_attribute_map = {
"subject": {"key": "subject", "type": "str"},
"expiry": {"key": "expiry", "type": "rfc-1123"},
"thumbprint": {"key": "thumbprint", "type": "str"},
"is_verified": {"key": "isVerified", "type": "bool"},
"created": {"key": "created", "type": "rfc-1123"},
"updated": {"key": "updated", "type": "rfc-1123"},
"certificate": {"key": "certificate", "type": "str"},
}
def __init__(self, *, certificate: Optional[str] = None, **kwargs):
"""
:keyword certificate: The certificate content.
:paramtype certificate: str
"""
super().__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.certificate = certificate
class CertificatePropertiesWithNonce(_serialization.Model):
"""The description of an X509 CA Certificate including the challenge nonce issued for the Proof-Of-Possession flow.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar verification_code: The certificate's verification code that will be used for proof of
possession.
:vartype verification_code: str
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
"subject": {"readonly": True},
"expiry": {"readonly": True},
"thumbprint": {"readonly": True},
"is_verified": {"readonly": True},
"created": {"readonly": True},
"updated": {"readonly": True},
"verification_code": {"readonly": True},
"certificate": {"readonly": True},
}
_attribute_map = {
"subject": {"key": "subject", "type": "str"},
"expiry": {"key": "expiry", "type": "rfc-1123"},
"thumbprint": {"key": "thumbprint", "type": "str"},
"is_verified": {"key": "isVerified", "type": "bool"},
"created": {"key": "created", "type": "rfc-1123"},
"updated": {"key": "updated", "type": "rfc-1123"},
"verification_code": {"key": "verificationCode", "type": "str"},
"certificate": {"key": "certificate", "type": "str"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.verification_code = None
self.certificate = None
class CertificateVerificationDescription(_serialization.Model):
"""The JSON-serialized leaf certificate.
:ivar certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:vartype certificate: str
"""
_attribute_map = {
"certificate": {"key": "certificate", "type": "str"},
}
def __init__(self, *, certificate: Optional[str] = None, **kwargs):
"""
:keyword certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:paramtype certificate: str
"""
super().__init__(**kwargs)
self.certificate = certificate
class CertificateWithNonceDescription(_serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:vartype properties:
~azure.mgmt.iothub.v2019_07_01_preview.models.CertificatePropertiesWithNonce
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"etag": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"properties": {"key": "properties", "type": "CertificatePropertiesWithNonce"},
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"etag": {"key": "etag", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, *, properties: Optional["_models.CertificatePropertiesWithNonce"] = None, **kwargs):
"""
:keyword properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:paramtype properties:
~azure.mgmt.iothub.v2019_07_01_preview.models.CertificatePropertiesWithNonce
"""
super().__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CloudToDeviceProperties(_serialization.Model):
"""The IoT hub cloud-to-device messaging properties.
:ivar max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
:ivar default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype default_ttl_as_iso8601: ~datetime.timedelta
:ivar feedback: The properties of the feedback queue for cloud-to-device messages.
:vartype feedback: ~azure.mgmt.iothub.v2019_07_01_preview.models.FeedbackProperties
"""
_validation = {
"max_delivery_count": {"maximum": 100, "minimum": 1},
}
_attribute_map = {
"max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
"default_ttl_as_iso8601": {"key": "defaultTtlAsIso8601", "type": "duration"},
"feedback": {"key": "feedback", "type": "FeedbackProperties"},
}
def __init__(
self,
*,
max_delivery_count: Optional[int] = None,
default_ttl_as_iso8601: Optional[datetime.timedelta] = None,
feedback: Optional["_models.FeedbackProperties"] = None,
**kwargs
):
"""
:keyword max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
:keyword default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype default_ttl_as_iso8601: ~datetime.timedelta
:keyword feedback: The properties of the feedback queue for cloud-to-device messages.
:paramtype feedback: ~azure.mgmt.iothub.v2019_07_01_preview.models.FeedbackProperties
"""
super().__init__(**kwargs)
self.max_delivery_count = max_delivery_count
self.default_ttl_as_iso8601 = default_ttl_as_iso8601
self.feedback = feedback
class EndpointHealthData(_serialization.Model):
"""The health data for an endpoint.
:ivar endpoint_id: Id of the endpoint.
:vartype endpoint_id: str
:ivar health_status: Health statuses have following meanings. The 'healthy' status shows that
the endpoint is accepting messages as expected. The 'unhealthy' status shows that the endpoint
is not accepting messages as expected and IoT Hub is retrying to send data to this endpoint.
The status of an unhealthy endpoint will be updated to healthy when IoT Hub has established an
eventually consistent state of health. The 'dead' status shows that the endpoint is not
accepting messages, after IoT Hub retried sending messages for the retrial period. See IoT Hub
metrics to identify errors and monitor issues with endpoints. The 'unknown' status shows that
the IoT Hub has not established a connection with the endpoint. No messages have been delivered
to or rejected from this endpoint. Known values are: "unknown", "healthy", "unhealthy", and
"dead".
:vartype health_status: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.EndpointHealthStatus
"""
_attribute_map = {
"endpoint_id": {"key": "endpointId", "type": "str"},
"health_status": {"key": "healthStatus", "type": "str"},
}
def __init__(
self,
*,
endpoint_id: Optional[str] = None,
health_status: Optional[Union[str, "_models.EndpointHealthStatus"]] = None,
**kwargs
):
"""
:keyword endpoint_id: Id of the endpoint.
:paramtype endpoint_id: str
:keyword health_status: Health statuses have following meanings. The 'healthy' status shows
that the endpoint is accepting messages as expected. The 'unhealthy' status shows that the
endpoint is not accepting messages as expected and IoT Hub is retrying to send data to this
endpoint. The status of an unhealthy endpoint will be updated to healthy when IoT Hub has
established an eventually consistent state of health. The 'dead' status shows that the endpoint
is not accepting messages, after IoT Hub retried sending messages for the retrial period. See
IoT Hub metrics to identify errors and monitor issues with endpoints. The 'unknown' status
shows that the IoT Hub has not established a connection with the endpoint. No messages have
been delivered to or rejected from this endpoint. Known values are: "unknown", "healthy",
"unhealthy", and "dead".
:paramtype health_status: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.EndpointHealthStatus
"""
super().__init__(**kwargs)
self.endpoint_id = endpoint_id
self.health_status = health_status
class EndpointHealthDataListResult(_serialization.Model):
"""The JSON-serialized array of EndpointHealthData objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: JSON-serialized array of Endpoint health data.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EndpointHealthData]
:ivar next_link: Link to more results.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[EndpointHealthData]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.EndpointHealthData"]] = None, **kwargs):
"""
:keyword value: JSON-serialized array of Endpoint health data.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EndpointHealthData]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class EnrichmentProperties(_serialization.Model):
"""The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar key: The key or name for the enrichment property. Required.
:vartype key: str
:ivar value: The value for the enrichment property. Required.
:vartype value: str
:ivar endpoint_names: The list of endpoints for which the enrichment is applied to the message.
Required.
:vartype endpoint_names: list[str]
"""
_validation = {
"key": {"required": True},
"value": {"required": True},
"endpoint_names": {"required": True, "min_items": 1},
}
_attribute_map = {
"key": {"key": "key", "type": "str"},
"value": {"key": "value", "type": "str"},
"endpoint_names": {"key": "endpointNames", "type": "[str]"},
}
def __init__(self, *, key: str, value: str, endpoint_names: List[str], **kwargs):
"""
:keyword key: The key or name for the enrichment property. Required.
:paramtype key: str
:keyword value: The value for the enrichment property. Required.
:paramtype value: str
:keyword endpoint_names: The list of endpoints for which the enrichment is applied to the
message. Required.
:paramtype endpoint_names: list[str]
"""
super().__init__(**kwargs)
self.key = key
self.value = value
self.endpoint_names = endpoint_names
class ErrorDetails(_serialization.Model):
"""Error details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar http_status_code: The HTTP status code.
:vartype http_status_code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: str
"""
_validation = {
"code": {"readonly": True},
"http_status_code": {"readonly": True},
"message": {"readonly": True},
"details": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"http_status_code": {"key": "httpStatusCode", "type": "str"},
"message": {"key": "message", "type": "str"},
"details": {"key": "details", "type": "str"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.code = None
self.http_status_code = None
self.message = None
self.details = None
class EventHubConsumerGroupInfo(_serialization.Model):
"""The properties of the EventHubConsumerGroupInfo object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The tags.
:vartype properties: dict[str, str]
:ivar id: The Event Hub-compatible consumer group identifier.
:vartype id: str
:ivar name: The Event Hub-compatible consumer group name.
:vartype name: str
:ivar type: the resource type.
:vartype type: str
:ivar etag: The etag.
:vartype etag: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"etag": {"readonly": True},
}
_attribute_map = {
"properties": {"key": "properties", "type": "{str}"},
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"etag": {"key": "etag", "type": "str"},
}
def __init__(self, *, properties: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword properties: The tags.
:paramtype properties: dict[str, str]
"""
super().__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.type = None
self.etag = None
class EventHubConsumerGroupsListResult(_serialization.Model):
"""The JSON-serialized array of Event Hub-compatible consumer group names with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of consumer groups objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EventHubConsumerGroupInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[EventHubConsumerGroupInfo]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.EventHubConsumerGroupInfo"]] = None, **kwargs):
"""
:keyword value: List of consumer groups objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EventHubConsumerGroupInfo]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class EventHubProperties(_serialization.Model):
"""The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype retention_time_in_days: int
:ivar partition_count: The number of partitions for receiving device-to-cloud messages in the
Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype partition_count: int
:ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
:vartype partition_ids: list[str]
:ivar path: The Event Hub-compatible name.
:vartype path: str
:ivar endpoint: The Event Hub-compatible endpoint.
:vartype endpoint: str
"""
_validation = {
"partition_ids": {"readonly": True},
"path": {"readonly": True},
"endpoint": {"readonly": True},
}
_attribute_map = {
"retention_time_in_days": {"key": "retentionTimeInDays", "type": "int"},
"partition_count": {"key": "partitionCount", "type": "int"},
"partition_ids": {"key": "partitionIds", "type": "[str]"},
"path": {"key": "path", "type": "str"},
"endpoint": {"key": "endpoint", "type": "str"},
}
def __init__(
self, *, retention_time_in_days: Optional[int] = None, partition_count: Optional[int] = None, **kwargs
):
"""
:keyword retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype retention_time_in_days: int
:keyword partition_count: The number of partitions for receiving device-to-cloud messages in
the Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype partition_count: int
"""
super().__init__(**kwargs)
self.retention_time_in_days = retention_time_in_days
self.partition_count = partition_count
self.partition_ids = None
self.path = None
self.endpoint = None
class ExportDevicesRequest(_serialization.Model):
"""Use to provide parameters when requesting an export of all devices in the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar export_blob_container_uri: The export blob container URI. Required.
:vartype export_blob_container_uri: str
:ivar exclude_keys: The value indicating whether keys should be excluded during export.
Required.
:vartype exclude_keys: bool
"""
_validation = {
"export_blob_container_uri": {"required": True},
"exclude_keys": {"required": True},
}
_attribute_map = {
"export_blob_container_uri": {"key": "exportBlobContainerUri", "type": "str"},
"exclude_keys": {"key": "excludeKeys", "type": "bool"},
}
def __init__(self, *, export_blob_container_uri: str, exclude_keys: bool, **kwargs):
"""
:keyword export_blob_container_uri: The export blob container URI. Required.
:paramtype export_blob_container_uri: str
:keyword exclude_keys: The value indicating whether keys should be excluded during export.
Required.
:paramtype exclude_keys: bool
"""
super().__init__(**kwargs)
self.export_blob_container_uri = export_blob_container_uri
self.exclude_keys = exclude_keys
class FailoverInput(_serialization.Model):
"""Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:ivar failover_region: Region the hub will be failed over to. Required.
:vartype failover_region: str
"""
_validation = {
"failover_region": {"required": True},
}
_attribute_map = {
"failover_region": {"key": "failoverRegion", "type": "str"},
}
def __init__(self, *, failover_region: str, **kwargs):
"""
:keyword failover_region: Region the hub will be failed over to. Required.
:paramtype failover_region: str
"""
super().__init__(**kwargs)
self.failover_region = failover_region
class FallbackRouteProperties(_serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the route. The name can only include alphanumeric characters, periods,
underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:vartype name: str
:ivar source: The source to which the routing rule is to be applied to. For example,
DeviceMessages. Required. Known values are: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and "DigitalTwinChangeEvents".
:vartype source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:ivar condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: The list of endpoints to which the messages that satisfy the condition
are routed to. Currently only 1 endpoint is allowed. Required.
:vartype endpoint_names: list[str]
:ivar is_enabled: Used to specify whether the fallback route is enabled. Required.
:vartype is_enabled: bool
"""
_validation = {
"source": {"required": True},
"endpoint_names": {"required": True, "max_items": 1, "min_items": 1},
"is_enabled": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"source": {"key": "source", "type": "str"},
"condition": {"key": "condition", "type": "str"},
"endpoint_names": {"key": "endpointNames", "type": "[str]"},
"is_enabled": {"key": "isEnabled", "type": "bool"},
}
def __init__(
self,
*,
source: Union[str, "_models.RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
name: Optional[str] = None,
condition: Optional[str] = None,
**kwargs
):
"""
:keyword name: The name of the route. The name can only include alphanumeric characters,
periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:paramtype name: str
:keyword source: The source to which the routing rule is to be applied to. For example,
DeviceMessages. Required. Known values are: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and "DigitalTwinChangeEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:keyword condition: The condition which is evaluated in order to apply the fallback route. If
the condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: The list of endpoints to which the messages that satisfy the condition
are routed to. Currently only 1 endpoint is allowed. Required.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Used to specify whether the fallback route is enabled. Required.
:paramtype is_enabled: bool
"""
super().__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
class FeedbackProperties(_serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:ivar lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
"""
_validation = {
"max_delivery_count": {"maximum": 100, "minimum": 1},
}
_attribute_map = {
"lock_duration_as_iso8601": {"key": "lockDurationAsIso8601", "type": "duration"},
"ttl_as_iso8601": {"key": "ttlAsIso8601", "type": "duration"},
"max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message on
the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
"""
super().__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
class ImportDevicesRequest(_serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:ivar input_blob_container_uri: The input blob container URI. Required.
:vartype input_blob_container_uri: str
:ivar output_blob_container_uri: The output blob container URI. Required.
:vartype output_blob_container_uri: str
"""
_validation = {
"input_blob_container_uri": {"required": True},
"output_blob_container_uri": {"required": True},
}
_attribute_map = {
"input_blob_container_uri": {"key": "inputBlobContainerUri", "type": "str"},
"output_blob_container_uri": {"key": "outputBlobContainerUri", "type": "str"},
}
def __init__(self, *, input_blob_container_uri: str, output_blob_container_uri: str, **kwargs):
"""
:keyword input_blob_container_uri: The input blob container URI. Required.
:paramtype input_blob_container_uri: str
:keyword output_blob_container_uri: The output blob container URI. Required.
:paramtype output_blob_container_uri: str
"""
super().__init__(**kwargs)
self.input_blob_container_uri = input_blob_container_uri
self.output_blob_container_uri = output_blob_container_uri
class IotHubCapacity(_serialization.Model):
"""IoT Hub capacity information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum number of units.
:vartype minimum: int
:ivar maximum: The maximum number of units.
:vartype maximum: int
:ivar default: The default number of units.
:vartype default: int
:ivar scale_type: The type of the scaling enabled. Known values are: "Automatic", "Manual", and
"None".
:vartype scale_type: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubScaleType
"""
_validation = {
"minimum": {"readonly": True, "maximum": 1, "minimum": 1},
"maximum": {"readonly": True},
"default": {"readonly": True},
"scale_type": {"readonly": True},
}
_attribute_map = {
"minimum": {"key": "minimum", "type": "int"},
"maximum": {"key": "maximum", "type": "int"},
"default": {"key": "default", "type": "int"},
"scale_type": {"key": "scaleType", "type": "str"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.minimum = None
self.maximum = None
self.default = None
self.scale_type = None
class Resource(_serialization.Model):
"""The common properties of an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: The resource location. Required.
:vartype location: str
:ivar tags: The resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True, "pattern": r"^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$"},
"type": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword location: The resource location. Required.
:paramtype location: str
:keyword tags: The resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class IotHubDescription(Resource):
"""The description of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: The resource location. Required.
:vartype location: str
:ivar tags: The resource tags.
:vartype tags: dict[str, str]
:ivar etag: The Etag field is *not* required. If it is provided in the response body, it must
also be provided as a header per the normal ETag convention.
:vartype etag: str
:ivar properties: IotHub properties.
:vartype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubProperties
:ivar sku: IotHub SKU info. Required.
:vartype sku: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuInfo
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True, "pattern": r"^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$"},
"type": {"readonly": True},
"location": {"required": True},
"sku": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"etag": {"key": "etag", "type": "str"},
"properties": {"key": "properties", "type": "IotHubProperties"},
"sku": {"key": "sku", "type": "IotHubSkuInfo"},
}
def __init__(
self,
*,
location: str,
sku: "_models.IotHubSkuInfo",
tags: Optional[Dict[str, str]] = None,
etag: Optional[str] = None,
properties: Optional["_models.IotHubProperties"] = None,
**kwargs
):
"""
:keyword location: The resource location. Required.
:paramtype location: str
:keyword tags: The resource tags.
:paramtype tags: dict[str, str]
:keyword etag: The Etag field is *not* required. If it is provided in the response body, it
must also be provided as a header per the normal ETag convention.
:paramtype etag: str
:keyword properties: IotHub properties.
:paramtype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubProperties
:keyword sku: IotHub SKU info. Required.
:paramtype sku: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuInfo
"""
super().__init__(location=location, tags=tags, **kwargs)
self.etag = etag
self.properties = properties
self.sku = sku
class IotHubDescriptionListResult(_serialization.Model):
"""The JSON-serialized array of IotHubDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubDescription objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[IotHubDescription]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.IotHubDescription"]] = None, **kwargs):
"""
:keyword value: The array of IotHubDescription objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubDescription]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubLocationDescription(_serialization.Model):
"""Public representation of one of the locations where a resource is provisioned.
:ivar location: Azure Geo Regions.
:vartype location: str
:ivar role: Specific Role assigned to this location. Known values are: "primary" and
"secondary".
:vartype role: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubReplicaRoleType
"""
_attribute_map = {
"location": {"key": "location", "type": "str"},
"role": {"key": "role", "type": "str"},
}
def __init__(
self,
*,
location: Optional[str] = None,
role: Optional[Union[str, "_models.IotHubReplicaRoleType"]] = None,
**kwargs
):
"""
:keyword location: Azure Geo Regions.
:paramtype location: str
:keyword role: Specific Role assigned to this location. Known values are: "primary" and
"secondary".
:paramtype role: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubReplicaRoleType
"""
super().__init__(**kwargs)
self.location = location
self.role = role
class IotHubNameAvailabilityInfo(_serialization.Model):
"""The properties indicating whether a given IoT hub name is available.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name_available: The value which indicates whether the provided name is available.
:vartype name_available: bool
:ivar reason: The reason for unavailability. Known values are: "Invalid" and "AlreadyExists".
:vartype reason: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubNameUnavailabilityReason
:ivar message: The detailed reason message.
:vartype message: str
"""
_validation = {
"name_available": {"readonly": True},
"reason": {"readonly": True},
}
_attribute_map = {
"name_available": {"key": "nameAvailable", "type": "bool"},
"reason": {"key": "reason", "type": "str"},
"message": {"key": "message", "type": "str"},
}
def __init__(self, *, message: Optional[str] = None, **kwargs):
"""
:keyword message: The detailed reason message.
:paramtype message: str
"""
super().__init__(**kwargs)
self.name_available = None
self.reason = None
self.message = message
class IotHubProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""The properties of an IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar authorization_policies: The shared access policies you can use to secure a connection to
the IoT hub.
:vartype authorization_policies:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.SharedAccessSignatureAuthorizationRule]
:ivar ip_filter_rules: The IP filter rules.
:vartype ip_filter_rules: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IpFilterRule]
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
:ivar state: The hub state.
:vartype state: str
:ivar host_name: The name of the host.
:vartype host_name: str
:ivar event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible keys
to this dictionary is events. This key has to be present in the dictionary while making create
or update calls for the IoT hub.
:vartype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.EventHubProperties]
:ivar routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:vartype routing: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingProperties
:ivar storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True,
causes an error to be thrown.
:vartype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.StorageEndpointProperties]
:ivar messaging_endpoints: The messaging endpoint properties for the file upload notification
queue.
:vartype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.MessagingEndpointProperties]
:ivar enable_file_upload_notifications: If True, file upload notifications are enabled.
:vartype enable_file_upload_notifications: bool
:ivar cloud_to_device: The IoT hub cloud-to-device messaging properties.
:vartype cloud_to_device: ~azure.mgmt.iothub.v2019_07_01_preview.models.CloudToDeviceProperties
:ivar comments: IoT hub comments.
:vartype comments: str
:ivar device_streams: The device streams properties of iothub.
:vartype device_streams:
~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubPropertiesDeviceStreams
:ivar features: The capabilities and features enabled for the IoT hub. Known values are: "None"
and "DeviceManagement".
:vartype features: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.Capabilities
:ivar locations: Primary and secondary location for iot hub.
:vartype locations:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubLocationDescription]
"""
_validation = {
"provisioning_state": {"readonly": True},
"state": {"readonly": True},
"host_name": {"readonly": True},
"locations": {"readonly": True},
}
_attribute_map = {
"authorization_policies": {"key": "authorizationPolicies", "type": "[SharedAccessSignatureAuthorizationRule]"},
"ip_filter_rules": {"key": "ipFilterRules", "type": "[IpFilterRule]"},
"provisioning_state": {"key": "provisioningState", "type": "str"},
"state": {"key": "state", "type": "str"},
"host_name": {"key": "hostName", "type": "str"},
"event_hub_endpoints": {"key": "eventHubEndpoints", "type": "{EventHubProperties}"},
"routing": {"key": "routing", "type": "RoutingProperties"},
"storage_endpoints": {"key": "storageEndpoints", "type": "{StorageEndpointProperties}"},
"messaging_endpoints": {"key": "messagingEndpoints", "type": "{MessagingEndpointProperties}"},
"enable_file_upload_notifications": {"key": "enableFileUploadNotifications", "type": "bool"},
"cloud_to_device": {"key": "cloudToDevice", "type": "CloudToDeviceProperties"},
"comments": {"key": "comments", "type": "str"},
"device_streams": {"key": "deviceStreams", "type": "IotHubPropertiesDeviceStreams"},
"features": {"key": "features", "type": "str"},
"locations": {"key": "locations", "type": "[IotHubLocationDescription]"},
}
def __init__(
self,
*,
authorization_policies: Optional[List["_models.SharedAccessSignatureAuthorizationRule"]] = None,
ip_filter_rules: Optional[List["_models.IpFilterRule"]] = None,
event_hub_endpoints: Optional[Dict[str, "_models.EventHubProperties"]] = None,
routing: Optional["_models.RoutingProperties"] = None,
storage_endpoints: Optional[Dict[str, "_models.StorageEndpointProperties"]] = None,
messaging_endpoints: Optional[Dict[str, "_models.MessagingEndpointProperties"]] = None,
enable_file_upload_notifications: Optional[bool] = None,
cloud_to_device: Optional["_models.CloudToDeviceProperties"] = None,
comments: Optional[str] = None,
device_streams: Optional["_models.IotHubPropertiesDeviceStreams"] = None,
features: Optional[Union[str, "_models.Capabilities"]] = None,
**kwargs
):
"""
:keyword authorization_policies: The shared access policies you can use to secure a connection
to the IoT hub.
:paramtype authorization_policies:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.SharedAccessSignatureAuthorizationRule]
:keyword ip_filter_rules: The IP filter rules.
:paramtype ip_filter_rules: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IpFilterRule]
:keyword event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible
keys to this dictionary is events. This key has to be present in the dictionary while making
create or update calls for the IoT hub.
:paramtype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.EventHubProperties]
:keyword routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:paramtype routing: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingProperties
:keyword storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True,
causes an error to be thrown.
:paramtype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.StorageEndpointProperties]
:keyword messaging_endpoints: The messaging endpoint properties for the file upload
notification queue.
:paramtype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2019_07_01_preview.models.MessagingEndpointProperties]
:keyword enable_file_upload_notifications: If True, file upload notifications are enabled.
:paramtype enable_file_upload_notifications: bool
:keyword cloud_to_device: The IoT hub cloud-to-device messaging properties.
:paramtype cloud_to_device:
~azure.mgmt.iothub.v2019_07_01_preview.models.CloudToDeviceProperties
:keyword comments: IoT hub comments.
:paramtype comments: str
:keyword device_streams: The device streams properties of iothub.
:paramtype device_streams:
~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubPropertiesDeviceStreams
:keyword features: The capabilities and features enabled for the IoT hub. Known values are:
"None" and "DeviceManagement".
:paramtype features: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.Capabilities
"""
super().__init__(**kwargs)
self.authorization_policies = authorization_policies
self.ip_filter_rules = ip_filter_rules
self.provisioning_state = None
self.state = None
self.host_name = None
self.event_hub_endpoints = event_hub_endpoints
self.routing = routing
self.storage_endpoints = storage_endpoints
self.messaging_endpoints = messaging_endpoints
self.enable_file_upload_notifications = enable_file_upload_notifications
self.cloud_to_device = cloud_to_device
self.comments = comments
self.device_streams = device_streams
self.features = features
self.locations = None
class IotHubPropertiesDeviceStreams(_serialization.Model):
"""The device streams properties of iothub.
:ivar streaming_endpoints: List of Device Streams Endpoints.
:vartype streaming_endpoints: list[str]
"""
_attribute_map = {
"streaming_endpoints": {"key": "streamingEndpoints", "type": "[str]"},
}
def __init__(self, *, streaming_endpoints: Optional[List[str]] = None, **kwargs):
"""
:keyword streaming_endpoints: List of Device Streams Endpoints.
:paramtype streaming_endpoints: list[str]
"""
super().__init__(**kwargs)
self.streaming_endpoints = streaming_endpoints
class IotHubQuotaMetricInfo(_serialization.Model):
"""Quota metrics properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the quota metric.
:vartype name: str
:ivar current_value: The current value for the quota metric.
:vartype current_value: int
:ivar max_value: The maximum value of the quota metric.
:vartype max_value: int
"""
_validation = {
"name": {"readonly": True},
"current_value": {"readonly": True},
"max_value": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"current_value": {"key": "currentValue", "type": "int"},
"max_value": {"key": "maxValue", "type": "int"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.name = None
self.current_value = None
self.max_value = None
class IotHubQuotaMetricInfoListResult(_serialization.Model):
"""The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of quota metrics objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubQuotaMetricInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[IotHubQuotaMetricInfo]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.IotHubQuotaMetricInfo"]] = None, **kwargs):
"""
:keyword value: The array of quota metrics objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubQuotaMetricInfo]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuDescription(_serialization.Model):
"""SKU properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
:ivar sku: The type of the resource. Required.
:vartype sku: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuInfo
:ivar capacity: IotHub capacity. Required.
:vartype capacity: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubCapacity
"""
_validation = {
"resource_type": {"readonly": True},
"sku": {"required": True},
"capacity": {"required": True},
}
_attribute_map = {
"resource_type": {"key": "resourceType", "type": "str"},
"sku": {"key": "sku", "type": "IotHubSkuInfo"},
"capacity": {"key": "capacity", "type": "IotHubCapacity"},
}
def __init__(self, *, sku: "_models.IotHubSkuInfo", capacity: "_models.IotHubCapacity", **kwargs):
"""
:keyword sku: The type of the resource. Required.
:paramtype sku: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuInfo
:keyword capacity: IotHub capacity. Required.
:paramtype capacity: ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubCapacity
"""
super().__init__(**kwargs)
self.resource_type = None
self.sku = sku
self.capacity = capacity
class IotHubSkuDescriptionListResult(_serialization.Model):
"""The JSON-serialized array of IotHubSkuDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubSkuDescription.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[IotHubSkuDescription]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.IotHubSkuDescription"]] = None, **kwargs):
"""
:keyword value: The array of IotHubSkuDescription.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuDescription]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuInfo(_serialization.Model):
"""Information about the SKU of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the SKU. Required. Known values are: "F1", "S1", "S2", "S3", "B1",
"B2", and "B3".
:vartype name: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSku
:ivar tier: The billing tier for the IoT hub. Known values are: "Free", "Standard", and
"Basic".
:vartype tier: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSkuTier
:ivar capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:vartype capacity: int
"""
_validation = {
"name": {"required": True},
"tier": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"tier": {"key": "tier", "type": "str"},
"capacity": {"key": "capacity", "type": "int"},
}
def __init__(self, *, name: Union[str, "_models.IotHubSku"], capacity: Optional[int] = None, **kwargs):
"""
:keyword name: The name of the SKU. Required. Known values are: "F1", "S1", "S2", "S3", "B1",
"B2", and "B3".
:paramtype name: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IotHubSku
:keyword capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:paramtype capacity: int
"""
super().__init__(**kwargs)
self.name = name
self.tier = None
self.capacity = capacity
class IpFilterRule(_serialization.Model):
"""The IP filter rules for the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar filter_name: The name of the IP filter rule. Required.
:vartype filter_name: str
:ivar action: The desired action for requests captured by this rule. Required. Known values
are: "Accept" and "Reject".
:vartype action: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IpFilterActionType
:ivar ip_mask: A string that contains the IP address range in CIDR notation for the rule.
Required.
:vartype ip_mask: str
"""
_validation = {
"filter_name": {"required": True},
"action": {"required": True},
"ip_mask": {"required": True},
}
_attribute_map = {
"filter_name": {"key": "filterName", "type": "str"},
"action": {"key": "action", "type": "str"},
"ip_mask": {"key": "ipMask", "type": "str"},
}
def __init__(self, *, filter_name: str, action: Union[str, "_models.IpFilterActionType"], ip_mask: str, **kwargs):
"""
:keyword filter_name: The name of the IP filter rule. Required.
:paramtype filter_name: str
:keyword action: The desired action for requests captured by this rule. Required. Known values
are: "Accept" and "Reject".
:paramtype action: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.IpFilterActionType
:keyword ip_mask: A string that contains the IP address range in CIDR notation for the rule.
Required.
:paramtype ip_mask: str
"""
super().__init__(**kwargs)
self.filter_name = filter_name
self.action = action
self.ip_mask = ip_mask
class JobResponse(_serialization.Model):
"""The properties of the Job Response object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_id: The job identifier.
:vartype job_id: str
:ivar start_time_utc: The start time of the job.
:vartype start_time_utc: ~datetime.datetime
:ivar end_time_utc: The time the job stopped processing.
:vartype end_time_utc: ~datetime.datetime
:ivar type: The type of the job. Known values are: "unknown", "export", "import", "backup",
"readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration", "rebootDevice",
"factoryResetDevice", and "firmwareUpdate".
:vartype type: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.JobType
:ivar status: The status of the job. Known values are: "unknown", "enqueued", "running",
"completed", "failed", and "cancelled".
:vartype status: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.JobStatus
:ivar failure_reason: If status == failed, this string containing the reason for the failure.
:vartype failure_reason: str
:ivar status_message: The status message for the job.
:vartype status_message: str
:ivar parent_job_id: The job identifier of the parent job, if any.
:vartype parent_job_id: str
"""
_validation = {
"job_id": {"readonly": True},
"start_time_utc": {"readonly": True},
"end_time_utc": {"readonly": True},
"type": {"readonly": True},
"status": {"readonly": True},
"failure_reason": {"readonly": True},
"status_message": {"readonly": True},
"parent_job_id": {"readonly": True},
}
_attribute_map = {
"job_id": {"key": "jobId", "type": "str"},
"start_time_utc": {"key": "startTimeUtc", "type": "rfc-1123"},
"end_time_utc": {"key": "endTimeUtc", "type": "rfc-1123"},
"type": {"key": "type", "type": "str"},
"status": {"key": "status", "type": "str"},
"failure_reason": {"key": "failureReason", "type": "str"},
"status_message": {"key": "statusMessage", "type": "str"},
"parent_job_id": {"key": "parentJobId", "type": "str"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.job_id = None
self.start_time_utc = None
self.end_time_utc = None
self.type = None
self.status = None
self.failure_reason = None
self.status_message = None
self.parent_job_id = None
class JobResponseListResult(_serialization.Model):
"""The JSON-serialized array of JobResponse objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of JobResponse objects.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.JobResponse]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[JobResponse]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.JobResponse"]] = None, **kwargs):
"""
:keyword value: The array of JobResponse objects.
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.JobResponse]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class MatchedRoute(_serialization.Model):
"""Routes that matched.
:ivar properties: Properties of routes that matched.
:vartype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties
"""
_attribute_map = {
"properties": {"key": "properties", "type": "RouteProperties"},
}
def __init__(self, *, properties: Optional["_models.RouteProperties"] = None, **kwargs):
"""
:keyword properties: Properties of routes that matched.
:paramtype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties
"""
super().__init__(**kwargs)
self.properties = properties
class MessagingEndpointProperties(_serialization.Model):
"""The properties of the messaging endpoints used by this IoT hub.
:ivar lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype max_delivery_count: int
"""
_validation = {
"max_delivery_count": {"maximum": 100, "minimum": 1},
}
_attribute_map = {
"lock_duration_as_iso8601": {"key": "lockDurationAsIso8601", "type": "duration"},
"ttl_as_iso8601": {"key": "ttlAsIso8601", "type": "duration"},
"max_delivery_count": {"key": "maxDeliveryCount", "type": "int"},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message.
See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype max_delivery_count: int
"""
super().__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
class Name(_serialization.Model):
"""Name of Iot Hub type.
:ivar value: IotHub type.
:vartype value: str
:ivar localized_value: Localized value of name.
:vartype localized_value: str
"""
_attribute_map = {
"value": {"key": "value", "type": "str"},
"localized_value": {"key": "localizedValue", "type": "str"},
}
def __init__(self, *, value: Optional[str] = None, localized_value: Optional[str] = None, **kwargs):
"""
:keyword value: IotHub type.
:paramtype value: str
:keyword localized_value: Localized value of name.
:paramtype localized_value: str
"""
super().__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class Operation(_serialization.Model):
"""IoT Hub REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}.
:vartype name: str
:ivar display: The object that represents the operation.
:vartype display: ~azure.mgmt.iothub.v2019_07_01_preview.models.OperationDisplay
"""
_validation = {
"name": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"display": {"key": "display", "type": "OperationDisplay"},
}
def __init__(self, *, display: Optional["_models.OperationDisplay"] = None, **kwargs):
"""
:keyword display: The object that represents the operation.
:paramtype display: ~azure.mgmt.iothub.v2019_07_01_preview.models.OperationDisplay
"""
super().__init__(**kwargs)
self.name = None
self.display = display
class OperationDisplay(_serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft Devices.
:vartype provider: str
:ivar resource: Resource Type: IotHubs.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
:ivar description: Description of the operation.
:vartype description: str
"""
_validation = {
"provider": {"readonly": True},
"resource": {"readonly": True},
"operation": {"readonly": True},
"description": {"readonly": True},
}
_attribute_map = {
"provider": {"key": "provider", "type": "str"},
"resource": {"key": "resource", "type": "str"},
"operation": {"key": "operation", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationInputs(_serialization.Model):
"""Input values.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the IoT hub to check. Required.
:vartype name: str
"""
_validation = {
"name": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
}
def __init__(self, *, name: str, **kwargs):
"""
:keyword name: The name of the IoT hub to check. Required.
:paramtype name: str
"""
super().__init__(**kwargs)
self.name = name
class OperationListResult(_serialization.Model):
"""Result of the request to list IoT Hub operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider.
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
"value": {"readonly": True},
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[Operation]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.value = None
self.next_link = None
class RegistryStatistics(_serialization.Model):
"""Identity registry statistics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_device_count: The total count of devices in the identity registry.
:vartype total_device_count: int
:ivar enabled_device_count: The count of enabled devices in the identity registry.
:vartype enabled_device_count: int
:ivar disabled_device_count: The count of disabled devices in the identity registry.
:vartype disabled_device_count: int
"""
_validation = {
"total_device_count": {"readonly": True},
"enabled_device_count": {"readonly": True},
"disabled_device_count": {"readonly": True},
}
_attribute_map = {
"total_device_count": {"key": "totalDeviceCount", "type": "int"},
"enabled_device_count": {"key": "enabledDeviceCount", "type": "int"},
"disabled_device_count": {"key": "disabledDeviceCount", "type": "int"},
}
def __init__(self, **kwargs):
""" """
super().__init__(**kwargs)
self.total_device_count = None
self.enabled_device_count = None
self.disabled_device_count = None
class RouteCompilationError(_serialization.Model):
"""Compilation error when evaluating route.
:ivar message: Route error message.
:vartype message: str
:ivar severity: Severity of the route error. Known values are: "error" and "warning".
:vartype severity: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorSeverity
:ivar location: Location where the route error happened.
:vartype location: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorRange
"""
_attribute_map = {
"message": {"key": "message", "type": "str"},
"severity": {"key": "severity", "type": "str"},
"location": {"key": "location", "type": "RouteErrorRange"},
}
def __init__(
self,
*,
message: Optional[str] = None,
severity: Optional[Union[str, "_models.RouteErrorSeverity"]] = None,
location: Optional["_models.RouteErrorRange"] = None,
**kwargs
):
"""
:keyword message: Route error message.
:paramtype message: str
:keyword severity: Severity of the route error. Known values are: "error" and "warning".
:paramtype severity: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorSeverity
:keyword location: Location where the route error happened.
:paramtype location: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorRange
"""
super().__init__(**kwargs)
self.message = message
self.severity = severity
self.location = location
class RouteErrorPosition(_serialization.Model):
"""Position where the route error happened.
:ivar line: Line where the route error happened.
:vartype line: int
:ivar column: Column where the route error happened.
:vartype column: int
"""
_attribute_map = {
"line": {"key": "line", "type": "int"},
"column": {"key": "column", "type": "int"},
}
def __init__(self, *, line: Optional[int] = None, column: Optional[int] = None, **kwargs):
"""
:keyword line: Line where the route error happened.
:paramtype line: int
:keyword column: Column where the route error happened.
:paramtype column: int
"""
super().__init__(**kwargs)
self.line = line
self.column = column
class RouteErrorRange(_serialization.Model):
"""Range of route errors.
:ivar start: Start where the route error happened.
:vartype start: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorPosition
:ivar end: End where the route error happened.
:vartype end: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorPosition
"""
_attribute_map = {
"start": {"key": "start", "type": "RouteErrorPosition"},
"end": {"key": "end", "type": "RouteErrorPosition"},
}
def __init__(
self,
*,
start: Optional["_models.RouteErrorPosition"] = None,
end: Optional["_models.RouteErrorPosition"] = None,
**kwargs
):
"""
:keyword start: Start where the route error happened.
:paramtype start: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorPosition
:keyword end: End where the route error happened.
:paramtype end: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteErrorPosition
"""
super().__init__(**kwargs)
self.start = start
self.end = end
class RouteProperties(_serialization.Model):
"""The properties of a routing rule that your IoT hub uses to route messages to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the route. The name can only include alphanumeric characters, periods,
underscores, hyphens, has a maximum length of 64 characters, and must be unique. Required.
:vartype name: str
:ivar source: The source that the routing rule is to be applied to, such as DeviceMessages.
Required. Known values are: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and "DigitalTwinChangeEvents".
:vartype source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:ivar condition: The condition that is evaluated to apply the routing rule. If no condition is
provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: The list of endpoints to which messages that satisfy the condition are
routed. Currently only one endpoint is allowed. Required.
:vartype endpoint_names: list[str]
:ivar is_enabled: Used to specify whether a route is enabled. Required.
:vartype is_enabled: bool
"""
_validation = {
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
"source": {"required": True},
"endpoint_names": {"required": True, "max_items": 1, "min_items": 1},
"is_enabled": {"required": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"source": {"key": "source", "type": "str"},
"condition": {"key": "condition", "type": "str"},
"endpoint_names": {"key": "endpointNames", "type": "[str]"},
"is_enabled": {"key": "isEnabled", "type": "bool"},
}
def __init__(
self,
*,
name: str,
source: Union[str, "_models.RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
condition: Optional[str] = None,
**kwargs
):
"""
:keyword name: The name of the route. The name can only include alphanumeric characters,
periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
Required.
:paramtype name: str
:keyword source: The source that the routing rule is to be applied to, such as DeviceMessages.
Required. Known values are: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and "DigitalTwinChangeEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:keyword condition: The condition that is evaluated to apply the routing rule. If no condition
is provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: The list of endpoints to which messages that satisfy the condition are
routed. Currently only one endpoint is allowed. Required.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Used to specify whether a route is enabled. Required.
:paramtype is_enabled: bool
"""
super().__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
class RoutingEndpoints(_serialization.Model):
"""The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:ivar service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_queues:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingServiceBusQueueEndpointProperties]
:ivar service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_topics:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingServiceBusTopicEndpointProperties]
:ivar event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:vartype event_hubs:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingEventHubProperties]
:ivar storage_containers: The list of storage container endpoints that IoT hub routes messages
to, based on the routing rules.
:vartype storage_containers:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingStorageContainerProperties]
"""
_attribute_map = {
"service_bus_queues": {"key": "serviceBusQueues", "type": "[RoutingServiceBusQueueEndpointProperties]"},
"service_bus_topics": {"key": "serviceBusTopics", "type": "[RoutingServiceBusTopicEndpointProperties]"},
"event_hubs": {"key": "eventHubs", "type": "[RoutingEventHubProperties]"},
"storage_containers": {"key": "storageContainers", "type": "[RoutingStorageContainerProperties]"},
}
def __init__(
self,
*,
service_bus_queues: Optional[List["_models.RoutingServiceBusQueueEndpointProperties"]] = None,
service_bus_topics: Optional[List["_models.RoutingServiceBusTopicEndpointProperties"]] = None,
event_hubs: Optional[List["_models.RoutingEventHubProperties"]] = None,
storage_containers: Optional[List["_models.RoutingStorageContainerProperties"]] = None,
**kwargs
):
"""
:keyword service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:paramtype service_bus_queues:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingServiceBusQueueEndpointProperties]
:keyword service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes
the messages to, based on the routing rules.
:paramtype service_bus_topics:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingServiceBusTopicEndpointProperties]
:keyword event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:paramtype event_hubs:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingEventHubProperties]
:keyword storage_containers: The list of storage container endpoints that IoT hub routes
messages to, based on the routing rules.
:paramtype storage_containers:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingStorageContainerProperties]
"""
super().__init__(**kwargs)
self.service_bus_queues = service_bus_queues
self.service_bus_topics = service_bus_topics
self.event_hubs = event_hubs
self.storage_containers = storage_containers
class RoutingEventHubProperties(_serialization.Model):
"""The properties related to an event hub endpoint.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: The connection string of the event hub endpoint. Required.
:vartype connection_string: str
:ivar name: The name that identifies this endpoint. The name can only include alphanumeric
characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. Required.
:vartype name: str
:ivar subscription_id: The subscription identifier of the event hub endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the event hub endpoint.
:vartype resource_group: str
"""
_validation = {
"connection_string": {"required": True},
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"name": {"key": "name", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group": {"key": "resourceGroup", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword connection_string: The connection string of the event hub endpoint. Required.
:paramtype connection_string: str
:keyword name: The name that identifies this endpoint. The name can only include alphanumeric
characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. Required.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the event hub endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the event hub endpoint.
:paramtype resource_group: str
"""
super().__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingMessage(_serialization.Model):
"""Routing message.
:ivar body: Body of routing message.
:vartype body: str
:ivar app_properties: App properties.
:vartype app_properties: dict[str, str]
:ivar system_properties: System properties.
:vartype system_properties: dict[str, str]
"""
_attribute_map = {
"body": {"key": "body", "type": "str"},
"app_properties": {"key": "appProperties", "type": "{str}"},
"system_properties": {"key": "systemProperties", "type": "{str}"},
}
def __init__(
self,
*,
body: Optional[str] = None,
app_properties: Optional[Dict[str, str]] = None,
system_properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword body: Body of routing message.
:paramtype body: str
:keyword app_properties: App properties.
:paramtype app_properties: dict[str, str]
:keyword system_properties: System properties.
:paramtype system_properties: dict[str, str]
"""
super().__init__(**kwargs)
self.body = body
self.app_properties = app_properties
self.system_properties = system_properties
class RoutingProperties(_serialization.Model):
"""The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:ivar endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:vartype endpoints: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingEndpoints
:ivar routes: The list of user-provided routing rules that the IoT hub uses to route messages
to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and
a maximum of 5 routing rules are allowed for free hubs.
:vartype routes: list[~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties]
:ivar fallback_route: The properties of the route that is used as a fall-back route when none
of the conditions specified in the 'routes' section are met. This is an optional parameter.
When this property is not set, the messages which do not meet any of the conditions specified
in the 'routes' section get routed to the built-in eventhub endpoint.
:vartype fallback_route: ~azure.mgmt.iothub.v2019_07_01_preview.models.FallbackRouteProperties
:ivar enrichments: The list of user-provided enrichments that the IoT hub applies to messages
to be delivered to built-in and custom endpoints. See: https://aka.ms/iotmsgenrich.
:vartype enrichments: list[~azure.mgmt.iothub.v2019_07_01_preview.models.EnrichmentProperties]
"""
_attribute_map = {
"endpoints": {"key": "endpoints", "type": "RoutingEndpoints"},
"routes": {"key": "routes", "type": "[RouteProperties]"},
"fallback_route": {"key": "fallbackRoute", "type": "FallbackRouteProperties"},
"enrichments": {"key": "enrichments", "type": "[EnrichmentProperties]"},
}
def __init__(
self,
*,
endpoints: Optional["_models.RoutingEndpoints"] = None,
routes: Optional[List["_models.RouteProperties"]] = None,
fallback_route: Optional["_models.FallbackRouteProperties"] = None,
enrichments: Optional[List["_models.EnrichmentProperties"]] = None,
**kwargs
):
"""
:keyword endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:paramtype endpoints: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingEndpoints
:keyword routes: The list of user-provided routing rules that the IoT hub uses to route
messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid
hubs and a maximum of 5 routing rules are allowed for free hubs.
:paramtype routes: list[~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties]
:keyword fallback_route: The properties of the route that is used as a fall-back route when
none of the conditions specified in the 'routes' section are met. This is an optional
parameter. When this property is not set, the messages which do not meet any of the conditions
specified in the 'routes' section get routed to the built-in eventhub endpoint.
:paramtype fallback_route:
~azure.mgmt.iothub.v2019_07_01_preview.models.FallbackRouteProperties
:keyword enrichments: The list of user-provided enrichments that the IoT hub applies to
messages to be delivered to built-in and custom endpoints. See: https://aka.ms/iotmsgenrich.
:paramtype enrichments:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.EnrichmentProperties]
"""
super().__init__(**kwargs)
self.endpoints = endpoints
self.routes = routes
self.fallback_route = fallback_route
self.enrichments = enrichments
class RoutingServiceBusQueueEndpointProperties(_serialization.Model):
"""The properties related to service bus queue endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: The connection string of the service bus queue endpoint. Required.
:vartype connection_string: str
:ivar name: The name that identifies this endpoint. The name can only include alphanumeric
characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. The name need not be the same as the actual queue name. Required.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus queue endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus queue endpoint.
:vartype resource_group: str
"""
_validation = {
"connection_string": {"required": True},
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"name": {"key": "name", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group": {"key": "resourceGroup", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword connection_string: The connection string of the service bus queue endpoint. Required.
:paramtype connection_string: str
:keyword name: The name that identifies this endpoint. The name can only include alphanumeric
characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. The name need not be the same as the actual queue name. Required.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus queue endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus queue endpoint.
:paramtype resource_group: str
"""
super().__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingServiceBusTopicEndpointProperties(_serialization.Model):
"""The properties related to service bus topic endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: The connection string of the service bus topic endpoint. Required.
:vartype connection_string: str
:ivar name: The name that identifies this endpoint. The name can only include alphanumeric
characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. The name need not be the same as the actual topic name.
Required.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus topic endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus topic endpoint.
:vartype resource_group: str
"""
_validation = {
"connection_string": {"required": True},
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"name": {"key": "name", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group": {"key": "resourceGroup", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword connection_string: The connection string of the service bus topic endpoint. Required.
:paramtype connection_string: str
:keyword name: The name that identifies this endpoint. The name can only include alphanumeric
characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. The name need not be the same as the actual topic name.
Required.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus topic endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus topic endpoint.
:paramtype resource_group: str
"""
super().__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingStorageContainerProperties(_serialization.Model):
"""The properties related to a storage container endpoint.
All required parameters must be populated in order to send to Azure.
:ivar connection_string: The connection string of the storage account. Required.
:vartype connection_string: str
:ivar name: The name that identifies this endpoint. The name can only include alphanumeric
characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. Required.
:vartype name: str
:ivar subscription_id: The subscription identifier of the storage account.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the storage account.
:vartype resource_group: str
:ivar container_name: The name of storage container in the storage account. Required.
:vartype container_name: str
:ivar file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:vartype file_name_format: str
:ivar batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:vartype batch_frequency_in_seconds: int
:ivar max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value
should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:vartype max_chunk_size_in_bytes: int
:ivar encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Known values are: "Avro",
"AvroDeflate", and "JSON".
:vartype encoding: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingStorageContainerPropertiesEncoding
"""
_validation = {
"connection_string": {"required": True},
"name": {"required": True, "pattern": r"^[A-Za-z0-9-._]{1,64}$"},
"container_name": {"required": True},
"batch_frequency_in_seconds": {"maximum": 720, "minimum": 60},
"max_chunk_size_in_bytes": {"maximum": 524288000, "minimum": 10485760},
}
_attribute_map = {
"connection_string": {"key": "connectionString", "type": "str"},
"name": {"key": "name", "type": "str"},
"subscription_id": {"key": "subscriptionId", "type": "str"},
"resource_group": {"key": "resourceGroup", "type": "str"},
"container_name": {"key": "containerName", "type": "str"},
"file_name_format": {"key": "fileNameFormat", "type": "str"},
"batch_frequency_in_seconds": {"key": "batchFrequencyInSeconds", "type": "int"},
"max_chunk_size_in_bytes": {"key": "maxChunkSizeInBytes", "type": "int"},
"encoding": {"key": "encoding", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
name: str,
container_name: str,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
file_name_format: Optional[str] = None,
batch_frequency_in_seconds: Optional[int] = None,
max_chunk_size_in_bytes: Optional[int] = None,
encoding: Optional[Union[str, "_models.RoutingStorageContainerPropertiesEncoding"]] = None,
**kwargs
):
"""
:keyword connection_string: The connection string of the storage account. Required.
:paramtype connection_string: str
:keyword name: The name that identifies this endpoint. The name can only include alphanumeric
characters, periods, underscores, hyphens and has a maximum length of 64 characters. The
following names are reserved: events, fileNotifications, $default. Endpoint names must be
unique across endpoint types. Required.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the storage account.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the storage account.
:paramtype resource_group: str
:keyword container_name: The name of storage container in the storage account. Required.
:paramtype container_name: str
:keyword file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:paramtype file_name_format: str
:keyword batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:paramtype batch_frequency_in_seconds: int
:keyword max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage.
Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:paramtype max_chunk_size_in_bytes: int
:keyword encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Known values are: "Avro",
"AvroDeflate", and "JSON".
:paramtype encoding: str or
~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingStorageContainerPropertiesEncoding
"""
super().__init__(**kwargs)
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
self.container_name = container_name
self.file_name_format = file_name_format
self.batch_frequency_in_seconds = batch_frequency_in_seconds
self.max_chunk_size_in_bytes = max_chunk_size_in_bytes
self.encoding = encoding
class RoutingTwin(_serialization.Model):
"""Twin reference input parameter. This is an optional parameter.
:ivar tags: Twin Tags.
:vartype tags: JSON
:ivar properties:
:vartype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwinProperties
"""
_attribute_map = {
"tags": {"key": "tags", "type": "object"},
"properties": {"key": "properties", "type": "RoutingTwinProperties"},
}
def __init__(
self, *, tags: Optional[JSON] = None, properties: Optional["_models.RoutingTwinProperties"] = None, **kwargs
):
"""
:keyword tags: Twin Tags.
:paramtype tags: JSON
:keyword properties:
:paramtype properties: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwinProperties
"""
super().__init__(**kwargs)
self.tags = tags
self.properties = properties
class RoutingTwinProperties(_serialization.Model):
"""RoutingTwinProperties.
:ivar desired: Twin desired properties.
:vartype desired: JSON
:ivar reported: Twin desired properties.
:vartype reported: JSON
"""
_attribute_map = {
"desired": {"key": "desired", "type": "object"},
"reported": {"key": "reported", "type": "object"},
}
def __init__(self, *, desired: Optional[JSON] = None, reported: Optional[JSON] = None, **kwargs):
"""
:keyword desired: Twin desired properties.
:paramtype desired: JSON
:keyword reported: Twin desired properties.
:paramtype reported: JSON
"""
super().__init__(**kwargs)
self.desired = desired
self.reported = reported
class SharedAccessSignatureAuthorizationRule(_serialization.Model):
"""The properties of an IoT hub shared access policy.
All required parameters must be populated in order to send to Azure.
:ivar key_name: The name of the shared access policy. Required.
:vartype key_name: str
:ivar primary_key: The primary key.
:vartype primary_key: str
:ivar secondary_key: The secondary key.
:vartype secondary_key: str
:ivar rights: The permissions assigned to the shared access policy. Required. Known values are:
"RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
and "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:vartype rights: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.AccessRights
"""
_validation = {
"key_name": {"required": True},
"rights": {"required": True},
}
_attribute_map = {
"key_name": {"key": "keyName", "type": "str"},
"primary_key": {"key": "primaryKey", "type": "str"},
"secondary_key": {"key": "secondaryKey", "type": "str"},
"rights": {"key": "rights", "type": "str"},
}
def __init__(
self,
*,
key_name: str,
rights: Union[str, "_models.AccessRights"],
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None,
**kwargs
):
"""
:keyword key_name: The name of the shared access policy. Required.
:paramtype key_name: str
:keyword primary_key: The primary key.
:paramtype primary_key: str
:keyword secondary_key: The secondary key.
:paramtype secondary_key: str
:keyword rights: The permissions assigned to the shared access policy. Required. Known values
are: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
and "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:paramtype rights: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.AccessRights
"""
super().__init__(**kwargs)
self.key_name = key_name
self.primary_key = primary_key
self.secondary_key = secondary_key
self.rights = rights
class SharedAccessSignatureAuthorizationRuleListResult(_serialization.Model):
"""The list of shared access policies with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of shared access policies.
:vartype value:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.SharedAccessSignatureAuthorizationRule]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[SharedAccessSignatureAuthorizationRule]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.SharedAccessSignatureAuthorizationRule"]] = None, **kwargs):
"""
:keyword value: The list of shared access policies.
:paramtype value:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.SharedAccessSignatureAuthorizationRule]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class StorageEndpointProperties(_serialization.Model):
"""The properties of the Azure Storage endpoint for file upload.
All required parameters must be populated in order to send to Azure.
:ivar sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:vartype sas_ttl_as_iso8601: ~datetime.timedelta
:ivar connection_string: The connection string for the Azure Storage account to which files are
uploaded. Required.
:vartype connection_string: str
:ivar container_name: The name of the root container where you upload files. The container need
not exist but should be creatable using the connectionString specified. Required.
:vartype container_name: str
"""
_validation = {
"connection_string": {"required": True},
"container_name": {"required": True},
}
_attribute_map = {
"sas_ttl_as_iso8601": {"key": "sasTtlAsIso8601", "type": "duration"},
"connection_string": {"key": "connectionString", "type": "str"},
"container_name": {"key": "containerName", "type": "str"},
}
def __init__(
self,
*,
connection_string: str,
container_name: str,
sas_ttl_as_iso8601: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:paramtype sas_ttl_as_iso8601: ~datetime.timedelta
:keyword connection_string: The connection string for the Azure Storage account to which files
are uploaded. Required.
:paramtype connection_string: str
:keyword container_name: The name of the root container where you upload files. The container
need not exist but should be creatable using the connectionString specified. Required.
:paramtype container_name: str
"""
super().__init__(**kwargs)
self.sas_ttl_as_iso8601 = sas_ttl_as_iso8601
self.connection_string = connection_string
self.container_name = container_name
class TagsResource(_serialization.Model):
"""A container holding only the Tags for a resource, allowing the user to update the tags on an IoT Hub instance.
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.tags = tags
class TestAllRoutesInput(_serialization.Model):
"""Input for testing all routes.
:ivar routing_source: Routing source. Known values are: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and
"DigitalTwinChangeEvents".
:vartype routing_source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingMessage
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwin
"""
_attribute_map = {
"routing_source": {"key": "routingSource", "type": "str"},
"message": {"key": "message", "type": "RoutingMessage"},
"twin": {"key": "twin", "type": "RoutingTwin"},
}
def __init__(
self,
*,
routing_source: Optional[Union[str, "_models.RoutingSource"]] = None,
message: Optional["_models.RoutingMessage"] = None,
twin: Optional["_models.RoutingTwin"] = None,
**kwargs
):
"""
:keyword routing_source: Routing source. Known values are: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents", and
"DigitalTwinChangeEvents".
:paramtype routing_source: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingSource
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingMessage
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwin
"""
super().__init__(**kwargs)
self.routing_source = routing_source
self.message = message
self.twin = twin
class TestAllRoutesResult(_serialization.Model):
"""Result of testing all routes.
:ivar routes: JSON-serialized array of matched routes.
:vartype routes: list[~azure.mgmt.iothub.v2019_07_01_preview.models.MatchedRoute]
"""
_attribute_map = {
"routes": {"key": "routes", "type": "[MatchedRoute]"},
}
def __init__(self, *, routes: Optional[List["_models.MatchedRoute"]] = None, **kwargs):
"""
:keyword routes: JSON-serialized array of matched routes.
:paramtype routes: list[~azure.mgmt.iothub.v2019_07_01_preview.models.MatchedRoute]
"""
super().__init__(**kwargs)
self.routes = routes
class TestRouteInput(_serialization.Model):
"""Input for testing route.
All required parameters must be populated in order to send to Azure.
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingMessage
:ivar route: Route properties. Required.
:vartype route: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwin
"""
_validation = {
"route": {"required": True},
}
_attribute_map = {
"message": {"key": "message", "type": "RoutingMessage"},
"route": {"key": "route", "type": "RouteProperties"},
"twin": {"key": "twin", "type": "RoutingTwin"},
}
def __init__(
self,
*,
route: "_models.RouteProperties",
message: Optional["_models.RoutingMessage"] = None,
twin: Optional["_models.RoutingTwin"] = None,
**kwargs
):
"""
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingMessage
:keyword route: Route properties. Required.
:paramtype route: ~azure.mgmt.iothub.v2019_07_01_preview.models.RouteProperties
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2019_07_01_preview.models.RoutingTwin
"""
super().__init__(**kwargs)
self.message = message
self.route = route
self.twin = twin
class TestRouteResult(_serialization.Model):
"""Result of testing one route.
:ivar result: Result of testing route. Known values are: "undefined", "false", and "true".
:vartype result: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.TestResultStatus
:ivar details: Detailed result of testing route.
:vartype details: ~azure.mgmt.iothub.v2019_07_01_preview.models.TestRouteResultDetails
"""
_attribute_map = {
"result": {"key": "result", "type": "str"},
"details": {"key": "details", "type": "TestRouteResultDetails"},
}
def __init__(
self,
*,
result: Optional[Union[str, "_models.TestResultStatus"]] = None,
details: Optional["_models.TestRouteResultDetails"] = None,
**kwargs
):
"""
:keyword result: Result of testing route. Known values are: "undefined", "false", and "true".
:paramtype result: str or ~azure.mgmt.iothub.v2019_07_01_preview.models.TestResultStatus
:keyword details: Detailed result of testing route.
:paramtype details: ~azure.mgmt.iothub.v2019_07_01_preview.models.TestRouteResultDetails
"""
super().__init__(**kwargs)
self.result = result
self.details = details
class TestRouteResultDetails(_serialization.Model):
"""Detailed result of testing a route.
:ivar compilation_errors: JSON-serialized list of route compilation errors.
:vartype compilation_errors:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RouteCompilationError]
"""
_attribute_map = {
"compilation_errors": {"key": "compilationErrors", "type": "[RouteCompilationError]"},
}
def __init__(self, *, compilation_errors: Optional[List["_models.RouteCompilationError"]] = None, **kwargs):
"""
:keyword compilation_errors: JSON-serialized list of route compilation errors.
:paramtype compilation_errors:
list[~azure.mgmt.iothub.v2019_07_01_preview.models.RouteCompilationError]
"""
super().__init__(**kwargs)
self.compilation_errors = compilation_errors
class UserSubscriptionQuota(_serialization.Model):
"""User subscription quota response.
:ivar id: IotHub type id.
:vartype id: str
:ivar type: Response type.
:vartype type: str
:ivar unit: Unit of IotHub type.
:vartype unit: str
:ivar current_value: Current number of IotHub type.
:vartype current_value: int
:ivar limit: Numerical limit on IotHub type.
:vartype limit: int
:ivar name: IotHub type.
:vartype name: ~azure.mgmt.iothub.v2019_07_01_preview.models.Name
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"type": {"key": "type", "type": "str"},
"unit": {"key": "unit", "type": "str"},
"current_value": {"key": "currentValue", "type": "int"},
"limit": {"key": "limit", "type": "int"},
"name": {"key": "name", "type": "Name"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
type: Optional[str] = None,
unit: Optional[str] = None,
current_value: Optional[int] = None,
limit: Optional[int] = None,
name: Optional["_models.Name"] = None,
**kwargs
):
"""
:keyword id: IotHub type id.
:paramtype id: str
:keyword type: Response type.
:paramtype type: str
:keyword unit: Unit of IotHub type.
:paramtype unit: str
:keyword current_value: Current number of IotHub type.
:paramtype current_value: int
:keyword limit: Numerical limit on IotHub type.
:paramtype limit: int
:keyword name: IotHub type.
:paramtype name: ~azure.mgmt.iothub.v2019_07_01_preview.models.Name
"""
super().__init__(**kwargs)
self.id = id
self.type = type
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
class UserSubscriptionQuotaListResult(_serialization.Model):
"""Json-serialized array of User subscription quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.UserSubscriptionQuota]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[UserSubscriptionQuota]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.UserSubscriptionQuota"]] = None, **kwargs):
"""
:keyword value:
:paramtype value: list[~azure.mgmt.iothub.v2019_07_01_preview.models.UserSubscriptionQuota]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
|
{
"content_hash": "5f6afca5787b50a34b086dcf1c1f5760",
"timestamp": "",
"source": "github",
"line_count": 2952,
"max_line_length": 283,
"avg_line_length": 41.073170731707314,
"alnum_prop": 0.6441838215888097,
"repo_name": "Azure/azure-sdk-for-python",
"id": "62019aca48610c3b44cf6b705cc2b1498bced00b",
"size": "121749",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_07_01_preview/models/_models_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Provides functions for computing minors of a graph."""
from itertools import chain
from itertools import combinations
from itertools import permutations
from itertools import product
import networkx as nx
from networkx import density
from networkx.exception import NetworkXException
from networkx.utils import arbitrary_element
__all__ = ['contracted_edge', 'contracted_nodes',
'identified_nodes', 'quotient_graph', 'blockmodel']
chaini = chain.from_iterable
def equivalence_classes(iterable, relation):
"""Returns the set of equivalence classes of the given ``iterable`` under
the specified equivalence relation.
``relation`` must be a Boolean-valued function that takes two argument. It
must represent an equivalence relation (that is, the relation induced by
the function must be reflexive, symmetric, and transitive).
The return value is a set of sets. It is a partition of the elements of
``iterable``; duplicate elements will be ignored so it makes the most sense
for ``iterable`` to be a :class:`set`.
"""
# For simplicity of implementation, we initialize the return value as a
# list of lists, then convert it to a set of sets at the end of the
# function.
blocks = []
# Determine the equivalence class for each element of the iterable.
for y in iterable:
# Each element y must be in *exactly one* equivalence class.
#
# Each block is guaranteed to be non-empty
for block in blocks:
x = arbitrary_element(block)
if relation(x, y):
block.append(y)
break
else:
# If the element y is not part of any known equivalence class, it
# must be in its own, so we create a new singleton equivalence
# class for it.
blocks.append([y])
return {frozenset(block) for block in blocks}
def quotient_graph(G, partition, edge_relation=None, node_data=None,
edge_data=None, relabel=False, create_using=None):
"""Returns the quotient graph of ``G`` under the specified equivalence
relation on nodes.
Parameters
----------
G : NetworkX graph
The graph for which to return the quotient graph with the
specified node relation.
partition : function or list of sets
If a function, this function must represent an equivalence
relation on the nodes of ``G``. It must take two arguments *u*
and *v* and return ``True`` exactly when *u* and *v* are in the
same equivalence class. The equivalence classes form the nodes
in the returned graph.
If a list of sets, the list must form a valid partition of
the nodes of the graph. That is, each node must be in exactly
one block of the partition.
edge_relation : Boolean function with two arguments
This function must represent an edge relation on the *blocks* of
``G`` in the partition induced by ``node_relation``. It must
take two arguments, *B* and *C*, each one a set of nodes, and
return ``True`` exactly when there should be an edge joining
block *B* to block *C* in the returned graph.
If ``edge_relation`` is not specified, it is assumed to be the
following relation. Block *B* is related to block *C* if and
only if some node in *B* is adjacent to some node in *C*,
according to the edge set of ``G``.
edge_data : function
This function takes two arguments, *B* and *C*, each one a set
of nodes, and must return a dictionary representing the edge
data attributes to set on the edge joining *B* and *C*, should
there be an edge joining *B* and *C* in the quotient graph (if
no such edge occurs in the quotient graph as determined by
``edge_relation``, then the output of this function is ignored).
If the quotient graph would be a multigraph, this function is
not applied, since the edge data from each edge in the graph
``G`` appears in the edges of the quotient graph.
node_data : function
This function takes one argument, *B*, a set of nodes in ``G``,
and must return a dictionary representing the node data
attributes to set on the node representing *B* in the quotient graph.
If ``None``, the following node attributes will be set:
* ``'graph'``, the subgraph of the graph ``G`` that this block
represents,
* ``'nnodes'``, the number of nodes in this block,
* ``'nedges'``, the number of edges within this block,
* ``'density'``, the density of the subgraph of ``G`` that this
block represents.
relabel : bool
If ``True``, relabel the nodes of the quotient graph to be
nonnegative integers. Otherwise, the nodes are identified with
:class:`frozenset` instances representing the blocks given in
``partition``.
create_using : NetworkX graph
If specified, this must be an instance of a NetworkX graph
class. The nodes and edges of the quotient graph will be added
to this graph and returned. If not specified, the returned graph
will have the same type as the input graph.
Returns
-------
NetworkX graph
The quotient graph of ``G`` under the equivalence relation
specified by ``partition``. If the partition were given as a
list of :class:`set` instances and ``relabel`` is ``False``,
each node will be a :class:`frozenset` corresponding to the same
:class:`set`.
Raises
------
NetworkXException
If the given partition is not a valid partition of the nodes of
``G``.
Examples
--------
The quotient graph of the complete bipartite graph under the "same
neighbors" equivalence relation is `K_2`. Under this relation, two nodes
are equivalent if they are not adjacent but have the same neighbor set::
>>> import networkx as nx
>>> G = nx.complete_bipartite_graph(2, 3)
>>> same_neighbors = lambda u, v: (u not in G[v] and v not in G[u]
... and G[u] == G[v])
>>> Q = nx.quotient_graph(G, same_neighbors)
>>> K2 = nx.complete_graph(2)
>>> nx.is_isomorphic(Q, K2)
True
The quotient graph of a directed graph under the "same strongly connected
component" equivalence relation is the condensation of the graph (see
:func:`condensation`). This example comes from the Wikipedia article
*`Strongly connected component`_*::
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> edges = ['ab', 'be', 'bf', 'bc', 'cg', 'cd', 'dc', 'dh', 'ea',
... 'ef', 'fg', 'gf', 'hd', 'hf']
>>> G.add_edges_from(tuple(x) for x in edges)
>>> components = list(nx.strongly_connected_components(G))
>>> sorted(sorted(component) for component in components)
[['a', 'b', 'e'], ['c', 'd', 'h'], ['f', 'g']]
>>>
>>> C = nx.condensation(G, components)
>>> component_of = C.graph['mapping']
>>> same_component = lambda u, v: component_of[u] == component_of[v]
>>> Q = nx.quotient_graph(G, same_component)
>>> nx.is_isomorphic(C, Q)
True
Node identification can be represented as the quotient of a graph under the
equivalence relation that places the two nodes in one block and each other
node in its own singleton block::
>>> import networkx as nx
>>> K24 = nx.complete_bipartite_graph(2, 4)
>>> K34 = nx.complete_bipartite_graph(3, 4)
>>> C = nx.contracted_nodes(K34, 1, 2)
>>> nodes = {1, 2}
>>> is_contracted = lambda u, v: u in nodes and v in nodes
>>> Q = nx.quotient_graph(K34, is_contracted)
>>> nx.is_isomorphic(Q, C)
True
>>> nx.is_isomorphic(Q, K24)
True
The blockmodeling technique described in [1]_ can be implemented as a
quotient graph::
>>> G = nx.path_graph(6)
>>> partition = [{0, 1}, {2, 3}, {4, 5}]
>>> M = nx.quotient_graph(G, partition, relabel=True)
>>> list(M.edges())
[(0, 1), (1, 2)]
.. _Strongly connected component: https://en.wikipedia.org/wiki/Strongly_connected_component
References
----------
.. [1] Patrick Doreian, Vladimir Batagelj, and Anuska Ferligoj.
*Generalized Blockmodeling*.
Cambridge University Press, 2004.
"""
# If the user provided an equivalence relation as a function compute
# the blocks of the partition on the nodes of G induced by the
# equivalence relation.
if callable(partition):
partition = equivalence_classes(G, partition)
# Each node in the graph must be in exactly one block.
if any(sum(1 for b in partition if v in b) != 1 for v in G):
raise NetworkXException('each node must be in exactly one block')
H = type(create_using)() if create_using is not None else type(G)()
# By default set some basic information about the subgraph that each block
# represents on the nodes in the quotient graph.
if node_data is None:
def node_data(b):
S = G.subgraph(b)
return dict(graph=S, nnodes=len(S), nedges=S.number_of_edges(),
density=density(S))
# Each block of the partition becomes a node in the quotient graph.
partition = [frozenset(b) for b in partition]
H.add_nodes_from((b, node_data(b)) for b in partition)
# By default, the edge relation is the relation defined as follows. B is
# adjacent to C if a node in B is adjacent to a node in C, according to the
# edge set of G.
#
# This is not a particularly efficient implementation of this relation:
# there are O(n^2) pairs to check and each check may require O(log n) time
# (to check set membership). This can certainly be parallelized.
if edge_relation is None:
def edge_relation(b, c):
return any(v in G[u] for u, v in product(b, c))
# By default, sum the weights of the edges joining pairs of nodes across
# blocks to get the weight of the edge joining those two blocks.
if edge_data is None:
def edge_data(b, c):
edgedata = (d for u, v, d in G.edges(b | c, data=True)
if (u in b and v in c) or (u in c and v in b))
return {'weight': sum(d.get('weight', 1) for d in edgedata)}
block_pairs = permutations(H, 2) if H.is_directed() else combinations(H, 2)
# In a multigraph, add one edge in the quotient graph for each edge
# in the original graph.
if H.is_multigraph():
edges = chaini(((b, c, G.get_edge_data(u, v, default={}))
for u, v in product(b, c) if v in G[u])
for b, c in block_pairs if edge_relation(b, c))
# In a simple graph, apply the edge data function to each pair of
# blocks to determine the edge data attributes to apply to each edge
# in the quotient graph.
else:
edges = ((b, c, edge_data(b, c)) for (b, c) in block_pairs
if edge_relation(b, c))
H.add_edges_from(edges)
# If requested by the user, relabel the nodes to be integers,
# numbered in increasing order from zero in the same order as the
# iteration order of ``partition``.
if relabel:
# Can't use nx.convert_node_labels_to_integers() here since we
# want the order of iteration to be the same for backward
# compatibility with the nx.blockmodel() function.
labels = {b: i for i, b in enumerate(partition)}
H = nx.relabel_nodes(H, labels)
return H
def contracted_nodes(G, u, v, self_loops=True):
"""Returns the graph that results from contracting ``u`` and ``v``.
Node contraction identifies the two nodes as a single node incident to any
edge that was incident to the original two nodes.
Parameters
----------
G : NetworkX graph
The graph whose nodes will be contracted.
u, v : nodes
Must be nodes in ``G``.
self_loops : Boolean
If this is ``True``, any edges joining ``u`` and ``v`` in ``G`` become
self-loops on the new node in the returned graph.
Returns
-------
Networkx graph
A new graph object of the same type as ``G`` (leaving ``G`` unmodified)
with ``u`` and ``v`` identified in a single node. The right node ``v``
will be merged into the node ``u``, so only ``u`` will appear in the
returned graph.
Examples
--------
Contracting two nonadjacent nodes of the cycle graph on four nodes `C_4`
yields the path graph (ignoring parallel edges)::
>>> import networkx as nx
>>> G = nx.cycle_graph(4)
>>> M = nx.contracted_nodes(G, 1, 3)
>>> P3 = nx.path_graph(3)
>>> nx.is_isomorphic(M, P3)
True
See also
--------
contracted_edge
quotient_graph
Notes
-----
This function is also available as ``identified_nodes``.
"""
H = G.copy()
if H.is_directed():
in_edges = ((w, u, d) for w, x, d in G.in_edges(v, data=True)
if self_loops or w != u)
out_edges = ((u, w, d) for x, w, d in G.out_edges(v, data=True)
if self_loops or w != u)
new_edges = chain(in_edges, out_edges)
else:
new_edges = ((u, w, d) for x, w, d in G.edges(v, data=True)
if self_loops or w != u)
v_data = H.node[v]
H.remove_node(v)
H.add_edges_from(new_edges)
if 'contraction' in H.node[u]:
H.node[u]['contraction'][v] = v_data
else:
H.node[u]['contraction'] = {v: v_data}
return H
identified_nodes = contracted_nodes
def contracted_edge(G, edge, self_loops=True):
"""Returns the graph that results from contracting the specified edge.
Edge contraction identifies the two endpoints of the edge as a single node
incident to any edge that was incident to the original two nodes. A graph
that results from edge contraction is called a *minor* of the original
graph.
Parameters
----------
G : NetworkX graph
The graph whose edge will be contracted.
edge : tuple
Must be a pair of nodes in ``G``.
self_loops : Boolean
If this is ``True``, any edges (including ``edge``) joining the
endpoints of ``edge`` in ``G`` become self-loops on the new node in the
returned graph.
Returns
-------
Networkx graph
A new graph object of the same type as ``G`` (leaving ``G`` unmodified)
with endpoints of ``edge`` identified in a single node. The right node
of ``edge`` will be merged into the left one, so only the left one will
appear in the returned graph.
Raises
------
ValueError
If ``edge`` is not an edge in ``G``.
Examples
--------
Attempting to contract two nonadjacent nodes yields an error::
>>> import networkx as nx
>>> G = nx.cycle_graph(4)
>>> nx.contracted_edge(G, (1, 3))
Traceback (most recent call last):
...
ValueError: Edge (1, 3) does not exist in graph G; cannot contract it
Contracting two adjacent nodes in the cycle graph on *n* nodes yields the
cycle graph on *n - 1* nodes::
>>> import networkx as nx
>>> C5 = nx.cycle_graph(5)
>>> C4 = nx.cycle_graph(4)
>>> M = nx.contracted_edge(C5, (0, 1), self_loops=False)
>>> nx.is_isomorphic(M, C4)
True
See also
--------
contracted_nodes
quotient_graph
"""
if not G.has_edge(*edge):
raise ValueError('Edge {0} does not exist in graph G; cannot contract'
' it'.format(edge))
return contracted_nodes(G, *edge, self_loops=self_loops)
def blockmodel(G, partition, multigraph=False):
"""Returns a reduced graph constructed using the generalized block modeling
technique.
The blockmodel technique collapses nodes into blocks based on a
given partitioning of the node set. Each partition of nodes
(block) is represented as a single node in the reduced graph.
Edges between nodes in the block graph are added according to the
edges in the original graph. If the parameter multigraph is False
(the default) a single edge is added with a weight equal to the
sum of the edge weights between nodes in the original graph
The default is a weight of 1 if weights are not specified. If the
parameter multigraph is True then multiple edges are added each
with the edge data from the original graph.
Parameters
----------
G : graph
A networkx Graph or DiGraph
partition : list of lists, or list of sets
The partition of the nodes. Must be non-overlapping.
multigraph : bool, optional
If True return a MultiGraph with the edge data of the original
graph applied to each corresponding edge in the new graph.
If False return a Graph with the sum of the edge weights, or a
count of the edges if the original graph is unweighted.
Returns
-------
blockmodel : a Networkx graph object
Examples
--------
>>> G = nx.path_graph(6)
>>> partition = [[0,1],[2,3],[4,5]]
>>> M = nx.blockmodel(G,partition)
References
----------
.. [1] Patrick Doreian, Vladimir Batagelj, and Anuska Ferligoj
"Generalized Blockmodeling",Cambridge University Press, 2004.
.. note:: Deprecated in NetworkX v1.11
``blockmodel`` will be removed in NetworkX 2.0. Instead use
``quotient_graph`` with keyword argument ``relabel=True``, and
``create_using=nx.MultiGraph()`` for multigraphs.
"""
if multigraph:
return nx.quotient_graph(G, partition,
create_using=nx.MultiGraph(), relabel=True)
else:
return nx.quotient_graph(G, partition, relabel=True)
|
{
"content_hash": "ff92a145fa50eea0fb8b191a2fdb4395",
"timestamp": "",
"source": "github",
"line_count": 461,
"max_line_length": 96,
"avg_line_length": 39.23861171366595,
"alnum_prop": 0.6197136381226159,
"repo_name": "andnovar/networkx",
"id": "059d8daa545d458f3a36b45567b0c1cf1d2b0dcf",
"size": "18440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "networkx/algorithms/minors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3218696"
}
],
"symlink_target": ""
}
|
"""
Provides a function to report all internal modules for using freezing tools
pytest
"""
def freeze_includes():
"""
Returns a list of module names used by pytest that should be
included by cx_freeze.
"""
import py
import _pytest
result = list(_iter_all_modules(py))
result += list(_iter_all_modules(_pytest))
return result
def _iter_all_modules(package, prefix=""):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
Example:
_iter_all_modules(_pytest) ->
['_pytest.assertion.newinterpret',
'_pytest.capture',
'_pytest.core',
...
]
"""
import os
import pkgutil
if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + "."
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."):
yield prefix + m
else:
yield prefix + name
|
{
"content_hash": "5d6a30d116e6732f1b0d27b482d2ba14",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 84,
"avg_line_length": 25.681818181818183,
"alnum_prop": 0.5690265486725664,
"repo_name": "randyzingle/tools",
"id": "f9d613a2b641adcf597b392422a16f95834a28ce",
"size": "1130",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kub/services/archive/cdk/python/sample-app/.env/lib/python3.6/site-packages/_pytest/freeze_support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "773"
},
{
"name": "Go",
"bytes": "118058"
},
{
"name": "Groovy",
"bytes": "1555"
},
{
"name": "HTML",
"bytes": "4288"
},
{
"name": "Java",
"bytes": "818301"
},
{
"name": "JavaScript",
"bytes": "605048"
},
{
"name": "Makefile",
"bytes": "5063"
},
{
"name": "Python",
"bytes": "10438760"
},
{
"name": "Scala",
"bytes": "37406"
},
{
"name": "Shell",
"bytes": "8611"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
from pykit.dictutil import FixedKeysDict
RSConfig = namedtuple('RSConfig', 'data,parity')
def _replica(n=None):
# data_replica can not be smaller than 1
if n is None:
return 1
n = int(n)
if n < 1:
raise ValueError('N.O. replica must >=1')
return n
def _ec_policy(p=None):
if p is None:
return 'lrc'
return str(p)
class ReplicationConfig(FixedKeysDict):
keys_default = {
'in_idc': lambda dp: RSConfig(dp[0], dp[1]),
'cross_idc': lambda dp: RSConfig(dp[0], dp[1]),
'ec_policy': _ec_policy,
'data_replica': _replica,
}
|
{
"content_hash": "765092452c4d430334d28616214a4d81",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 55,
"avg_line_length": 19.818181818181817,
"alnum_prop": 0.5978593272171254,
"repo_name": "sejust/pykit",
"id": "cf18464467fd54eb0f05cd0ef2495c4f7ad37141",
"size": "693",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ectypes/replication_config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "1193736"
},
{
"name": "Shell",
"bytes": "45490"
}
],
"symlink_target": ""
}
|
__author__ = 'Martin Galpin'
__contact__ = 'm@66laps.com'
__version__ = '1.0'
__license__ = 'Apache License, Version 2.0'
def lap_difference(session, lap):
'''Gets the time difference between this lap and the previous lap.'''
index = lap.index
return None if not index else lap.length - session.laps[index - 1].length
def is_fastest_lap(session, lap):
'''Returns True if the given lap is the fastest in the given session.'''
return lap == fastest_lap(session)
def fastest_lap_time(session):
'''Gets the fastest lap time in a given session.'''
if not _has_at_least_one_lap(session): return None
return min([lap.length for lap in session.laps if lap.length is not None])
def fastest_lap(session):
'''Gets the fastest lap in a given session.'''
if not _has_at_least_one_lap(session): return None
f = lambda x, lap: lap.length == fastest_lap_time(session) and lap or x
return reduce(f, session.laps)
def fastest_sector(session, sector):
'''
Returns the fastest time for a given sector in a given lap. `sector`
should be a index, starting at 0 for the first sector. If `sector`
is out of bounds or there is not at least one lap, this function
will return None.
For example:
>>> session = openmotorsport.Session()
>>> session.num_sectors = 2
>>> session.add_markers([10, 20, 30, 35, 39, 45])
>>> fastest_sector(session, 0)
5
>>> fastest_sector(session, 1)
4
>>> fastest_sector(session, 3)
None
'''
if not _has_at_least_one_lap(session): return None
if len(session.markers) <= sector: return None
return min([lap.sectors[sector] for lap in session.laps])
def is_fastest_sector(session, sector, time):
'''Returns True if a given time is the fastest for a given in a session.'''
return time == fastest_sector(session, sector)
def fastest_or_next_fastest_lap(lap):
'''
Gets the fastest lap for a given lap. If this already is the fastest lap,
get the next fastest lap instead. Finally, if there is no other lap, None
will be returned.
'''
the_fastest_lap = fastest_lap(lap.session)
if lap != the_fastest_lap: return the_fastest_lap
laps = [x for x in lap.session.laps if x.length >= lap.length and x != lap]
laps = sorted(laps, key=lambda lap: lap.length)
return laps[0] if laps else None
def slowest_lap_time(session):
'''Gets the slowest lap time in a given session.'''
if not _has_at_least_one_lap(session): return None
return max([lap.length for lap in session.laps if lap.length is not None])
def slowest_lap(session):
'''Gets the slowest Lap in a given session. Currently include outliners.'''
if not _has_at_least_one_lap(session): return None
f = lambda x, lap: lap.length == slowest_lap_time(session) and lap or x
return reduce(f, session.laps)
def slowest_or_next_slowest_lap(lap):
'''
Gets the slowest Lap for a given session. If this is already the slowest Lap,
get the next slowest Lap instead. Finally, if there is no other Lap, None will
be returned.
'''
the_slowest_lap = slowest_lap(lap.session)
if lap != the_slowest_lap: return the_slowest_lap
laps = [x for x in lap.session.laps if x.length <= lap.length and x != lap]
laps = sorted(laps, key=lambda lap: lap.length, reverse=True)
return laps[0] if laps else None
def next_lap(lap):
'''Get the next Lap sequentially. Returns None if this is the last lap.'''
return lap.session.laps[lap.index + 1] \
if lap.index < (len(lap.session.laps) - 1) else None
def previous_lap(lap):
''' Gets the previous Lap. Returns None if this is the first lap.'''
return lap.session.laps[lap.index - 1] if lap.index > 0 else None
def _has_at_least_one_lap(session):
'''Private method.
Gets whether a session has no laps or if it has a lap, that the lap is
complete (and that they didn't crash on their outlap - it happens!)
'''
return not(not session.laps or session.laps[0].length is None)
|
{
"content_hash": "61e49e8a24992a5f7928afb1aba88375",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 80,
"avg_line_length": 38.11764705882353,
"alnum_prop": 0.6957304526748971,
"repo_name": "galpin/python-openmotorsport",
"id": "e92becde58c16960bd1c58b5805e6a17a4072d48",
"size": "4607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openmotorsport/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "112392"
},
{
"name": "Python",
"bytes": "70791"
}
],
"symlink_target": ""
}
|
"""
Composite Panel (:mod:`structmanager.structelem.panelcomp`)
========================================================
.. currentmodule:: structmanager.structelem.panelcomp
"""
import numpy as np
from .base import SE2D
class PanelComp(SE2D):
"""Composite Panel
This class should be used for cylindrical panels (i.e. fuselage panels).
For plates please refer to :class:`.Plate`.
For wing panels this class should also be adopted.
Attributes
----------
"""
def __init__(self, name, eids, model=None):
super(PanelComp, self).__init__(name, eids, model) #change to super(PanelComp, self)?
# geometric parameters
self.r = None
self.a = None
self.b = None
self.t = None
self.t_lb = None
self.t_ub = None
self.p45 = None
self.p45_lb = 0.1
self.p45_ub = None
self.p90 = 0.1
# material properties
# all material properties are got from FE model at ses.py
self.is_isotropic = None #change to orthotropic?
# optimization constraints
self.all_constraints = ['vonMises', 'buckling']
self.constraints = {'vonMises': 1,
'buckling': 1}
# finding corner nodes
# - assuming that they are those that share only one inner element
# - radius calculated assuming the panel has a common center
if self.elements is not None:
nodes = []
for element in self.elements:
for node in element.nodes:
nodes.append(node)
self.nodes = set(nodes)
ccoords = np.array([n.xyz for n in self.nodes])
xs = ccoords[:, 0]
ys = ccoords[:, 1]
zs = ccoords[:, 2]
rs = (ys**2 + zs**2)**0.5
thetas = np.arctan2(zs, ys)
self.r = rs.mean()
self.a = xs.max() - xs.min()
self.b = (thetas.max() - thetas.min())*self.r
# retrieving plies thicknesses from panel
self.t0 = self.elements[0].pid.Thickness(0)
self.t45 = self.elements[0].pid.Thickness(1) + self.elements[0].pid.Thickness(2)
self.t90 = self.elements[0].pid.Thickness(3)
self.t = self.t0 + self.t45 + self.t90
# calculating the thickness ratio
self.p45 = self.t45/self.t
|
{
"content_hash": "a80edaee54fa036c489815ee896d1734",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 93,
"avg_line_length": 31.116883116883116,
"alnum_prop": 0.5425709515859767,
"repo_name": "saullocastro/structMan",
"id": "595095f5a22b02859e590d379c478d4c09140250",
"size": "2396",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "structmanager/structelem/panelcomp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "70037"
},
{
"name": "FORTRAN",
"bytes": "150686"
},
{
"name": "Python",
"bytes": "226763"
},
{
"name": "Shell",
"bytes": "149"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
import sys
from .columns import ColumnList
from .preferences import BomPref
from . import units
from . import debug
from .sort import natural_sort
# String matches for marking a component as "do not fit"
DNF = [
"dnf",
"dnl",
"dnp",
"do not fit",
"do not place",
"do not load",
"nofit",
"nostuff",
"noplace",
"noload",
"not fitted",
"not loaded",
"not placed",
"no stuff",
]
# String matches for marking a component as "do not change" or "fixed"
DNC = [
"dnc",
"do not change",
"no change",
"fixed"
]
class Component():
"""Class for a component, aka 'comp' in the xml netlist file.
This component class is implemented by wrapping an xmlElement instance
with accessors. The xmlElement is held in field 'element'.
"""
def __init__(self, xml_element, prefs=None):
self.element = xml_element
self.libpart = None
if not prefs:
prefs = BomPref()
self.prefs = prefs
# Set to true when this component is included in a component group
self.grouped = False
# Compare the value of this part, to the value of another part (see if they match)
def compareValue(self, other):
# Simple string comparison
if self.getValue().lower() == other.getValue().lower():
return True
# Otherwise, perform a more complicated value comparison
if units.compareValues(self.getValue(), other.getValue()):
return True
# Ignore value if both components are connectors
if self.prefs.groupConnectors:
if 'connector' in self.getLibName().lower() and 'connector' in other.getLibName().lower():
return True
# No match, return False
return False
# Determine if two parts have the same name
def comparePartName(self, other):
pn1 = self.getPartName().lower()
pn2 = other.getPartName().lower()
# Simple direct match
if pn1 == pn2:
return True
# Compare part aliases e.g. "c" to "c_small"
for alias in self.prefs.aliases:
if pn1 in alias and pn2 in alias:
return True
return False
def compareField(self, other, field):
this_field = self.getField(field).lower()
other_field = other.getField(field).lower()
# If blank comparisons are allowed
if this_field == "" or other_field == "":
if not self.prefs.mergeBlankFields:
return False
if this_field == other_field:
return True
return False
def __eq__(self, other):
"""
Equivalency operator is used to determine if two parts are 'equal'
"""
# 'fitted' value must be the same for both parts
if self.isFitted() != other.isFitted():
return False
# 'fixed' value must be the same for both parts
if self.isFixed() != other.isFixed():
return False
if len(self.prefs.groups) == 0:
return False
for c in self.prefs.groups:
# Perform special matches
if c.lower() == ColumnList.COL_VALUE.lower():
if not self.compareValue(other):
return False
# Match part name
elif c.lower() == ColumnList.COL_PART.lower():
if not self.comparePartName(other):
return False
# Generic match
elif not self.compareField(other, c):
return False
return True
def setLibPart(self, part):
self.libpart = part
def getPrefix(self):
"""
Get the reference prefix
e.g. if this component has a reference U12, will return "U"
"""
prefix = ""
for c in self.getRef():
if c.isalpha():
prefix += c
else:
break
return prefix
def getSuffix(self):
"""
Return the reference suffix #
e.g. if this component has a reference U12, will return "12"
"""
suffix = ""
for c in self.getRef():
if c.isalpha():
suffix = ""
else:
suffix += c
return int(suffix)
def getLibPart(self):
return self.libpart
def getPartName(self):
return self.element.get("libsource", "part")
def getLibName(self):
return self.element.get("libsource", "lib")
def getSheetpathNames(self):
return self.element.get("sheetpath", "names")
def getDescription(self):
"""Extract the 'description' field for this component"""
# Give priority to a user "description" field
ret = self.element.get("field", "name", "description")
if ret:
return ret
try:
ret = self.element.get("libsource", "description")
except:
# Compatibility with old KiCad versions (4.x)
ret = self.element.get("field", "name", "description")
if ret == "":
try:
ret = self.libpart.getDescription()
except AttributeError:
# Raise a good error description here, so the user knows what the culprit component is.
# (sometimes libpart is None)
raise AttributeError('Could not get description for part {}{}.'.format(self.getPrefix(),
self.getSuffix()))
return ret
def setValue(self, value):
"""Set the value of this component"""
v = self.element.getChild("value")
if v:
v.setChars(value)
def getValue(self):
return self.element.get("value")
# Try to better sort R, L and C components
def getValueSort(self):
pref = self.getPrefix()
if pref in 'RLC' or pref == 'RV':
res = units.compMatch(self.getValue())
if res:
value, mult, unit = res
if pref in "CL":
# fempto Farads
value = "{0:15d}".format(int(value * 1e15 * mult + 0.1))
else:
# milli Ohms
value = "{0:15d}".format(int(value * 1000 * mult + 0.1))
return value
return self.element.get("value")
def setField(self, name, value):
""" Set the value of the specified field """
# Description field
doc = self.element.getChild('libsource')
if doc:
for att_name, att_value in doc.attributes.items():
if att_name.lower() == name.lower():
doc.attributes[att_name] = value
return value
# Common fields
field = self.element.getChild(name.lower())
if field:
field.setChars(value)
return value
# Other fields
fields = self.element.getChild('fields')
if fields:
for field in fields.getChildren():
if field.get('field', 'name') == name:
field.setChars(value)
return value
return None
def getField(self, name, ignoreCase=True, libraryToo=True):
"""Return the value of a field named name. The component is first
checked for the field, and then the components library part is checked
for the field. If the field doesn't exist in either, an empty string is
returned
Keywords:
name -- The name of the field to return the value for
libraryToo -- look in the libpart's fields for the same name if not found
in component itself
"""
fp = self.getFootprint().split(":")
if name.lower() == ColumnList.COL_REFERENCE.lower():
return self.getRef().strip()
elif name.lower() == ColumnList.COL_DESCRIPTION.lower():
return self.getDescription().strip()
elif name.lower() == ColumnList.COL_DATASHEET.lower():
return self.getDatasheet().strip()
# Footprint library is first element
elif name.lower() == ColumnList.COL_FP_LIB.lower():
if len(fp) > 1:
return fp[0].strip()
else:
# Explicit empty return
return ""
elif name.lower() == ColumnList.COL_FP.lower():
if len(fp) > 1:
return fp[1].strip()
elif len(fp) == 1:
return fp[0]
else:
return ""
elif name.lower() == ColumnList.COL_VALUE.lower():
return self.getValue().strip()
elif name.lower() == ColumnList.COL_PART.lower():
return self.getPartName().strip()
elif name.lower() == ColumnList.COL_PART_LIB.lower():
return self.getLibName().strip()
elif name.lower() == ColumnList.COL_SHEETPATH.lower():
return self.getSheetpathNames().strip()
# Other fields (case insensitive)
for f in self.getFieldNames():
if f.lower() == name.lower():
field = self.element.get("field", "name", f)
if field == "" and libraryToo:
field = self.libpart.getField(f)
return field.strip()
# Could not find a matching field
return ""
def getFieldNames(self):
"""Return a list of field names in play for this component. Mandatory
fields are not included, and they are: Value, Footprint, Datasheet, Ref.
The netlist format only includes fields with non-empty values. So if a field
is empty, it will not be present in the returned list.
"""
fieldNames = []
fields = self.element.getChild('fields')
if fields:
for f in fields.getChildren():
fieldNames.append(f.get('field', 'name'))
return fieldNames
def getRef(self):
return self.element.get("comp", "ref")
def isFitted(self):
""" Determine if a component is FITTED or not """
# Check the value field first
if self.getValue().lower() in DNF:
return False
check = self.getField(self.prefs.configField).lower()
# Empty value means part is fitted
if check == "":
return True
# Also support space separated list (simple cases)
opts = check.split(" ")
for opt in opts:
if opt.lower() in DNF:
return False
# Variants logic
opts = check.split(",")
# Exclude components that match a -VARIANT
for opt in opts:
opt = opt.strip()
# Any option containing a DNF is not fitted
if opt in DNF:
return False
# Options that start with '-' are explicitly removed from certain configurations
if opt.startswith("-") and opt[1:] in self.prefs.pcbConfig:
return False
# Include components that match +VARIANT
exclusive = False
for opt in opts:
# Options that start with '+' are fitted only for certain configurations
if opt.startswith("+"):
exclusive = True
if opt[1:] in self.prefs.pcbConfig:
return True
# No match
return not exclusive
def isFixed(self):
""" Determine if a component is FIXED or not.
Fixed components shouldn't be replaced without express authorization """
# Check the value field first
if self.getValue().lower() in DNC:
return True
check = self.getField(self.prefs.configField).lower()
# Empty is not fixed
if check == "":
return False
opts = check.split(" ")
for opt in opts:
if opt.lower() in DNC:
return True
opts = check.split(",")
for opt in opts:
if opt.lower() in DNC:
return True
return False
# Test if this part should be included, based on any regex expressions provided in the preferences
def testRegExclude(self):
for reg in self.prefs.regExcludes:
if type(reg) == list and len(reg) == 2:
field_name, regex = reg
field_value = self.getField(field_name)
# Attempt unicode escaping...
# Filthy hack
try:
regex = regex.decode("unicode_escape")
except:
pass
if re.search(regex, field_value, flags=re.IGNORECASE) is not None:
debug.info("Excluding '{ref}': Field '{field}' ({value}) matched '{reg}'".format(
ref=self.getRef(),
field=field_name,
value=field_value,
reg=regex).encode('utf-8')
)
# Found a match
return True
# Default, could not find any matches
return False
def testRegInclude(self):
if len(self.prefs.regIncludes) == 0: # Nothing to match against
return True
for reg in self.prefs.regIncludes:
if type(reg) == list and len(reg) == 2:
field_name, regex = reg
field_value = self.getField(field_name)
debug.info(field_name, field_value, regex)
if re.search(regex, field_value, flags=re.IGNORECASE) is not None:
# Found a match
return True
# Default, could not find a match
return False
def getFootprint(self, libraryToo=True):
ret = self.element.get("footprint")
if ret == "" and libraryToo:
if self.libpart:
ret = self.libpart.getFootprint()
return ret
def getDatasheet(self, libraryToo=True):
ret = self.element.get("datasheet")
if ret == "" and libraryToo:
ret = self.libpart.getDatasheet()
return ret
def getTimestamp(self):
return self.element.get("tstamp")
class joiner:
def __init__(self):
self.stack = []
def add(self, P, N):
if self.stack == []:
self.stack.append(((P, N), (P, N)))
return
S, E = self.stack[-1]
if N == E[1] + 1:
self.stack[-1] = (S, (P, N))
else:
self.stack.append(((P, N), (P, N)))
def flush(self, sep, N=None, dash='-'):
refstr = u''
c = 0
for Q in self.stack:
if bool(N) and c != 0 and c % N == 0:
refstr += u'\n'
elif c != 0:
refstr += sep + " "
S, E = Q
if S == E:
refstr += "%s%d" % S
c += 1
else:
# Do we have space?
if bool(N) and (c + 1) % N == 0:
refstr += u'\n'
c += 1
refstr += "%s%d%s%s%d" % (S[0], S[1], dash, E[0], E[1])
c += 2
return refstr
class ComponentGroup():
"""
Initialize the group with no components, and default fields
"""
def __init__(self, prefs=None):
self.components = []
self.fields = dict.fromkeys(ColumnList._COLUMNS_DEFAULT) # Columns loaded from KiCad
if not prefs:
prefs = BomPref()
self.prefs = prefs
def getField(self, field):
if field not in self.fields.keys():
return ""
if not self.fields[field]:
return ""
return u''.join((self.fields[field]))
def getCount(self):
return len(self.components)
# Test if a given component fits in this group
def matchComponent(self, c):
if len(self.components) == 0:
return True
if c == self.components[0]:
return True
return False
def containsComponent(self, c):
# Test if a given component is already contained in this grop
if not self.matchComponent(c):
return False
for comp in self.components:
if comp.getRef() == c.getRef():
return True
return False
def addComponent(self, c):
# Add a component to the group
if len(self.components) == 0:
self.components.append(c)
elif self.containsComponent(c):
return
elif self.matchComponent(c):
self.components.append(c)
def isFitted(self):
return any([c.isFitted() for c in self.components])
def isFixed(self):
return any([c.isFixed() for c in self.components])
def getRefs(self):
# Return a list of the components
separator = self.prefs.refSeparator
return separator.join([c.getRef() for c in self.components])
def getAltRefs(self):
S = joiner()
for n in self.components:
P, N = (n.getPrefix(), n.getSuffix())
S.add(P, N)
return S.flush(self.prefs.refSeparator)
# Sort the components in correct order
def sortComponents(self):
self.components = sorted(self.components, key=lambda c: natural_sort(c.getRef()))
# Update a given field, based on some rules and such
def updateField(self, field, fieldData):
# Protected fields cannot be overwritten
if field in ColumnList._COLUMNS_PROTECTED:
return
if field is None or field == "":
return
elif fieldData == "" or fieldData is None:
return
if (field not in self.fields.keys()) or (self.fields[field] is None) or (self.fields[field] == ""):
self.fields[field] = fieldData
elif fieldData.lower() in self.fields[field].lower():
return
else:
debug.warning("Field conflict: ({refs}) [{name}] : '{flds}' <- '{fld}'".format(
refs=self.getRefs(),
name=field,
flds=self.fields[field],
fld=fieldData).encode('utf-8'))
self.fields[field] += " " + fieldData
def updateFields(self, usealt=False, wrapN=None):
for c in self.components:
for f in c.getFieldNames():
# These columns are handled explicitly below
if f in ColumnList._COLUMNS_PROTECTED:
continue
self.updateField(f, c.getField(f))
# Update 'global' fields
if usealt:
self.fields[ColumnList.COL_REFERENCE] = self.getAltRefs()
else:
self.fields[ColumnList.COL_REFERENCE] = self.getRefs()
q = self.getCount()
self.fields[ColumnList.COL_GRP_QUANTITY] = "{n}{dnf}{dnc}".format(
n=q,
dnf=" (DNF)" if not self.isFitted() else "",
dnc=" (DNC)" if self.isFixed() else "")
self.fields[ColumnList.COL_GRP_BUILD_QUANTITY] = str(q * self.prefs.boards) if self.isFitted() else "0"
self.fields[ColumnList.COL_VALUE] = self.components[0].getValue()
self.fields[ColumnList.COL_PART] = self.components[0].getPartName()
self.fields[ColumnList.COL_PART_LIB] = self.components[0].getLibName()
self.fields[ColumnList.COL_DESCRIPTION] = self.components[0].getDescription()
self.fields[ColumnList.COL_DATASHEET] = self.components[0].getDatasheet()
self.fields[ColumnList.COL_SHEETPATH] = self.components[0].getSheetpathNames()
# Footprint field requires special attention
fp = self.components[0].getFootprint().split(":")
if len(fp) >= 2:
self.fields[ColumnList.COL_FP_LIB] = fp[0]
self.fields[ColumnList.COL_FP] = fp[1]
elif len(fp) == 1:
self.fields[ColumnList.COL_FP_LIB] = ""
self.fields[ColumnList.COL_FP] = fp[0]
else:
self.fields[ColumnList.COL_FP_LIB] = ""
self.fields[ColumnList.COL_FP] = ""
# Return a dict of the KiCad data based on the supplied columns
# NOW WITH UNICODE SUPPORT!
def getRow(self, columns):
row = []
for key in columns:
val = self.getField(key)
# Join fields (appending to current value) (#81)
for join_l in self.prefs.join:
# Each list is "target, source..." so we need at least 2 elements
elements = len(join_l)
target = join_l[0]
if elements > 1 and target == key:
# Append data from the other fields
for source in join_l[1:]:
v = self.getField(source)
if v:
val = val + ' ' + v
if val is None:
val = ""
else:
val = u'' + val
if sys.version_info[0] < 3:
val = val.encode('utf-8')
row.append(val)
return row
|
{
"content_hash": "0863eee69714e80fb4771a1e88b07c77",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 111,
"avg_line_length": 30.454935622317596,
"alnum_prop": 0.5325535512965051,
"repo_name": "SchrodingersGat/KiBoM",
"id": "95dd89c3603a9e0f10cbdc0ea9bbdd241cd15823",
"size": "21312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kibom/component.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96344"
},
{
"name": "Shell",
"bytes": "1541"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['architectures', 'name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
docdir = os.path.join(cwd,'..','documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','ti.material.js')
if not os.path.exists(js_file):
js_file = os.path.join(cwd,'..','assets','ti.material.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','TiMaterialModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def error(msg):
print "[ERROR] %s" % msg
def validate_license():
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
c = open(license_file).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
f = open(path)
if not os.path.exists(path): die("missing %s" % path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if manifest[key].strip() == '': die("manifest key '%s' missing required value" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] == '.pyc': continue
if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def verify_build_arch(manifest, config):
binaryname = 'lib%s.a' % manifest['moduleid']
binarypath = os.path.join('build', binaryname)
manifestarch = set(manifest['architectures'].split(' '))
output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
builtarch = set(output.split(':')[-1].strip().split(' '))
print 'Check build architectures\n'
if ('arm64' not in builtarch):
warn('built module is missing 64-bit support.')
if (manifestarch != builtarch):
warn('architectures in manifest: %s' % ', '.join(manifestarch))
warn('compiled binary architectures: %s' % ', '.join(builtarch))
print '\nMODULE BUILD FAILED'
error('there is discrepancy between the architectures specified in module manifest and compiled binary.')
error('Please update manifest to match module binary architectures.')
die('')
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
p = os.path.join(cwd, 'assets')
if not os.path.exists(p):
p = os.path.join(cwd, '..', 'assets')
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
for dn in ('example','platform'):
p = os.path.join(cwd, dn)
if not os.path.exists(p):
p = os.path.join(cwd, '..', dn)
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
zf.write(license_file,'%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
print "**************************************************************"
print " WARNING!"
print " This Python script is deprecated!"
print " Please use 'ti build -p ios --build-only' instead"
print "**************************************************************"
print ""
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
verify_build_arch(manifest, config)
package_module(manifest,mf,config)
sys.exit(0)
|
{
"content_hash": "934d53031a048dd43072b947adb21a85",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 137,
"avg_line_length": 31.647686832740213,
"alnum_prop": 0.6758124367480041,
"repo_name": "caffeinalab/ti.material",
"id": "25a9281a2c98a00bf482f263ef860bbdadadf7d5",
"size": "8893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iphone/build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "22707"
},
{
"name": "JavaScript",
"bytes": "2364"
},
{
"name": "Objective-C",
"bytes": "145559"
},
{
"name": "Python",
"bytes": "8893"
}
],
"symlink_target": ""
}
|
import os
import Globals
import Engine, World
import aiMove, aiBattle
mobsFromFile = Globals.mobsFromFile
fileList = []
for region in Globals.RegionsList:
directoryFiles = os.listdir('blueprints/mob/'+str(region)+'/')
for obj in directoryFiles:
path = str(region)+'/'+ obj
fileList.append(path)
def loadMobs():
'''
handles loading all the mob prototypes into the world from files.
'''
for file in fileList:
loadMobFromFile(file)
for region in Globals.regionListDict:
for room in Globals.regionListDict[region]:
for obj in Globals.regionListDict[region][room].objects:
if obj.mobSpawner is not None:
for Amob in Globals.mobsFromFile:
if Amob.name == obj.mobSpawner.mob:
obj.mobSpawner.mob = Amob
def loadSavedMobs():
for region in Globals.regionListDict:
for room in Globals.regionListDict[region]:
path = 'data/world/'+region+'/mobs/'+room+'/'
if os.path.exists(path):
# print path + " exists."
# print Globals.regionListDict[region][room].mobs
for mob in Globals.regionListDict[region][room].mobs:
Globals.MoveTIMERS.remove(mob.aiMove.Timer)
Globals.regionListDict[region][room].mobs = []
# print Globals.regionListDict[region][room].mobs
# print Globals.masterRooms
#Globals.masterRooms[(region+room.capitalize())].mobs = []
# Globals.masterRooms = {}
mobFiles = os.listdir(path)
for mob in mobFiles:
# filePath = region + '/' + str(mob)
if mob != '':
if mob.endswith('~'):
pass
else:
newMob = loadSavedMobFromFile(mob, path)
# protoMob = newMob
# inventoryList = []
# for item in protoMob.kind.inventory:
# found = False
# for obj in Globals.fromFileList:
# if item.name == obj.name and found == False:
# # inventoryList.append(obj) # not the right way to handle this, should be forming new objects
# invObj = Engine.cmdSpawnObject(obj.name, Globals.regionListDict[region][room], alert=False, whereFrom='mobinv')
# inventoryList.append(invObj)
# Globals.regionListDict[region][room].objects.remove(invObj)
# found = True
# newMortal = World.mortal(protoMob.kind.hp, protoMob.kind.exp, inventoryList, protoMob.kind.inventorySize, protoMob.kind.equipment)
# newMob = World.Mob(protoMob.description, Globals.regionListDict[region][room], protoMob.name, Globals.regionListDict[region][room].region, protoMob.longDescription, protoMob.speech, newMortal, protoMob.species, None)
# newMoveAI = aiMove.movementAI(newMob, protoMob.aiMove.Timer.time)
# if protoMob.aiMove.Timer.actionFunction == protoMob.aiMove.basicRandom:
# newMoveAI.Timer.actionFunction = newMoveAI.basicRandom
# elif protoMob.aiMove.Timer.actionFunction == protoMob.aiMove.introvertRandom:
# newMoveAI.Timer.actionFunction = newMoveAI.introvertRandom
# elif protoMob.aiMove.Timer.actionFunction == protoMob.aiMove.extrovertRandom:
# newMoveAI.Timer.actionFunction = newMoveAI.extrovertRandom
# elif protoMob.aiMove.Timer.actionFunction == protoMob.aiMove.donotMove:
# newMoveAI.Timer.actionFunction = newMoveAI.doNotMove
# newMob.aiMove = newMoveAI
# Globals.MoveTIMERS.remove(newMob.aiMove.Timer)
# if hasattr(protoMob, 'expirator') and protoMob.expirator != None:
# newExpirator = World.expirator(newMob, protoMob.expirator.startingTime)
# newMob.expirator = newExpirator
# Globals.regionListDict[region][room].mobs.append(newMob)
# print Globals.regionListDict[region][room].mobs
#Globals.mobsFromFile.remove(protoMob)
#Globals.MoveTIMERS.remove(protoMob.aiMove.Timer)
def loadMobFromFile(file):
'''
handles loading a single mob from a given mob definition file into the world
'''
print file
if str(file).endswith('~'):
print '\n'
return
path = 'blueprints/mob/' + file
with open(path, 'r') as f:
fileData = f.readlines()
newMob = World.Mob('none', 'none', 'none')
newMob.mobID = ''
print fileData
splitFile = file.split("/")
mobID = None
name = 'none'
species = None
currentRoom = None
region = None
description = ''
longDescription = ''
hp = 0
exp = 0
inventory = []
inventorySize = 0
equipment = {}
kind = None
expirator = None
inventoryItems = []
currentRoomString = ''
moveAI = None
battleAI = None
newMob.kind = World.mortal(hp=0,maxHp=0,pp=0,maxPp=0,level=0,exp=0,money=0,offense=0,defense=0,speed=0,guts=0,luck=0,vitality=0,IQ=0,inventory=[],inventorySize=0,equipment={})
newMob.region = splitFile[0]
for Data in fileData:
if Data.startswith('mobID='):
IDstring = Data[6:-1]
if IDstring != '':
newMob.mobID = int(IDstring)
if Data.startswith('name='):
newMob.name = Data[5:-1]
if Data.startswith('species='):
newMob.species = Data[8:-1]
if Data.startswith('currentRoom='):
currentRoomString = Data[12:-1]
if Data.startswith('description='):
newMob.description = Data[12:-1]
if Data.startswith('longDescription='):
newMob.longDescription = Data[16:-1]
if Data.startswith('speech='):
newMob.speech = Data[7:-1]
if Data.startswith('expirator='):
expirator = Data[10:-1]
if expirator != '':
expirator = int(expirator)
if Data.startswith('moveAI='):
text = Data[7:-1]
moveAI = text.split(":")
if Data.startswith('battleAI='):
text = Data[9:-1]
if text == 'basicBash':
battleAI = aiBattle.basicBash
else:
battleAI = ''
if Data.startswith('kind.hp='):
newMob.kind.hp = int(Data[8:-1])
if Data.startswith('kind.maxHp='):
newMob.kind.maxHp = int(Data[11:-1])
if Data.startswith('kind.pp='):
newMob.kind.pp = int(Data[8:-1])
if Data.startswith('kind.maxPp='):
newMob.kind.maxPp = int(Data[11:-1])
if Data.startswith('kind.level='):
newMob.kind.level = int(Data[11:-1])
if Data.startswith('kind.exp='):
newMob.kind.exp = int(Data[9:-1])
if Data.startswith('kind.money='):
newMob.kind.money = int(Data[11:-1])
if Data.startswith('kind.offense='):
newMob.kind.offense = int(Data[13:-1])
if Data.startswith('kind.defense='):
newMob.kind.defense = int(Data[13:-1])
if Data.startswith('kind.speed='):
newMob.kind.speed = int(Data[11:-1])
if Data.startswith('kind.guts='):
newMob.kind.guts = int(Data[10:-1])
if Data.startswith('kind.luck='):
newMob.kind.luck = int(Data[10:-1])
if Data.startswith('kind.vitality='):
newMob.kind.vitality = int(Data[14:-1])
if Data.startswith('kind.IQ='):
newMob.kind.IQ = int(Data[8:-1])
if Data.startswith('kind.inventory='):
invString = Data[15:-1]
if invString != '':
#print "invString:" + invString
invList = invString.split(', ')
#print 'invList:' + str(invList)
for item in invList:
for ob in Globals.fromFileList:
if item == ob.name:
inventoryItems.append(item)
else:
inventoryItems = []
if Data.startswith('kind.inventorySize='):
newMob.kind.inventorySize = int(Data[19:-1])
if currentRoomString != '':
currentRoomCoords = currentRoomString.split(":")
newMob.currentRoom = Globals.regionListDict[currentRoomCoords[0]][currentRoomCoords[1]]
else:
# newMob.currentRoom = Globals.regionListDict[newMob.region]['bullpen']
newMob.currentRoom = None
if expirator != None and expirator != '':
expiratorComponent = World.expirator(newMob, expirator)
newMob.expirator = expiratorComponent
if moveAI != None and moveAI != []:
newMoveAI = aiMove.movementAI(newMob, int(moveAI[1]))
if moveAI[0] == 'basicRandom':
newMoveAI.Timer.actionFunction = newMoveAI.basicRandom
elif moveAI[0] == 'introvertRandom':
newMoveAI.Timer.actionFunction = newMoveAI.introvertRandom
elif moveAI[0] == 'extrovertRandom':
newMoveAI.Timer.actionFunction = newMoveAI.extrovertRandom
elif moveAI[0] == 'doNotMove':
newMoveAI.Timer.actionFunction = newMoveAI.doNotMove
newMob.aiMove = newMoveAI
Globals.MoveTIMERS.remove(newMob.aiMove.Timer)
if battleAI != None:
newMob.aiBattle = battleAI
#print 'invItems:' + str(inventoryItems)
for item in inventoryItems:
#print 'invitem:' + str(item)
removed = False
newItem = Engine.cmdSpawnObject(item, newMob.currentRoom, alert=False, whereFrom='mobinv')
newMob.kind.inventory.append(newItem)
# for obj in newMob.currentRoom.objects:
# if obj.name == item:
# newMob.currentRoom.objects.remove(obj)
if newMob.currentRoom is not None:
newMob.currentRoom.objects.remove(newItem)
if newMob.currentRoom is not None:
newMob.currentRoom.mobs.append(newMob)
if expirator != None and expirator != '':
Globals.TIMERS.remove(newMob.expirator.Timer)
#newMob.expirator.Timer = None
Globals.mobsFromFile.append(newMob)
#print 'region:' + str(newMob.region)
def loadSavedMobFromFile(file, path, isBattle=False):
'''
handles loading a single mob from a given mob definition file into the world
'''
print file
if str(file).endswith('~'):
print '\n'
return
# path = 'blueprints/mob/' + file
filePath = path + file
with open(filePath, 'r') as f:
fileData = f.readlines()
newMob = World.Mob('none', 'none', 'none')
newMob.mobID=''
print fileData
splitFile = file.split("/")
mobID = None
name = 'none'
species = None
currentRoom = None
region = None
description = ''
longDescription = ''
hp = 0
exp = 0
inventory = []
inventorySize = 0
equipment = {}
kind = None
expirator = None
inventoryItems = []
currentRoomString = ''
moveAI = None
battleAI = None
newMob.kind = World.mortal(hp=0,maxHp=0,pp=0,maxPp=0,level=0,exp=0,money=0,offense=0,defense=0,speed=0,guts=0,luck=0,vitality=0,IQ=0,inventory=[],inventorySize=0,equipment={})
newMob.region = splitFile[0]
for Data in fileData:
if Data.startswith('mobID='):
IDstring = Data[6:-1]
if IDstring != '':
newMob.mobID = str(IDstring)
if Data.startswith('name='):
newMob.name = Data[5:-1]
if Data.startswith('species='):
newMob.species = Data[8:-1]
if Data.startswith('currentRoom='):
currentRoomString = Data[12:-1]
if Data.startswith('description='):
newMob.description = Data[12:-1]
if Data.startswith('longDescription='):
newMob.longDescription = Data[16:-1]
if Data.startswith('speech='):
newMob.speech = Data[7:-1]
if Data.startswith('expirator='):
expirator = Data[10:-1]
if expirator != '':
expirator = int(expirator)
if Data.startswith('moveAI='):
text = Data[7:-1]
moveAI = text.split(":")
if Data.startswith('battleAI='):
text = Data[9:-1]
if text == 'basicBash':
battleAI = aiBattle.basicBash
else:
battleAI = ''
if Data.startswith('kind.hp='):
newMob.kind.hp = int(Data[8:-1])
if Data.startswith('kind.maxHp='):
newMob.kind.maxHp = int(Data[11:-1])
if Data.startswith('kind.pp='):
newMob.kind.pp = int(Data[8:-1])
if Data.startswith('kind.maxPp='):
newMob.kind.maxPp = int(Data[11:-1])
if Data.startswith('kind.level='):
newMob.kind.level = int(Data[11:-1])
if Data.startswith('kind.exp='):
newMob.kind.exp = int(Data[9:-1])
if Data.startswith('kind.money='):
newMob.kind.money = int(Data[11:-1])
if Data.startswith('kind.offense='):
newMob.kind.offense = int(Data[13:-1])
if Data.startswith('kind.defense='):
newMob.kind.defense = int(Data[13:-1])
if Data.startswith('kind.speed='):
newMob.kind.speed = int(Data[11:-1])
if Data.startswith('kind.guts='):
newMob.kind.guts = int(Data[10:-1])
if Data.startswith('kind.luck='):
newMob.kind.luck = int(Data[10:-1])
if Data.startswith('kind.vitality='):
newMob.kind.vitality = int(Data[14:-1])
if Data.startswith('kind.IQ='):
newMob.kind.IQ = int(Data[8:-1])
if Data.startswith('kind.inventory='):
invString = Data[15:-1]
if invString != '':
#print "invString:" + invString
invList = invString.split(', ')
#print 'invList:' + str(invList)
for item in invList:
for ob in Globals.fromFileList:
if item == ob.name:
inventoryItems.append(item)
if Data.startswith('kind.inventorySize='):
newMob.kind.inventorySize = int(Data[19:-1])
if currentRoomString != '':
if isBattle == False:
currentRoomCoords = currentRoomString.split(":")
newMob.currentRoom = Globals.regionListDict[currentRoomCoords[0]][currentRoomCoords[1]]
else:
currentRoomCoords = currentRoomString.split(":")
newMob.currentRoom = Globals.regionListDict['battles'][currentRoomCoords[1]]
else:
newMob.currentRoom = Globals.regionListDict[newMob.region]['bullpen']
if expirator != None and expirator != '':
expiratorComponent = World.expirator(newMob, expirator)
newMob.expirator = expiratorComponent
#newMob.expirator.Timer.attachedTo = newMob.expirator
if moveAI != None and moveAI != []:
newMoveAI = aiMove.movementAI(newMob, int(moveAI[1]))
if moveAI[0] == 'basicRandom':
newMoveAI.Timer.actionFunction = newMoveAI.basicRandom
elif moveAI[0] == 'introvertRandom':
newMoveAI.Timer.actionFunction = newMoveAI.introvertRandom
elif moveAI[0] == 'extrovertRandom':
newMoveAI.Timer.actionFunction = newMoveAI.extrovertRandom
elif moveAI[0] == 'doNotMove':
newMoveAI.Timer.actionFunction = newMoveAI.doNotMove
newMob.aiMove = newMoveAI
#Globals.MoveTIMERS.remove(newMob.aiMove.Timer)
if battleAI != None:
newMob.aiBattle = battleAI
for item in inventoryItems:
#print item
removed = False
newItem = Engine.cmdSpawnObject(item, newMob.currentRoom, alert=False, whereFrom='mobinv')
newMob.kind.inventory.append(newItem)
for obj in newMob.currentRoom.objects:
if obj.name == item:
newMob.currentRoom.objects.remove(obj)
if isBattle == True:
if newMob.expirator.Timer in Globals.TIMERS:
Globals.TIMERS.remove(newMob.expirator.Timer)
if newMob.aiMove.Timer in Globals.MoveTIMERS:
Globals.MoveTIMERS.remove(newMob.aiMove.Timer)
newMob.currentRoom.mobs.append(newMob)
# if expirator != None and expirator != '':
# Globals.TIMERS.remove(newMob.expirator.Timer)
# newMob.expirator.Timer = None
#Globals.mobsFromFile.append(newMob)
return newMob
#print 'region:' + str(newMob.region)
def saveMobs():
'''
handles saving all mobs in the world into unique mob definition files when the server is shutdown
'''
for region in Globals.regionListDict:
for room in Globals.regionListDict[region]:
path='data/world/'+region+ '/mobs/' + room + '/'
shortPath='data/world/'+region+'/mobs/'
if not os.path.exists(shortPath):
os.makedirs(shortPath)
if not os.path.exists(path):
os.makedirs(path)
dirList = os.listdir(path)
for mobFile in dirList:
#print mobFile
os.remove(path+mobFile)
for mob in Globals.regionListDict[region][room].mobs:
saveMobToFile(mob, path)
def saveMobToFile(mob, path):
'''
handles saving a single mob to a unique mob definition file when the server is shutdown
'''
filePath = path + str(mob)
with open(filePath, 'w') as f:
f.write('mobID=%s\n' %str(mob))
f.write('name=%s\n' %mob.name)
f.write('species=%s\n' %mob.species)
f.write('currentRoom=%s\n' %(str(mob.currentRoom.region)+ ":" +str(mob.currentRoom.name)))
f.write('\n')
f.write('description=%s\n' %mob.description)
f.write('\n')
f.write('longDescription=%s\n' %mob.longDescription)
f.write('\n')
f.write('speech=%s\n' %mob.speech)
f.write('\n')
if mob.expirator != None:
f.write('expirator=%s\n' %mob.expirator.startingTime)
if mob.aiMove != None:
if mob.aiMove.Timer.actionFunction == mob.aiMove.basicRandom:
actionFunction = 'basicRandom'
elif mob.aiMove.Timer.actionFunction == mob.aiMove.introvertRandom:
actionFunction = 'introvertRandom'
elif mob.aiMove.Timer.actionFunction == mob.aiMove.extrovertRandom:
actionFunction = 'extrovertRandom'
elif mob.aiMove.Timer.actionFunction == mob.aiMove.doNotMove:
actionFunction = 'doNotMove'
f.write('moveAI=%s:%s\n' %(actionFunction, mob.aiMove.Timer.time))
if mob.aiBattle != None:
if mob.aiBattle == aiBattle.basicBash:
battleFunction = 'basicBash'
f.write('battleAI=%s\n' %battleFunction)
f.write('\n')
f.write('kind.hp=%s\n' %str(mob.kind.hp))
f.write('kind.maxHp=%s\n' %str(mob.kind.maxHp))
f.write('kind.pp=%s\n' %str(mob.kind.pp))
f.write('kind.maxPp=%s\n' %str(mob.kind.maxPp))
f.write('kind.level=%s\n' %str(mob.kind.level))
f.write('kind.exp=%s\n' %str(mob.kind.exp))
f.write('kind.money=%s\n' %str(mob.kind.money))
f.write('kind.offense=%s\n' %str(mob.kind.offense))
f.write('kind.defense=%s\n' %str(mob.kind.defense))
f.write('kind.speed=%s\n' %str(mob.kind.speed))
f.write('kind.guts=%s\n' %str(mob.kind.guts))
f.write('kind.luck=%s\n' %str(mob.kind.luck))
f.write('kind.vitality=%s\n' %str(mob.kind.vitality))
f.write('kind.IQ=%s\n' %str(mob.kind.IQ))
f.write('kind.inventory=',)
invString = ''
for item in mob.kind.inventory:
invString += (item.name + ', ')
if invString.endswith(', '):
invString = invString[:-2]
f.write(invString)
f.write('\n')
f.write('kind.inventorySize=%s\n' %str(mob.kind.inventorySize))
f.write('kind.equipment=%s\n' %str(mob.kind.equipment))
|
{
"content_hash": "0c00f802b647a37d80c0e766a0cdee43",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 225,
"avg_line_length": 31.59409594095941,
"alnum_prop": 0.683426769446391,
"repo_name": "buckets1337/MotherMUD",
"id": "5e826abb0cc05978ac81992a05a7cf22e67aa60a",
"size": "17246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MobInit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1568859"
}
],
"symlink_target": ""
}
|
import idaapi
import idc
import sark
def message(*messages):
for msg in messages:
for line in msg.splitlines():
idaapi.msg("[Autostruct] {}\n".format(line))
class AutoStruct(idaapi.plugin_t):
flags = idaapi.PLUGIN_PROC
comment = "AutoStruct struct creator"
help = "Automagically Create and Apply Structs"
wanted_name = "AutoStruct"
wanted_hotkey = "Shift+T"
def init(self):
self._prev_struct_name = ""
return idaapi.PLUGIN_KEEP
def term(self):
pass
def run(self, arg):
start, end = sark.get_selection()
struct_name = idc.AskStr(self._prev_struct_name, "Struct Name")
if not struct_name:
message("No structure name provided. Operation cancelled.")
return
self._prev_struct_name = struct_name
common_reg = sark.structure.get_common_register(start, end)
reg_name = idc.AskStr(common_reg, "Register")
if not reg_name:
message("No offsets found. Operation cancelled.")
return
try:
offsets, operands = sark.structure.infer_struct_offsets(start, end, reg_name)
except sark.exceptions.InvalidStructOffset:
message("Invalid offset found. Cannot create structure.",
"Make sure there are no negative offsets in the selection.")
return
except sark.exceptions.SarkInvalidRegisterName:
message("Invalid register name {!r}. Cannot create structs.".format(reg_name))
return
try:
sark.structure.create_struct_from_offsets(struct_name, offsets)
except sark.exceptions.SarkStructAlreadyExists:
yes_no_cancel = idc.AskYN(idaapi.ASKBTN_NO,
"Struct already exists. Modify?\n"
"Cancel to avoid applying the struct.")
if yes_no_cancel == idaapi.ASKBTN_CANCEL:
return
elif yes_no_cancel == idaapi.ASKBTN_YES:
sid = sark.structure.get_struct(struct_name)
sark.structure.set_struct_offsets(offsets, sid)
else: # yes_no_cancel == idaapi.ASKBTN_NO:
pass
sark.structure.apply_struct(start, end, reg_name, struct_name)
def PLUGIN_ENTRY():
return AutoStruct()
|
{
"content_hash": "f42c5f4fa9c05d2c5cb34924a8422b63",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 90,
"avg_line_length": 32.80555555555556,
"alnum_prop": 0.5956816257408976,
"repo_name": "HackerTool/Sark",
"id": "54d5dae1f6bfb3f5197b47097320665420707ab4",
"size": "2362",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plugins/autostruct.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133495"
}
],
"symlink_target": ""
}
|
from yum.plugins import TYPE_CORE
from os import walk, path, fstat
requires_api_version = '2.3'
plugin_type = (TYPE_CORE,)
VERBOSE_DEBUGLEVEL = 3
def _stat_ino_fp(fp):
"""
Get the inode number from file descriptor
"""
return fstat(fp.fileno()).st_ino
def get_file_list(rpmpath):
"""
Enumerate all files in a directory
"""
for root, _, files in walk(rpmpath):
for f in files:
yield path.join(root, f)
def for_each_file(files, cb, m='rb'):
"""
Open each file with mode specified in `m`
and invoke `cb` on each of the file objects
"""
if not files or not cb:
return []
ret = []
for f in files:
with open(f, m) as fp:
ret.append(cb(fp))
return ret
def do_detect_copy_up(files):
"""
Open the files first R/O, then R/W and count unique
inode numbers
"""
num_files = len(files)
lower = for_each_file(files, _stat_ino_fp, 'rb')
upper = for_each_file(files, _stat_ino_fp, 'ab')
diff = set(lower + upper)
return len(diff) - num_files
def init_hook(conduit):
rpmdb_path = conduit._base.rpmdb._rpmdbpath
try:
files = list(get_file_list(rpmdb_path))
copied_num = do_detect_copy_up(files)
conduit.info(VERBOSE_DEBUGLEVEL, "ovl: Copying up (%i) files from OverlayFS lower layer" % copied_num)
except Exception as e:
conduit.error(1, "ovl: Error while doing RPMdb copy-up:\n%s" % e)
|
{
"content_hash": "c20ce2f05af343485872d9392e42b4ba",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 110,
"avg_line_length": 25.32758620689655,
"alnum_prop": 0.607215793056501,
"repo_name": "ax2d/ansible-docker",
"id": "fe55805326d668cc3a53f22e5ee2217bba098182",
"size": "1469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "files/ovl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1469"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % s)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the mincoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9334)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19334)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
{
"content_hash": "8232a81efe6ebcef6f70329b69655f4c",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 98,
"avg_line_length": 31.456521739130434,
"alnum_prop": 0.5710665745219995,
"repo_name": "xieta/mincoin",
"id": "6bf36f05ec132eb88881fa464f6ceac80c4bec75",
"size": "4341",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/seeds/generate-seeds.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "687986"
},
{
"name": "C++",
"bytes": "4972454"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "186864"
},
{
"name": "Makefile",
"bytes": "110045"
},
{
"name": "Objective-C",
"bytes": "5461"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Python",
"bytes": "1138984"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "48732"
}
],
"symlink_target": ""
}
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from collections import namedtuple
class Reporter(object):
"""Formats and emits reports.
Subclasses implement the callback methods, to provide specific reporting
functionality, e.g., to console or to browser.
"""
# Generic reporting settings.
# log_level: Display log messages up to this level.
# subsettings: subclass-specific settings.
Settings = namedtuple('Settings', ['log_level'])
def __init__(self, run_tracker, settings):
self.run_tracker = run_tracker
self.settings = settings
def open(self):
"""Begin the report."""
pass
def close(self):
"""End the report."""
pass
def start_workunit(self, workunit):
"""A new workunit has started."""
pass
def end_workunit(self, workunit):
"""A workunit has finished."""
pass
def handle_log(self, workunit, level, *msg_elements):
"""Handle a message logged by pants code.
level: One of the constants above.
Each element in msg_elements is either a message or a (message, detail) pair.
A subclass must show the message, but may choose to show the detail in some
sensible way (e.g., when the message text is clicked on in a browser).
This convenience implementation filters by log level and then delegates to do_handle_log.
"""
if level <= self.settings.log_level:
self.do_handle_log(workunit, level, *msg_elements)
def do_handle_log(self, workunit, level, *msg_elements):
"""Handle a message logged by pants code, after it's passed the log level check."""
pass
def handle_output(self, workunit, label, s):
"""Handle output captured from an invoked tool (e.g., javac).
workunit: The innermost WorkUnit in which the tool was invoked.
label: Classifies the output e.g., 'stdout' for output captured from a tool's stdout or
'debug' for debug output captured from a tool's logfiles.
s: The content captured.
"""
pass
def is_under_main_root(self, workunit):
"""Is the workunit running under the main thread's root."""
return self.run_tracker.is_under_main_root(workunit)
|
{
"content_hash": "6fabc0678abed387a475cfa25f1ba7e8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 93,
"avg_line_length": 32.53623188405797,
"alnum_prop": 0.6837416481069042,
"repo_name": "Ervii/garage-time",
"id": "f13784f3d2a2bad4d70f1248611840871d0518d9",
"size": "2392",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "garage/src/python/pants/reporting/reporter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "HTML",
"bytes": "64603"
},
{
"name": "Java",
"bytes": "43275"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Protocol Buffer",
"bytes": "4664"
},
{
"name": "Python",
"bytes": "2200035"
},
{
"name": "Scala",
"bytes": "6693"
},
{
"name": "Shell",
"bytes": "29352"
},
{
"name": "Thrift",
"bytes": "1946"
}
],
"symlink_target": ""
}
|
from .static import StaticCalculation
from .relax import IonRelaxation
__author__ = 'Guillermo Avendano-Franco'
|
{
"content_hash": "b7e023f6a9e80883967369c700b76754",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 40,
"avg_line_length": 28.25,
"alnum_prop": 0.7964601769911505,
"repo_name": "MaterialsDiscovery/PyChemia",
"id": "c71f7c825a4a081262a56308ba36711bb6ee3c31",
"size": "113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pychemia/code/abinit/task/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1390398"
},
{
"name": "Shell",
"bytes": "325"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015, NeverEatYellowSnow (NEYS)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed from NeverEatYellowSnow (NEYS).
4. Neither the name of NeverEatYellowSnow (NEYS) nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import struct
import codecs
class GenericStructParser:
def __init__(self, fmt, converter = lambda x: x):
self.fmt = fmt
self.converter = converter
def get(self,b,k):
r=struct.unpack_from(self.fmt,b,k)
if len(r) == 1:
r = r[0]
return self.converter(r),k+struct.calcsize(self.fmt)
def put(self,v):
try:
v[0]
except:
v = [v]
return struct.pack(self.fmt,*v)
def size(self):
return struct.calcsize(self.fmt)
class GenericArrayParser:
def __init__(self, lFmt, eSize, decode, encode):
self.lFmt = lFmt
self.eSize = eSize
self.decode = decode
self.encode = encode
def get(self,b,k):
l,=struct.unpack_from(self.lFmt,b,k)
k += struct.calcsize(self.lFmt)
nk = k+self.eSize*l
raw = b[k:nk]
return self.decode(raw),nk
def put(self,v):
raw = self.encode(v)
l = len(raw)//self.eSize
return struct.pack(self.lFmt,l) + raw
def size(self):
raise NotImplementedError
Uint8 = GenericStructParser('B')
Bool = GenericStructParser('B', lambda x: x != 0)
Uint16 = GenericStructParser('H')
Int16 = GenericStructParser('h')
Uint32 = GenericStructParser('I')
Int32 = GenericStructParser('i')
Float = GenericStructParser('f')
Vector3f = GenericStructParser('fff')
Ascii = GenericArrayParser(
'B', 1,
lambda x: codecs.decode(x, 'ascii', 'replace'),
lambda x: codecs.encode(x, 'ascii', 'strict'),
)
UTF32 = GenericArrayParser(
'B', 4,
lambda x: codecs.decode(x, 'utf-32', 'replace'),
lambda x: codecs.encode(x, 'utf-32', 'strict')[4:], # first 4 bytes are ignored?
)
class GenericPacket:
def __init__(self, **kw):
if len(kw):
for f,p in self._content:
setattr(self, f, kw[f])
def from_buffer(self, buffer, idx):
for f,p in self._content:
try:
r,idx = p.get(buffer,idx)
setattr(self,f,r)
except Exception as exc:
raise RuntimeError("Error while processing attribute %s: %s" % (f, str(exc)))
return idx,self
def to_buffer(self):
res = struct.pack('B', self.packetId)
for f,p in self._content:
res += p.put(getattr(self,f))
return res
def __str__(self):
res = str(type(self)) + "("
for f,_ in self._content:
v = getattr(self, f, None)
if type(v) in (tuple, list):
v = tuple(str(x) for x in v)
res += f + "=" + str(v) + ", "
res += ")"
return res
def size(self):
s = 0
for f,p in self._content:
s += p.size()
return s
class DictToClass:
def __init__(self, **kw):
for k in kw:
setattr(self, k, kw[k])
|
{
"content_hash": "74d893cb7415bd7487e7362a358d4f2a",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 93,
"avg_line_length": 34.47794117647059,
"alnum_prop": 0.619321817018554,
"repo_name": "flitzi/acplugins",
"id": "3e3b8f2d4c0b93f83a6efce13c7e3fe99cbdebab",
"size": "4689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "acplugins4python/acplugins4python/ac_server_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "367964"
},
{
"name": "Python",
"bytes": "41832"
}
],
"symlink_target": ""
}
|
import sys
sys.path.append('../models/')
from ModelBase import Status, SessionFactory
import codecs
session = SessionFactory()
print unicode(session.query(Status)[0])
|
{
"content_hash": "22e17cdff865115834e4329364d12ec7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 44,
"avg_line_length": 21.125,
"alnum_prop": 0.7751479289940828,
"repo_name": "groceryheist/UWBotThings",
"id": "03514a566e8becf35f14e28e37bc9dbdf367f116",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/Network/stub.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102944"
}
],
"symlink_target": ""
}
|
import time
from matrixbot import utils
class EchoPlugin:
def __init__(self, bot, settings):
self.name = "EchoPlugin"
self.bot = bot
self.logger = utils.get_logger()
self.logger.info(settings)
self.settings = settings
self.period = self.settings.get('period', 3) # 3 seconds.
self.last_time = time.time()
self.broadcast(self.compose_message())
def compose_message(self):
username = self.settings['username']
message = self.settings['message']
return "EchoPlugin (" + username + "): " + message
def broadcast(self, message):
for each in self.settings['rooms']:
room_id = self.bot.get_real_room_id(each)
self.send(room_id, message)
def send(self, room_id, message):
self.logger.info(message)
self.bot.send_html(room_id, message, msgtype="m.notice")
def dispatch(self, handler):
self.logger.debug("Echo::dispatch")
# Exit if now is within time interval.
now = time.time()
if now < self.last_time + self.period:
return
self.last_time = now
# Do action.
self.logger.debug('EchoPlugin::Send message')
self.broadcast(self.compose_message())
|
{
"content_hash": "f288c8bc5e5b4c987d10b1c1f5f1a6d1",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 67,
"avg_line_length": 31.121951219512194,
"alnum_prop": 0.5963949843260188,
"repo_name": "psaavedra/matrix-bot",
"id": "dd2ea8b47f62c8b4061b0601fd6aafe2d7c7a259",
"size": "1300",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "matrixbot/plugins/echo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84552"
},
{
"name": "Shell",
"bytes": "3959"
}
],
"symlink_target": ""
}
|
import uuid
import re
import hashlib
import time
import formencode
from datetime import datetime, timedelta
from markupsafe import escape
import tornado.auth
from tornado.web import asynchronous, HTTPError
from tornado.options import options
from tornado import escape
from lib.utils import string2int, html_escape
from lib.time import timeover, timeout
from lib.decorators import authenticated
from cache.files import fetch_cached_board_topic, fetch_cached_board_topic_morecontent, fetch_cached_board_nodelist
from app.base.form import BaseForm
from app.base.validator import Utf8MaxLength
from app.base.handler import BaseHandler
from app.base.pagination import Pagination
from app.people.people_model import People
from app.board.board_model import BoardTopic,BoardComment,BoardTopicVoter,BoardNode, BoardNodeFollower
PageMaxLimit = 10
class BoardTopicForm(BaseForm):
topic_content = formencode.All(Utf8MaxLength(300), formencode.validators.String(not_empty=True, min=10, strip=True, messages={'tooLong':u'最多只能输入 %(max)i 个字', 'empty':u'请输入主题内容', 'tooShort':u'请至少输入%(min)i 个字'}))
#topic_more_content = formencode.All(Utf8MaxLength(3000), formencode.validators.String(strip=True, not_empty=False, messages={'tooLong':u'最多只能输入 %(max)i 个字'}))
#topic_tags = formencode.validators.String(not_empty=False, strip=True, max=200, messages={'tooLong':u'最多只能输入 %(max)i 个字'})
topic_videos = formencode.validators.URL(strip=True, messages={'noTLD':u'请输入正确的视频地址'})
#topic_images = formencode.validators.URL(strip=True)
class BoardCommentForm(BaseForm):
topic_id = formencode.validators.String(not_empty=True, strip=True)
topic_url = formencode.validators.String(not_empty=False, strip=True)
comment_content = formencode.All(Utf8MaxLength(300), formencode.validators.String(strip=True, not_empty=True, min=10, messages={'tooLong':u'最大只能输入 %(max)i 个字', 'empty':u'请输入评论内容'}))
class BoardTopicHandler(BaseHandler):
def get(self, topic_id=None, comment_content_error=None, comment_content=''):
if not topic_id:
self.about(404)
topic = fetch_cached_board_topic(topic_id)
topic_more_content = fetch_cached_board_topic_morecontent(topic_id)
if not topic:
self.about(404)
node = topic.node
topic_has_voted = False
people_id = None
people = None
page = self.get_argument("page", "1")
page = string2int(page)
if page < 1:
page = 1
if people_id:
topic_has_voted = topic.has_voted(people_id)
limit = PageMaxLimit
offset = (page-1) * PageMaxLimit
comments = topic.get_comments(limit=limit, offset=offset)
total_pages = topic.comment_count/limit
last_page = topic.comment_count % limit
if last_page > 0:
total_pages += 1
node_list = fetch_cached_board_nodelist()
to = timeout(topic.create_time)
topic_can_edit = False
#if people and to < 600 and topic.is_author(people):
# topic_can_edit = True
pagination = Pagination(page, total_pages)
#return self.write(topic_more_content)
self.render("board/topic.html", timeover=timeover, topic=topic, topic_more_content=topic_more_content, node=node, node_list=node_list,
xsrf_token=self.xsrf_token, pagination=pagination, comment_list=comments, topic_has_voted=topic_has_voted, topic_can_edit=topic_can_edit,
comment_content_error=comment_content_error, comment_content=comment_content)
class BoardCommentHandler(BaseHandler):
def _get_topic(self, topic_id=None, comment_content_error=None, comment_content=''):
if not topic_id:
self.about(404)
topic = fetch_cached_board_topic(topic_id)
if not topic:
self.about(404)
node = topic.node
topic_has_voted = False
people_id = None
people = None
page = self.get_argument("page", "1")
page = string2int(page)
if page < 1:
page = 1
if people_id:
topic_has_voted = topic.has_voted(people_id)
limit = PageMaxLimit
offset = (page-1) * PageMaxLimit
comments = topic.get_comments(limit=limit, offset=offset)
total_pages = topic.comment_count/limit
last_page = topic.comment_count % limit
if last_page > 0:
total_pages += 1
node_list = fetch_cached_board_nodelist()
to = timeout(topic.create_time)
topic_can_edit = False
if people and to < 600 and topic.is_author(people):
topic_can_edit = True
pagination = Pagination(page, total_pages)
self.render("board/topic.html", timeover=timeover, topic=topic, node=node, node_list=node_list,
xsrf_token=self.xsrf_token, pagination=pagination, comment_list=comments, topic_has_voted=topic_has_voted, topic_can_edit=topic_can_edit,
comment_content_error=comment_content_error, comment_content=comment_content)
@authenticated
def post(self, topic_id=None):
if not topic_id:
self.about(404)
topic = fetch_cached_board_topic(topic_id)
if not topic:
self.about(404)
node = topic.node
people = self.current_user
comment_schema = BoardCommentForm(self)
comment_content = self.get_argument('comment_content', '')
if comment_schema.validate():
topic_id = comment_schema.params.get('topic_id')
topic_url = comment_schema.params.get('topic_url')
comment_content = comment_schema.params.get('comment_content')
comment = BoardComment()
comment.content = comment_content
topic = fetch_cached_board_topic(topic_id)
comment.topic = topic
comment.people = people
comment.save()
topic.add_comment(comment)
fetch_cached_board_topic(topic.id, reflush=True)
comment_content = ''
return self.redirect(topic_url)
else:
comment_content_error = comment_schema.form_errors.get('comment_content')
#node_list = fetch_cached_board_nodelist()
#limit = PageMaxLimit
#total_pages = topic.comment_count/limit
#last_page = topic.comment_count % limit
#if last_page > 0:
# total_pages += 1
#pagination = Pagination(page, total_pages)
#page = self.get_argument("page", "1")
#page = string2int(page)
#if page < 1:
# page = 1
#offset = (page-1) * PageMaxLimit
#comments = topic.get_comments(limit=limit, offset=offset)
#comment_content = comment_schema.params.get('comment_content')
#return self.render("board/topic.html", timeover=timeover, topic=topic, node=node, node_list=node_list,
# xsrf_token=self.xsrf_token, pagination=pagination, comment_list=comments, topic_has_voted=topic_has_voted, topic_can_edit=topic_can_edit,
# comment_content_error=comment_content_error, comment_content=comment_content)
return self._get_topic(topic_id, comment_content_error=comment_content_error, comment_content=comment_content)
class BoardTopicSubmitHandler(BaseHandler):
@authenticated
def get(self, node_name=None):
if not node_name:
self.about(404)
node = BoardNode.get_by_name(node_name)
if not node:
self.about(404)
node_list = fetch_cached_board_nodelist()
self.render("board/submit.html", node=node, node_list=node_list)
@authenticated
def post(self, node_name=None):
if not node_name:
self.about(404)
node = BoardNode.get_by_name(node_name)
if not node:
self.about(404)
people = self.get_current_user()
schema = BoardTopicForm(self)
if schema.validate():
topic_content = schema.params.get("topic_content", None)
topic_more_content = schema.params.get("topic_more_content", None)
topic_videos = schema.params.get("topic_videos", None)
topic_tags = schema.params.get("topic_tags", None)
topic = BoardTopic()
topic.content = topic_content
topic.more_content = topic_more_content
topic.node = node
topic.people = people #People.objects().first()
if topic_videos:
topic.videos = [topic_videos]
topic.save()
#try:
# topic.save()
#except Exception, e:
# raise e
return self.redirect('/board/node/%s' % node_name)
else:
topic_content = self.get_argument('topic_content', '')
topic_more_content = self.get_argument('topic_more_content', '')
topic_content_error = schema.form_errors.get('topic_content', '')
topic_more_content_error = schema.form_errors.get('topic_more_content', '')
topic_videos_error = schema.form_errors.get('topic_images', '')
self.render("board/submit.html", node=node,
topic_content=topic_content,
topic_more_content=topic_more_content,
topic_content_error=topic_content_error,
topic_more_content_error=topic_more_content_error,
topic_videos_error=topic_videos_error)
class BoardIndexHandler(BaseHandler):
def get(self):
page = self.get_argument('page', '1')
page_no = string2int(page, -1)
if page_no == -1:
self.about(404)
if page_no < 1:
page_no = 1
offset = (page_no-1) * PageMaxLimit*2
node_list = fetch_cached_board_nodelist()
topics = BoardTopic.get_last_topics(limit=PageMaxLimit, offset=offset)
topic_list = []
for t in topics:
topic = fetch_cached_board_topic(t.id)
topic_list.append(topic)
now = time.time()
self.render("board/index.html", timeover=timeover, topic_list=topic_list, topic_count=len(topic_list), node_list=node_list, now_time=now,
page=page_no, offset=offset)
class BoardNodeHandler(BaseHandler):
def get(self, node_name=None):
if not node_name:
self.about(404)
node = BoardNode.get_by_name(node_name)
if not node:
self.about(404)
page = self.get_argument('page', '1')
page_no = string2int(page, -1)
if page_no == -1:
self.about(404)
if page_no < 1:
page_no = 1
offset = (page_no-1) * PageMaxLimit
node_list = fetch_cached_board_nodelist()
topics = BoardTopic.get_last_node_topics(node, limit=PageMaxLimit, offset=offset)
topic_list = []
for t in topics:
topic = fetch_cached_board_topic(t.id)
topic_list.append(topic)
now = time.time()
total_count = BoardTopic.get_node_topics_count(node)
total_pages = total_count / PageMaxLimit
last_page = total_count % PageMaxLimit
if last_page > 0:
total_pages += 1
pagination = Pagination(page_no, total_pages)
self.render("board/node.html", timeover=timeover, topic_list=topic_list, topic_count=len(topic_list), node_list=node_list, node = node, now_time=now,
pagination=pagination, offset=offset)
class BoardTagHandler(BaseHandler):
def get(self, tag_name=None):
self.render("home.html")
class BoardTopicVoteHandler(BaseHandler):
def post(self, tag_name=None):
people = self.current_user
if self.current_user == None:
return dict(result='redirect', url='/login')
topic_id = self.get_argument('id', None)
dir = self.get_argument('dir', None)
topic = fetch_cached_board_topic(topic_id)
if topic.people.id != people.id:
#result = json.dumps(dict(result='error', info='people is the author'))
return dict(result='error', info='people is the author')
if BoardTopicVoter.has_voted(people.id, topic.id):
#result = json.dumps(dict(result='error', info='topic has voted'))
return dict(result='error', info='topic has voted')
try:
voter = BoardTopicVoter()
voter.topic_id = topic.id
voter.member_id = people.id
voter.save()
topic.add_voter(voter)
fetch_cached_news_topic(topic.id, reflush=True)
return dict(result='ok', vote=topic.up_vote+1)
except OperationError, err:
return dict(result='error', info='not vid')
return dict(result='error', info='unknown error')
class BoardCommentListHandler(BaseHandler):
def get(self):
tid = self.get_argument('topic_id', None)
if not tid:
return self.write('')
topic = fetch_cached_board_topic(tid)
page = self.get_argument('page', 0)
limit = PageMaxLimit
start = string2int(page) * limit
if topic:
comment_list = topic.get_comments(limit=limit, offset=start)
return self.render('board/comments.html', topic=topic, comment_list=comment_list, timeover=timeover)
else:
return self.write('')
class BoardApiTopicMoreContentHandler(BaseHandler):
def get(self):
topic_id = self.get_argument('topic_id', None)
if not topic_id:
return self.render_json(dict(html=''))
html = fetch_cached_board_topic_morecontent(topic_id)
return self.render_json(dict(html=html))
handlers = [
(r"/", BoardIndexHandler),
(r"/board", BoardIndexHandler),
(r"/board/submit/([a-zA-Z0-9\-_]{3,32})", BoardTopicSubmitHandler),
(r"/board/commentlist", BoardCommentListHandler),
(r"/board/vote", BoardTopicVoteHandler),
(r"/board/tag/(.*)", BoardTagHandler),
(r"/board/topic/([a-zA-Z0-9]*)", BoardTopicHandler),
(r"/board/comment/([a-zA-Z0-9]*)", BoardCommentHandler),
(r"/board/node/([a-zA-Z0-9\-_]{3,32})", BoardNodeHandler),
(r"/board/api/morecontent", BoardApiTopicMoreContentHandler),
]
|
{
"content_hash": "d7ded32aebfdae2a3d517a74f69e4737",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 215,
"avg_line_length": 37.5479797979798,
"alnum_prop": 0.5972156836370973,
"repo_name": "feilaoda/FlickBoard",
"id": "afce9a2781ad2464e6751f5989d78d97740df2e9",
"size": "15019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/app/board/board_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "120662"
},
{
"name": "Python",
"bytes": "164260"
},
{
"name": "Shell",
"bytes": "0"
}
],
"symlink_target": ""
}
|
'''
"install" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import os
import json
import inspect
from xml.dom import minidom
import shutil
import cocos
class CCPluginDeploy(cocos.CCPlugin):
"""
Install a project
"""
@staticmethod
def depends_on():
return ('compile',)
@staticmethod
def plugin_name():
return "deploy"
@staticmethod
def brief_description():
return "Deploy a project to the target"
def _add_custom_options(self, parser):
parser.add_argument("-m", "--mode", dest="mode", default='debug',
help="Set the deploy mode, should be debug|release, default is debug.")
def _check_custom_options(self, args):
if args.mode != 'release':
args.mode = 'debug'
self._mode = 'debug'
if 'release' == args.mode:
self._mode = args.mode
def _is_debug_mode(self):
return self._mode == 'debug'
def _xml_attr(self, dir, file_name, node_name, attr):
doc = minidom.parse(os.path.join(dir, file_name))
return doc.getElementsByTagName(node_name)[0].getAttribute(attr)
def deploy_ios(self, dependencies):
if not self._platforms.is_ios_active():
return
compile_dep = dependencies['compile']
self._iosapp_path = compile_dep._iosapp_path
self._use_sdk = compile_dep.use_sdk
def deploy_mac(self, dependencies):
if not self._platforms.is_mac_active():
return
compile_dep = dependencies['compile']
self._macapp_path = compile_dep._macapp_path
self.target_name = compile_dep.target_name
def deploy_web(self, dependencies):
if not self._platforms.is_web_active():
return
compile_dep = dependencies['compile']
self.sub_url = compile_dep.sub_url
self.run_root = compile_dep.run_root
def deploy_win32(self, dependencies):
if not self._platforms.is_win32_active():
return
compile_dep = dependencies['compile']
self.run_root = compile_dep.run_root
self.project_name = compile_dep.project_name
def deploy_linux(self, dependencies):
if not self._platforms.is_linux_active():
return
compile_dep = dependencies['compile']
self.run_root = compile_dep.run_root
self.project_name = compile_dep.project_name
def deploy_android(self, dependencies):
if not self._platforms.is_android_active():
return
project_dir = self._project.get_project_dir()
android_project_dir = self._platforms.project_path()
cocos.Logging.info("installing on device")
self.package = self._xml_attr(android_project_dir, 'AndroidManifest.xml', 'manifest', 'package')
activity_name = self._xml_attr(android_project_dir, 'AndroidManifest.xml', 'activity', 'android:name')
if activity_name.startswith('.'):
self.activity = self.package + activity_name
else:
self.activity = activity_name
compile_dep = dependencies['compile']
apk_path = compile_dep.apk_path
sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT')
adb_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(sdk_root, 'platform-tools', 'adb'))
#TODO detect if the application is installed before running this
adb_uninstall = "%s uninstall %s" % (adb_path, self.package)
self._run_cmd(adb_uninstall)
adb_install = "%s install \"%s\"" % (adb_path, apk_path)
self._run_cmd(adb_install)
def get_filename_by_extention(self, ext, path):
filelist = os.listdir(path)
for fname in filelist:
name, extention = os.path.splitext(fname)
if extention == ext:
return fname
return None
def run(self, argv, dependencies):
self.parse_args(argv)
cocos.Logging.info('Deploying mode: %s' % self._mode)
self.deploy_ios(dependencies)
self.deploy_mac(dependencies)
self.deploy_android(dependencies)
self.deploy_web(dependencies)
self.deploy_win32(dependencies)
self.deploy_linux(dependencies)
|
{
"content_hash": "48e967a10e34235d33b172394f84da06",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 110,
"avg_line_length": 31.470588235294116,
"alnum_prop": 0.6172897196261682,
"repo_name": "AnySDK/Sample_CPP_Cocos2dx",
"id": "cc158431a71cd73981b8f03d5c39c83dc8597c53",
"size": "4538",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "cocos2d/tools/cocos2d-console/plugins/project_deploy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2434"
},
{
"name": "C",
"bytes": "1574332"
},
{
"name": "C#",
"bytes": "29913"
},
{
"name": "C++",
"bytes": "7456808"
},
{
"name": "CMake",
"bytes": "158462"
},
{
"name": "GLSL",
"bytes": "49475"
},
{
"name": "Java",
"bytes": "569654"
},
{
"name": "JavaScript",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "139165"
},
{
"name": "Makefile",
"bytes": "22489"
},
{
"name": "Objective-C",
"bytes": "968335"
},
{
"name": "Objective-C++",
"bytes": "315587"
},
{
"name": "PowerShell",
"bytes": "18747"
},
{
"name": "Python",
"bytes": "271570"
},
{
"name": "Shell",
"bytes": "23277"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('quotetron', '0004_auto_20141224_1213'),
]
operations = [
migrations.RemoveField(
model_name='quote',
name='votes',
),
migrations.AddField(
model_name='quote',
name='down_votes',
field=models.PositiveIntegerField(default=0, editable=False),
preserve_default=True,
),
migrations.AddField(
model_name='quote',
name='up_votes',
field=models.PositiveIntegerField(default=0, editable=False),
preserve_default=True,
),
]
|
{
"content_hash": "90659ec60e5d44cf885ed93232781c6a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 73,
"avg_line_length": 25.862068965517242,
"alnum_prop": 0.564,
"repo_name": "CMU-Robotics-Club/roboticsclub.org",
"id": "4d973c5d588f17ad4db3ebd0b8cccca12d2ddcca",
"size": "774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quotetron/migrations/0005_auto_20141225_1040.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4725"
},
{
"name": "HTML",
"bytes": "33977"
},
{
"name": "JavaScript",
"bytes": "5079"
},
{
"name": "Python",
"bytes": "249072"
}
],
"symlink_target": ""
}
|
from functools import wraps
try:
from unittest import mock
except ImportError:
import mock
def _cleanUpPatch(fn):
@wraps(fn)
def cleaned(self, *args, **kwargs):
patch = fn(*args, **kwargs)
self.addCleanup(patch.stop)
return patch.start()
return cleaned
class PatchMixin(object):
patch = _cleanUpPatch(mock.patch)
patchDict = _cleanUpPatch(mock.patch.dict)
patchObject = _cleanUpPatch(mock.patch.object)
|
{
"content_hash": "9203dd010129da245e8b52b6c0eabd5b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 50,
"avg_line_length": 22.095238095238095,
"alnum_prop": 0.6724137931034483,
"repo_name": "Julian/Ivoire",
"id": "13d770b75808f0b4e090904e33117073fbdcc0c5",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivoire/tests/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64606"
}
],
"symlink_target": ""
}
|
"""
Guest tools for ESX to set up network in the guest.
On Windows we require pyWin32 installed on Python.
"""
import array
import gettext
import logging
import os
import platform
import socket
import struct
import subprocess
import sys
import time
gettext.install('nova', unicode=1)
PLATFORM_WIN = 'win32'
PLATFORM_LINUX = 'linux2'
ARCH_32_BIT = '32bit'
ARCH_64_BIT = '64bit'
NO_MACHINE_ID = 'No machine id'
# Logging
FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
if sys.platform == PLATFORM_WIN:
LOG_DIR = os.path.join(os.environ.get('ALLUSERSPROFILE'), 'openstack')
elif sys.platform == PLATFORM_LINUX:
LOG_DIR = '/var/log/openstack'
else:
LOG_DIR = 'logs'
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
LOG_FILENAME = os.path.join(LOG_DIR, 'openstack-guest-tools.log')
logging.basicConfig(filename=LOG_FILENAME, format=FORMAT)
if sys.hexversion < 0x3000000:
_byte = ord # 2.x chr to integer
else:
_byte = int # 3.x byte to integer
class ProcessExecutionError:
"""Process Execution Error Class."""
def __init__(self, exit_code, stdout, stderr, cmd):
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
self.cmd = cmd
def __str__(self):
return str(self.exit_code)
def _bytes2int(bytes):
"""Convert bytes to int."""
intgr = 0
for byt in bytes:
intgr = (intgr << 8) + _byte(byt)
return intgr
def _parse_network_details(machine_id):
"""
Parse the machine_id to get MAC, IP, Netmask and Gateway fields per NIC.
machine_id is of the form ('NIC_record#NIC_record#', '')
Each of the NIC will have record NIC_record in the form
'MAC;IP;Netmask;Gateway;Broadcast;DNS' where ';' is field separator.
Each record is separated by '#' from next record.
"""
logging.debug(_("Received machine_id from vmtools : %s") % machine_id[0])
network_details = []
if machine_id[1].strip() == "1":
pass
else:
for machine_id_str in machine_id[0].split('#'):
network_info_list = machine_id_str.split(';')
if len(network_info_list) % 6 != 0:
break
no_grps = len(network_info_list) / 6
i = 0
while i < no_grps:
k = i * 6
network_details.append((
network_info_list[k].strip().lower(),
network_info_list[k + 1].strip(),
network_info_list[k + 2].strip(),
network_info_list[k + 3].strip(),
network_info_list[k + 4].strip(),
network_info_list[k + 5].strip().split(',')))
i += 1
logging.debug(_("NIC information from vmtools : %s") % network_details)
return network_details
def _get_windows_network_adapters():
"""Get the list of windows network adapters."""
import win32com.client
wbem_locator = win32com.client.Dispatch('WbemScripting.SWbemLocator')
wbem_service = wbem_locator.ConnectServer('.', 'root\cimv2')
wbem_network_adapters = wbem_service.InstancesOf('Win32_NetworkAdapter')
network_adapters = []
for wbem_network_adapter in wbem_network_adapters:
if wbem_network_adapter.NetConnectionStatus == 2 or \
wbem_network_adapter.NetConnectionStatus == 7:
adapter_name = wbem_network_adapter.NetConnectionID
mac_address = wbem_network_adapter.MacAddress.lower()
wbem_network_adapter_config = \
wbem_network_adapter.associators_(
'Win32_NetworkAdapterSetting',
'Win32_NetworkAdapterConfiguration')[0]
ip_address = ''
subnet_mask = ''
if wbem_network_adapter_config.IPEnabled:
ip_address = wbem_network_adapter_config.IPAddress[0]
subnet_mask = wbem_network_adapter_config.IPSubnet[0]
#wbem_network_adapter_config.DefaultIPGateway[0]
network_adapters.append({'name': adapter_name,
'mac-address': mac_address,
'ip-address': ip_address,
'subnet-mask': subnet_mask})
return network_adapters
def _get_linux_network_adapters():
"""Get the list of Linux network adapters."""
import fcntl
max_bytes = 8096
arch = platform.architecture()[0]
if arch == ARCH_32_BIT:
offset1 = 32
offset2 = 32
elif arch == ARCH_64_BIT:
offset1 = 16
offset2 = 40
else:
raise OSError(_("Unknown architecture: %s") % arch)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array.array('B', '\0' * max_bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
sock.fileno(),
0x8912,
struct.pack('iL', max_bytes, names.buffer_info()[0])))[0]
adapter_names = \
[names.tostring()[n_counter:n_counter + offset1].split('\0', 1)[0]
for n_counter in xrange(0, outbytes, offset2)]
network_adapters = []
for adapter_name in adapter_names:
ip_address = socket.inet_ntoa(fcntl.ioctl(
sock.fileno(),
0x8915,
struct.pack('256s', adapter_name))[20:24])
subnet_mask = socket.inet_ntoa(fcntl.ioctl(
sock.fileno(),
0x891b,
struct.pack('256s', adapter_name))[20:24])
raw_mac_address = '%012x' % _bytes2int(fcntl.ioctl(
sock.fileno(),
0x8927,
struct.pack('256s', adapter_name))[18:24])
mac_address = ":".join([raw_mac_address[m_counter:m_counter + 2]
for m_counter in range(0, len(raw_mac_address), 2)]).lower()
network_adapters.append({'name': adapter_name,
'mac-address': mac_address,
'ip-address': ip_address,
'subnet-mask': subnet_mask})
return network_adapters
def _get_adapter_name_and_ip_address(network_adapters, mac_address):
"""Get the adapter name based on the MAC address."""
adapter_name = None
ip_address = None
for network_adapter in network_adapters:
if network_adapter['mac-address'] == mac_address.lower():
adapter_name = network_adapter['name']
ip_address = network_adapter['ip-address']
break
return adapter_name, ip_address
def _get_win_adapter_name_and_ip_address(mac_address):
"""Get Windows network adapter name."""
network_adapters = _get_windows_network_adapters()
return _get_adapter_name_and_ip_address(network_adapters, mac_address)
def _get_linux_adapter_name_and_ip_address(mac_address):
"""Get Linux network adapter name."""
network_adapters = _get_linux_network_adapters()
return _get_adapter_name_and_ip_address(network_adapters, mac_address)
def _execute(cmd_list, process_input=None, check_exit_code=True):
"""Executes the command with the list of arguments specified."""
cmd = ' '.join(cmd_list)
logging.debug(_("Executing command: '%s'") % cmd)
env = os.environ.copy()
obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
logging.debug(_("Result was %s") % obj.returncode)
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
stdout=stdout,
stderr=stderr,
cmd=cmd)
time.sleep(0.1)
return result
def _windows_set_networking():
"""Set IP address for the windows VM."""
program_files = os.environ.get('PROGRAMFILES')
program_files_x86 = os.environ.get('PROGRAMFILES(X86)')
vmware_tools_bin = None
if os.path.exists(os.path.join(program_files, 'VMware', 'VMware Tools',
'vmtoolsd.exe')):
vmware_tools_bin = os.path.join(program_files, 'VMware',
'VMware Tools', 'vmtoolsd.exe')
elif os.path.exists(os.path.join(program_files, 'VMware', 'VMware Tools',
'VMwareService.exe')):
vmware_tools_bin = os.path.join(program_files, 'VMware',
'VMware Tools', 'VMwareService.exe')
elif program_files_x86 and os.path.exists(os.path.join(program_files_x86,
'VMware', 'VMware Tools',
'VMwareService.exe')):
vmware_tools_bin = os.path.join(program_files_x86, 'VMware',
'VMware Tools', 'VMwareService.exe')
if vmware_tools_bin:
cmd = ['"' + vmware_tools_bin + '"', '--cmd', 'machine.id.get']
for network_detail in _parse_network_details(_execute(cmd,
check_exit_code=False)):
mac_address, ip_address, subnet_mask, gateway, broadcast,\
dns_servers = network_detail
adapter_name, current_ip_address = \
_get_win_adapter_name_and_ip_address(mac_address)
if adapter_name and not ip_address == current_ip_address:
cmd = ['netsh', 'interface', 'ip', 'set', 'address',
'name="%s"' % adapter_name, 'source=static', ip_address,
subnet_mask, gateway, '1']
_execute(cmd)
# Windows doesn't let you manually set the broadcast address
for dns_server in dns_servers:
if dns_server:
cmd = ['netsh', 'interface', 'ip', 'add', 'dns',
'name="%s"' % adapter_name, dns_server]
_execute(cmd)
else:
logging.warn(_("VMware Tools is not installed"))
def _filter_duplicates(all_entries):
final_list = []
for entry in all_entries:
if entry and entry not in final_list:
final_list.append(entry)
return final_list
def _set_rhel_networking(network_details=None):
"""Set IPv4 network settings for RHEL distros."""
network_details = network_details or []
all_dns_servers = []
for network_detail in network_details:
mac_address, ip_address, subnet_mask, gateway, broadcast,\
dns_servers = network_detail
all_dns_servers.extend(dns_servers)
adapter_name, current_ip_address = \
_get_linux_adapter_name_and_ip_address(mac_address)
if adapter_name and not ip_address == current_ip_address:
interface_file_name = \
'/etc/sysconfig/network-scripts/ifcfg-%s' % adapter_name
# Remove file
os.remove(interface_file_name)
# Touch file
_execute(['touch', interface_file_name])
interface_file = open(interface_file_name, 'w')
interface_file.write('\nDEVICE=%s' % adapter_name)
interface_file.write('\nUSERCTL=yes')
interface_file.write('\nONBOOT=yes')
interface_file.write('\nBOOTPROTO=static')
interface_file.write('\nBROADCAST=%s' % broadcast)
interface_file.write('\nNETWORK=')
interface_file.write('\nGATEWAY=%s' % gateway)
interface_file.write('\nNETMASK=%s' % subnet_mask)
interface_file.write('\nIPADDR=%s' % ip_address)
interface_file.write('\nMACADDR=%s' % mac_address)
interface_file.close()
if all_dns_servers:
dns_file_name = "/etc/resolv.conf"
os.remove(dns_file_name)
_execute(['touch', dns_file_name])
dns_file = open(dns_file_name, 'w')
dns_file.write("; generated by OpenStack guest tools")
unique_entries = _filter_duplicates(all_dns_servers)
for dns_server in unique_entries:
dns_file.write("\nnameserver %s" % dns_server)
dns_file.close()
_execute(['/sbin/service', 'network', 'restart'])
def _set_ubuntu_networking(network_details=None):
"""Set IPv4 network settings for Ubuntu."""
network_details = network_details or []
all_dns_servers = []
interface_file_name = '/etc/network/interfaces'
# Remove file
os.remove(interface_file_name)
# Touch file
_execute(['touch', interface_file_name])
interface_file = open(interface_file_name, 'w')
for device, network_detail in enumerate(network_details):
mac_address, ip_address, subnet_mask, gateway, broadcast,\
dns_servers = network_detail
all_dns_servers.extend(dns_servers)
adapter_name, current_ip_address = \
_get_linux_adapter_name_and_ip_address(mac_address)
if adapter_name:
interface_file.write('\nauto %s' % adapter_name)
interface_file.write('\niface %s inet static' % adapter_name)
interface_file.write('\nbroadcast %s' % broadcast)
interface_file.write('\ngateway %s' % gateway)
interface_file.write('\nnetmask %s' % subnet_mask)
interface_file.write('\naddress %s\n' % ip_address)
logging.debug(_("Successfully configured NIC %d with "
"NIC info %s") % (device, network_detail))
interface_file.close()
if all_dns_servers:
dns_file_name = "/etc/resolv.conf"
os.remove(dns_file_name)
_execute(['touch', dns_file_name])
dns_file = open(dns_file_name, 'w')
dns_file.write("; generated by OpenStack guest tools")
unique_entries = _filter_duplicates(all_dns_servers)
for dns_server in unique_entries:
dns_file.write("\nnameserver %s" % dns_server)
dns_file.close()
logging.debug(_("Restarting networking....\n"))
_execute(['/etc/init.d/networking', 'restart'])
def _linux_set_networking():
"""Set IP address for the Linux VM."""
vmware_tools_bin = None
if os.path.exists('/usr/sbin/vmtoolsd'):
vmware_tools_bin = '/usr/sbin/vmtoolsd'
elif os.path.exists('/usr/bin/vmtoolsd'):
vmware_tools_bin = '/usr/bin/vmtoolsd'
elif os.path.exists('/usr/sbin/vmware-guestd'):
vmware_tools_bin = '/usr/sbin/vmware-guestd'
elif os.path.exists('/usr/bin/vmware-guestd'):
vmware_tools_bin = '/usr/bin/vmware-guestd'
if vmware_tools_bin:
cmd = [vmware_tools_bin, '--cmd', 'machine.id.get']
network_details = _parse_network_details(_execute(cmd,
check_exit_code=False))
# TODO(sateesh): For other distros like suse, debian, BSD, etc.
if(platform.dist()[0] == 'Ubuntu'):
_set_ubuntu_networking(network_details)
elif (platform.dist()[0] == 'redhat'):
_set_rhel_networking(network_details)
else:
logging.warn(_("Distro '%s' not supported") % platform.dist()[0])
else:
logging.warn(_("VMware Tools is not installed"))
if __name__ == '__main__':
pltfrm = sys.platform
if pltfrm == PLATFORM_WIN:
_windows_set_networking()
elif pltfrm == PLATFORM_LINUX:
_linux_set_networking()
else:
raise NotImplementedError(_("Platform not implemented: '%s'") % pltfrm)
|
{
"content_hash": "3f4f4514789634b809db1f9cc6ce2648",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 79,
"avg_line_length": 40.485788113695094,
"alnum_prop": 0.5778657135562931,
"repo_name": "anbangr/trusted-nova",
"id": "8c8b4dfc5d8b2a9715f86fd13109c794a13c059a",
"size": "16362",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/esx/guest_tool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "5690299"
},
{
"name": "Shell",
"bytes": "27086"
}
],
"symlink_target": ""
}
|
import pygame
class spritesheet(object):
def __init__(self, filename):
try:
self.sheet = pygame.image.load(filename).convert()
except pygame.error, message:
print 'Unable to load spritesheet image:', filename
raise SystemExit, message
# Load a specific image from a specific rectangle
def get_img(self, a, b):
rectangle= (a*32, b*32, 32, 32)
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
def image_at(self, rectangle, colorkey = None):
"Loads image from x,y,x+offset,y+offset"
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
#if colorkey is not None:
# if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
# Load a whole bunch of images and return them as a list
def images_at(self, rects, colorkey = None):
"Loads multiple images, supply a list of coordinates"
return [self.image_at(rect, colorkey) for rect in rects]
# Load a whole strip of images
def load_strip(self, rect, image_count, colorkey = None):
"Loads a strip of images and returns them as a list"
tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])
for x in range(image_count)]
return self.images_at(tups, colorkey)
|
{
"content_hash": "e5c943946b1d6a1a8760510c0eb02a18",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 64,
"avg_line_length": 36.37777777777778,
"alnum_prop": 0.6096518020769701,
"repo_name": "andersx/myrpg",
"id": "d151742c35166d9550a1f6a03e2f420d8c5f9ac8",
"size": "1637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sprite_sheet.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "27026"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import json
import numpy as np
from app.models import Game
from app.models.tag import compute_reverse_index
from app.utils import data_file
if __name__ == '__main__':
games = list(Game.get_all())
reverse_index = compute_reverse_index(games)
doc_tag_matrix = np.zeros((len(games), len(reverse_index) + 1), dtype=np.int)
app_ids = sorted([game.app_id for game in games])
tag_ids = sorted(reverse_index.keys())
doc_tag_matrix[:, 0] = np.array(app_ids)
for app_index in xrange(len(games)):
for tag_index in xrange(len(reverse_index)):
if app_ids[app_index] in reverse_index[tag_ids[tag_index]]:
doc_tag_matrix[app_index, tag_index + 1] = 1
with open(data_file("doc_tag_matrix.npy"), "wb") as f:
np.save(f, doc_tag_matrix)
|
{
"content_hash": "5246be577f66fa57d92370699270ab2e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 34.875,
"alnum_prop": 0.6463560334528077,
"repo_name": "PapaCharlie/SteamyReviews",
"id": "5878d182f454a5552daf6542aca07b15a49a3218",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/routines/document_tag_matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25341"
},
{
"name": "HTML",
"bytes": "14665"
},
{
"name": "JavaScript",
"bytes": "43470"
},
{
"name": "Jupyter Notebook",
"bytes": "658515"
},
{
"name": "Python",
"bytes": "64501"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import unicode_literals
from django.db import migrations, connection
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
tenant = Client.objects.get(schema_name=connection.tenant.schema_name)
with LocalTenant(tenant):
group_perms = {
'Staff': {
'perms': (
'add_officesubregion',
'change_officesubregion',
'delete_officesubregion',
'add_officeregion',
'change_officeregion',
'delete_officeregion',
)
},
}
update_group_permissions('offices', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('offices', '0001_initial'),
]
operations = [
migrations.RunPython(
add_group_permissions,
migrations.RunPython.noop
)
]
|
{
"content_hash": "22cbb477d6581951945c1946725b4818",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 26.930232558139537,
"alnum_prop": 0.6010362694300518,
"repo_name": "onepercentclub/bluebottle",
"id": "06806bdbc02dea4aacd055a1151d8142541d460c",
"size": "1232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/offices/migrations/0002_auto_20210203_1714.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
"""Tests for direct_fisher_rao."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import direct_fisher_rao as dfr
import numpy as np
import tensorflow as tf
class DirectFisherRaoTest(tf.test.TestCase):
perturbation = 1e-4
rtol = 1e-2
def test_empirical_fisher_constant_loss(self):
"""Asserts unregularized loss without variables evaluates to constant."""
labels = tf.constant([[1.0, 0.0]])
make_logits = lambda: tf.constant([[0.5, 0.5]])
logits, _ = dfr.make_empirical_fisher_regularizer(make_logits,
labels,
"test_scope",
lambda name: True,
self.perturbation)
with self.test_session() as sess:
self.assertAllEqual(sess.run(logits), [[0.5, 0.5]])
def test_empirical_fisher_constant_loss_regularizer(self):
"""Asserts regularizer for loss without variables evaluates to zero."""
labels = tf.constant([[1.0, 0.0]])
make_logits = lambda: tf.constant([[0.5, 0.5]])
_, regularizer = dfr.make_empirical_fisher_regularizer(make_logits,
labels,
"test_scope",
lambda name: True,
self.perturbation)
with self.test_session() as sess:
self.assertAllEqual(sess.run(regularizer), 0.)
def test_empirical_fisher_should_regularize_unchanged_loss(self):
"""Asserts unregularized loss unchanged by `should_regularize` function."""
labels = tf.constant([[1.0, 0.0]])
def make_logits():
l = tf.get_variable("a", initializer=tf.constant(1.))
return tf.stack([[l, tf.subtract(1., l)]])
loss_true, _ = dfr.make_empirical_fisher_regularizer(make_logits,
labels,
"test_scope",
lambda name: True,
self.perturbation)
loss_false, _ = dfr.make_empirical_fisher_regularizer(make_logits,
labels,
"test_scope_2",
lambda name: False,
self.perturbation)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllEqual(sess.run(loss_true), sess.run(loss_false))
def test_empirical_fisher_should_regularize_zero_regularizer(self):
"""Asserts regularizer forced unchanged by `should_regularize` function."""
labels = tf.constant([[1.0, 0.0]])
def make_logits():
l = tf.get_variable("a", initializer=tf.constant(1.))
return tf.stack([[l, tf.subtract(1., l)]])
_, regularizer = dfr.make_empirical_fisher_regularizer(make_logits,
labels,
"test_scope",
lambda name: False,
self.perturbation)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllEqual(sess.run(regularizer), 0.0)
def test_empirical_fisher_should_regularize_changed_regularizer(self):
"""Asserts regularizer correctly changed by `should_regularize` function."""
labels = tf.constant([[1.0, 0.0]])
def make_logits():
a = tf.get_variable("a", initializer=tf.constant(1.))
b = tf.get_variable("b", initializer=tf.constant(1.))
l = tf.multiply(a, b)
return tf.stack([[l, tf.subtract(1., l)]])
_, regularizer_b = dfr.make_empirical_fisher_regularizer(
make_logits,
labels,
"test_scope_should",
lambda name: "b" in name,
# Note that for the "b" in name check to work with the intended effect
# the scope name cannot contain the letter b
self.perturbation)
_, regularizer = dfr.make_empirical_fisher_regularizer(
make_logits,
labels,
"test_scope",
lambda name: True,
self.perturbation)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertNotEqual(sess.run(regularizer_b), sess.run(regularizer))
def make_empirical_fisher_sin_logits_and_regularizer(self):
"""Helper that creates Tensors for sin(x) logits and the regularizer."""
labels = tf.constant([[1.0, 0.0]])
def make_logits():
x = tf.get_variable("x", initializer=tf.constant(2.))
y = tf.get_variable("y", initializer=tf.constant(3.))
return tf.stack([[tf.sin(x), tf.sin(y)]])
return dfr.make_empirical_fisher_regularizer(make_logits,
labels,
"test_scope",
lambda name: True,
self.perturbation)
def test_empirical_fisher_sin_logits(self):
"""Asserts unregularized loss for sin logits evaluates symbolic solution."""
symbolic_logits = [[np.sin(2.), np.sin(3.)]]
logits, _ = self.make_empirical_fisher_sin_logits_and_regularizer()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(logits), symbolic_logits, rtol=self.rtol)
def symbolic_regularizer_empirical_fisher_sin_logits(self, x):
expsum = np.exp(np.sin(2.)) + np.exp(np.sin(x))
dlogp1_1 = -np.exp(np.sin(x)) * np.cos(2.) / expsum
dlogp1_2 = np.exp(np.sin(x)) * np.cos(x) / expsum
symbolic_regularizer = np.square(dlogp1_1 * 2. + dlogp1_2 * x)
return symbolic_regularizer
def test_empirical_fisher_sin_regularizer(self):
"""Asserts regularizer for sin logits evaluates symbolic solution."""
symbolic_regularizer = \
self.symbolic_regularizer_empirical_fisher_sin_logits(3.)
_, regularizer = self.make_empirical_fisher_sin_logits_and_regularizer()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(regularizer), symbolic_regularizer,
rtol=self.rtol)
def test_empirical_fisher_sin_gradient(self):
"""Validates gradient of regularizer for sin logits by finite difference."""
h = 1e-4
symbolic_regularizer_plus_pert = \
self.symbolic_regularizer_empirical_fisher_sin_logits(3. + h)
symbolic_regularizer_minus_pert = \
self.symbolic_regularizer_empirical_fisher_sin_logits(3. - h)
fd_gradient = (symbolic_regularizer_plus_pert -
symbolic_regularizer_minus_pert) / (2. * h)
_, regularizer = self.make_empirical_fisher_sin_logits_and_regularizer()
with tf.variable_scope("test_scope", reuse=True):
gradient = tf.gradients(regularizer, tf.get_variable("y"))[0]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(gradient),
fd_gradient,
rtol=np.sqrt(self.rtol))
# Using sqrt of usual tolerance because two finite difference
# approximations are being compared
def make_two_vars_product_loss_and_regularizer(self):
"""Helper that creates Tensors for a * b loss and its regularizer."""
def make_loss():
a = tf.get_variable("a", initializer=tf.constant(2.))
b = tf.get_variable("b", initializer=tf.constant(3.))
return tf.multiply(a, b)
return dfr.make_empirical_fisher_regularizer(make_loss,
"test_scope",
lambda name: True,
self.perturbation)
def test_empirical_fisher_batch(self):
"""Asserts sum property of regularizer gradient for sum reduction loss."""
labels_full_batch = \
[[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
labels_one_batch = [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
labels_two_batch = [[0.5, 0.5], [0.5, 0.5]]
x_full_batch = np.array([1., 2., 3., 4., 5.])
x_part_one_batch = np.array([1., 2., 3.])
x_part_two_batch = np.array([4., 5.])
def make_make_logits_part(x_batch):
def make_logits():
v = tf.get_variable("v", initializer=tf.constant(2.5))
y = tf.multiply(v, x_batch)
return tf.transpose(tf.stack([tf.sin(y), tf.cos(y)]))
return make_logits
_, regularizer_full = dfr.make_empirical_fisher_regularizer(
make_make_logits_part(x_full_batch),
labels_full_batch,
"test_scope",
lambda name: True,
self.perturbation)
_, regularizer_part_one = dfr.make_empirical_fisher_regularizer(
make_make_logits_part(x_part_one_batch),
labels_one_batch,
"test_scope_part_one",
lambda name: True,
self.perturbation)
_, regularizer_part_two = dfr.make_empirical_fisher_regularizer(
make_make_logits_part(x_part_two_batch),
labels_two_batch,
"test_scope_part_two",
lambda name: True,
self.perturbation)
with tf.variable_scope("test_scope", reuse=True):
gradient_full = tf.gradients(regularizer_full, tf.get_variable("v"))[0]
with tf.variable_scope("test_scope_part_one", reuse=True):
gradient_part_one = tf.gradients(regularizer_part_one,
tf.get_variable("v"))[0]
with tf.variable_scope("test_scope_part_two", reuse=True):
gradient_part_two = tf.gradients(regularizer_part_two,
tf.get_variable("v"))[0]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(
5 * sess.run(gradient_full),
3 * sess.run(gradient_part_one) + 2 * sess.run(gradient_part_two),
rtol=self.rtol)
def helper_standard_fisher_constant_loss(self, differentiate_probability):
"""Asserts logits without variables evaluates to constant."""
make_logits = lambda: tf.constant([[0.5, 0.5]])
logits, _ = dfr.make_standard_fisher_regularizer(make_logits,
"test_scope",
lambda name: True,
self.perturbation,
differentiate_probability)
with self.test_session() as sess:
self.assertAllEqual(sess.run(logits), [[0.5, 0.5]])
def test_standard_fisher_constant_loss(self):
self.helper_standard_fisher_constant_loss(True)
def test_standard_fisher_constant_loss_stop(self):
self.helper_standard_fisher_constant_loss(False)
def helper_standard_fisher_constant_regularizer(self,
differentiate_probability):
"""Asserts regularizer for logits without variables evaluates to zero."""
make_logits = lambda: tf.constant([[0.5, 0.5]])
_, regularizer = dfr.make_standard_fisher_regularizer(
make_logits,
"test_scope",
lambda name: True,
self.perturbation,
differentiate_probability)
with self.test_session() as sess:
self.assertAllEqual(sess.run(regularizer), 0.)
def test_standard_fisher_constant_regularizer(self):
self.helper_standard_fisher_constant_regularizer(True)
def test_standard_fisher_constant_regularizer_stop(self):
self.helper_standard_fisher_constant_regularizer(False)
def helper_standard_fisher_should_regularize_unchanged_loss(
self,
differentiate_probability):
"""Asserts unregularized loss unchanged by `should_regularize` function."""
def make_logits():
l = tf.get_variable("a", initializer=tf.constant(1.))
return tf.stack([[l, tf.subtract(1., l)]])
loss_true, _ = dfr.make_standard_fisher_regularizer(
make_logits,
"test_scope_true",
lambda name: True,
self.perturbation,
differentiate_probability)
loss_false, _ = dfr.make_standard_fisher_regularizer(
make_logits,
"test_scope_false",
lambda name: False,
self.perturbation,
differentiate_probability)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllEqual(sess.run(loss_true), sess.run(loss_false))
def test_standard_fisher_should_regularize_unchanged_loss(self):
self.helper_standard_fisher_should_regularize_unchanged_loss(True)
def test_standard_fisher_should_regularize_unchanged_loss_stop(self):
self.helper_standard_fisher_should_regularize_unchanged_loss(False)
def helper_standard_fisher_should_regularize_zero_regularizer(
self,
differentiate_probability):
"""Asserts regularizer forced unchanged by `should_regularize` function."""
def make_logits():
l = tf.get_variable("a", initializer=tf.constant(1.))
return tf.stack([[l, tf.subtract(1., l)]])
_, regularizer = dfr.make_standard_fisher_regularizer(
make_logits,
"test_scope",
lambda name: False,
self.perturbation,
differentiate_probability)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllEqual(sess.run(regularizer), 0.0)
def test_standard_fisher_should_regularize_zero_regularizer(self):
self.helper_standard_fisher_should_regularize_zero_regularizer(True)
def test_standard_fisher_should_regularize_zero_regularizer_stop(self):
self.helper_standard_fisher_should_regularize_zero_regularizer(False)
def helper_standard_fisher_should_regularize_different_regularizer(
self,
differentiate_probability):
"""Asserts regularizer changed by `should_regularize` function."""
def make_logits():
a = tf.get_variable("a", initializer=tf.constant(1.))
b = tf.get_variable("b", initializer=tf.constant(1.))
l = tf.multiply(a, b)
return tf.stack([[l, tf.subtract(1., l)]])
_, regularizer_b = dfr.make_standard_fisher_regularizer(
make_logits,
"test_scope_should",
lambda name: "b" in name,
self.perturbation,
differentiate_probability)
_, regularizer = dfr.make_standard_fisher_regularizer(
make_logits,
"test_scope",
lambda name: True,
self.perturbation,
differentiate_probability)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertNotEqual(sess.run(regularizer_b), sess.run(regularizer))
def test_standard_fisher_should_regularize_different_regularizer(self):
self.helper_standard_fisher_should_regularize_different_regularizer(True)
def test_standard_fisher_should_regularize_different_regularizer_stop(self):
self.helper_standard_fisher_should_regularize_different_regularizer(False)
def helper_standard_fisher_should_regularize_symmetric_regularizer(
self,
differentiate_probability):
"""Asserts regularizer changed by `should_regularize` function."""
def make_logits():
a = tf.get_variable("a", initializer=tf.constant(1.))
b = tf.get_variable("b", initializer=tf.constant(1.))
l = tf.multiply(a, b)
return tf.stack([[l, tf.subtract(1., l)]])
_, regularizer_b = dfr.make_standard_fisher_regularizer(
make_logits,
"test_scope_should",
lambda name: "b" in name,
self.perturbation,
differentiate_probability)
_, regularizer_a = dfr.make_standard_fisher_regularizer(
make_logits,
"test_scope_should_symmetric",
lambda name: "a" in name,
self.perturbation,
differentiate_probability)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertEqual(sess.run(regularizer_b), sess.run(regularizer_a))
def test_standard_fisher_should_regularize_symmetric_regularizer(self):
self.helper_standard_fisher_should_regularize_symmetric_regularizer(True)
def test_standard_fisher_should_regularize_symmetric_regularizer_stop(self):
self.helper_standard_fisher_should_regularize_symmetric_regularizer(False)
def make_standard_fisher_sin_logits_and_regularizer(
self,
differentiate_probability):
"""Helper that creates Tensors for sin(x) logits and the regularizer."""
def make_logits():
x = tf.get_variable("x", initializer=tf.constant(2.))
y = tf.get_variable("y", initializer=tf.constant(3.))
return tf.stack([[tf.sin(x), tf.sin(y)]])
return dfr.make_standard_fisher_regularizer(make_logits,
"test_scope",
lambda name: True,
self.perturbation,
differentiate_probability)
def helper_standard_fisher_sin_logits(self, differentiate_probability):
"""Asserts unregularized loss for sin logits evaluates symbolic solution."""
symbolic_logits = [[np.sin(2.), np.sin(3.)]]
logits, _ = self.make_standard_fisher_sin_logits_and_regularizer(
differentiate_probability)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(logits), symbolic_logits, rtol=self.rtol)
def test_standard_fisher_sin_logits(self):
self.helper_standard_fisher_sin_logits(True)
def test_standard_fisher_sin_logits_stop(self):
self.helper_standard_fisher_sin_logits(False)
def symbolic_regularizer_standard_fisher_sin_logits_no_probs(self, x, p1, p2):
expsum = np.exp(np.sin(2.)) + np.exp(np.sin(x))
dlogp1_1 = np.cos(2.) - 1. / expsum * np.exp(np.sin(2.)) * np.cos(2.)
dlogp1_2 = - 1. / expsum * np.exp(np.sin(x)) * np.cos(x)
dlogp2_1 = - 1. / expsum * np.exp(np.sin(2.)) * np.cos(2.)
dlogp2_2 = np.cos(x) - 1. / expsum * np.exp(np.sin(x)) * np.cos(x)
symbolic_regularizer = (np.square(dlogp1_1 * 2. + dlogp1_2 * x) * p1 +
np.square(dlogp2_1 * 2. + dlogp2_2 * x) * p2)
return symbolic_regularizer
def symbolic_regularizer_standard_fisher_sin_logits(self, x):
expsum = np.exp(np.sin(2.)) + np.exp(np.sin(x))
p1 = np.exp(np.sin(2.)) / expsum
p2 = np.exp(np.sin(x)) / expsum
return self.symbolic_regularizer_standard_fisher_sin_logits_no_probs(x,
p1,
p2)
def helper_standard_fisher_sin_regularizer(self, differentiate_probability):
"""Asserts regularizer for sin logits evaluates symbolic solution."""
symbolic_regularizer = \
self.symbolic_regularizer_standard_fisher_sin_logits(3.)
_, regularizer = self.make_standard_fisher_sin_logits_and_regularizer(
differentiate_probability)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(regularizer), symbolic_regularizer,
rtol=self.rtol)
def test_standard_fisher_sin_regularizer(self):
self.helper_standard_fisher_sin_regularizer(True)
def test_standard_fisher_sin_regularizer_stop(self):
self.helper_standard_fisher_sin_regularizer(False)
def test_standard_fisher_sin_regularizer_gradient(self):
"""Validates gradient of regularizer for sin logits by finite difference."""
h = 1e-4
symbolic_regularizer_pert = \
self.symbolic_regularizer_standard_fisher_sin_logits(3. + h)
symbolic_regularizer = \
self.symbolic_regularizer_standard_fisher_sin_logits(3.)
fd_gradient = (symbolic_regularizer_pert - symbolic_regularizer) / h
_, regularizer = self.make_standard_fisher_sin_logits_and_regularizer(True)
with tf.variable_scope("test_scope", reuse=True):
gradient = tf.gradients(regularizer, tf.get_variable("y"))[0]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(gradient), fd_gradient, rtol=self.rtol)
def test_standard_fisher_sin_regularizer_gradient_stop(self):
"""Validates gradient of regularizer for sin logits by finite difference."""
expsum = np.exp(np.sin(2.)) + np.exp(np.sin(3.))
p1 = np.exp(np.sin(2.)) / expsum
p2 = np.exp(np.sin(3.)) / expsum
h = 1e-4
symbolic_regularizer_pert = \
self.symbolic_regularizer_standard_fisher_sin_logits_no_probs(3. + h,
p1,
p2)
symbolic_regularizer = \
self.symbolic_regularizer_standard_fisher_sin_logits_no_probs(3.,
p1,
p2)
fd_gradient = (symbolic_regularizer_pert - symbolic_regularizer) / h
_, regularizer = self.make_standard_fisher_sin_logits_and_regularizer(False)
with tf.variable_scope("test_scope", reuse=True):
gradient = tf.gradients(regularizer, tf.get_variable("y"))[0]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(sess.run(gradient), fd_gradient, rtol=self.rtol)
def helper_standard_fisher_categorical_batch(self, differentiate_probability):
"""Asserts sum property of regularizer gradient for sum reduction loss."""
x_full_batch = np.array([1., 2., 3., 4., 5.])
x_part_one_batch = np.array([1., 2., 3.])
x_part_two_batch = np.array([4., 5.])
def make_make_logits_part(x_batch):
def make_logits():
v = tf.get_variable("v", initializer=tf.constant(2.5))
y = tf.multiply(v, x_batch)
return tf.transpose(tf.stack([tf.sin(y), tf.cos(y)]))
return make_logits
_, regularizer_full = dfr.make_standard_fisher_regularizer(
make_make_logits_part(x_full_batch),
"test_scope",
lambda name: True,
self.perturbation,
differentiate_probability)
_, regularizer_part_one = dfr.make_standard_fisher_regularizer(
make_make_logits_part(x_part_one_batch),
"test_scope_part_one",
lambda name: True,
self.perturbation,
differentiate_probability)
_, regularizer_part_two = dfr.make_standard_fisher_regularizer(
make_make_logits_part(x_part_two_batch),
"test_scope_part_two",
lambda name: True,
self.perturbation,
differentiate_probability)
with tf.variable_scope("test_scope", reuse=True):
gradient_full = tf.gradients(regularizer_full, tf.get_variable("v"))[0]
with tf.variable_scope("test_scope_part_one", reuse=True):
gradient_part_one = tf.gradients(regularizer_part_one,
tf.get_variable("v"))[0]
with tf.variable_scope("test_scope_part_two", reuse=True):
gradient_part_two = tf.gradients(regularizer_part_two,
tf.get_variable("v"))[0]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(
5 * sess.run(gradient_full),
3 * sess.run(gradient_part_one) + 2 * sess.run(gradient_part_two),
rtol=self.rtol)
def test_standard_fisher_categorical_batch(self):
self.helper_standard_fisher_categorical_batch(True)
def test_standard_fisher_categorical_batch_stop(self):
self.helper_standard_fisher_categorical_batch(False)
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "8899795cca5c935d87ad3ac6b3adb9d5",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 80,
"avg_line_length": 41.38737201365188,
"alnum_prop": 0.6005442625654558,
"repo_name": "brain-research/fisher-rao-regularization",
"id": "e0f09c83a0fd617a3f55f7a060b0cd442531afe6",
"size": "24829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "direct_fisher_rao_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "48294"
}
],
"symlink_target": ""
}
|
import mock
from nova.cells import utils as cells_utils
from nova import objects
from nova.tests.functional.api_sample_tests import api_sample_base
class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors')
self._verify_response('hypervisors-list-resp', {}, response, 200)
def test_hypervisors_search(self):
response = self._do_get('os-hypervisors/fake/search')
self._verify_response('hypervisors-search-resp', {}, response, 200)
def test_hypervisors_without_servers(self):
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-without-servers-resp',
{}, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_with_servers(self, mock_instance_get):
instance = [
{
"deleted": None,
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"deleted": None,
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
mock_instance_get.return_value = instance
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-with-servers-resp', {},
response, 200)
def test_hypervisors_detail(self):
hypervisor_id = '1'
subs = {
'hypervisor_id': hypervisor_id,
'service_id': '[0-9]+',
}
response = self._do_get('os-hypervisors/detail')
self._verify_response('hypervisors-detail-resp', subs, response, 200)
def test_hypervisors_show(self):
hypervisor_id = '1'
subs = {
'hypervisor_id': hypervisor_id,
'service_id': '[0-9]+',
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
self._verify_response('hypervisors-show-resp', subs, response, 200)
def test_hypervisors_statistics(self):
response = self._do_get('os-hypervisors/statistics')
self._verify_response('hypervisors-statistics-resp', {}, response, 200)
def test_hypervisors_uptime(self):
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.stub_out('nova.compute.api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = '1'
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
class HypervisorsCellsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
def setUp(self):
self.flags(enable=True, cell_type='api', group='cells')
super(HypervisorsCellsSampleJsonTests, self).setUp()
def test_hypervisor_uptime(self, mocks):
fake_hypervisor = objects.ComputeNode(id=1, host='fake-mini',
hypervisor_hostname='fake-mini')
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
def fake_compute_node_get(self, context, hyp):
return fake_hypervisor
def fake_service_get_by_compute_host(self, context, host):
return cells_utils.ServiceProxy(
objects.Service(id=1, host='fake-mini', disabled=False,
disabled_reason=None),
'cell1')
self.stub_out(
'nova.compute.cells_api.HostAPI.compute_node_get',
fake_compute_node_get)
self.stub_out(
'nova.compute.cells_api.HostAPI.service_get_by_compute_host',
fake_service_get_by_compute_host)
self.stub_out(
'nova.compute.cells_api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = fake_hypervisor.id
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {'hypervisor_id': str(hypervisor_id)}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
class HypervisorsSampleJson228Tests(HypervisorsSampleJsonTests):
microversion = '2.28'
scenarios = [('v2_28', {'api_major_version': 'v2.1'})]
def setUp(self):
super(HypervisorsSampleJson228Tests, self).setUp()
self.api.microversion = self.microversion
class HypervisorsSampleJson233Tests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
microversion = '2.33'
scenarios = [('v2_33', {'api_major_version': 'v2.1'})]
def setUp(self):
super(HypervisorsSampleJson233Tests, self).setUp()
self.api.microversion = self.microversion
# Start a new compute service to fake a record with hypervisor id=2
# for pagination test.
self.start_service('compute', host='host1')
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors?limit=1&marker=1')
self._verify_response('hypervisors-list-resp', {}, response, 200)
def test_hypervisors_detail(self):
subs = {
'hypervisor_id': '2',
'host': 'host1',
'host_name': 'host1'
}
response = self._do_get('os-hypervisors/detail?limit=1&marker=1')
self._verify_response('hypervisors-detail-resp', subs, response, 200)
|
{
"content_hash": "26a9609c4d964c3e49b52d4f694c9fa0",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 38.05769230769231,
"alnum_prop": 0.6013137948458818,
"repo_name": "rajalokan/nova",
"id": "0b14343a709d451dc897982dfa56b7a0b25eeb42",
"size": "6569",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api_sample_tests/test_hypervisors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
}
|
import json
from django.forms import Widget, MultipleChoiceField
from django.forms.utils import flatatt
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from taggit.models import Tag
# TODO: Factor out dependency on taggit so it can be a generic large-vocab
# selector.
class TagWidget(Widget):
"""Widget which sticks each tag in a separate <input type=hidden>
Designed to have the tag selection submitted all at once when a Submit
button is clicked
"""
# If True, render without editing controls:
read_only = False
# async_urls is a tuple: (URL for async add POSTs, URL for async remove
# POSTs). If this is (), assume you want to queue up tag adds and removes
# and submit them all at once through a form you wrap around this widget
# yourself. In this case, tag names will not be links, because we'd have
# to design some way of computing the URLs without hitting the network.
async_urls = ()
# make_link should be a function that takes a tag slug and returns some
# kind of meaningful link. Ignored if async_urls is ().
def make_link(self, slug):
return '#'
# Allow adding new tags to the vocab:
can_create_tags = False
# TODO: Add async_remove_url and async_add_url kwargs holding URLs to
# direct async remove and add requests to. The client app is then
# responsible for routing to those and doing the calls to remove/add
# the tag.
def _render_tag_list_items(self, control_name, tag_names):
"""Represent applied tags and render controls to allow removal."""
def render_one(tag):
output = u'<li class="tag">'
# Hidden input for form state:
if not self.async_urls:
output += u'<input%s />' % flatatt({
'value': force_unicode(tag.name),
'type': 'hidden',
'name': control_name})
# Linkless tag name:
output += (u'<span class="tag-name">%s</span>' %
escape(tag.name))
else:
# Anchor for link to by-tag view:
output += (u'<a class="tag-name" href="%s">%s</a>' %
(escape(self.make_link(tag.slug)),
escape(tag.name)))
# Remove button:
if not self.read_only:
output += (u'<input type="submit" '
u'value="✖" '
u'class="remover" '
u'name="remove-tag-%s" />' % escape(tag.name))
output += u'</li>'
return output
tags = Tag.objects.filter(name__in=tag_names)
representations = [render_one(t) for t in tags]
return u'\n'.join(representations)
def render(self, name, value, attrs=None):
"""Render a hidden input for each choice plus a blank text input."""
output = u'<div class="tag-adder tags%s"' % (
'' if self.read_only or self.async_urls else ' deferred')
if not self.read_only:
vocab = [t.name for t in Tag.objects.only('name').all()]
output += u' data-tag-vocab-json="%s"' % escape(json.dumps(vocab))
if self.can_create_tags:
output += u' data-can-create-tags="1"'
output += u'>'
if not self.read_only:
# Insert a hidden <input type=submit> before the removers so
# hitting return doesn't wreak destruction:
output += (u'<input type="submit" class="hidden-submitter" />')
# TODO: Render the little form around the tags as a JS-less fallback
# iff self.async_urls. And don't add the hidden Add button above.
output += u'<ul class="tag-list'
if self.read_only:
output += u' immutable'
output += u'">'
output += self._render_tag_list_items(name, value or [])
output += u'</ul>'
# TODO: Add a TagField kwarg for synchronous tag add URL, and draw the
# form here if it's filled out.
if not self.read_only:
# Add a field for inputting new tags. Since it's named the same as
# the hidden inputs, it should handily work as a JS-less fallback.
input_attrs = self.build_attrs(attrs, type='text', name=name,
**{'class': 'autocomplete-tags'})
output += u'<input%s />' % flatatt(input_attrs)
# Add the Add button:
output += u'<input%s />' % flatatt(dict(type='submit',
value=_('Add'),
**{'class': 'adder'}))
output += u'</div>'
return mark_safe(output)
def value_from_datadict(self, data, files, name):
# TODO: removed 'MergeDict' from classinfo check below
# could find not explicit use of MergeDict elsewhere in the codebase, so
# i think we're okay here?
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name, None)
class TagField(MultipleChoiceField):
"""A field semantically equivalent to a MultipleChoiceField--just with a
list of choices so long that it would be awkward to display.
The `choices` kwarg passed to the constructor should be a callable.
If you use this, you'll probably also want to set many of the TagWidget's
attrs after form instantiation. There's no opportunity to set them at
construction, since TagField is typically instantiated deep within taggit.
"""
widget = TagWidget
# Unlike in the superclass, `choices` kwarg to __init__ is unused.
def valid_value(self, value):
"""Check the validity of a single tag."""
return (self.widget.can_create_tags or
Tag.objects.filter(name=value).exists())
def to_python(self, value):
"""Ignore the input field if it's blank; don't make a tag called ''."""
return [v for v in super(TagField, self).to_python(value) if v]
|
{
"content_hash": "7f78b98ec50cf64d9ece6464a7bae029",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 80,
"avg_line_length": 39.81645569620253,
"alnum_prop": 0.5903671912255604,
"repo_name": "anushbmx/kitsune",
"id": "4c1c99f3c44fccd6cbec95d1e064bf2e3798bb32",
"size": "6291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitsune/tags/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "335184"
},
{
"name": "Dockerfile",
"bytes": "3547"
},
{
"name": "Groovy",
"bytes": "4221"
},
{
"name": "HTML",
"bytes": "628447"
},
{
"name": "JavaScript",
"bytes": "802494"
},
{
"name": "Makefile",
"bytes": "3600"
},
{
"name": "Python",
"bytes": "2994910"
},
{
"name": "Shell",
"bytes": "19325"
},
{
"name": "TSQL",
"bytes": "1035"
}
],
"symlink_target": ""
}
|
import os
import glob
print('Running all modules in tests/')
for file in glob.glob("tests/*.py"):
os.system('nosetests -v ' + file)
|
{
"content_hash": "77f46b112c9b8a4059fa429b66b02867",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 38,
"avg_line_length": 17.375,
"alnum_prop": 0.6690647482014388,
"repo_name": "Schille/weimar-graphstore",
"id": "db71ef35e097be808dd9ab871f1a334a1b1fe639",
"size": "210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143885"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="heatmap.colorbar.tickformatstop",
**kwargs
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
{
"content_hash": "4a04e46420839105042112287ab381e4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 67,
"avg_line_length": 29.58823529411765,
"alnum_prop": 0.5765407554671969,
"repo_name": "plotly/python-api",
"id": "3c942d0f92fa8ea648d7df1bfede40e46caaf77d",
"size": "503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/heatmap/colorbar/tickformatstop/_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import unittest
import test.util.stress as stress_util
import test.unit.configuration as configuration_module
import voodoo.configuration as ConfigurationManager
import voodoo.sessions.manager as SessionManager
import voodoo.sessions.session_type as SessionType
class SessionManagerTestCase(unittest.TestCase):
def setUp(self):
self.cfg_manager= ConfigurationManager.ConfigurationManager()
self.cfg_manager.append_module(configuration_module)
self.session_manager = SessionManager.SessionManager(
self.cfg_manager,
SessionType.Memory,
"foo"
)
def func():
self.session_manager.create_session()
self.runner = stress_util.MainRunner(func, "SessionManager")
def test_sequential(self):
iterations = 10000
max_time = 0.9 # And this is too much, too
print "seq",max(self.runner.run_sequential(iterations, max_time))
def test_concurrent(self):
threads = 200
iterations = 50
max_time = 0.7 # And this is far too much
print "con",max(self.runner.run_threaded(threads, iterations, max_time))
def suite():
return unittest.makeSuite(SessionManagerTestCase)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "2c83203691c7595de7a5420b6ef690e4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 32.06976744186046,
"alnum_prop": 0.6272661348803481,
"repo_name": "zstars/weblabdeusto",
"id": "3cfef89717a2533dbfa3ba95b7820ddcb462df71",
"size": "1767",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/src/test/stress/voodoo/sessions/manager.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "ApacheConf",
"bytes": "122186"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "150709"
},
{
"name": "CoffeeScript",
"bytes": "30909"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "452001"
},
{
"name": "Java",
"bytes": "1234794"
},
{
"name": "JavaScript",
"bytes": "1656027"
},
{
"name": "Makefile",
"bytes": "1571"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "PHP",
"bytes": "155137"
},
{
"name": "Python",
"bytes": "3435335"
},
{
"name": "Shell",
"bytes": "2596"
},
{
"name": "Smarty",
"bytes": "20160"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.