| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.user import UserHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.user = UserHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
        except Exception:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
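A minimal usage sketch for the Application fixture above; the base_url is illustrative and the chosen browser needs a matching WebDriver installed.

```python
# Hedged usage of the Application fixture (URL is illustrative).
from fixture.application import Application

app = Application(browser="firefox", base_url="http://localhost/addressbook/")
try:
    if app.is_valid():
        app.open_home_page()
finally:
    app.destroy()  # always quit the WebDriver session
```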
|
{
"content_hash": "8c277ec533fad660553b721aed0e8dfa",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 65,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.5837837837837838,
"repo_name": "maximatorrus/automated_testing_python",
"id": "31ad218fa93d3bdad88dc8962a1d4687bc3a991c",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29718"
}
],
"symlink_target": ""
}
|
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from behave import given, when, then
from toolium.driver_wrapper import DriverWrapper
from web_behave.pageobjects.login import LoginPageObject
@given('the home page is open in {browser}')
def step_impl(context, browser):
if browser == 'browser1':
# Use default driver
context.current_page = {'browser1': LoginPageObject()}
else:
# Create a second driver
second_wrapper = DriverWrapper()
second_wrapper.connect()
context.current_page['browser2'] = LoginPageObject(second_wrapper)
# Open home page
context.current_page[browser].open()
@when('the user logs in with username "{username}" and password "{password}" in {browser}')
def step_impl(context, username, password, browser):
user = {'username': username, 'password': password}
context.current_page[browser] = context.current_page[browser].login(user)
@then('the message "{message}" is shown in {browser}')
def step_impl(context, message, browser):
assert message in context.current_page[browser].message.get_message()
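For orientation, a hedged sketch of the kind of scenario these step definitions bind to; the wording is invented, not copied from the repo's feature files.

```python
# Illustrative scenario (Gherkin), kept as a Python string; values are made up.
SCENARIO = '''
Given the home page is open in browser1
And the home page is open in browser2
When the user logs in with username "admin" and password "secret" in browser1
Then the message "Welcome" is shown in browser1
'''
```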
|
{
"content_hash": "eccaeba459f227f10595498e3530322e",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 91,
"avg_line_length": 36.52173913043478,
"alnum_prop": 0.7339285714285714,
"repo_name": "Telefonica/toolium-examples",
"id": "c31d27c9db7710282c84ac2c0c2f8868dc97b132",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_behave/steps/multiple_drivers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "3072"
},
{
"name": "Python",
"bytes": "87266"
}
],
"symlink_target": ""
}
|
import unittest
from pants.util.filtering import create_filter, create_filters, wrap_filters
class FilteringTest(unittest.TestCase):
def _divides_by(self, divisor_str):
return lambda n: n % int(divisor_str) == 0
def test_create_filter(self):
divides_by_2 = create_filter("2", self._divides_by)
self.assertTrue(divides_by_2(2))
self.assertFalse(divides_by_2(3))
self.assertTrue(divides_by_2(4))
self.assertTrue(divides_by_2(6))
def test_create_filters(self):
# This tests that create_filters() properly captures different closures.
divides_by_2, divides_by_3 = create_filters(["2", "3"], self._divides_by)
self.assertTrue(divides_by_2(2))
self.assertFalse(divides_by_2(3))
self.assertTrue(divides_by_2(4))
self.assertTrue(divides_by_2(6))
self.assertFalse(divides_by_3(2))
self.assertTrue(divides_by_3(3))
self.assertFalse(divides_by_3(4))
self.assertTrue(divides_by_3(6))
def test_wrap_filters(self):
divides_by_6 = wrap_filters(create_filters(["2", "3"], self._divides_by))
self.assertFalse(divides_by_6(2))
self.assertFalse(divides_by_6(3))
self.assertTrue(divides_by_6(6))
self.assertFalse(divides_by_6(9))
self.assertTrue(divides_by_6(12))
def test_list_filter(self):
divides_by_2_or_3 = create_filter("2,3", self._divides_by)
self.assertTrue(divides_by_2_or_3(2))
self.assertTrue(divides_by_2_or_3(3))
self.assertTrue(divides_by_2_or_3(4))
self.assertFalse(divides_by_2_or_3(5))
self.assertTrue(divides_by_2_or_3(6))
def test_explicit_plus_filter(self):
divides_by_2_or_3 = create_filter("+2,3", self._divides_by)
self.assertTrue(divides_by_2_or_3(2))
self.assertTrue(divides_by_2_or_3(3))
self.assertTrue(divides_by_2_or_3(4))
self.assertFalse(divides_by_2_or_3(5))
self.assertTrue(divides_by_2_or_3(6))
def test_negated_filter(self):
# This tests that the negation applies to the entire list.
coprime_to_2_and_3 = create_filter("-2,3", self._divides_by)
self.assertFalse(coprime_to_2_and_3(2))
self.assertFalse(coprime_to_2_and_3(3))
self.assertFalse(coprime_to_2_and_3(4))
self.assertTrue(coprime_to_2_and_3(5))
self.assertFalse(coprime_to_2_and_3(6))
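A hedged recap of the filter-spec mini-language these tests exercise, with a standalone extractor equivalent to `_divides_by`:

```python
from pants.util.filtering import create_filter, create_filters, wrap_filters

# spec -> predicate; "2,3" is an OR-list, a leading "-" negates the whole list,
# and wrap_filters ANDs several predicates together.
divides_by = lambda divisor_str: (lambda n: n % int(divisor_str) == 0)
coprime = create_filter("-2,3", divides_by)
assert coprime(5) and not coprime(6)
divides_by_6 = wrap_filters(create_filters(["2", "3"], divides_by))
assert divides_by_6(6) and not divides_by_6(4)
```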
|
{
"content_hash": "dc041103de511add800ef6908d8fdc81",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 81,
"avg_line_length": 39.77049180327869,
"alnum_prop": 0.6314921681780709,
"repo_name": "tdyas/pants",
"id": "d55c9933f533158f8c957f7c47081872f8cac28a",
"size": "2558",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/util/test_filtering.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
from stanza.nlp.data import *
from stanza.nlp.corenlp import *
|
{
"content_hash": "8ce28d44cf5caf92fb2d7f4446f7d160",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 32,
"avg_line_length": 31,
"alnum_prop": 0.7903225806451613,
"repo_name": "arunchaganty/presidential-debates",
"id": "5f7ebca9cf525cdbc1f657e684fc6a8b94e195b7",
"size": "62",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third-party/stanza/stanza/nlp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4165"
},
{
"name": "Java",
"bytes": "807"
},
{
"name": "JavaScript",
"bytes": "10159"
},
{
"name": "Python",
"bytes": "42221"
},
{
"name": "Shell",
"bytes": "6367"
}
],
"symlink_target": ""
}
|
""" Contains keras models and functions. """
from .keras_model import KerasModel
|
{
"content_hash": "4a5402c67e617ecfdb8f36aacd461ba8",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 40.5,
"alnum_prop": 0.7530864197530864,
"repo_name": "analysiscenter/dataset",
"id": "27ac26aad7a753d2e90a419f1b66e6792dbecbe3",
"size": "81",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batchflow/models/keras/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "711078"
}
],
"symlink_target": ""
}
|
import httplib
#import eventlet
#httplib = eventlet.import_patched('httplib')
from .http import http_backend
class https_backend(http_backend):
def get_connection(self):
return httplib.HTTPSConnection(self.host, self.port)
|
{
"content_hash": "e2ddbfe436bcc75ff71d3ed2abc574ad",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 21.727272727272727,
"alnum_prop": 0.7489539748953975,
"repo_name": "geodelic/arke",
"id": "f41ea2447c1591819d33d1afd446b0f58e0cb224",
"size": "840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arke/plugins/persist/https.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "62246"
},
{
"name": "Ruby",
"bytes": "715"
}
],
"symlink_target": ""
}
|
"""Support for RFXtrx switches."""
import logging
import voluptuous as vol
from homeassistant.components import rfxtrx
from homeassistant.components.switch import SwitchDevice, PLATFORM_SCHEMA
from homeassistant.components.rfxtrx import (
CONF_AUTOMATIC_ADD, CONF_FIRE_EVENT, DEFAULT_SIGNAL_REPETITIONS,
CONF_SIGNAL_REPETITIONS, CONF_DEVICES)
from homeassistant.helpers import config_validation as cv
from homeassistant.const import CONF_NAME
DEPENDENCIES = ['rfxtrx']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
})
},
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS):
vol.Coerce(int),
})
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the RFXtrx platform."""
import RFXtrx as rfxtrxmod
# Add switch from config file
switches = rfxtrx.get_devices_from_config(config, RfxtrxSwitch)
add_entities_callback(switches)
def switch_update(event):
"""Handle sensor updates from the RFXtrx gateway."""
if not isinstance(event.device, rfxtrxmod.LightingDevice) or \
event.device.known_to_be_dimmable or \
event.device.known_to_be_rollershutter:
return
new_device = rfxtrx.get_new_device(event, config, RfxtrxSwitch)
if new_device:
add_entities_callback([new_device])
rfxtrx.apply_received_command(event)
# Subscribe to main RFXtrx events
if switch_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(switch_update)
class RfxtrxSwitch(rfxtrx.RfxtrxDevice, SwitchDevice):
"""Representation of a RFXtrx switch."""
def turn_on(self, **kwargs):
"""Turn the device on."""
self._send_command("turn_on")
|
{
"content_hash": "ce5438a62274820919c6dd23c7040e3d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 33.725806451612904,
"alnum_prop": 0.6934481109516978,
"repo_name": "nugget/home-assistant",
"id": "141cf2c2c1a6172341708c27d8ec05fdeeecaa84",
"size": "2091",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rfxtrx/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14492390"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17526"
}
],
"symlink_target": ""
}
|
from .util.config import config
import sqlalchemy
from sqlalchemy.orm import sessionmaker
# Create database
uri = '{driver}{user}:{password}@{path}'.format(**config.storage._asdict())
db = sqlalchemy.create_engine(uri, echo=False)
Session = sessionmaker()
Session.configure(bind=db)
from .models import Base
Base.metadata.bind = db
from .api.slack import Slack
slack = Slack()
from .bot.agent import Agent
agent = Agent()
slack.register_output_handler(agent.put)
# agent.start()
from .bot.action import all_actions
agent.register_actions(all_actions)
# actions = [a(agent.proxy) for a in all_actions]
# app.start()
|
{
"content_hash": "9379d9149133b6b9de7a3f9994686418",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 20.966666666666665,
"alnum_prop": 0.7424483306836248,
"repo_name": "tmacro/hitman_agent",
"id": "7b3dd20841542d2478d1d7240377d4f87f50b7bf",
"size": "629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gygax/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "78651"
}
],
"symlink_target": ""
}
|
from itertools import groupby
from pychron.processing.analyses.analysis_group import InterpretedAgeGroup
def set_subgrouping_error(tag, selected, items):
ss = []
for s in selected:
if s.subgroup:
s.subgroup['error_kind'] = tag
ss.append(s.subgroup['name'])
if ss:
# ensure all items in the subgroup get updated
for i in items:
if i.subgroup and i.subgroup['name'] in ss:
i.subgroup['error_kind'] = tag
def apply_subgrouping(sg, selected, items=None, gid=None):
if len(selected) == 1:
return
if items is None and gid is None:
raise ValueError('must set items or gid')
if items:
gs = {r.subgroup['name'] for r in items}
gs = [int(gi) for gi in gs if gi]
gid = max(gs) + 1 if gs else 0
sg['name'] = '{:02n}'.format(gid)
for s in selected:
s.subgroup = sg
if items:
compress_groups(items)
return gid
def compress_groups(items):
cnt = 0
for kind, ans in groupby(sorted(items, key=subgrouping_key), subgrouping_key):
if kind:
ans = list(ans)
valid_ais = [a for a in ans if not a.is_omitted()]
if len(valid_ais) > 1:
v = '{:02n}'.format(cnt)
for a in ans:
a.subgroup['name'] = v
cnt += 1
else:
for a in ans:
a.subgroup = None
else:
for a in ans:
a.subgroup = None
def subgrouping_key(x):
    # Return '' rather than None so sorted() never compares str with None.
    return x.subgroup['name'] if getattr(x, 'subgroup', None) else ''
def make_interpreted_age_group(ans, gid):
ag = InterpretedAgeGroup(analyses=ans, group_id=gid)
ag.set_preferred_kinds()
return ag
def make_interpreted_age_groups(ans, group_id=0):
groups = []
analyses = []
for i, (subgroup, items) in enumerate(groupby(sorted(ans, key=subgrouping_key), key=subgrouping_key)):
items = list(items)
if subgroup:
item = items[0]
sg = item.subgroup
ag = InterpretedAgeGroup(analyses=items,
group=sg)
ag.set_preferred_kinds(sg)
kind = ag.get_preferred_kind('age')
n = '{:02n}-{:02n}:{}'.format(group_id, ag.aliquot, kind[:2])
ag.label_name = n
ag.record_id = n
ag.subgroup_id = i
ag.group_id = group_id
groups.append(ag)
else:
analyses.extend(items)
return groups, analyses
# ============= EOF =============================================
|
{
"content_hash": "2f1921921325a98a053b66faa02831fe",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 106,
"avg_line_length": 27.272727272727273,
"alnum_prop": 0.5233333333333333,
"repo_name": "UManPychron/pychron",
"id": "c3d2ea2b4ea56bb58f114b6dfdf20c7fc9db50bb",
"size": "3430",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/pipeline/subgrouping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
}
|
from debile.master.arches import get_preferred_affinity, get_source_arches
class FnordArch(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<Fnord: %s>" % (self.name)
valid_arches = [
FnordArch("amd64"),
FnordArch("sparc"),
FnordArch("ppc64"),
FnordArch("kfreebsd-amd64"),
FnordArch("kfreebsd-i386"),
FnordArch("hurd-amd64"),
FnordArch("hurd-i386"),
FnordArch("armhf"),
FnordArch("armel"),
FnordArch("mips"),
]
def test_affinity_basic():
arch = get_preferred_affinity(
['amd64', 'sparc', 'armhf'],
["amd64", "sparc", "ppc64"],
valid_arches
)
assert arch.name == 'amd64'
def test_affinity_out_of_order():
arch = get_preferred_affinity(
['amd64', 'sparc', 'armhf'],
["ppc64", "sparc", "amd64"],
valid_arches
)
assert arch.name == 'amd64'
def test_affinity_secondary():
arch = get_preferred_affinity(
['amd64', 'sparc', 'armhf'],
["ppc64", "sparc"],
valid_arches
)
assert arch.name == 'sparc'
def test_affinity_any():
arch = get_preferred_affinity(
['amd64', 'sparc', 'armhf'],
["any"],
valid_arches
)
assert arch.name == 'amd64'
def test_affinity_linux_any():
arch = get_preferred_affinity(
['amd64', 'sparc', 'armhf'],
["linux-any"],
valid_arches
)
assert arch.name == 'amd64'
def test_affinity_any_arm():
arch = get_preferred_affinity(
['amd64', 'sparc', 'armhf'],
["any-arm"],
valid_arches
)
assert arch.name == 'armhf'
def test_affinity_fail():
try:
arch = get_preferred_affinity(
['amd64', 'sparc', 'armhf'],
["ppc64", "armel"],
valid_arches
)
        assert False, "Didn't bomb out as expected."
except ValueError:
pass
def test_any_arches():
assert valid_arches == get_source_arches(['any'], valid_arches)
def test_simple_arches():
assert set(['amd64', 'armhf']) == set([
x.name for x in get_source_arches(['amd64', 'armhf'], valid_arches)
])
def test_kfreebsd_arches():
assert set([
'kfreebsd-i386', 'kfreebsd-amd64', 'armhf'
]) == set([
x.name for x in get_source_arches([
'kfreebsd-i386', 'kfreebsd-amd64', 'armhf'
], valid_arches)
])
def test_hurd_arches():
assert set([
'hurd-i386', 'hurd-amd64', 'armel'
]) == set([
x.name for x in get_source_arches([
'hurd-i386', 'hurd-amd64', 'armel'
], valid_arches)
])
from debile.master.arches import arch_matches
from debile.master.orm import Arch
import unittest
import mock
class ArchesTestCase(unittest.TestCase):
arches = [
Arch(name="amd64"),
Arch(name="sparc"),
Arch(name="ppc64"),
Arch(name="kfreebsd-amd64"),
Arch(name="kfreebsd-i386"),
Arch(name="hurd-amd64"),
Arch(name="hurd-i386"),
Arch(name="armhf"),
Arch(name="armel"),
Arch(name="mips"),
]
def test_arch_matches_arch_equal_alias(self):
        self.assertTrue(arch_matches('amd64', 'amd64'))
def test_arch_matches_pseudo_arches(self):
self.assertFalse(arch_matches('all', 'amd64'))
self.assertFalse(arch_matches('source', 'amd64'))
def test_arch_matches_any_arch(self):
self.assertTrue(arch_matches('amd64', 'any'))
def test_arch_matches_linux_any_alias(self):
self.assertTrue(arch_matches('amd64', 'linux-any'))
self.assertTrue(arch_matches('linux-amd64', 'linux-any'))
self.assertFalse(arch_matches('hurd-i386', 'linux-any'))
def test_arch_matches_ends_with_any(self):
self.assertTrue(arch_matches('bsd-amd64', 'bsd-any'))
self.assertFalse(arch_matches('linux-amd64', 'kfreebsd-any'))
def test_arch_matches_without_dash(self):
self.assertFalse(arch_matches('any', 'amd64'))
@mock.patch('debile.master.arches.run_command', return_value=(0,0,0))
def test_arch_matches_with_successful_run_command(self, mock):
self.assertTrue(arch_matches('linux-amd64', 'amd64'))
@mock.patch('debile.master.arches.run_command', return_value=(2,2,2))
def test_arch_matches_with_unsuccessful_run_command(self, mock):
self.assertFalse(arch_matches('linux-amd64', 'i386'))
def test_get_preferred_affinity_value_error(self):
affinity = ['linux-amd64']
valid = ['linux-i386']
self.assertRaises(ValueError, get_preferred_affinity, affinity, valid,
self.arches)
@mock.patch('debile.master.arches.arch_matches', return_value=True)
def test_get_preferred_affinity(self, mock):
affinity = ['amd64']
valid = ['i386']
arch = get_preferred_affinity(affinity, valid, self.arches)
        self.assertEqual(arch.name, 'amd64')
@mock.patch('debile.master.arches.arch_matches', return_value=False)
def test_get_source_arches_without_matches(self, mock):
dsc_arch = ['linux-i386']
ret = get_source_arches(dsc_arch, self.arches)
        self.assertEqual(ret, [])
@mock.patch('debile.master.arches.arch_matches', return_value=True)
    def test_get_source_arches_with_matches(self, mock):
dsc_arch = ['amd64', 'sparc']
ret = get_source_arches(dsc_arch, self.arches)
        self.assertEqual(ret[0].name, 'amd64')
        self.assertEqual(ret[1].name, 'sparc')
|
{
"content_hash": "054fea5b7550aa3495e2a9ee5acceeca",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 78,
"avg_line_length": 25.671296296296298,
"alnum_prop": 0.5945897204688909,
"repo_name": "opencollab/debile",
"id": "668044c7a37138aede34f9cdee40d5d586c4c909",
"size": "5545",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_arches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1710"
},
{
"name": "Nginx",
"bytes": "1734"
},
{
"name": "Python",
"bytes": "366543"
},
{
"name": "Shell",
"bytes": "14590"
}
],
"symlink_target": ""
}
|
"""Utilities and helper functions."""
import contextlib
import datetime
import errno
import functools
import hashlib
import inspect
import itertools
import os
import paramiko
import pyclbr
import random
import re
import shlex
import shutil
import signal
import socket
import struct
import sys
import tempfile
import time
import types
import warnings
from xml.sax import saxutils
from eventlet import event
from eventlet.green import subprocess
from eventlet import greenthread
from eventlet import pools
from cinder.common import deprecated
from cinder import exception
from cinder import flags
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
FLAGS = flags.FLAGS
def find_config(config_path):
"""Find a configuration file using the given hint.
:param config_path: Full or relative path to the config.
:returns: Full path of the config, if it exists.
:raises: `cinder.exception.ConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(FLAGS.state_path, "etc", "cinder", config_path),
os.path.join(FLAGS.state_path, "etc", config_path),
os.path.join(FLAGS.state_path, config_path),
"/etc/cinder/%s" % config_path,
]
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.ConfigNotFound(path=os.path.abspath(config_path))
def fetchfile(url, target):
LOG.debug(_('Fetching %s') % url)
execute('curl', '--fail', url, '-o', target)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to execute command with optional retry.
If you add a run_as_root=True command, don't forget to add the
corresponding filter to etc/cinder/rootwrap.d !
:param cmd: Passed to subprocess.Popen.
:param process_input: Send to opened process.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
exception.ProcessExecutionError unless
                            program exits with one of these codes.
:param delay_on_retry: True | False. Defaults to True. If set to
True, wait a short amount of time
before retrying.
:param attempts: How many times to retry cmd.
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper FLAG.
:raises exception.Error: on receiving unknown arguments
:raises exception.ProcessExecutionError:
:returns: a tuple, (stdout, stderr) from the spawned process, or None if
the command fails.
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
shell = kwargs.pop('shell', False)
if len(kwargs):
raise exception.Error(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root:
if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo':
deprecated.warn(_('The root_helper option (which lets you specify '
'a root wrapper different from cinder-rootwrap, '
'and defaults to using sudo) is now deprecated. '
'You should use the rootwrap_config option '
'instead.'))
if (FLAGS.rootwrap_config is not None):
cmd = ['sudo', 'cinder-rootwrap',
FLAGS.rootwrap_config] + list(cmd)
else:
cmd = shlex.split(FLAGS.root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=True,
preexec_fn=_subprocess_setup,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except exception.ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
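A hedged usage sketch for execute() above; the command and keyword arguments are illustrative, and the call assumes a configured cinder environment.

```python
# Illustrative call: retry up to three times, accepting only exit code 0.
stdout, stderr = execute('blockdev', '--getsize64', '/dev/sda',
                         check_exit_code=[0], attempts=3, delay_on_retry=True)
```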
def trycmd(*args, **kwargs):
"""
A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:discard_warnings True | False. Defaults to False. If set to True,
then for succeeding commands, stderr is cleared
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except exception.ProcessExecutionError, exn:
out, err = '', str(exn)
LOG.debug(err)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
LOG.debug(err)
err = ''
return out, err
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise exception.Error(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise exception.Error(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
#stdin.write('process_input would go here')
#stdin.flush()
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
stdout_stream.close()
stderr_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
channel.close()
return (stdout, stderr)
class SSHPool(pools.Pool):
"""A simple eventlet pool to hold ssh connections."""
def __init__(self, ip, port, conn_timeout, login, password=None,
privatekey=None, *args, **kwargs):
self.ip = ip
self.port = port
self.login = login
self.password = password
self.conn_timeout = conn_timeout
self.privatekey = privatekey
super(SSHPool, self).__init__(*args, **kwargs)
def create(self):
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.password:
ssh.connect(self.ip,
port=self.port,
username=self.login,
password=self.password,
timeout=self.conn_timeout)
elif self.privatekey:
pkfile = os.path.expanduser(self.privatekey)
privatekey = paramiko.RSAKey.from_private_key_file(pkfile)
ssh.connect(self.ip,
port=self.port,
username=self.login,
pkey=privatekey,
timeout=self.conn_timeout)
else:
msg = _("Specify a password or private_key")
raise exception.CinderException(msg)
# Paramiko by default sets the socket timeout to 0.1 seconds,
# ignoring what we set thru the sshclient. This doesn't help for
# keeping long lived connections. Hence we have to bypass it, by
# overriding it after the transport is initialized. We are setting
# the sockettimeout to None and setting a keepalive packet so that,
# the server will keep the connection open. All that does is send
# a keepalive packet every ssh_conn_timeout seconds.
transport = ssh.get_transport()
transport.sock.settimeout(None)
transport.set_keepalive(self.conn_timeout)
return ssh
except Exception as e:
msg = _("Error connecting via ssh: %s") % e
LOG.error(msg)
raise paramiko.SSHException(msg)
def get(self):
"""
Return an item from the pool, when one is available. This may
cause the calling greenthread to block. Check if a connection is active
before returning it. For dead connections create and return a new
connection.
"""
if self.free_items:
conn = self.free_items.popleft()
if conn:
if conn.get_transport().is_active():
return conn
else:
conn.close()
return self.create()
if self.current_size < self.max_size:
created = self.create()
self.current_size += 1
return created
return self.channel.get()
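A hedged usage sketch for SSHPool above; the host and credentials are invented, and get()/put() come from eventlet's pools.Pool.

```python
# Illustrative pool usage (host and credentials are made up).
pool = SSHPool('192.0.2.10', 22, conn_timeout=30, login='cinder',
               password='secret', max_size=4)
ssh = pool.get()
try:
    out, err = ssh_execute(ssh, 'hostname')
finally:
    pool.put(ssh)
```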
def cinderdir():
import cinder
return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
def debug(arg):
LOG.debug(_('debug in callback: %s'), arg)
return arg
def generate_uid(topic, size=8):
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for x in xrange(size)]
return '%s-%s' % (topic, ''.join(choices))
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def last_completed_audit_period(unit=None):
"""This method gives you the most recently *completed* audit period.
arguments:
        unit: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous."""
if not unit:
unit = FLAGS.volume_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
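Illustrative calls showing the unit@offset spec parsed above; the datetimes returned depend on the current UTC time.

```python
begin, end = last_completed_audit_period('day')       # previous full UTC day
begin, end = last_completed_audit_period('day@18')    # days rolling over at 18:00 UTC
begin, end = last_completed_audit_period('month@15')  # months starting on the 15th
```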
def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
r = random.SystemRandom()
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [r.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
r.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([r.choice(symbols) for _i in xrange(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
r.shuffle(password)
return ''.join(password)
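A hedged sketch for generate_password() above; the output is non-deterministic, but the length invariant holds.

```python
pw = generate_password()                   # 20 chars, default symbol groups
pin = generate_password(length=8,
                        symbolgroups=EASIER_PASSWORD_SYMBOLS)
assert len(pw) == 20 and len(pin) == 8
```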
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
def last_octet(address):
return int(address.split('.')[-1])
def get_my_linklocal(interface):
try:
if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
        condition = r'\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
if address[0] is not None:
return address[0]
else:
raise exception.Error(_('Link Local address is not found.:%s')
% if_str)
except Exception as ex:
raise exception.Error(_("Couldn't get Link Local IP of %(interface)s"
" :%(ex)s") % locals())
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias.lower()] = canonical_email.lower()
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = FLAGS[self.__pivot]
if backend_name not in self.__backends:
raise exception.Error(_('Invalid backend: %s') % backend_name)
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug(_('backend %s'), self.__backend)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCall(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
self.f(*self.args, **self.kw)
if not self._running:
break
greenthread.sleep(interval)
except LoopingCallDone, e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
"""
    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
def utf8(value):
"""Try to turn a string into utf-8 if possible.
Code is directly from the utf8 function in
http://github.com/facebook/tornado/blob/master/tornado/escape.py
"""
if isinstance(value, unicode):
return value.encode('utf-8')
assert isinstance(value, str)
return value
def delete_if_exists(pathname):
"""delete a file, but ignore file not found error"""
try:
os.unlink(pathname)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
raise
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
A 'None' in items or any child expressions will be ignored, this function
will not throw because of None (anywhere) in items. The returned list
will contain no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
def flatten_dict(dict_, flattened=None):
"""Recursively flatten a nested dictionary."""
flattened = flattened or {}
for key, value in dict_.iteritems():
if hasattr(value, 'iteritems'):
flatten_dict(value, flattened)
else:
flattened[key] = value
return flattened
def partition_dict(dict_, keys):
"""Return two dicts, one with `keys` the other with everything else."""
intersection = {}
difference = {}
for key, value in dict_.iteritems():
if key in keys:
intersection[key] = value
else:
difference[key] = value
return intersection, difference
def map_dict_keys(dict_, key_map):
"""Return a dict in which the dictionaries keys are mapped to new keys."""
mapped = {}
for key, value in dict_.iteritems():
mapped_key = key_map[key] if key in key_map else key
mapped[mapped_key] = value
return mapped
def subset_dict(dict_, keys):
"""Return a dict that only contains a subset of keys."""
subset = partition_dict(dict_, keys)[0]
return subset
def check_isinstance(obj, cls):
"""Checks that obj is of type cls, and lets PyLint infer types."""
if isinstance(obj, cls):
return obj
raise Exception(_('Expected object of type: %s') % (str(cls)))
# TODO(justinsb): Can we make this better??
return cls() # Ugly PyLint hack
def bool_from_str(val):
"""Convert a string representation of a bool into a bool value"""
if not val:
return False
try:
return True if int(val) else False
except ValueError:
return val.lower() == 'true'
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not. """
val = str(val).lower()
return (val == 'true' or val == 'false' or
val == 'yes' or val == 'no' or
val == 'y' or val == 'n' or
val == '1' or val == '0')
def is_valid_ipv4(address):
"""valid the address strictly as per format xxx.xxx.xxx.xxx.
where xxx is a value between 0 and 255.
"""
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
try:
if not 0 <= int(item) <= 255:
return False
except ValueError:
return False
return True
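A few worked checks for is_valid_ipv4() above:

```python
assert is_valid_ipv4('192.168.0.1')
assert not is_valid_ipv4('256.0.0.1')   # octet out of range
assert not is_valid_ipv4('10.0.0')      # too few octets
```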
def monkey_patch():
""" If the Flags.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using FLAGS.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'cinder.api.ec2.cloud:' \
cinder.openstack.common.notifier.api.notify_decorator'
    Parameters of the decorator are as follows.
(See cinder.openstack.common.notifier.api.notify_decorator)
name - name of the function
function - object of the function
"""
    # If FLAGS.monkey_patch is not True, this function does nothing.
if not FLAGS.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in FLAGS.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def convert_to_list_dict(lst, label):
"""Convert a value or list into a list of dicts"""
if not lst:
return None
if not isinstance(lst, list):
lst = [lst]
return [{label: x} for x in lst]
def timefunc(func):
"""Decorator that logs how long a particular function took to execute"""
@functools.wraps(func)
def inner(*args, **kwargs):
start_time = time.time()
try:
return func(*args, **kwargs)
finally:
total_time = time.time() - start_time
LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") %
dict(name=func.__name__, total_time=total_time))
return inner
def generate_glance_url():
"""Generate the URL to glance."""
# TODO(jk0): This will eventually need to take SSL into consideration
# when supported in glance.
return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port)
@contextlib.contextmanager
def logging_error(message):
"""Catches exception, write message to the log, re-raise.
This is a common refinement of save_and_reraise that writes a specific
message to the log.
"""
try:
yield
except Exception as error:
with excutils.save_and_reraise_exception():
LOG.exception(message)
@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
delete_if_exists(path)
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def total_seconds(td):
"""Local total_seconds implementation for compatibility with python 2.6"""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return ((td.days * 86400 + td.seconds) * 10 ** 6 +
td.microseconds) / 10.0 ** 6
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if isinstance(hostname, unicode):
hostname = hostname.encode('latin-1', 'ignore')
    hostname = re.sub(r'[ _]', '-', hostname)
    hostname = re.sub(r'[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
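A worked example for sanitize_hostname() above (Python 2 semantics, like the rest of this module):

```python
assert sanitize_hostname(u'My Host_01.') == 'my-host-01'
```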
def read_cached_file(filename, cache_info, reload_func=None):
"""Read from a file if it has been modified.
:param cache_info: dictionary to hold opaque cache.
:param reload_func: optional function to be called with data when
file is reloaded due to a modification.
:returns: data from file
"""
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def hash_file(file_like_object):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
any(map(checksum.update, iter(lambda: file_like_object.read(32768), '')))
return checksum.hexdigest()
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
"""Temporarily set the attr on a particular object to a given value then
revert when finished.
One use of this is to temporarily set the read_deleted flag on a context
object:
with temporary_mutation(context, read_deleted="yes"):
do_something_that_needed_deleted_objects()
"""
NOT_PRESENT = object()
old_values = {}
for attr, new_value in kwargs.items():
old_values[attr] = getattr(obj, attr, NOT_PRESENT)
setattr(obj, attr, new_value)
try:
yield
finally:
for attr, old_value in old_values.items():
if old_value is NOT_PRESENT:
del obj[attr]
else:
setattr(obj, attr, old_value)
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
return abs(elapsed) <= FLAGS.service_down_time
def generate_mac_address():
"""Generate an Ethernet MAC address."""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
# bridge mac addresses don't change, but it appears to
# conflict with libvirt, so we use the next highest octet
# that has the unicast and locally administered bits set
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/cinder/+bug/921838
mac = [0xfa, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except exception.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:params owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError, e:
LOG.debug(_('Could not remove tmpdir: %s'), str(e))
def strcmp_const_time(s1, s2):
"""Constant-time string comparison.
:params s1: the first string
:params s2: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks.
"""
if len(s1) != len(s2):
return False
result = 0
for (a, b) in zip(s1, s2):
result |= ord(a) ^ ord(b)
return result == 0
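Worked checks for strcmp_const_time() above:

```python
assert strcmp_const_time('token', 'token')
assert not strcmp_const_time('token', 'tokeN')
assert not strcmp_const_time('token', 'tok')   # length mismatch returns early
```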
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first"""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
class UndoManager(object):
"""Provides a mechanism to facilitate rolling back a series of actions
when an exception is raised.
"""
def __init__(self):
self.undo_stack = []
def undo_with(self, undo_func):
self.undo_stack.append(undo_func)
def _rollback(self):
for undo_func in reversed(self.undo_stack):
undo_func()
def rollback_and_reraise(self, msg=None, **kwargs):
"""Rollback a series of actions then re-raise the exception.
.. note:: (sirp) This should only be called within an
exception handler.
"""
with excutils.save_and_reraise_exception():
if msg:
LOG.exception(msg, **kwargs)
self._rollback()
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def to_bytes(text, default=0):
"""Try to turn a string into a number of bytes. Looks at the last
characters of the text to determine what conversion is needed to
turn the input text into a byte number.
Supports: B/b, K/k, M/m, G/g, T/t (or the same with b/B on the end)
"""
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
}
# Take off everything not number 'like' (which should leave
# only the byte 'identifier' left)
mult_key_org = text.lstrip('-1234567890')
mult_key = mult_key_org.lower()
mult_key_len = len(mult_key)
if mult_key.endswith("b"):
mult_key = mult_key[0:-1]
try:
multiplier = BYTE_MULTIPLIERS[mult_key]
if mult_key_len:
# Empty cases shouldn't cause text[0:-0]
text = text[0:-mult_key_len]
return int(text) * multiplier
except KeyError:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
except ValueError:
return default
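Worked examples for to_bytes() above; note that the empty string falls through to the ValueError branch and returns the default.

```python
assert to_bytes('512') == 512
assert to_bytes('10K') == 10 * 1024
assert to_bytes('2MB') == 2 * 1024 ** 2
assert to_bytes('', default=-1) == -1
```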
|
{
"content_hash": "1452e4a676c9e13660fa7022eb6cf8c0",
"timestamp": "",
"source": "github",
"line_count": 1145,
"max_line_length": 79,
"avg_line_length": 32.60524017467249,
"alnum_prop": 0.5822730560094287,
"repo_name": "citrix-openstack-build/cinder",
"id": "1d5e89c5c9017a79cd37170fbc7ce38c5c1ca272",
"size": "38148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15168"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2520082"
},
{
"name": "Shell",
"bytes": "7522"
}
],
"symlink_target": ""
}
|
import re
if __name__ == "__main__":
# Assemble the script which embeds the Markdeep page into the preview blog
PreviewBlogPage = open("PreviewBlogPage.htm", "rb").read().decode("utf-8")
HeadMatch = re.search("<head(.*?)>(.*?)</head>", PreviewBlogPage, re.DOTALL)
HeadAttributes = HeadMatch.group(1)
FullDocumentHead = HeadMatch.group(2)
BodyMatch = re.search("<body(.*?)>(.*?)</body>", PreviewBlogPage, re.DOTALL)
BodyAttributes = BodyMatch.group(1)
FullPreviewBody = BodyMatch.group(2)
ArticleHTMLCodeMacro = "$(ARTICLE_HTML_CODE)"
iArticleHTMLCodeMacro = FullPreviewBody.find(ArticleHTMLCodeMacro)
DocumentBodyPrefix = FullPreviewBody[0:iArticleHTMLCodeMacro]
DocumentBodySuffix = FullPreviewBody[iArticleHTMLCodeMacro + len(ArticleHTMLCodeMacro):]
FullPrepareHTMLCode = open("PrepareHTML.js", "rb").read().decode("utf-8")
ReplacementList = [("$(FULL_DOCUMENT_HEAD)", FullDocumentHead),
("$(DOCUMENT_BODY_PREFIX)", DocumentBodyPrefix),
("$(DOCUMENT_BODY_SUFFIX)", DocumentBodySuffix)]
for Macro, Replacement in ReplacementList:
FullPrepareHTMLCode = FullPrepareHTMLCode.replace(
Macro,
Replacement.replace("\r\n", "\\r\\n\\\r\n").replace("'", "\\'"))
# Generate code which sets body and head attributes appropriately
for Element, AttributeCode in [("head", HeadAttributes), ("body", BodyAttributes)]:
FullPrepareHTMLCode += "\r\n// Setting " + Element + " attributes\r\n"
for Match in re.finditer("(\\w+)=\\\"(.*?)\\\"", AttributeCode):
FullPrepareHTMLCode += "document." + Element + ".setAttribute(\"" + Match.group(
1) + "\",\"" + Match.group(2) + "\");\r\n"
open("PrepareHTML.full.js", "wb").write(FullPrepareHTMLCode.encode("utf-8"))
# Concatenate all the scripts together
SourceFileList = [
"PrepareHTML.full.js", "SetMarkdeepMode.js", "markdeep.min.js", "DisplayMarkdeepOutput.js",
"InvokeMathJax.js"
]
OutputCode = "\r\n\r\n".join([
"// " + SourceFile + "\r\n\r\n" + open(SourceFile, "rb").read().decode("utf-8")
for SourceFile in SourceFileList
])
OutputFile = open("MarkdeepUtility.js", "wb")
OutputFile.write(OutputCode.encode("utf-8"))
OutputFile.close()
print("Done.")
|
{
"content_hash": "d232f6cda9c293740fe59866ae7cc451",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 97,
"avg_line_length": 51.18181818181818,
"alnum_prop": 0.6660746003552398,
"repo_name": "MTASZTAKI/ApertusVR",
"id": "ac0b394f1767df4dbe5d335b522e7f79ebfc0d4a",
"size": "2252",
"binary": false,
"copies": "6",
"ref": "refs/heads/0.9",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/docs/pybullet_quickstart_guide/WordpressPreview/BuildMarkdeepUtility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7599"
},
{
"name": "C++",
"bytes": "1207412"
},
{
"name": "CMake",
"bytes": "165066"
},
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "GLSL",
"bytes": "223507"
},
{
"name": "HLSL",
"bytes": "141879"
},
{
"name": "HTML",
"bytes": "34827"
},
{
"name": "JavaScript",
"bytes": "140550"
},
{
"name": "Python",
"bytes": "1370"
}
],
"symlink_target": ""
}
|
import os
from django_node.base_service import BaseService
class TimeoutService(BaseService):
path_to_source = os.path.join(os.path.dirname(__file__), 'services', 'timeout.js')
timeout = 1.0
class ErrorService(BaseService):
path_to_source = os.path.join(os.path.dirname(__file__), 'services', 'error.js')
|
{
"content_hash": "c490b9fae2445ea43db3141c6241a226",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 86,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.709375,
"repo_name": "markfinger/django-node",
"id": "ebde682f03019dcf08efa6a821ccaf272ef927ed",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "717"
},
{
"name": "Python",
"bytes": "43989"
}
],
"symlink_target": ""
}
|
"""Tests for the views of the ``django-metrics-dashboard`` app."""
from django.test import TestCase
from django.test.client import RequestFactory
from django_libs.tests.factories import UserFactory
from django_libs.tests.mixins import ViewTestMixin
from metrics_dashboard.tests.mixins import WidgetTestCaseMixin
from metrics_dashboard.views import DashboardView, DashboardAPIWidgetView
class DashboardViewTestMixin(object):
def setUp(self):
super(DashboardViewTestMixin, self).setUp()
self.factory = RequestFactory()
self.request = self.factory.get(self.get_url())
self.request.user = UserFactory()
class DashboardAPIWidgetViewTestCase(WidgetTestCaseMixin,
DashboardViewTestMixin, ViewTestMixin,
TestCase):
"""Tests for the ``DashboardAPIWidgetView`` view class."""
def get_view_kwargs(self):
return {
'widget_name': 'DummyWidget',
}
def get_view_name(self):
return 'dashboard_api_widget'
def test_view(self):
"""api view should return the correct template and context"""
kwargs = self.get_view_kwargs()
response = DashboardAPIWidgetView().dispatch(self.request, **kwargs)
self.assertTrue('value' in response.context_data)
class DashboardViewTestCase(WidgetTestCaseMixin, DashboardViewTestMixin,
ViewTestMixin, TestCase):
def get_view_name(self):
return 'dashboard_view'
def test_view_anonymous(self):
"""View should be callable for anonymous users."""
self.should_be_callable_when_anonymous()
def test_view_login_required(self):
"""View should not be callable for anonymous if REQUIRE_LOGIN==True."""
with self.settings(DASHBOARD_REQUIRE_LOGIN=True):
self.should_redirect_to_login_when_anonymous()
def test_get_context_data(self):
"""View should add all registered widgets to the context."""
self._unregister_widgets()
response = DashboardView().dispatch(self.request)
self.assertTrue('widgets' in response.context_data)
|
{
"content_hash": "5b540f97eef53a15a964d9b738f0fff6",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 37.8421052631579,
"alnum_prop": 0.6731571627260083,
"repo_name": "bitmazk/django-metrics-dashboard",
"id": "234c0191a42f5402d02bd03ef08a731993e2d655",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metrics_dashboard/tests/integration_tests/views_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38836"
},
{
"name": "Shell",
"bytes": "5137"
}
],
"symlink_target": ""
}
|
import unicodecsv
from cStringIO import StringIO
from ckan.plugins.toolkit import BaseController, render, c, get_action, response
class InventoryController(BaseController):
def index(self):
context = {'user': c.user}
c.entries = get_action('inventory_entry_organization_summary')(context, {})
return render('inventory/index.html')
def get_inventory_entries_csv(self):
context = {'user': c.user}
entries = get_action('inventory_entry_csv')(context, {})
response.headers['Content-Type'] = 'text/csv'
s = StringIO()
writer = unicodecsv.writer(s)
writer.writerow(['nume_organizatie', 'nume_intrare_de_inventar', 'interval_de_recurenta', 'ultima_actualizare'])
for entry in entries:
writer.writerow(entry)
return s.getvalue()
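# Hypothetical wiring sketch (names are illustrative): a controller like this
# is typically exposed in a CKAN 2.x plugin through the IRoutes interface.
#
#   import ckan.plugins as plugins
#
#   class InventoryPlugin(plugins.SingletonPlugin):
#       plugins.implements(plugins.IRoutes, inherit=True)
#
#       def before_map(self, map):
#           map.connect(
#               'inventory_index', '/inventory',
#               controller='ckanext.inventory.controllers.inventory:InventoryController',
#               action='index')
#           return map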
|
{
"content_hash": "a8a9b9428b90adf7c427c785ab0eb5db",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 120,
"avg_line_length": 34.791666666666664,
"alnum_prop": 0.6574850299401198,
"repo_name": "govro/ckanext-inventory",
"id": "5ea32714a0db7a9853ce97c7cd509cd61eb27c08",
"size": "835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckanext/inventory/controllers/inventory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23141"
},
{
"name": "JavaScript",
"bytes": "1338"
},
{
"name": "Python",
"bytes": "45028"
},
{
"name": "Shell",
"bytes": "1394"
}
],
"symlink_target": ""
}
|
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
import threading
import warnings
import numpy as np
import wrapt
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import pywrap_tf_session as tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import device
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
_python_session_create_counter = monitoring.Counter(
'/tensorflow/api/python/session_create_counter',
'Counter for number of sessions created in Python.')
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(
fetched_vals[0], fetched_vals[1],
fetched_vals[2] if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
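# For example (sketch): for an IndexedSlices feed with a known dense_shape,
# the pairs produced are (values, v), (indices, i), (dense_shape, s), in that
# order; with dense_shape=None only the first two pairs are emitted.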
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor, lambda fetch: ([
fetch.indices, fetch.values, fetch.dense_shape
], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(
zip([feed.indices, feed.values, feed.dense_shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.dense_shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape
], _get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed])
]
# pylint: enable=g-long-lambda
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
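# For example, _convert_to_numpy_obj(np.int64, 3) returns np.int64(3), while
# _convert_to_numpy_obj(object, u'abc') falls through to str and returns 'abc'.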
def register_session_run_conversion_functions(
tensor_type,
fetch_function,
feed_function=None,
feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
  An example:
  ```python
  class SquaredTensor(object):
    def __init__(self, tensor):
      self.sq = tf.square(tensor)
  # You can define conversion functions as follows:
  fetch_function = lambda squared_tensor: ([squared_tensor.sq],
                                           lambda val: val[0])
  feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
  feed_function_for_partial_run = lambda feed: [feed.sq]
  # Then, after invoking this register function, you can use it as follows:
  session.run(squared_tensor1,
              feed_dict={squared_tensor2: some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds of
one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
Raises:
ValueError: If `tensor_type` has already been registered.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
      raise ValueError('%s has already been registered.' % tensor_type)
_REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
feed_function_for_partial_run))
def _is_attrs_instance(obj):
"""Returns True if the given obj is an instance of attrs-decorated class."""
return getattr(obj.__class__, '__attrs_attrs__', None) is not None
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, '__attrs_attrs__')
return [getattr(obj, a.name) for a in attrs]
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond exactly to
the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, collections_abc.Mapping):
return _DictFetchMapper(fetch)
elif _is_attrs_instance(fetch):
return _AttrsFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)' %
(fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
  It also returns a 2-D list of integers (value_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
    value_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(id(f))
if j is None:
j = len(seen_fetches)
seen_fetches[id(f)] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
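# For example (sketch): given two mappers whose unique fetches are [a, b] and
# [b, c], this returns unique_fetches == [a, b, c] and
# value_indices == [[0, 1], [1, 2]]; the shared `b` is deduplicated by id().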
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
if isinstance(fetches, wrapt.ObjectProxy):
self._fetch_type = type(fetches.__wrapped__)
else:
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
if isinstance(fetches, collections.defaultdict):
self._type_ctor = functools.partial(collections.defaultdict,
fetches.default_factory)
else:
self._type_ctor = self._fetch_type
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
def _generator():
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
yield k, m.build_results([values[j] for j in vi])
return self._type_ctor(_generator())
class _AttrsFetchMapper(_FetchMapper):
"""Fetch mapper for attrs decorated classes."""
def __init__(self, fetches):
"""Creates a _AttrsFetchMapper.
Args:
fetches: An instance of an attrs decorated class.
"""
values = _get_attrs_values(fetches)
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
return self._fetch_type(*results)
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability and to
convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = feed_handles or {}
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, ops.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x.ref() not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise errors.InaccessibleTensorError(
'Operation %r has been marked as not fetchable. Typically this'
' happens when it is defined in another function or code block.'
          ' Use return values, explicit Python locals or TensorFlow collections'
' to access it.'
% op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i].ref() in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i].ref()].eval()
else:
value = self._feeds.get(self._fetches[i].ref())
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i].ref())
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
A list of each `Tensor`s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
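# For example, _name_list([t]) for a tensor named 'add:0' returns [b'add:0'].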
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
self._incarnation = incarnation
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
@property
def incarnation(self):
return self._incarnation
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
self.incarnation,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
_python_session_create_counter.get_cell().increase_by(1)
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
if isinstance(target, config_pb2.ConfigProto):
raise TypeError('target must be a string, but got %s.'
' Did you do "Session(config)" instead of'
' "Session(config=config)"?' % type(target))
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s' %
type(config))
if (mixed_precision_global_state.is_mixed_precision_graph_rewrite_enabled()
and config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.OFF):
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = (
rewriter_config_pb2.RewriterConfig.ON)
config = new_config
elif (config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.ON):
mixed_precision_global_state.set_non_mixed_precision_session_created(True)
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
    Each element in the list has the following properties:
      name: A string with the full name of the device, e.g.
        `/job:worker/replica:0/task:3/device:CPU:0`.
      device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`).
      memory_limit: The maximum amount of memory available on the device.
        Note: depending on the device, it is possible that the usable memory
        could be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except (AttributeError, TypeError):
# At shutdown, `c_api_util`, `tf_session`, or
# `tf_session.TF_DeleteSession` may have been garbage collected, causing
# the above method calls to fail. In this case, silently leak since the
# program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
    c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(c.eval())
```
To get the current default session, use `tf.compat.v1.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.compat.v1.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of
`tf.compat.v1.get_default_graph`, you must explicitly enter a
`with sess.graph.as_default():` block to make `sess.graph` the default
graph.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* A `tf.Operation`.
The corresponding fetched value will be `None`.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A `tf.sparse.SparseTensor`.
The corresponding fetched value will be a
`tf.compat.v1.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
`tf.compat.v1.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
`tf.sparse.SparseTensor`,
the value should be a
`tf.compat.v1.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (described
above).
feed_dict: A dictionary that maps graph elements to values (described
above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
      The order in which `fetches` operations are evaluated inside the call
      is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (see
documentation for `run`).
feed_dict: A dictionary that maps graph elements to values (described
above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_tensor = {}
feed_map = {}
# Validate and process feed_dict.
feed_handles = {}
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: ' +
e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, numpy ndarrays, or TensorHandles. '
'For reference, the tensor object was ' +
str(feed_val) + ' which was passed to the '
'feed with key ' + str(feed) + '.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
subfeed_dtype, subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' with type ' +
str(type(subfeed_val)) +
' is not compatible with Tensor type ' + str(subfeed_dtype) +
'. Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
is_tensor_handle_feed = isinstance(subfeed_val,
session_ops.TensorHandle)
if is_tensor_handle_feed:
np_val = subfeed_val.to_numpy_array()
feed_handles[subfeed_t.ref()] = subfeed_val
else:
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if (not is_tensor_handle_feed and
not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r' %
(np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
feed_dict_tensor[subfeed_t.ref()] = np_val
feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
# Run request and get response.
# We need to keep the returned movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
_ = self._update_with_movers(feed_dict_tensor, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
# We only want to really perform the run if fetches or targets are provided,
# or if the call is a partial run that specifies feeds.
if final_fetches or final_targets or (handle and feed_dict_tensor):
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_tensor, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
"""Returns a Python callable that runs a particular step.
The returned callable will take `len(feed_list)` arguments whose types
must be compatible feed values for the respective elements of `feed_list`.
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
`tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
it will return `None`.
Args:
fetches: A value or list of values to fetch. See `tf.Session.run` for
details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
for details of the allowable feed key types.
accept_options: (Optional.) If `True`, the returned `Callable` will be
able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
as optional keyword arguments `options` and `run_metadata`,
respectively, with the same syntax and semantics as `tf.Session.run`,
which is useful for certain use cases (profiling and debugging) but will
result in measurable slowdown of the `Callable`'s
performance. Default: `False`.
Returns:
A function that when called will execute the step defined by
`feed_list` and `fetches` in this session.
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to `tf.Session.run`.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
raise TypeError('`feed_list` must be a list or tuple.')
# Delegate any non-empty feed lists to the existing `run()` logic.
# TODO(mrry): Refactor the feed handling logic from
# `Session._run()` so that we can convert the feeds to a list of
# strings here.
def _generic_run(*feed_args, **kwargs):
feed_dict = {
feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
}
return self.run(fetches, feed_dict=feed_dict, **kwargs)
return _generic_run
# Ensure any changes to the graph are reflected in the runtime.
# Note that we don't need to do this on subsequent calls to the
# returned object, because the arguments to `fetches` must already be
# in the graph.
self._extend_graph()
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# pylint: disable=protected-access
fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
target_list = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
def _callable_template_with_options_and_metadata(fetch_list,
target_list,
fetch_handler,
options=None,
run_metadata=None):
"""Template callable that accepts RunOptions and RunMetadata."""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
target_list, run_metadata_ptr)
if fetch_handler:
results = fetch_handler.build_results(self, results)
else:
results = results[0] if results else None
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return results
if accept_options:
return functools.partial(_callable_template_with_options_and_metadata,
fetch_list, target_list, fetch_handler)
elif isinstance(fetches, ops.Operation):
# Special case for fetching a single operation, because the
# function will have no return value.
assert not fetch_list
assert len(target_list) == 1
def _single_operation_run():
self._call_tf_sessionrun(None, {}, [], target_list, None)
return _single_operation_run
elif isinstance(fetches, ops.Tensor):
# Special case for fetching a single tensor, because the
# function can return the result of `TF_Run()` directly.
assert len(fetch_list) == 1
assert not target_list
def _single_tensor_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
return results[0]
return _single_tensor_run
else:
# In all other cases, we must use `fetch_handler` to build the
# results for us.
def _fetch_handler_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
None)
return fetch_handler.build_results(self, results)
return _fetch_handler_run
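  # Illustrative usage sketch (assuming a session `sess`, a placeholder `x`,
  # and a tensor `y` computed from it):
  #
  #   step = sess.make_callable(y, feed_list=[x])
  #   result = step(np.ones((2, 2)))  # equivalent to sess.run(y, {x: ...})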
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
# Old format: [[Node: <node_name> = ...]]
# New format: [[{{node <node_name>}} = ...]]
_NODEDEF_NAME_RE = re.compile(
r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of operations to be run, but not fetched.
fetch_list: A list of tensors to be fetched.
feed_dict: A dictionary that maps tensors to numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# pylint: disable=protected-access
feeds = dict((t.deref()._as_tf_output(), v) for t, v in feed_dict.items())
fetches = [t._as_tf_output() for t in fetch_list]
targets = [op._c_op for op in target_list]
# pylint: enable=protected-access
def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
target_list, run_metadata)
def _prun_fn(handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return self._call_tf_sessionprun(handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, feeds, fetches, targets, options,
run_metadata)
else:
return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(3)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
message = error_interpolation.interpolate(message, self._graph)
if 'only supports NHWC tensor format' in message:
message += ('\nA possible workaround: Try disabling Grappler optimizer'
                    '\nby modifying the config for creating the session, e.g.'
'\nsession_config.graph_options.rewrite_options.'
'disable_meta_optimizer = True')
raise type(e)(node_def, op, message) # pylint: disable=no-value-for-parameter
def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
    # the number of dead tensors exceeds a certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
if tensors_to_delete:
feeds = {}
fetches = []
for deleter_key, tensor_handle in enumerate(tensors_to_delete):
holder, deleter = session_ops._get_handle_deleter(
self.graph, deleter_key, tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
    # If a tensor handle is fed to a placeholder on an incompatible device,
    # we move the tensor to the right device, generate a new tensor handle,
    # and update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=np.object)
feed_name = handle_mover[0]
feed_tensor = feed_map[feed_name][0]
feed_dict[feed_tensor.ref()] = np_val
return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
run_metadata):
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
fetch_list, target_list,
run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
fetch_list)
# pylint: disable=protected-access
class _Callable(object):
"""Experimental wrapper for the C++ `Session::MakeCallable()` API."""
def __init__(self, session, callable_options):
self._session = session
self._handle = None
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(callable_options.SerializeToString()))
try:
self._handle = tf_session.TF_SessionMakeCallable(
session._session, options_ptr)
finally:
tf_session.TF_DeleteBuffer(options_ptr)
def __call__(self, *args, **kwargs):
run_metadata = kwargs.get('run_metadata', None)
try:
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
ret = tf_session.TF_SessionRunCallable(self._session._session,
self._handle, args,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
return ret
def __del__(self):
# NOTE(mrry): It is possible that `self._session.__del__()` could be
# called before this destructor, in which case `self._session._session`
# will be `None`.
if (self._handle is not None and self._session._session is not None and
not self._session._closed):
tf_session.TF_SessionReleaseCallable(self._session._session,
self._handle)
# pylint: enable=protected-access
def _make_callable_from_options(self, callable_options):
"""Returns a handle to a "callable" with the given options.
Args:
callable_options: A `CallableOptions` protocol buffer message describing
the computation that will be performed by the callable.
Returns:
A handle to the new callable.
"""
self._extend_graph()
return BaseSession._Callable(self, callable_options)
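  # Sketch (hedged, assuming tensors `x` and `y` in the session's graph): a
  # `CallableOptions` message is built from tensor and op names, e.g.
  #
  #   opts = config_pb2.CallableOptions()
  #   opts.feed.append(x.name)
  #   opts.fetch.append(y.name)
  #   runner = sess._make_callable_from_options(opts)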
@tf_export(v1=['Session'])
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
tf.compat.v1.disable_eager_execution() # need to disable eager in TF2.x
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Evaluate the tensor `c`.
print(sess.run(c)) # prints 30.0
```
A session may own resources, such as
`tf.Variable`, `tf.queue.QueueBase`,
and `tf.compat.v1.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.compat.v1.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.compat.v1.Session() as sess:
sess.run(...)
```
The
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True))
```
@compatibility(TF2)
`Session` does not work with either eager execution or `tf.function`, and you
should not invoke it directly. To migrate code that uses sessions to TF2,
rewrite the code without it. See the
[migration
guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls)
on replacing `Session.run` calls.
@end_compatibility
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine. See
[Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for
more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
try:
self._default_session_context_manager.__exit__(exec_type, exec_value,
exec_tb)
except RuntimeError as error:
if error == exec_value:
# NOTE(skyewm): for some reason, in Python3,
# _default_session_context_manager.__exit__ will re-raise the "not
# re-entrant" exception raised in __enter__ above (note that if we're
# here, we're in the outer session context manager, since __exit__ is
# not called when __enter__ raises an exception). We still want to
# continue cleaning up this context manager before the exception is
# further propagated, so we ignore it here (note that it'll continue
# being propagated after this method completes).
pass
else:
raise
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
# If we are closing due to an exception, set a time limit on our Close() to
# avoid blocking forever.
# TODO(b/120204635) remove this when deadlock is fixed.
if exec_type:
close_thread = threading.Thread(
name='SessionCloseThread', target=self.close)
close_thread.daemon = True
close_thread.start()
close_thread.join(30.0)
if close_thread.is_alive():
logging.error(
'Session failed to close after 30 seconds. Continuing after this '
'point may leave your program in an undefined state.')
else:
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
      containers: A list of resource container name strings, or `None` if all
        the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
@tf_export(v1=['InteractiveSession'])
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods `tf.Tensor.eval`
and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.compat.v1.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.compat.v1.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
_count_lock = threading.Lock()
_active_session_count = 0 # GUARDED_BY(_count_lock)
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
with InteractiveSession._count_lock:
if InteractiveSession._active_session_count > 0:
warnings.warn('An interactive session is already active. This can '
'cause out-of-memory errors in some cases. You must '
'explicitly call `InteractiveSession.close()` to release '
'resources held by the other session(s).')
InteractiveSession._active_session_count += 1
# NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
# semantics (in particular, it is not set to true if `Session.close()` is
# called on a session that has not been "opened" by running a step) and we
# cannot change those semantics without breaking existing code.
self._explicitly_closed = False
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
with InteractiveSession._count_lock:
if not self._explicitly_closed:
InteractiveSession._active_session_count -= 1
self._explicitly_closed = True
else:
return
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_graph = None
self._default_session.__exit__(None, None, None)
self._default_session = None
|
{
"content_hash": "01d6d0cf63b8e015d49c7c944fbb4722",
"timestamp": "",
"source": "github",
"line_count": 1785,
"max_line_length": 94,
"avg_line_length": 38.69075630252101,
"alnum_prop": 0.6566178706398506,
"repo_name": "sarvex/tensorflow",
"id": "910428fb3c485db67a4e9ede6ced0cf688ecd6cd",
"size": "69752",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow/python/client/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
}
|
import copy
import warnings
from collections import OrderedDict
from django.utils import six
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __bool__(self):
return any(self.dicts)
def __nonzero__(self):
return type(self).__bool__(self)
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError(key)
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
# This is used by MergeDicts of MultiValueDicts.
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_:
return dict_.getlist(key)
return []
def _iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in six.iteritems(dict_):
k = item[0]
if k in seen:
continue
seen.add(k)
yield item
def _iterkeys(self):
for k, v in self._iteritems():
yield k
def _itervalues(self):
for k, v in self._iteritems():
yield v
if six.PY3:
items = _iteritems
keys = _iterkeys
values = _itervalues
else:
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = _iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
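# Illustrative usage (added example, not part of the original module): shows
# the first-occurrence lookup order documented in the MergeDict docstring.
# The dictionaries below are made-up values.
_example_overrides = {'color': 'red'}
_example_defaults = {'color': 'blue', 'size': 'M'}
_example_merged = MergeDict(_example_overrides, _example_defaults)
assert _example_merged['color'] == 'red'  # first dict containing the key wins
assert _example_merged['size'] == 'M'     # falls through to the later dict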
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
warnings.warn(
"SortedDict is deprecated and will be removed in Django 1.9.",
PendingDeprecationWarning, stacklevel=2
)
if data is None or isinstance(data, dict):
data = data or []
super(SortedDict, self).__init__(data)
self.keyOrder = list(data) if data else []
else:
super(SortedDict, self).__init__()
super_set = super(SortedDict, self).__setitem__
for key, value in data:
# Take the ordering from first key
if key not in self:
self.keyOrder.append(key)
# But override with last value in data (dict() does this)
super_set(key, value)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.items()])
def __copy__(self):
# The Python's default copy implementation will alter the state
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def __reversed__(self):
return reversed(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def _iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def _iterkeys(self):
for key in self.keyOrder:
yield key
def _itervalues(self):
for key in self.keyOrder:
yield self[key]
if six.PY3:
items = _iteritems
keys = _iterkeys
values = _itervalues
else:
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
def items(self):
return [(k, self[k]) for k in self.keyOrder]
def keys(self):
return self.keyOrder[:]
def values(self):
return [self[k] for k in self.keyOrder]
def update(self, dict_):
for k, v in six.iteritems(dict_):
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
return self.__class__(self)
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join('%r: %r' % (k, v) for k, v in six.iteritems(self))
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
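# Illustrative usage (added example, not part of the original module): shows
# the insertion-order guarantee documented above. The values are made up; note
# that constructing a SortedDict emits a PendingDeprecationWarning.
_example_sorted = SortedDict([('b', 1), ('a', 2)])
assert list(_example_sorted) == ['b', 'a']  # insertion order, not sorted order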
class OrderedSet(object):
"""
A set which keeps the ordering of the inserted items.
Currently backs onto OrderedDict.
"""
def __init__(self, iterable=None):
self.dict = OrderedDict(((x, None) for x in iterable) if iterable else [])
def add(self, item):
self.dict[item] = None
def remove(self, item):
del self.dict[item]
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
def __iter__(self):
return iter(self.dict.keys())
def __contains__(self, item):
return item in self.dict
def __nonzero__(self):
return bool(self.dict)
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict((k, self.getlist(k)) for k in self)
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key, default=None):
"""
Returns the list of values for the passed key. If key doesn't exist,
then a default value is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
if default is None:
return []
return default
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
# Do not return default here because __setitem__() may store
# another value -- QueryDict.__setitem__() does. Look it up.
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
# Do not return default_list here because setlist() may store
# another value -- QueryDict.setlist() does. Look it up.
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def _iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def _iterlists(self):
"""Yields (key, list) pairs."""
return six.iteritems(super(MultiValueDict, self))
def _itervalues(self):
"""Yield the last value on every key list."""
for key in self:
yield self[key]
if six.PY3:
items = _iteritems
lists = _iterlists
values = _itervalues
else:
iteritems = _iteritems
iterlists = _iterlists
itervalues = _itervalues
def items(self):
return list(self.iteritems())
def lists(self):
return list(self.iterlists())
def values(self):
return list(self.itervalues())
def copy(self):
"""Returns a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in six.iteritems(kwargs):
self.setlistdefault(key).append(value)
def dict(self):
"""
Returns current object as a dict with singular values.
"""
return dict((key, self[key]) for key in self)
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
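# Illustrative usage (added example, not part of the original module): keys
# carrying the prefix are stripped and their values passed through the
# wrapping function; other keys return the raw value. The values are made up.
_example_wrapped = DictWrapper({'table': 'users'}, lambda v: '"%s"' % v, 'quoted_')
assert _example_wrapped['quoted_table'] == '"users"'  # prefix stripped, func applied
assert _example_wrapped['table'] == 'users'           # raw value, func skipped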
|
{
"content_hash": "295789566f5a2f079e346e330935a902",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 107,
"avg_line_length": 29.587242026266416,
"alnum_prop": 0.5484464172479391,
"repo_name": "denisenkom/django",
"id": "f4f694f399fd588e1826e718cbd33bb2fd4a5562",
"size": "15770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/utils/datastructures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50381"
},
{
"name": "JavaScript",
"bytes": "100648"
},
{
"name": "Python",
"bytes": "8801295"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
from abc import ABC, abstractmethod
import dateutil.parser
from enum import Enum
from metadata_comparison.lib.operation_ids import JsonObject, operation_id_to_api_version, \
PAPI_V1_API_VERSION, PAPI_V2_ALPHA1_API_VERSION, PAPI_V2_BETA_API_VERSION
import re
from typing import AnyStr, Dict, Iterator, Union
class DiskType(Enum):
HDD = 1
SSD = 2
@staticmethod
def from_string(string: AnyStr):
if string == 'HDD':
return DiskType.HDD
elif string == 'SSD':
return DiskType.SSD
else:
raise ValueError("")
def __str__(self):
return "HDD" if self is DiskType.HDD else "SSD"
# The various attributes of a disk have string keys and string or int values.
DiskAttributes = Dict[AnyStr, Union[AnyStr, int]]
# A DiskDict is keyed by the name of the disk (unique within a VM) and DiskAttributes values.
DiskDict = Dict[AnyStr, DiskAttributes]
class Disk:
def __init__(self, name: AnyStr, size_gb: int, disk_type: DiskType):
self.name = name
self.size_gb = size_gb
self.disk_type = disk_type
def for_json(self) -> DiskDict:
return {
self.name: {
'name': self.name,
'sizeGb': self.size_gb,
'type': str(self.disk_type)
}
}
def __eq__(self, other) -> bool:
return self.name == other.name and self.size_gb == other.size_gb # and self.disk_type == other.disk_type
def __hash__(self):
return hash(self.name) + hash(self.size_gb) # + hash(self.disk_type)
def __str__(self):
return f'Disk(name={self.name}, size_gb={self.size_gb}, disk_type={str(self.disk_type)}'
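# Illustrative usage (added example, not part of the original module): the
# name, size, and type below are made-up values showing the DiskDict shape
# that for_json() produces.
_example_disk = Disk('local-disk', 20, DiskType.SSD)
assert _example_disk.for_json() == {
    'local-disk': {'name': 'local-disk', 'sizeGb': 20, 'type': 'SSD'}
}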
class OperationDigester(ABC):
"""
Abstract Base Class for PAPI operation subclasses sharing an interface for the purpose of treating digesters
uniformly regardless of PAPI version.
"""
def __init__(self, operation_json: JsonObject):
self.__json = operation_json
def __metadata(self) -> JsonObject:
return self.__json.get('metadata')
def __events(self) -> JsonObject:
return self.__metadata()['events']
def create_time(self) -> AnyStr:
return self.__metadata().get('createTime')
def start_time(self) -> AnyStr:
return self.__metadata().get('startTime')
def end_time(self) -> AnyStr:
return self.__metadata().get('endTime')
def total_time_seconds(self) -> float:
return (dateutil.parser.parse(self.end_time()) - dateutil.parser.parse(self.create_time())).total_seconds()
def metadata(self):
return self.__metadata()
@staticmethod
def create(operation_json: JsonObject):
operation_id = operation_json.get('name')
version = operation_id_to_api_version(operation_id)
if version == PAPI_V1_API_VERSION:
return PapiV1OperationDigester(operation_json)
elif version == PAPI_V2_ALPHA1_API_VERSION:
return PapiV2AlphaOperationDigester(operation_json)
elif version == PAPI_V2_BETA_API_VERSION:
return PapiV2BetaOperationDigester(operation_json)
else:
raise ValueError(f"Unrecognized format for PAPI operation ID {operation_id}")
@abstractmethod
def docker_image_pull_time_seconds(self) -> float: pass
@abstractmethod
def localization_time_seconds(self) -> float: pass
@abstractmethod
def user_command_time_seconds(self) -> float: pass
@abstractmethod
def delocalization_time_seconds(self) -> float: pass
@abstractmethod
def startup_time_seconds(self) -> float: pass
@abstractmethod
def machine_type(self) -> AnyStr: pass
@abstractmethod
def disks(self) -> DiskDict: pass
def other_time_seconds(self) -> float:
end, create = [dateutil.parser.parse(t) for t in [self.end_time(), self.create_time()]]
total_time = (end - create).total_seconds()
accounted_for_time = \
self.startup_time_seconds() + \
self.docker_image_pull_time_seconds() + \
self.localization_time_seconds() + \
self.user_command_time_seconds() + \
self.delocalization_time_seconds()
return max(total_time - accounted_for_time, 0)
def event_with_description(self, description: AnyStr) -> JsonObject:
def has_description(event: JsonObject) -> bool:
return event.get('description') == description
for unique in filter(has_description, self.__metadata().get('events')):
return unique
def event_with_description_like(self, description: AnyStr) -> Iterator[JsonObject]:
regex = re.compile(description)
def has_description_like(event: JsonObject) -> bool:
return regex.match(event.get('description')) is not None
return filter(has_description_like, self.__metadata().get('events'))
class PapiV1OperationDigester(OperationDigester):
def __init__(self, operation_json: JsonObject):
super(PapiV1OperationDigester, self).__init__(operation_json)
@staticmethod
def __total_seconds_between_timestamps(start: AnyStr, end: AnyStr) -> float:
_start, _end = [dateutil.parser.parse(t) for t in [start, end]]
return (_end - _start).total_seconds()
@staticmethod
def __total_seconds_between_events(start: JsonObject, end: JsonObject) -> float:
_start, _end = [e.get('startTime') for e in [start, end]]
return PapiV1OperationDigester.__total_seconds_between_timestamps(_start, _end)
def startup_time_seconds(self) -> float:
        # Look at 'pulling-image' as that is the next lifecycle phase after startup.
create, pulling_image = self.create_time(), self.event_with_description('pulling-image').get('startTime')
return self.__total_seconds_between_timestamps(create, pulling_image)
def docker_image_pull_time_seconds(self) -> float:
start, end = [self.event_with_description(d) for d in ['pulling-image', 'localizing-files']]
return self.__total_seconds_between_events(start, end)
def localization_time_seconds(self) -> float:
start, end = [self.event_with_description(d) for d in ['localizing-files', 'running-docker']]
return self.__total_seconds_between_events(start, end)
def user_command_time_seconds(self) -> float:
start, end = [self.event_with_description(d) for d in ['running-docker', 'delocalizing-files']]
return self.__total_seconds_between_events(start, end)
def delocalization_time_seconds(self) -> float:
start, end = [self.event_with_description(d) for d in ['delocalizing-files', 'ok']]
return self.__total_seconds_between_events(start, end)
def machine_type(self) -> AnyStr:
machine_type_with_zone_prefix = self.metadata().get('runtimeMetadata').get('computeEngine').get('machineType')
return machine_type_with_zone_prefix.split('/')[-1]
def disks(self) -> DiskDict:
resources = self.metadata().get('request').get('pipelineArgs').get('resources')
# start with the boot disk and then add any others later
boot_disk = Disk('boot-disk', resources.get('bootDiskSizeGb'), DiskType.HDD)
disks_ = boot_disk.for_json()
def disk_type_from_string_v2(string: AnyStr) -> DiskType:
if string == 'PERSISTENT_HDD':
return DiskType.HDD
elif string == 'PERSISTENT_SSD':
return DiskType.SSD
else:
raise ValueError(f"Unknown disk type: {string}")
non_boot_disks = [
Disk(d.get('name'), d.get('sizeGb'), disk_type_from_string_v2(d.get('type'))) for d in resources.get('disks')]
for non_boot_disk in non_boot_disks:
disks_.update(non_boot_disk.for_json())
return disks_
class PapiV2OperationDigester(OperationDigester, ABC):
def __init__(self, operation_json: JsonObject):
super(PapiV2OperationDigester, self).__init__(operation_json)
def startup_time_seconds(self) -> float:
create = dateutil.parser.parse(self.create_time())
docker_description = "^Started pulling .*"
docker_events = [dateutil.parser.parse(d.get('timestamp')) for d in
self.event_with_description_like(docker_description)]
docker_events.sort()
return (docker_events[0] - create).total_seconds()
def docker_image_pull_time_seconds(self) -> float:
description = "^(Started|Stopped) pulling .*"
events = [dateutil.parser.parse(d.get('timestamp')) for d in self.event_with_description_like(description)]
events.sort()
return (events[-1] - events[0]).total_seconds()
def localization_time_seconds(self) -> float:
description = "^.* (Starting|Done)\\\\\\\\ localization.\"$"
events = [dateutil.parser.parse(d.get('timestamp')) for d in self.event_with_description_like(description)]
events.sort()
return (events[-1] - events[0]).total_seconds()
def user_command_time_seconds(self) -> float:
started_running_description = "^Started running .*/cromwell_root/script\"$"
started_running_events = [dateutil.parser.parse(d.get('timestamp')) for d in
self.event_with_description_like(started_running_description)]
stopped_running_description = "^Stopped running \"/cromwell_root/script\".*"
stopped_running_events = [dateutil.parser.parse(d.get('timestamp')) for d in
self.event_with_description_like(stopped_running_description)]
events = started_running_events + stopped_running_events
events.sort()
return (events[-1] - events[0]).total_seconds()
def delocalization_time_seconds(self) -> float:
description = "^.* (Starting|Done)\\\\\\\\ delocalization.\"$"
events = [dateutil.parser.parse(d.get('timestamp')) for d in self.event_with_description_like(description)]
events.sort()
return (events[-1] - events[0]).total_seconds()
def machine_type(self) -> AnyStr:
event = next(self.event_with_description_like('^Worker .* assigned in .*'))
return event.get('details').get('machineType')
def disks(self) -> DiskDict:
vm = self.metadata().get('pipeline').get('resources').get('virtualMachine')
# start with the boot disk and then add any others later
boot_disk = Disk('boot-disk', vm.get('bootDiskSizeGb'), DiskType.HDD)
disks_ = boot_disk.for_json()
def disk_type_from_string(string: AnyStr) -> DiskType:
if string == 'pd-standard':
return DiskType.HDD
elif string == 'pd-ssd':
return DiskType.SSD
else:
raise ValueError(f"Unknown disk type '{string}'")
non_boot_disks = [
Disk(d.get('name'), d.get('sizeGb'), disk_type_from_string(d.get('type'))) for d in vm.get('disks')]
for non_boot_disk in non_boot_disks:
disks_.update(non_boot_disk.for_json())
return disks_
class PapiV2AlphaOperationDigester(PapiV2OperationDigester):
def __init__(self, operation_json: JsonObject):
super(PapiV2AlphaOperationDigester, self).__init__(operation_json)
class PapiV2BetaOperationDigester(PapiV2OperationDigester):
def __init__(self, operation_json: JsonObject):
super(PapiV2BetaOperationDigester, self).__init__(operation_json)
|
{
"content_hash": "62453fc71e8894b5efdb137232aa6c5a",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 122,
"avg_line_length": 39.826989619377166,
"alnum_prop": 0.635968722849696,
"repo_name": "broadinstitute/cromwell",
"id": "bda34c1aa4c923d302b75d2477a9976700273e55",
"size": "11511",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/metadata_comparison/metadata_comparison/lib/operations_digesters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Common Workflow Language",
"bytes": "900516"
},
{
"name": "Dockerfile",
"bytes": "2932"
},
{
"name": "HTML",
"bytes": "11173"
},
{
"name": "Java",
"bytes": "1197857"
},
{
"name": "JavaScript",
"bytes": "540512"
},
{
"name": "Python",
"bytes": "109340"
},
{
"name": "Scala",
"bytes": "7699420"
},
{
"name": "Shell",
"bytes": "198407"
},
{
"name": "wdl",
"bytes": "980982"
}
],
"symlink_target": ""
}
|
"""
use objgraph to plot the reference paths for types found in dill.types
"""
#XXX: useful if could read .pkl file and generate the graph... ?
import dill as pickle
#pickle.debug.trace(True)
#import pickle
# get all objects for testing
from dill import load_types
load_types(pickleable=True,unpickleable=True)
from dill import objects
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print ("Please provide exactly one type name (e.g. 'IntType')")
msg = "\n"
for objtype in list(objects.keys())[:40]:
msg += objtype + ', '
print (msg + "...")
else:
objtype = str(sys.argv[-1])
obj = objects[objtype]
try:
import objgraph
objgraph.show_refs(obj, filename=objtype+'.png')
except ImportError:
print ("Please install 'objgraph' to view object graphs")
# EOF
|
{
"content_hash": "f1c46e1bc4bbddbd913dbca1b2f481a5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 71,
"avg_line_length": 27.151515151515152,
"alnum_prop": 0.6082589285714286,
"repo_name": "DiegoCorrea/ouvidoMusical",
"id": "d62c4cad9f497efaf6cfd422644a5a05841c6b3a",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ouvidoMusicalenv/bin/get_objgraph.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182332"
},
{
"name": "Shell",
"bytes": "51486"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand, CommandError
from ship_data.models import MultibeamRawFileMetadata
import json
import datetime
import os
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system, while also handling
# other data management and system administration tasks on the same cruise.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
class Command(BaseCommand):
help = 'Imports multibeam data'
def add_arguments(self, parser):
parser.add_argument('directory_name', help="Base where to find all the *.inf.json files", type=str)
def handle(self, *args, **options):
print(options['directory_name'])
import_multibeam_raw_file_metadata(options['directory_name'])
def iso_to_datetime(text):
text = text.split(".")[0]
return datetime.datetime.strptime(text, "%Y-%m-%dT%H:%M:%S")
def import_metadata(base_directory, sub_directory, file_name):
print(base_directory, sub_directory, file_name)
file_path = os.path.join(base_directory, sub_directory, file_name)
fp = open(file_path, "r")
information = json.load(fp)
multibeam_raw_file_metadata = MultibeamRawFileMetadata()
# file_path = models.CharField(max_length=300)
# directory = models.CharField(max_length=30)
# swath_data_file = models.CharField(max_length=40)
# file_start_time_iso = models.DateTimeField()
# file_end_time_iso = models.DateTimeField()
# minimum_longitude = models.FloatField()
# maximum_longitude = models.FloatField()
# minimum_latitude = models.FloatField()
# maximum_latitude = models.FloatField()
# minimum_sonar_depth = models.FloatField()
# maximum_sonar_depth = models.FloatField()
multibeam_raw_file_metadata.file_path = base_directory
multibeam_raw_file_metadata.directory = sub_directory
multibeam_raw_file_metadata.swath_data_file = file_name.replace("_inf.json", "")
multibeam_raw_file_metadata.file_start_time = iso_to_datetime(information['start_of_data']['time_iso'])
multibeam_raw_file_metadata.file_end_time = iso_to_datetime(information['end_of_data']['time_iso'])
multibeam_raw_file_metadata.minimum_longitude = information['limits']['minimum_longitude']
multibeam_raw_file_metadata.maximum_longitude = information['limits']['maximum_longitude']
multibeam_raw_file_metadata.minimum_latitude = information['limits']['minimum_latitude']
multibeam_raw_file_metadata.maximum_latitude = information['limits']['maximum_latitude']
multibeam_raw_file_metadata.minimum_sonar_depth = information['limits']['minimum_sonar_depth']
multibeam_raw_file_metadata.maximum_sonar_depth = information['limits']['maximum_sonar_depth']
multibeam_raw_file_metadata.save()
fp.close()
def import_multibeam_raw_file_metadata(directory):
for (dirpath, dirnames, filenames) in os.walk(directory):
for filename in filenames:
if filename.endswith("_inf.json"):
import_metadata(directory, dirpath[len(directory):], filename)
|
{
"content_hash": "490f9396374fb753f97e870ee6c19526",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 107,
"avg_line_length": 45.945945945945944,
"alnum_prop": 0.7252941176470589,
"repo_name": "cpina/science-cruise-data-management",
"id": "321228616d50f9547b02c9c1921e3d557efffe89",
"size": "3400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScienceCruiseDataManagement/ship_data/management/commands/importmultibeamrawfilemetadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59966"
},
{
"name": "HTML",
"bytes": "50774"
},
{
"name": "JavaScript",
"bytes": "106205"
},
{
"name": "Python",
"bytes": "548151"
},
{
"name": "Shell",
"bytes": "106"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle
paddle.enable_static()
def bilinear_interp_np(
input,
out_h,
out_w,
out_size=None,
actual_shape=None,
align_corners=True,
align_mode=0,
data_layout='NCHW',
):
"""bilinear interpolation implement in shape [N, C, H, W]"""
if data_layout == "NHWC":
input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW
if out_size is not None:
out_h = out_size[0]
out_w = out_size[1]
if actual_shape is not None:
out_h = actual_shape[0]
out_w = actual_shape[1]
batch_size, channel, in_h, in_w = input.shape
ratio_h = ratio_w = 0.0
if out_h > 1:
if align_corners:
ratio_h = (in_h - 1.0) / (out_h - 1.0)
else:
ratio_h = 1.0 * in_h / out_h
if out_w > 1:
if align_corners:
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
ratio_w = 1.0 * in_w / out_w
out = np.zeros((batch_size, channel, out_h, out_w))
for i in range(out_h):
if align_mode == 0 and not align_corners:
h = int(ratio_h * (i + 0.5) - 0.5)
else:
h = int(ratio_h * i)
h = max(0, h)
hid = 1 if h < in_h - 1 else 0
if align_mode == 0 and not align_corners:
idx_src_h = max(ratio_h * (i + 0.5) - 0.5, 0)
h1lambda = idx_src_h - h
else:
h1lambda = ratio_h * i - h
h2lambda = 1.0 - h1lambda
for j in range(out_w):
if align_mode == 0 and not align_corners:
w = int(ratio_w * (j + 0.5) - 0.5)
else:
w = int(ratio_w * j)
w = max(0, w)
wid = 1 if w < in_w - 1 else 0
if align_mode == 0 and not align_corners:
idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0)
w1lambda = idx_src_w - w
else:
w1lambda = ratio_w * j - w
w2lambda = 1.0 - w1lambda
out[:, :, i, j] = h2lambda * (
w2lambda * input[:, :, h, w]
+ w1lambda * input[:, :, h, w + wid]
) + h1lambda * (
w2lambda * input[:, :, h + hid, w]
+ w1lambda * input[:, :, h + hid, w + wid]
)
if data_layout == "NHWC":
out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC
return out.astype(input.dtype)
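# Illustrative usage (added example, not part of the original test file):
# upsampling a 1x1x2x2 input to 4x4 with align_corners=True maps the output
# corners exactly onto the input corners. The input values are made up.
_example_in = np.arange(4, dtype="float64").reshape(1, 1, 2, 2)
_example_out = bilinear_interp_np(_example_in, 4, 4, align_corners=True)
assert _example_out.shape == (1, 1, 4, 4)
np.testing.assert_allclose(_example_out[0, 0, 0, 0], _example_in[0, 0, 0, 0])
np.testing.assert_allclose(_example_out[0, 0, -1, -1], _example_in[0, 0, -1, -1])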
class TestBilinearInterpOp(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
self.init_test_case()
self.op_type = "bilinear_interp"
# NOTE(dev): some AsDispensible input is not used under imperative mode.
# Skip check_eager while found them in Inputs.
self.check_eager = True
input_np = np.random.random(self.input_shape).astype("float64")
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
if self.scale > 0:
out_h = int(in_h * self.scale)
out_w = int(in_w * self.scale)
else:
out_h = self.out_h
out_w = self.out_w
output_np = bilinear_interp_np(
input_np,
out_h,
out_w,
self.out_size,
self.actual_shape,
self.align_corners,
self.align_mode,
self.data_layout,
)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.check_eager = False
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'interp_method': self.interp_method,
'align_corners': self.align_corners,
'align_mode': self.align_mode,
'data_layout': self.data_layout,
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
def test_check_grad(self):
self.check_grad(
['X'], 'Out', in_place=True, check_eager=self.check_eager
)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 5]
self.out_h = 2
self.out_w = 2
self.scale = 0.0
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase1(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase2(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase3(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase4(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.0
self.out_size = np.array([2, 2]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase5(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.0
self.out_size = np.array([11, 11]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase6(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.out_size = np.array([65, 33]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpSame(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpActualShape(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpDataLayout(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 5, 5, 3]
self.out_h = 2
self.out_w = 2
self.scale = 0.0
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = True
self.align_mode = 1
self.data_layout = "NHWC"
class TestBilinearInterpOpUint8(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.init_test_case()
self.op_type = "bilinear_interp"
self.check_eager = True
input_np = np.random.randint(
low=0, high=256, size=self.input_shape
).astype("uint8")
if self.scale > 0:
out_h = int(self.input_shape[2] * self.scale)
out_w = int(self.input_shape[3] * self.scale)
else:
out_h = self.out_h
out_w = self.out_w
output_np = bilinear_interp_np(
input_np,
out_h,
out_w,
self.out_size,
self.actual_shape,
self.align_corners,
self.align_mode,
)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'interp_method': self.interp_method,
'align_corners': self.align_corners,
'align_mode': self.align_mode,
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(
place=core.CPUPlace(), atol=1, check_eager=self.check_eager
)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [1, 3, 9, 6]
self.out_h = 10
self.out_w = 9
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [4, 1, 7, 8]
self.out_h = 5
self.out_w = 13
self.scale = 0.0
self.out_size = np.array([6, 15]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpOtherMethod1(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = False
self.align_mode = 1
class TestBilinearInterpWithMethod2(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = False
self.align_mode = 0
class TestBilinearInterpWithMethod3(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = True
self.align_mode = 0
class TestBilinearInterpScale1(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 2.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpScale2(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 1.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpScale3(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 1.5
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpZero(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 0.2
self.align_corners = False
self.align_mode = 0
class TestBilinearInterpOp_attr_tensor(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.init_test_case()
self.op_type = "bilinear_interp"
self.check_eager = True
self.shape_by_1Dtensor = False
self.scale_by_1Dtensor = False
self.attrs = {
'interp_method': self.interp_method,
'align_corners': self.align_corners,
}
input_np = np.random.random(self.input_shape).astype("float64")
self.inputs = {'X': input_np}
if self.scale_by_1Dtensor:
self.inputs['Scale'] = np.array([self.scale]).astype("float32")
elif self.scale > 0:
out_h = int(self.input_shape[2] * self.scale)
out_w = int(self.input_shape[3] * self.scale)
self.attrs['scale'] = self.scale
else:
out_h = self.out_h
out_w = self.out_w
if self.shape_by_1Dtensor:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
elif self.out_size is not None:
size_tensor = []
for index, ele in enumerate(self.out_size):
size_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele)
)
self.inputs['SizeTensor'] = size_tensor
self.check_eager = False
self.attrs['out_h'] = self.out_h
self.attrs['out_w'] = self.out_w
output_np = bilinear_interp_np(
input_np,
out_h,
out_w,
self.out_size,
self.actual_shape,
self.align_corners,
)
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
def test_check_grad(self):
self.check_grad(
['X'], 'Out', in_place=True, check_eager=self.check_eager
)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 5]
self.out_h = 3
self.out_w = 3
self.scale = 0.0
self.out_size = [3, 3]
self.align_corners = True
# out_size is a 1-D tensor
class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.0
self.out_size = [8, 12]
self.align_corners = True
# scale is a 1-D tensor
class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
self.shape_by_1Dtensor = True
# scale is a 1-D tensor
class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 2.0
self.out_size = None
self.align_corners = True
self.scale_by_1Dtensor = True
class TestBilinearInterpOpAPI(unittest.TestCase):
def test_case(self):
x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
dim = fluid.data(name="dim", shape=[1], dtype="int32")
shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32")
scale_tensor = fluid.data(
name="scale_tensor", shape=[1], dtype="float32"
)
out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12])
out2 = fluid.layers.resize_bilinear(x, out_shape=[12, dim])
out3 = fluid.layers.resize_bilinear(x, out_shape=shape_tensor)
out4 = fluid.layers.resize_bilinear(
x, out_shape=[4, 4], actual_shape=actual_size
)
out5 = fluid.layers.resize_bilinear(x, scale=scale_tensor)
x_data = np.random.random((2, 3, 6, 6)).astype("float32")
dim_data = np.array([12]).astype("int32")
shape_data = np.array([12, 12]).astype("int32")
actual_size_data = np.array([12, 12]).astype("int32")
scale_data = np.array([2.0]).astype("float32")
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
results = exe.run(
fluid.default_main_program(),
feed={
"x": x_data,
"dim": dim_data,
"shape_tensor": shape_data,
"actual_size": actual_size_data,
"scale_tensor": scale_data,
},
fetch_list=[out1, out2, out3, out4, out5],
return_numpy=True,
)
expect_res = bilinear_interp_np(
x_data, out_h=12, out_w=12, align_corners=True
)
for res in results:
np.testing.assert_allclose(res, expect_res, rtol=1e-05)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "9879f6933d043a1b6b3bd84a45610789",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 80,
"avg_line_length": 30.408450704225352,
"alnum_prop": 0.541975451597962,
"repo_name": "PaddlePaddle/Paddle",
"id": "b281d30a2221f2f07aad061df939620fb2659425",
"size": "17885",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
}
|
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'TSTARSELECT_NAME'
config.General.workArea = 'work_area'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = './run_production.py'
## Input parameters
config.JobType.pyCfgParams = [
'Mode=RUNMODE',
]
config.Data.inputDataset = 'DATASET'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 16
config.Data.outLFNDirBase = '/store/user/yichen/tstar_76X/'
config.Data.publication = False
config.Site.storageSite = 'T3_TW_NTU_HEP'
|
{
"content_hash": "cb086bd178911eb5ef7cf254055824c7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 27.2,
"alnum_prop": 0.7720588235294118,
"repo_name": "enochnotsocool/TstarAnalysis",
"id": "9e97909b5579f3fc0134199882c30228af078642",
"size": "680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BaseLineSelector/production/mc_crab_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "193172"
},
{
"name": "Python",
"bytes": "13782"
},
{
"name": "Shell",
"bytes": "9014"
}
],
"symlink_target": ""
}
|
"""The test case itsself, and associated stuff"""
import string
class BaseTest:
def __init__(self):
self.res = -1
self.reason = None
def parse_config(self, config):
self.files = []
for line in config.readlines():
line = string.strip(line)
self.files.append({'file': line, 'read': 0})
baseName = 'index'
def verify_request(self, req):
"""Check that the request is valid.
Also needs to update any internal 'read' stuff"""
## XXXXX
## This needs to be done using exceptions, maybe
## XXXXX
for i in self.files:
if i['file'] == req.fname:
if i['read'] == 1:
self.res = 0
self.reason = "File %s was read twice" % (req.fname)
return 0
i['read'] = 1
break
elif i['read'] == 0:
self.res = 0
self.reason = "File %s requested, expected %s" % (req.fname, i['file'])
return 0
### Simplistic for now...
res = req.headers.getheader('Host')
return res
def result(self):
if self.res == -1:
for i in self.files:
if i['read'] == 0:
self.res = 0
self.reason = "%s not read" % (i['file'])
return self.res, self.reason
self.res = 1
return self.res, self.reason
tester = BaseTest
|
{
"content_hash": "2ac3a4137d3e92ecc1b72e2223461907",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 87,
"avg_line_length": 27.157894736842106,
"alnum_prop": 0.458656330749354,
"repo_name": "wilebeast/FireFox-OS",
"id": "984eef0e2d941e3e2abb2baf2be0e9b67e08254b",
"size": "1748",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "B2G/gecko/tools/httptester/BaseTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import os
from flask import current_app
def make_relative(curr_path: str, href: str) -> str:
"""
Given a current path and a href, return an equivalent relative path.
:param curr_path:
:param href:
:return:
"""
curr_list = curr_path.lstrip('/').split('/')
href_list = href.lstrip('/').split('/')
# How many path components are shared between the two paths?
i = len(os.path.commonprefix([curr_list, href_list]))
rel_list = (['..'] * (len(curr_list) - i - 1)) + href_list[i:]
if not rel_list or rel_list == ['']:
return './'
return '/'.join(rel_list)
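# Illustrative usage (added example, not part of the original module): the
# shared '/wiki/a' prefix is dropped and one '..' is emitted for each
# remaining directory level. The paths are made up.
assert make_relative('/wiki/a/b/page', '/wiki/a/c/other') == '../c/other'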
def breadcrumbs(path: str) -> [(str, str)]:
"""
:param path:
:rtype : list
:return:
"""
if path[-1] == "/":
path = path[0:-1]
components = path.split(os.path.sep)
terminus = os.path.splitext(components.pop())[0]
if not components:
if terminus == 'index':
return [('index', '/wiki/')]
return [('index', '/wiki/'), (terminus, "")]
elif terminus == 'index':
terminus = os.path.splitext(components.pop())[0]
crumbs = [('index', '/wiki/')]
for component in components:
component_path = "%s%s/" % (crumbs[-1][1], component)
crumbs.append((component, component_path))
crumbs.append((terminus, ""))
current_app.logger.debug(crumbs)
return crumbs
def link(kind: str, path: str) -> str:
kind = "/_" + kind
if "_" in path:
path = os.path.dirname(path) + kind
else:
path += kind
return path
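# Illustrative usage (added example, not part of the original module): the
# paths are made up. A path already containing '_' is redirected at its
# directory, as the branches above show.
assert link('history', 'notes/page') == 'notes/page/_history'
assert link('history', 'notes/_edit') == 'notes/_history'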
|
{
"content_hash": "5649e2ff59567eefa7b662850810755f",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 72,
"avg_line_length": 26.70689655172414,
"alnum_prop": 0.5584247901872176,
"repo_name": "tokyo-jesus/tamactiluya",
"id": "42b22e03855cadd5d0acf84bcf80f0be8bda13a4",
"size": "1574",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tamactiluya/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "464"
},
{
"name": "Python",
"bytes": "20387"
}
],
"symlink_target": ""
}
|
import random
from oslo_db import exception as db_exc
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as q_const
from neutron.common import utils as n_utils
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.i18n import _LI, _LW
from neutron import manager
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import db as ml2_db
LOG = logging.getLogger(__name__)
class CentralizedSnatL3AgentBinding(model_base.BASEV2):
"""Represents binding between Neutron Centralized SNAT and L3 agents."""
__tablename__ = "csnat_l3_agent_bindings"
router_id = sa.Column(sa.String(36),
sa.ForeignKey("routers.id", ondelete='CASCADE'),
primary_key=True)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id", ondelete='CASCADE'),
primary_key=True)
host_id = sa.Column(sa.String(255))
csnat_gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
l3_agent = orm.relationship(agents_db.Agent)
csnat_gw_port = orm.relationship(models_v2.Port)
class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
"""Mixin class for L3 DVR scheduler.
DVR currently supports the following use cases:
- East/West (E/W) traffic between VMs: this is handled in a
distributed manner across Compute Nodes without a centralized element.
This includes E/W traffic between VMs on the same Compute Node.
- North/South traffic for Floating IPs (FIP N/S): this is supported on the
distributed routers on Compute Nodes without any centralized element.
- North/South traffic for SNAT (SNAT N/S): this is supported via a
centralized element that handles the SNAT traffic.
To support these use cases, DVR routers rely on an L3 agent that runs on a
central node (also known as Network Node or Service Node), as well as, L3
agents that run individually on each Compute Node of an OpenStack cloud.
Each L3 agent creates namespaces to route traffic according to the use
cases outlined above. The mechanism adopted for creating and managing
these namespaces is via (Router, Agent) binding and Scheduling in general.
The main difference between distributed routers and centralized ones is
that in the distributed case, multiple bindings will exist, one for each
of the agents participating in the routed topology for the specific router.
These bindings are created in the following circumstances:
- A subnet is added to a router via router-interface-add, and that subnet
has running VM's deployed in it. A binding will be created between the
router and any L3 agent whose Compute Node is hosting the VM(s).
- An external gateway is set to a router via router-gateway-set. A binding
will be created between the router and the L3 agent running centrally
on the Network Node.
Therefore, any time a router operation occurs (create, update or delete),
scheduling will determine whether the router needs to be associated to an
L3 agent, just like a regular centralized router, with the difference that,
in the distributed case, the bindings required are established based on
the state of the router and the Compute Nodes.
"""
def dvr_update_router_addvm(self, context, port):
ips = port['fixed_ips']
for ip in ips:
subnet = ip['subnet_id']
filter_sub = {'fixed_ips': {'subnet_id': [subnet]},
'device_owner':
[q_const.DEVICE_OWNER_DVR_INTERFACE]}
router_id = None
            dvr_ports = self._core_plugin.get_ports(
                context, filters=filter_sub)
            for dvr_port in dvr_ports:
                router_id = dvr_port['device_id']
router_dict = self.get_router(context, router_id)
if router_dict.get('distributed', False):
payload = {'subnet_id': subnet}
self.l3_rpc_notifier.routers_updated(
context, [router_id], None, payload)
break
        LOG.debug('DVR: dvr_update_router_addvm %s', router_id)
def get_dvr_routers_by_portid(self, context, port_id):
"""Gets the dvr routers on vmport subnets."""
router_ids = set()
port_dict = self._core_plugin.get_port(context, port_id)
fixed_ips = port_dict['fixed_ips']
for fixedip in fixed_ips:
vm_subnet = fixedip['subnet_id']
filter_sub = {'fixed_ips': {'subnet_id': [vm_subnet]},
'device_owner':
[q_const.DEVICE_OWNER_DVR_INTERFACE]}
subnet_ports = self._core_plugin.get_ports(
context, filters=filter_sub)
for subnet_port in subnet_ports:
router_ids.add(subnet_port['device_id'])
return router_ids
def get_subnet_ids_on_router(self, context, router_id):
"""Return subnet IDs for interfaces attached to the given router."""
subnet_ids = set()
filter_rtr = {'device_id': [router_id]}
int_ports = self._core_plugin.get_ports(context, filters=filter_rtr)
for int_port in int_ports:
int_ips = int_port['fixed_ips']
int_subnet = int_ips[0]['subnet_id']
subnet_ids.add(int_subnet)
return subnet_ids
def check_ports_active_on_host_and_subnet(self, context, host,
port_id, subnet_id):
"""Check if there is any dvr serviceable port on the subnet_id."""
filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}}
ports = self._core_plugin.get_ports(context, filters=filter_sub)
for port in ports:
if (n_utils.is_dvr_serviced(port['device_owner'])
and port['status'] == 'ACTIVE'
and port['binding:host_id'] == host
and port['id'] != port_id):
LOG.debug('DVR: Active port exists for subnet %(subnet_id)s '
'on host %(host)s', {'subnet_id': subnet_id,
'host': host})
return True
return False
def dvr_deletens_if_no_port(self, context, port_id):
"""Delete the DVR namespace if no dvr serviced port exists."""
admin_context = context.elevated()
router_ids = self.get_dvr_routers_by_portid(admin_context, port_id)
port_host = ml2_db.get_port_binding_host(admin_context.session,
port_id)
if not router_ids:
LOG.debug('No namespaces available for this DVR port %(port)s '
'on host %(host)s', {'port': port_id,
'host': port_host})
return []
removed_router_info = []
for router_id in router_ids:
subnet_ids = self.get_subnet_ids_on_router(admin_context,
router_id)
port_exists_on_subnet = False
for subnet in subnet_ids:
if self.check_ports_active_on_host_and_subnet(admin_context,
port_host,
port_id,
subnet):
port_exists_on_subnet = True
break
if port_exists_on_subnet:
continue
filter_rtr = {'device_id': [router_id],
'device_owner':
[q_const.DEVICE_OWNER_DVR_INTERFACE]}
int_ports = self._core_plugin.get_ports(
admin_context, filters=filter_rtr)
for prt in int_ports:
dvr_binding = (ml2_db.
get_dvr_port_binding_by_host(context.session,
prt['id'],
port_host))
if dvr_binding:
# unbind this port from router
dvr_binding['router_id'] = None
dvr_binding.update(dvr_binding)
agent = self._get_agent_by_type_and_host(context,
q_const.AGENT_TYPE_L3,
port_host)
info = {'router_id': router_id, 'host': port_host,
'agent_id': str(agent.id)}
removed_router_info.append(info)
LOG.debug('Router namespace %(router_id)s on host %(host)s '
'to be deleted', info)
return removed_router_info
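    # Each entry in the removed_router_info list returned above has this
    # shape (values illustrative):
    #   {'router_id': '<router uuid>', 'host': 'compute-1',
    #    'agent_id': '<l3 agent uuid>'}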
def bind_snat_router(self, context, router_id, chosen_agent):
"""Bind the router to the chosen l3 agent."""
with context.session.begin(subtransactions=True):
binding = CentralizedSnatL3AgentBinding()
binding.l3_agent = chosen_agent
binding.router_id = router_id
context.session.add(binding)
LOG.debug('SNAT Router %(router_id)s is scheduled to L3 agent '
'%(agent_id)s', {'router_id': router_id,
'agent_id': chosen_agent.id})
def bind_dvr_router_servicenode(self, context, router_id,
chosen_snat_agent):
"""Bind the IR router to service node if not already hosted."""
query = (context.session.query(l3agent_sch_db.RouterL3AgentBinding).
filter_by(router_id=router_id))
for bind in query:
if bind.l3_agent_id == chosen_snat_agent.id:
LOG.debug('Distributed Router %(router_id)s already hosted '
'on snat l3_agent %(snat_id)s',
{'router_id': router_id,
'snat_id': chosen_snat_agent.id})
return
with context.session.begin(subtransactions=True):
binding = l3agent_sch_db.RouterL3AgentBinding()
binding.l3_agent = chosen_snat_agent
binding.router_id = router_id
context.session.add(binding)
LOG.debug('Binding the distributed router %(router_id)s to '
'the snat agent %(snat_id)s',
{'router_id': router_id,
'snat_id': chosen_snat_agent.id})
def bind_snat_servicenode(self, context, router_id, snat_candidates):
"""Bind the snat router to the chosen l3 service agent."""
chosen_snat_agent = random.choice(snat_candidates)
self.bind_snat_router(context, router_id, chosen_snat_agent)
return chosen_snat_agent
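    # Note: the scheduling above is uniform-random over the candidates;
    # random.choice() gives each SNAT-capable agent equal probability and
    # applies no load or capacity weighting.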
def unbind_snat_servicenode(self, context, router_id):
"""Unbind the snat router to the chosen l3 service agent."""
vm_ports = []
with context.session.begin(subtransactions=True):
query = (context.session.
query(CentralizedSnatL3AgentBinding).
filter_by(router_id=router_id))
try:
binding = query.one()
except exc.NoResultFound:
LOG.debug('no snat router binding found for %s', router_id)
return
host = binding.l3_agent.host
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
for subnet in subnet_ids:
vm_ports = (
self._core_plugin.get_ports_on_host_by_subnet(
context, host, subnet))
if vm_ports:
LOG.debug('One or more ports exist on the snat enabled '
'l3_agent host %(host)s and router_id %(id)s',
{'host': host, 'id': router_id})
break
agent_id = binding.l3_agent_id
LOG.debug('Delete binding of the SNAT router %(router_id)s '
'from agent %(id)s', {'router_id': router_id,
'id': agent_id})
context.session.delete(binding)
if not vm_ports:
query = (context.session.
query(l3agent_sch_db.RouterL3AgentBinding).
filter_by(router_id=router_id,
l3_agent_id=agent_id).
delete(synchronize_session=False))
self.l3_rpc_notifier.router_removed_from_agent(
context, router_id, host)
LOG.debug('Removed binding for router %(router_id)s and '
'agent %(id)s', {'router_id': router_id, 'id': agent_id})
def get_snat_bindings(self, context, router_ids):
"""Retrieves the dvr snat bindings for a router."""
if not router_ids:
return []
query = context.session.query(CentralizedSnatL3AgentBinding)
query = query.options(joinedload('l3_agent')).filter(
CentralizedSnatL3AgentBinding.router_id.in_(router_ids))
return query.all()
def schedule_snat_router(self, context, router_id, sync_router):
"""Schedule the snat router on l3 service agent."""
active_l3_agents = self.get_l3_agents(context, active=True)
if not active_l3_agents:
LOG.warn(_LW('No active L3 agents found for SNAT'))
return
snat_candidates = self.get_snat_candidates(sync_router,
active_l3_agents)
if snat_candidates:
try:
chosen_agent = self.bind_snat_servicenode(
context, router_id, snat_candidates)
except db_exc.DBDuplicateEntry:
LOG.info(_LI("SNAT already bound to a service node."))
return
self.bind_dvr_router_servicenode(
context, router_id, chosen_agent)
return chosen_agent
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
router_ids):
if n_utils.is_extension_supported(self, q_const.L3_HA_MODE_EXT_ALIAS):
return self.get_ha_sync_data_for_host(context, host,
router_ids=router_ids,
active=True)
return self.get_dvr_sync_data(context, host, agent,
router_ids=router_ids, active=True)
def _notify_l3_agent_new_port(resource, event, trigger, **kwargs):
LOG.debug('Received %(resource)s %(event)s', {
'resource': resource,
'event': event})
port = kwargs.get('port')
if not port:
return
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
mac_address_updated = kwargs.get('mac_address_updated')
update_device_up = kwargs.get('update_device_up')
context = kwargs['context']
if mac_address_updated or update_device_up:
l3plugin.dvr_vmarp_table_update(context, port, "add")
if n_utils.is_dvr_serviced(port['device_owner']):
l3plugin.dvr_update_router_addvm(context, port)
def _notify_port_delete(resource, event, trigger, **kwargs):
context = kwargs['context']
port = kwargs['port']
removed_routers = kwargs['removed_routers']
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
l3plugin.dvr_vmarp_table_update(context, port, "del")
for router in removed_routers:
l3plugin.remove_router_from_l3_agent(
context, router['agent_id'], router['router_id'])
def subscribe():
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_UPDATE)
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE)
registry.subscribe(
_notify_port_delete, resources.PORT, events.AFTER_DELETE)
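# A minimal sketch of the dispatch convention the registrations above rely
# on: the registry invokes each subscriber positionally as
# callback(resource, event, trigger, **kwargs). This helper is illustrative
# only and is not part of Neutron's registry implementation.
def _example_dispatch(callbacks, resource, event, trigger, **kwargs):
    """Invoke callbacks the way the registry does (sketch only)."""
    for callback in callbacks:
        callback(resource, event, trigger, **kwargs)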
|
{
"content_hash": "154189b96d96cd55c4ca71f0955a071a",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 79,
"avg_line_length": 46.861189801699716,
"alnum_prop": 0.5686736791198163,
"repo_name": "Stavitsky/neutron",
"id": "6dd4e7807ef0868a22bb1dd62ee3105e74ea0ed5",
"size": "17209",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/db/l3_dvrscheduler_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7134099"
},
{
"name": "Shell",
"bytes": "12319"
}
],
"symlink_target": ""
}
|
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-extended-server-attributes'
extended_server_attributes_policies = [
base.create_rule_default(
BASE_POLICY_NAME,
base.RULE_ADMIN_API,
"""Return extended attributes for server.
This rule will control the visibility for a set of servers attributes:
OS-EXT-SRV-ATTR:host
OS-EXT-SRV-ATTR:instance_name
OS-EXT-SRV-ATTR:reservation_id (since microversion 2.3)
OS-EXT-SRV-ATTR:launch_index (since microversion 2.3)
OS-EXT-SRV-ATTR:hostname (since microversion 2.3)
OS-EXT-SRV-ATTR:kernel_id (since microversion 2.3)
OS-EXT-SRV-ATTR:ramdisk_id (since microversion 2.3)
OS-EXT-SRV-ATTR:root_device_name (since microversion 2.3)
OS-EXT-SRV-ATTR:user_data (since microversion 2.3)""",
[
{
'method': 'GET',
'path': '/servers/{id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]
),
]
def list_rules():
return extended_server_attributes_policies
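# For illustration only: the default registered above is roughly the same
# as this legacy policy.json entry (a sketch, not generated output):
#
#   {"os_compute_api:os-extended-server-attributes": "rule:admin_api"}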
|
{
"content_hash": "e0520c3de50057a7c8c1095461947372",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 70,
"avg_line_length": 29.263157894736842,
"alnum_prop": 0.6097122302158273,
"repo_name": "rajalokan/nova",
"id": "ce1fcdd58a38c8f10edca2f085cf88ba19b661d8",
"size": "1751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/policies/extended_server_attributes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("cms", "0005_product_page_header")]
operations = [migrations.RemoveField(model_name="bootcamprunpage", name="content")]
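# On most SQL backends this RemoveField is roughly (table name assumed from
# Django's default "<app>_<model>" naming):
#
#   ALTER TABLE cms_bootcamprunpage DROP COLUMN content;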
|
{
"content_hash": "09c23f1aab66fb332120afe087bbc68f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 87,
"avg_line_length": 27.625,
"alnum_prop": 0.7375565610859729,
"repo_name": "mitodl/bootcamp-ecommerce",
"id": "483f184e9fa54d29ab7a8469466de76995129663",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms/migrations/0006_remove_bootcamprunpage_content.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "325"
},
{
"name": "Dockerfile",
"bytes": "998"
},
{
"name": "HTML",
"bytes": "70605"
},
{
"name": "JavaScript",
"bytes": "491664"
},
{
"name": "Procfile",
"bytes": "293"
},
{
"name": "Python",
"bytes": "1236492"
},
{
"name": "SCSS",
"bytes": "72463"
},
{
"name": "Shell",
"bytes": "7329"
}
],
"symlink_target": ""
}
|
from redbot.message import headers
class x_cache(headers.HttpHeader):
canonical_name = "X-Cache"
description = """\
The `X-Cache` header is used by some caches to indicate whether or not the response was served from
cache; if it contains `HIT`, it was."""
list_header = True
deprecated = False
valid_in_requests = False
valid_in_responses = True
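# Illustrative response fragment that this header class describes (values
# are examples only):
#
#   HTTP/1.1 200 OK
#   X-Cache: HIT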
|
{
"content_hash": "cbb9202727a79b4d25fa730b8c6df46d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 99,
"avg_line_length": 31,
"alnum_prop": 0.7016129032258065,
"repo_name": "mnot/redbot",
"id": "c756253804b5f953c11a7926881edc90513f4e10",
"size": "372",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "redbot/message/headers/x_cache.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10019"
},
{
"name": "Dockerfile",
"bytes": "314"
},
{
"name": "HTML",
"bytes": "12590"
},
{
"name": "JavaScript",
"bytes": "13611"
},
{
"name": "Makefile",
"bytes": "4539"
},
{
"name": "Python",
"bytes": "379846"
},
{
"name": "SCSS",
"bytes": "10095"
}
],
"symlink_target": ""
}
|
from django.core.mail.message import EmailMultiAlternatives
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext, Context
from django.template.loader import get_template
from django.utils import simplejson
from django.views.decorators.csrf import csrf_protect
from sitecontacts import settings
from sitecontacts.forms import ContactsForm
from sitecontacts.models import Contacts
from django.utils.translation import ugettext as _
@csrf_protect
def form(request):
"""
@type request: django.http.HttpRequest
"""
success = None
contacts_list = list(Contacts.objects.filter(active=True).order_by('-main'))
if request.method == 'POST':
form = ContactsForm(request.POST)
if form.is_valid():
success = settings.SUCCESS_MESSAGE
textmail = get_template('sitecontacts/contacts_email.txt')
context = Context({'from': form.cleaned_data['name'],
'email': form.cleaned_data['email'],
'phone': form.cleaned_data['phone'],
'message': form.cleaned_data['message']})
subject, from_email, to = _('Email from site!'), form.cleaned_data['email'], settings.MAILTO
textmail_content = textmail.render(context)
msg = EmailMultiAlternatives(subject, textmail_content, from_email, [to])
msg.send()
if request.is_ajax():
ret = {'fail': False}
if not form.is_valid():
ret.update({'fail': True})
errs = {}
for err in form.errors.iteritems():
errs.update({err[0]: unicode(err[1])})
ret.update({'errs': errs})
else:
ret.update({'success': success})
json = simplejson.dumps(ret, ensure_ascii=False)
return HttpResponse(json, mimetype='application/json')
else:
form = ContactsForm()
return render_to_response(
'sitecontacts/contacts_all.html',
{'form': form,
'contacts_list': contacts_list,
'success': success},
context_instance=RequestContext(request),)
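# Shapes of the JSON bodies returned by the AJAX branch above (illustrative):
#   validation failure: {"fail": true, "errs": {"<field>": "<error text>"}}
#   success:            {"fail": false, "success": "<SUCCESS_MESSAGE>"}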
def text(a, b):
return True
|
{
"content_hash": "2eb69759a27c0da6ebe4ebe4bb9acb91",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 104,
"avg_line_length": 38.728813559322035,
"alnum_prop": 0.6122538293216631,
"repo_name": "vosi/django-sitecontacts",
"id": "1431aa930f544193efe3117c15d7ef74be4ca790",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sitecontacts/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "27700"
},
{
"name": "Python",
"bytes": "57078"
}
],
"symlink_target": ""
}
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590E import *
class agilent8591EM(agilentBase8590E):
"Agilent 8591EM IVI spectrum analyzer driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'HP8591EM')
super(agilent8591EM, self).__init__(*args, **kwargs)
self._input_impedance = 50
self._frequency_low = 9e3
self._frequency_high = 1.8e9
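# Minimal usage sketch (the VISA resource string below is hypothetical):
#
#   import ivi
#   sa = ivi.agilent.agilent8591EM("TCPIP0::192.168.1.100::INSTR")
#   sa.close()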
|
{
"content_hash": "bfffcd0577775d54ff6b309a3967279d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 37.11904761904762,
"alnum_prop": 0.7408595253367544,
"repo_name": "Diti24/python-ivi",
"id": "c1b8151a3ec18f00a496a69a03138ee9bea69bb3",
"size": "1559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilent8591EM.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1992462"
}
],
"symlink_target": ""
}
|
import time
import game
import instances
import palette
import registry
from entities import entity
from entities.creatures import player
from statuses import hastestatus
class TwitchChatManager(entity.Entity):
special_viewers = []
def __init__(self):
super().__init__(' ')
self.hidden = True
self.time_since_last_help_message = 0
TwitchChatManager.special_viewers = [viewer.lower() for viewer in game.Game.config['TWITCH']['SpecialViewers'].split('\n') if viewer]
def get_config_color(self, key):
return tuple([int(d) for d in game.Game.config['TWITCH'][key].strip(' ').split(',')])
def handle_events(self, event):
current_scene = instances.scene_root
if event.type == 'TWITCHCHATMESSAGE':
if event.message:
if event.message.upper() == '!JOIN':
player_names = [e.name for e in current_scene.children if hasattr(e, 'name')]
bonus = None
                    if event.nickname not in player_names:
# Set player color
if 'broadcaster' in event.tags['badges']:
try:
player_color = self.get_config_color('BroadcasterColor')
except:
player_color = palette.get_nearest((255, 163, 0))
elif event.tags['subscriber'] != '0':
try:
player_color = self.get_config_color('SubscriberColor')
except:
player_color = palette.BRIGHT_BLUE
bonus = registry.Registry.get('weapon')()
elif event.nickname.lower() in TwitchChatManager.special_viewers:
try:
player_color = self.get_config_color('SpecialViewerColor')
except:
player_color = palette.BRIGHT_RED
else:
try:
player_color = self.get_config_color('ViewerColor')
except:
player_color = palette.get_nearest((255, 163, 0))
# Add player
pos = current_scene.get_location_near_stairs()
p = player.Player(event.nickname[0], pos, fg=player_color)
p.name = event.nickname
if bonus:
p.equip_weapon(bonus)
current_scene.append(p)
instances.console.print('{} has joined!'.format(p.display_string))
elif event.message.upper() == '!LEAVE':
for e in current_scene.children:
if not e.isinstance('Player'):
continue
if e.name == event.nickname:
e.die()
instances.console.print('{} has left.'.format(e.display_string))
elif event.message.upper().startswith('!CHEER'):
s = event.message.split(' ')
if len(s) <= 1:
return
player_names = [p.name for p in instances.scene_root.players if p.state != 'PlayerExitedState']
if event.nickname in player_names:
return
player_name = s[1].lower()
if player_name[0] == '@':
player_name = player_name[1:]
target_player = [p for p in instances.scene_root.players if p.state != 'PlayerExitedState' and p.name == player_name]
target_player = target_player[0] if target_player else None
if target_player:
target_player.add_status(hastestatus.HasteStatus(target_player))
elif event.message.upper() == '!HELP':
current_time = time.time()
if current_time - self.time_since_last_help_message > 30:
help_message = 'Available commands: !join !leave !move [uldr] !move @username !stop !attack [uldr] !throw [uldr] !drop !cheer @username'
instances.game.observer.send_message(help_message, instances.game.channel)
self.time_since_last_help_message = current_time
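# get_config_color above parses comma-separated RGB triples out of the
# game's INI-style config; an illustrative (hypothetical) fragment:
#
#   [TWITCH]
#   SpecialViewers = some_viewer
#   SubscriberColor = 0, 128, 255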
|
{
"content_hash": "eca08351fb6487a47772453eacbb8a2c",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 160,
"avg_line_length": 41.2972972972973,
"alnum_prop": 0.48276614310645727,
"repo_name": "JoshuaSkelly/lunch-break-rl",
"id": "ef49018a54b21a5b06d0e69a64e788b92e4ecddc",
"size": "4584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitchchatmanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69311"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import time
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2 # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
# pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
tensor_names = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.outputs:
if graph.is_fetchable(t):
tensor_names.append(t.name)
return tensor_names
def all_feedables():
feedable_tensors = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_feedable(t):
feedable_tensors.append(t)
return feedable_tensors
def opt_cfg():
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=True)))
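# opt_cfg() enables L1 optimizations plus function inlining and constant
# folding, so sessions built with it run against optimized graphs (used by
# testCondSwitchIdentity/testCondRecvIdentity below to guard against
# over-optimization removing identities).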
def isum(s, maximum_iterations=None):
i = constant_op.constant(0, name="i")
c = lambda i, s: math_ops.less(i, 10)
b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
_, r_s = control_flow_ops.while_loop(
c, b, [i, s], maximum_iterations=maximum_iterations)
return r_s
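# Usage sketch for isum (graph mode assumed): starting from s = 0, the loop
# above accumulates s += i for i in 0..9, so the result is 45. For example:
#
#   with ops.Graph().as_default(), session.Session() as sess:
#     print(sess.run(isum(constant_op.constant(0))))  # -> 45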
@test_util.with_cond_v2
class ControlFlowTest(test.TestCase):
def testRefIdentity(self):
with self.test_session():
v = variables.Variable(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(isinstance(v2, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
v = variables.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
variables.global_variables_initializer().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
v = variables.Variable(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testEnterShapePropagation(self):
with self.test_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = gen_control_flow_ops.enter(
v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = gen_control_flow_ops.enter(
v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
def testSwitchMergeIndexedSlices(self):
with self.test_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.test_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.test_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.test_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testDifferentFrame(self):
with self.test_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
def testCondBool(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113296297")
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
def testFetchable(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegexp(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
def testFeedable(self):
with self.test_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
def testCondIndexedSlices(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113296180")
with self.test_session():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondSparseTensor(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113296161 (SparseTensors)")
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values.eval())
self.assertAllEqual([[1], [4]], r.indices.eval())
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondResource(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
rv = resource_variable_ops.ResourceVariable(True)
variables.global_variables_initializer().run()
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval())
def testCondIndexedSlicesDifferentTypes(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113293074")
with self.test_session():
values = constant_op.constant(10)
i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
self.assertAllEqual(11, result)
def testCond_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = r.eval()
self.assertAllEqual(9, result)
def testCond_3(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
self.assertAllEqual(12, result)
def testCond_4(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113324949 (ref vars)")
with self.test_session():
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.test_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.test_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondRef(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
x = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
def testCondWithControl(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/79881896")
with self.test_session() as sess:
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, r.eval())
def testUninitializedRefIdentity(self):
with self.test_session() as sess:
v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
      # This test verifies that ref_identity accepts an uninitialized ref as
      # input, so this construction is allowed.
v_f_op = gen_array_ops.ref_identity(v_f)
v_t_op = gen_array_ops.ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], sess.run(merged_op.output))
def testCondSwitchIdentity(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/112477618 (Operation returned from cond)")
# Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondRecvIdentity(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/112477618 (Operation returned from cond)")
# Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondGrad_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113346829 (gpu failure)")
graph = ops.Graph()
with graph.as_default():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
with self.test_session():
self.assertAllEqual(1.0, grad.eval())
def testCondGrad_2(self):
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testCondGrad_3(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/110550782 (gradient w.r.t external variable)")
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
ox = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
def fn1(x):
m = x * x
return gradients_impl.gradients(m, [ox])[0]
fn2 = lambda: math_ops.multiply(ox, 3.0)
y = math_ops.multiply(7.0, ox)
r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)
self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.test_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
def testCondGrad_Gather(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113327884")
with self.test_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [v1])[0]
variables.global_variables_initializer().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.test_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileExternalControlDependencies(self):
with self.test_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(i):
with ops.control_dependencies([increment]):
return i + 1
result = control_flow_ops.while_loop(cond=lambda i: i < 2,
body=body_fn, loop_vars=[1])
self.assertAllEqual(result.eval(), 2)
self.assertAllEqual(v.eval(), 1.0)
def testWhileExternalControlDependenciesNoInput(self):
with self.test_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(unused_i):
with ops.control_dependencies([increment]):
return constant_op.constant(5, name="five")
result = control_flow_ops.while_loop(cond=lambda i: i < 5,
body=body_fn, loop_vars=[0])
result.eval()
self.assertAllEqual(v.eval(), 1.0)
def testWhileWithRefs_1(self):
with self.test_session() as sess:
x = variables.Variable(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops.ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.test_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
def testWhileWithMaximumIterations(self):
with self.test_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], r.eval())
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.test_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, r.eval())
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def training_loop_with_gradient(i):
out = control_flow_ops.while_loop(
lambda i_, _: i_ < 3,
lambda i_, j: [i_ + 1, j * v], [0, 1.0],
maximum_iterations=i)
g = gradients_impl.gradients(out, v)
with ops.control_dependencies(g):
return i + 1
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
# Create training loop, ensure we can call gradient() of
# while_loop inside the training loop.
loop = control_flow_ops.while_loop(lambda i: i < 3,
training_loop_with_gradient, [0])
xla_context.Exit()
loop_execute = array_ops.identity(loop) # Because loop is not fetchable.
# Should execute without issue.
self.assertEqual(3, self.evaluate(loop_execute))
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def inner_body(i, x):
out = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, j: [i + 1, j * v], [0, x],
maximum_iterations=i)
return out
def create_while_loop(maximum_iterations=None):
return control_flow_ops.while_loop(
lambda i, _: i < 3,
inner_body, [0, 1.0],
maximum_iterations=maximum_iterations)
loop_no_xla = create_while_loop(maximum_iterations=5)
# maximum_iterations is fine outside of an XLA scope
gs = gradients_impl.gradients(loop_no_xla, v)
self.evaluate(gs) # This should execute without error.
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop_no_maxiter = create_while_loop()
loop_with_maxiter = create_while_loop(maximum_iterations=2)
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside "
r"XLA while_loop because maximum_iterations was not passed to "
r"the tf.while_loop call \('.+'\)."):
_ = gradients_impl.gradients(loop_no_maxiter, v)
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.+' for while_loop context "
r"'.+' must be statically known \(e.g. a constant value or known "
r"shape dimension\), or be defined at or outside the while loop "
r"context '.*' \(currently defined in '.*'\)"):
_ = gradients_impl.gradients(loop_with_maxiter, v)
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294340 (enable while_v2)")
v = constant_op.constant(1.0)
def create_while_loop():
max_iter_holder = []
def create_mi():
max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
return 1.0
_ = control_flow_ops.cond(
constant_op.constant(True), create_mi, create_mi)
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=max_iter_holder[0])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop = create_while_loop()
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
r"while_loop context '.+' must be statically known \(e.g. a constant "
r"value or known shape dimension\), or be defined at or outside the "
r"while loop context '' \(currently defined in 'cond/.+'\)"):
_ = gradients_impl.gradients(loop, v)
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
with ops.device("/cpu:0"):
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
final_with_xla_context = create_while_loop()
xla_context.Exit()
final_without_xla_context = create_while_loop()
with self.test_session(use_gpu=False) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
final_value_without_xla_context = sess.run(
final_without_xla_context, feed_dict={
p: [0, 0, 0]
})
final_value_with_xla_context = sess.run(
final_with_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata)
node_stats = run_metadata.step_stats.dev_stats[0].node_stats
stack_push_count = len(
[x for x in node_stats if x.node_name.endswith("StackPushV2")])
# Pushes to the stack = product of maximum_iterations values;
      # the last two "3"s come from size(p), when p == [0, 0, 0].
self.assertEqual(stack_push_count, 5 * 3 * 3)
self.assertAllClose(final_value_with_xla_context,
final_value_without_xla_context)
  # Have more than 10 parallel iterations and hence exercise the k-bound
  # path most of the time.
def testWhile_3(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3].eval()
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3].eval()
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.test_session():
def compute(i, c, o):
c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
compute, [i, c, o], [
i.get_shape(),
tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2].eval()
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.test_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
    self._testWhile_Gpu_2(use_gpu=False)
    self._testWhile_Gpu_2(use_gpu=True)
def testWhileShape(self):
with self.test_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m],
[i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
def testWhileWithNonTensorInput_Scalar(self):
with self.test_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithNonTensorInput_Vector(self):
with self.test_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
def testWhileShapeInference(self):
with self.test_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertTrue(r[1].get_shape()[0].value is None)
self.assertEqual(r[1].get_shape()[1], tensor_shape.Dimension(2))
with self.assertRaisesRegexp(
ValueError,
r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
r"shape \(4, 2\) after one iteration. To allow the shape to vary "
r"across iterations, use the `shape_invariants` argument of "
r"tf.while_loop to specify a less-specific shape."):
r = control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 1)
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
self.assertTrue(r.dense_shape.get_shape()[0].value is None)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
def testWhileShapeInferenceIndexedSlices(self):
with self.test_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertTrue(r.values.get_shape()[0].value is None)
self.assertEqual(r.values.get_shape()[1].value, 2)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 5])])
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
# Test the cases that A -> Enter and Exit -> A are partitioned.
with self.test_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
self.assertEqual(1048576.0, r.eval())
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
def testWhileWithControl_1(self):
with self.test_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.test_session():
r = constant_op.constant(0)
condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [r_]
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_4(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_5(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
def body(x):
with ops.control_dependencies([b]):
return x + c
r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
    # Ensure that no control edges from an outer control dependency context
    # are added to nodes inside cond/while contexts.
with self.test_session() as sess:
const_true = lambda: constant_op.constant(True)
const_false = lambda: constant_op.constant(False)
cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
with ops.control_dependencies([control_flow_ops.no_op()]):
loop = control_flow_ops.while_loop(cond, body,
(constant_op.constant(5),))
self.assertEqual(0, sess.run(loop))
def testWhileCondWithControl_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113324949 (ref vars)")
with self.test_session():
v = variable_scope.get_variable(
"v", [], initializer=init_ops.constant_initializer(2))
i0 = constant_op.constant(0)
with ops.control_dependencies([i0]):
def loop_condition(i):
return i < 4
def loop_body(i):
some_cond = control_flow_ops.cond(
constant_op.constant(True),
lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
with ops.control_dependencies([some_cond]):
return i + 1
r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
variables.global_variables_initializer().run()
self.assertEqual(4, r.eval())
self.assertAllClose(65536.0, v.eval())
def testWhileCondExitControl(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294340 (enable while_v2)")
with self.test_session():
v = variables.Variable(1)
def false_branch():
cond = lambda i: i < 100
def body(i):
x = state_ops.assign(v, i)
return x + 1
loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure the control edge from Exit to a node is handled correctly.
with ops.control_dependencies([loop]):
return constant_op.constant(6.0)
r = control_flow_ops.cond(
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
variables.global_variables_initializer().run()
self.assertEqual(6.0, r.eval())
self.assertEqual(99, v.eval())
def testCondWhile_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
def _testCondWhile_3(self, use_gpu):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294340 (enable while_v2)")
with self.test_session(use_gpu=use_gpu) as sess:
p = array_ops.placeholder(dtypes.bool)
n = constant_op.constant(0.0)
def c(x):
return math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
x1 = math_ops.add(x, 1.0)
return x1
r = control_flow_ops.cond(p,
lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: math_ops.multiply(n, 2.0))
r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10, sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
self.assertEqual([2.0], sess.run(r1, {p: False}))
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
def testWhileCond_1(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
with self.test_session():
i = ops.convert_to_tensor(0, name="i")
n = ops.convert_to_tensor(10, name="n")
one = ops.convert_to_tensor(1, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
with self.test_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
      b = lambda x: control_flow_ops.cond(constant_op.constant(True),
                                          lambda: math_ops.add(x, 1),
                                          lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
with self.test_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.test_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.test_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
with self.test_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j, _):
return math_ops.less(j, 3)
def loop_body(j, _):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
return [nj, ns]
r = control_flow_ops.while_loop(
loop_iterator,
loop_body, [n, array_ops.identity(select)],
parallel_iterations=1)
variables.global_variables_initializer().run()
result = r[1].eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
with self.test_session():
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
c = constant_op.constant(0, name="c")
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
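      # asn1 is created outside the loop, so it enters the loop as an
      # invariant and runs only once; its value 1 is added to var_b on each
      # of the 10 iterations.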
self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
with self.test_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
# Change condition to check var_b
def pred(_):
return math_ops.less(var_b, 10)
      # Change body to increment both var_a and var_b
def loop_body(i):
asn1 = state_ops.assign_add(
var_a, constant_op.constant(1), name="a_add")
asn2 = state_ops.assign_add(
var_b, constant_op.constant(1), name="b_add")
with ops.control_dependencies([asn1, asn2]):
inc_b = array_ops.identity(var_b)
return inc_b
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
# b/24814668
def testWhileUpdateVariable_6(self):
with self.test_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
c = constant_op.constant(0)
variables.global_variables_initializer().run()
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
with ops.control_dependencies([asn1]):
asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
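      # var_a counts up to 10 and each running value is added to var_b, so
      # var_b = 1 + 2 + ... + 10 = 55.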
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
def testWhileQueue_1(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
with self.test_session():
s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops.stack_push_v2(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
x = constant_op.constant(0)
def c1(i, _):
return math_ops.greater(i, 0)
def b1(i, x):
ni = math_ops.subtract(i, 1)
nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
c1,
b1, [r, x],
[r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
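      # The first loop pushes 0..9 onto the stack; the second pops them, so
      # rx = 0 + 1 + ... + 9 = 45.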
self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
    gpu_dev_name = (test.gpu_device_name()
                    if test.is_gpu_available() else "/device:CPU:0")
graph = ops.Graph()
with graph.as_default():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = graph.get_operations()
r_devices = [(op.name, op.device) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Only the forward graph places Square on the gpu device.
self.assertTrue(gpu_dev_name in dev)
elif colocate and "Square" in name:
          # Both forward and backward graphs place Square/Square_grad on the
          # gpu device.
self.assertTrue(gpu_dev_name in dev)
else:
self.assertFalse(gpu_dev_name in dev)
with self.test_session(graph=graph) as sess:
self.assertAllClose(1024.0, sess.run(r))
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
def testWhileGrad_Square(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
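      # Squaring drives v through 2 -> 4 -> 16 -> 256, so r = v**8 and
      # dr/dv = 8 * v**7 = 1024.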
self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
with self.test_session():
x = array_ops.placeholder(dtypes.float32, shape=[None])
v = constant_op.constant([2.0], name="v")
n = constant_op.constant(0, name="n")
c = lambda i, v: math_ops.less(i, 5)
b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
r = control_flow_ops.while_loop(
c,
b, [n, v],
[n.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
r = gradients_impl.gradients(r[1], x)[0]
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_BaseShape(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32, [None])
v0 = constant_op.constant([2.0, 2.0], name="v")
c = lambda v: constant_op.constant(False)
b = lambda v: math_ops.multiply(v, x)
r = control_flow_ops.while_loop(c, b, [v0])
y = math_ops.square(x)
r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
def testWhileGrad_MultipleUses(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
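      # The loop yields v**8 = 256, so r = v**16 and
      # dr/dv = 16 * 2**15 = 524288.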
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
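      # r + r = 2 * v**8, so the gradient is 16 * v**7 = 2048.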
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
a = constant_op.constant(3.0, name="a")
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
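      # Four iterations give r = v * a**4 = 162, so dr/da = 4 * v * a**3 = 216
      # and dr/dv = a**4 = 81.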
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def _testNestedWhileCondWhileGrad(self, use_gpu):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
def b(x):
return control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.square(inner_loop(x)[1]),
lambda: math_ops.multiply(x, 2.0))
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
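      # One outer iteration: the inner loop scales x by 2**4 = 16 and the cond
      # squares it, so r = 256 * v**2 and dr/dv = 512 * v = 512.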
self.assertAllClose(512.0, r.eval())
def testNestedWhileCondWhileGrad(self):
self._testNestedWhileCondWhileGrad(use_gpu=False)
self._testNestedWhileCondWhileGrad(use_gpu=True)
def testWhileGrad_Variable(self):
with self.test_session():
a = variables.Variable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(r, a)
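      # As in _testWhileGrad_Mul: r = v * a**4, so dr/da = 4 * v * a**3 = 216.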
variables.global_variables_initializer().run()
self.assertAllClose(216.0, r[0].eval())
def testWhileGradInCond(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/110550782 (gradient w.r.t external variable)")
with self.test_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def fn1():
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testGradInWhileWrtInitialLoopVal(self):
with self.test_session():
x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
y = x + 1
def body(i, v):
z = v * 2
return i + 1, gradients_impl.gradients(z, x)[0]
with self.assertRaisesRegexp(
ValueError,
"Cannot compute gradient inside while loop with respect to op 'x'. "
"We do not support taking the gradient wrt or through the initial "
"value of a loop variable. Gradients can be computed through "
"loop invariants or wrt the input parameters to the loop body."):
control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
def testWhileGradInWhile(self):
with self.test_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def b1(n):
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
[tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testCondGradInNestedWhiles(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113346829 (gpu failure)")
def outer_body(i, x):
_, x = control_flow_ops.while_loop(
lambda j, x: j < 3, inner_body, [0, 0.0])
return i + 1, x
def inner_body(j, x):
y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
return j + 1, gradients_impl.gradients(y, x)[0]
i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])
with self.test_session() as sess:
i_val, x_val = sess.run([i, x])
self.assertEqual(i_val, 3)
self.assertAllClose(x_val, 1.0)
def testWhile_NestedInput(self):
with self.test_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
sess.run(r_flattened))
def testWhile_NestedBadArityFails(self):
with self.test_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars)
def testWhileGrad_ys_xs(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.add(x, y)
x1 = math_ops.multiply(x, y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
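      # Two iterations give rx = x * (x + 1) * (x + y)**2 = 300 and
      # ry = (x + 1) * (x + y) = 20, from which the partials below follow.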
r = gradients_impl.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0].eval())
r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
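      # Ten iterations double x each time, so rx = x * 2**10 and
      # d(rx)/dx = 2**10 = 1024.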
r = gradients_impl.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
r = math_ops.add(r, v)
r = gradients_impl.gradients(r, v)
self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_NoDependency(self):
with self.test_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3]))
duration = array_ops.zeros([], dtype=dtypes.int32)
def cond(duration, tensor, _):
del tensor
return duration < 10
def body(duration, tensor, _):
return (duration + 1, tensor, tensor)
loop_vars = [duration, variable, variable]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[2])
grad = gradients_impl.gradients(cost, [variable])
variables.global_variables_initializer().run()
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
with self.test_session() as sess:
c0 = constant_op.constant(0.0, name="c0")
c1 = constant_op.constant(1.0, name="c1")
duration = constant_op.constant(0, name="t")
def cond(duration, _):
return duration < 1
def body(duration, _):
return duration + 1, c1
loop_vars = [duration, c0]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[1])
grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
rx = math_ops.add(r1, r2)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval())
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
return (math_ops.add(i, 1, name="inc"), array_ops.identity(
xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
_, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, y_f_d.eval()) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0, g.eval()) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
c = lambda x: math_ops.less(x, 4.0)
b = lambda x: math_ops.multiply(x, 2.0)
return control_flow_ops.while_loop(c, b, [s])
c = lambda x: math_ops.less(x, 2.0)
b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
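      # One outer step: the inner loop takes v = 1 to 4 and b doubles it to 8,
      # so locally r = 8 * v and dr/dv = 8.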
self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
def testNestedWhileGrad_SerialInner(self):
with self.test_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
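      # One outer step applies two serial inner loops, each scaling by 2**4,
      # so r = 256 * v and dr/dv = 256.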
self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
with self.test_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
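      # b(x) = (16 * x) * (16 * x) = 256 * x**2, so one step gives r = 256 and
      # dr/dv = 512 * v = 512.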
self.assertAllClose(512.0, r.eval())
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.test_session() as sess:
def inner_loop(t):
fn = lambda n: n + math_ops.square(var)
return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
return functional_ops.map_fn(
fn=inner_loop, elems=inp, parallel_iterations=10)
var = variables.Variable(constant_op.constant(3.0))
inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
optimizer = adam.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
sess.run(variables.global_variables_initializer())
sess.run(train_op)
self.assertAllClose(2.999, var.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = ops.convert_to_tensor(2.0, name="v")
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113294377 (unknown shape)")
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
def testWhileCondGrad_UnknownShape(self):
with self.test_session() as sess:
v = array_ops.placeholder(dtypes.float32)
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
def testWhileGrad_Concat(self):
with self.test_session() as sess:
x = variable_scope.get_variable("x", initializer=[[1., 2.]])
i0 = constant_op.constant(0)
h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
s = math_ops.reduce_sum(h)
sess.run(variables.global_variables_initializer())
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
sess.run(op)
self.assertAllClose([[0.98000002, 1.98000002]], sess.run(x))
def testWhileWithRefsWithGradients_1(self):
with self.test_session() as sess:
x = variables.Variable(0.)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.float32_ref)
def body(i, x):
self.assertEqual(x.dtype, dtypes.float32_ref)
return [i + 1, gen_array_ops.ref_identity(x)]
r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [variables.Variable(73)._ref()] # pylint: disable=protected-access
grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.float32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
def testWhileGrad_IndexedSlices(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testWhileGrad_SparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testCallGradInLoop(self):
with self.test_session() as sess:
i0 = constant_op.constant(0)
params = constant_op.constant(5.0)
params_1 = math_ops.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = constant_op.constant([1.0, 2.0, 3.0])
data = math_ops.multiply(data, params_1)
x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
output_grad = control_flow_ops.while_loop(
c, b, [i0, constant_op.constant(0.0)])
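      # Each iteration adds d(sum(data))/d(params) = 6 * 2 * params = 60 to x,
      # so 10 iterations accumulate 600.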
self.assertAllClose(600.0, sess.run(output_grad)[1])
def testWhileAndTensorArray(self):
with self.test_session() as sess:
param = constant_op.constant(2.0)
n0 = constant_op.constant(0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
def b(i, y):
return [
i + 1,
functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)
]
r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
r = gradients_impl.gradients(r, param)[0]
self.assertAllClose(107520.0, sess.run(r))
def testWhileGrad_StopGrad(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.square(y)
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
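      # Two iterations: ry = y**4 = 16 and rx = (x**2 + y**2)**2 + y**4 = 185,
      # so drx/dy = 2 * (x**2 + y**2) * 2 * y + 4 * y**3 = 136 and
      # dry/dy = 4 * y**3 = 32.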
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, r.eval())
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, r.eval())
def testWhileGrad_StopGradInside(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, r.eval())
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, r.eval())
def testWhileGrad_StopGradInsideNoShape(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any([name in op.name for op in all_ops]))
def testWhileGradGradFail(self):
theta = variables.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = gradients_impl.gradients(result, theta)
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
gradients_impl.gradients(grad_theta, theta)
grad_theta_stopped = array_ops.stop_gradient(grad_theta)
gradients_impl.gradients(grad_theta_stopped, theta)
def testStopGradOnWhileGrad(self):
with self.test_session():
x = constant_op.constant(2.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x: math_ops.less(x, 100.0)
b = lambda x: math_ops.multiply(x, y)
rx = control_flow_ops.while_loop(c, b, [x])
rg = gradients_impl.gradients(rx, y)[0]
rg = array_ops.stop_gradient(rg)
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
self.assertEqual(388.0, r.eval())
def testWhileGradientWithNontrainablePath1(self):
q = variables.Variable([7., 8.])
def cond(_, y):
del y
return False
def body(x, _):
return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.test_session() as sess:
sess.run(q.initializer)
self.assertAllClose([0., 0.], sess.run(dy_dq))
def testWhileGradientWithNontrainablePath2(self):
q = variables.Variable([7., 8.])
def cond(_, y):
return math_ops.equal(y, 0.)
def body(x, _):
zero = constant_op.constant(0, dtype=dtypes.int64)
return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.test_session() as sess:
sess.run(q.initializer)
self.assertAllClose([1., 1.], sess.run(dy_dq))
def testIssue16504(self):
c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
w = variables.Variable(
initial_value=np.ones(100), dtype=dtypes.float32) / 100
k = variables.Variable(0, dtype=dtypes.int32)
chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)
def cond(k, _, chg_w):
return math_ops.logical_and(k < 10, chg_w > 1e-3)
def body(k, w, chg_w):
grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
w_n = w * math_ops.exp(-0.1 * grad)
w_n /= math_ops.reduce_sum(w_n)
chg_w = (
math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
math_ops.abs(w)))
return k + 1, w_n, chg_w
_, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
grad, = gradients_impl.gradients(w, c)
self.assertIsNotNone(grad)
def testStopGradMultiFlows(self):
with self.test_session():
def body(i, y, r):
x = variable_scope.get_variable(
"x",
shape=(),
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
y *= x
return [i + 1, y, r + math_ops.reduce_sum(y)]
i0 = constant_op.constant(0)
y0 = array_ops.ones(5)
r0 = constant_op.constant(0.0)
cond = lambda i, y, r: i < 1
_, _, r = control_flow_ops.while_loop(
cond, body, [i0, y0, r0], back_prop=True)
vars_ = variables.global_variables()
grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
result = gradients_impl.gradients(z, vars_)[0]
variables.global_variables_initializer().run()
self.assertEqual(5.0, result.eval())
def testOneValueCond(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/111124878 (don't return tuple)")
with self.test_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/112477618 (Operation returned from cond)")
with self.test_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
# Duplicate events can happen, first one is selected
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
# Duplicate events cause an error if exclusive = True
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
r4.eval()
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6.eval(), 0)
def testCaseSideEffects(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/112477618 (Operation returned from cond)")
with self.test_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
      a = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v0, 0)], 0)
      b = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v1, 1)], 1)
      c = lambda: control_flow_ops.with_dependencies(
          [state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
def testOneOpCond(self):
if control_flow_ops.ENABLE_COND_V2:
return unittest.skip("b/113324949 (ref vars)")
with self.test_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.test_session() as sess:
v = variables.Variable(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
      # Ensure that fetching 'c' still returns its value
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
with self.test_session():
v = variables.Variable(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.test_session():
v = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.Variable([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = variables.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.Variable([0.0], name="vdef")
with ops.device("/job:worker/device:GPU:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
def testGroup(self):
with self.test_session() as sess:
v1 = variables.Variable([0.0])
v2 = variables.Variable([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.Variable(p1, validate_shape=False)
v2 = variables.Variable(p2, validate_shape=False)
v3 = variables.Variable(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.Variable([[1, 2]])
v2 = variables.Variable([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.Variable([[1, 2]])
v2 = variables.Variable([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.Variable([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.Variable(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
with self.test_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, sess.run(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.test_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.test_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
r = gradients_impl.gradients(r, x)[0]
self.assertEqual(r.eval(), 524288.0)
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"]),
1)
class ControlFlowContextCheckTest(test.TestCase):
def _getWhileTensor(self):
"""Creates and returns a tensor from a while context."""
tensor = []
def body(i):
if not tensor:
tensor.append(constant_op.constant(1))
return i + tensor[0]
control_flow_ops.while_loop(lambda i: i < 10, body, [0])
return tensor[0]
def _getCondTensor(self):
cond_tensor = []
def true_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
return cond_tensor[0]
def testInvalidContext(self):
# Accessing a while loop tensor outside of control flow is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
"is in a while loop. See info log for more details."):
math_ops.add(1, while_tensor)
def testInvalidContextInCond(self):
# Accessing a while loop tensor in cond is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
"'while/Const_1' is in a while loop. See info log for more details."):
# TODO(skyewm): this passes if we return while_tensor directly instead
# of using it as input to another op.
control_flow_ops.cond(
math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
lambda: constant_op.constant(0))
def testInvalidContextInWhile(self):
# Accessing a while loop tensor in a different while loop is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_1/Add' as input to 'while/Const_1' because they are "
"in different while loops. See info log for more details."):
control_flow_ops.while_loop(lambda i: i < 10,
lambda x: math_ops.add(1, while_tensor), [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_2/NextIteration' as input to 'while/Const_1' "
"because they are in different while loops. See info log for more "
"details."):
control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])
def testValidCondContext(self):
# Accessing a tensor from a cond context is OK (although dangerous).
cond_tensor = self._getCondTensor()
math_ops.add(1, cond_tensor)
def testValidCondContextBranches(self):
# Accessing a tensor from a cond context from the other branch's cond
# context is OK (although dangerous).
cond_tensor = []
def branch_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)
def testValidWhileContext(self):
# Accessing a tensor in a nested while is OK.
def body(_):
c = constant_op.constant(1)
return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
def testValidNestedContexts(self):
# Accessing a tensor from a cond context in a while context, all inside an
# outer while context, is OK.
def body(_):
cond_tensor = self._getCondTensor()
# Create another cond containing the while loop for good measure
return control_flow_ops.cond(
math_ops.less(1, 2),
lambda: control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + cond_tensor, [0]),
lambda: constant_op.constant(0))
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
def testInvalidNestedContexts(self):
# Accessing a tensor from a while context in a different while context, all
# inside a cond context, is illegal.
def true_fn():
while_tensor = self._getWhileTensor()
return control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + while_tensor, [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'cond/while_1/add' as input to 'cond/while/Const_1' because"
" they are in different while loops. See info log for more details."):
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.test_session():
v1 = variables.Variable([1.0])
add1 = math_ops.add(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
2.0)
v2 = variables.Variable([10.0])
add2 = math_ops.add(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.test_session():
v1 = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
def testAcceptTensorsAsControlInputs(self):
with self.test_session():
var = variables.Variable(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
      self.assertEqual(1, var.eval())
class AssertTest(test.TestCase):
def testGuardedAssertDoesNotCopyWhenTrue(self):
with self.test_session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
steps: Total number of repeated steps to run the loop.
Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
sess.run(variables.global_variables_initializer())
if static_unroll:
for _ in xrange(steps):
i, x = loop_body(i, x)
else:
i, x = control_flow_ops.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in xrange(3):
# exclude warm up time
sess.run(r)
start_time = time.time()
for _ in xrange(num_iters):
sess.run(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_cond_v2
class EagerTest(test.TestCase):
def testCond(self):
with context.eager_mode():
pred = math_ops.less(1, 2)
fn1 = lambda: [constant_op.constant(10)]
fn2 = lambda: [constant_op.constant(20)]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual(r.numpy(), 10)
self.assertFalse(isinstance(r, list))
def testWhileLoop(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])
def testWhileLoopWithMaxIterations(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(
isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode():
tensor = constant_op.constant(0)
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
self.assertEqual(1, r.numpy())
def testWithDependencies(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
t3 = control_flow_ops.with_dependencies(t1, t2)
self.assertAllEqual(t2.numpy(), t3.numpy())
def testTuple(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
tup1, tup2 = control_flow_ops.tuple([t1, t2])
self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy())
def testCase(self):
with context.eager_mode():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
[(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "38a9d699f5900718a7cac51c949b77a9",
"timestamp": "",
"source": "github",
"line_count": 3457,
"max_line_length": 112,
"avg_line_length": 35.87648249927683,
"alnum_prop": 0.6082241483571861,
"repo_name": "AnishShah/tensorflow",
"id": "374faad7a7f989ab80492320cee0bbc89c78465b",
"size": "124748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/control_flow_ops_py_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "337393"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48452986"
},
{
"name": "CMake",
"bytes": "195768"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1210238"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834103"
},
{
"name": "Jupyter Notebook",
"bytes": "2584246"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40782103"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "458367"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import collections
import jinja2
import json
import os
import urllib
import webapp2
import league_website
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def MakeDivisionJson(division):
def MakeTeamJson(team):
return {
"name": team.name,
"wins": team.wins,
"ties": team.ties,
"losses": team.losses,
"games_played": team.wins + team.ties + team.losses,
"points_retrieved": team.points,
"points_calculated": 3 * team.wins + team.ties
}
return {
"name": division.name,
"teams": [MakeTeamJson(t) for t in division.teams]
}
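# A division serialises to JSON like (illustrative values only):
#   {"name": "Division 1",
#    "teams": [{"name": "Reds", "wins": 3, "ties": 1, "losses": 0,
#               "games_played": 4, "points_retrieved": 10,
#               "points_calculated": 10}]}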
class StandingsHandler(webapp2.RequestHandler):
def get(self):
divisions = league_website.RetrieveLeagueWebsite().Standings().Divisions()
template = JINJA_ENVIRONMENT.get_template('standings.html')
self.response.write(template.render({"divisions": divisions}))
#response = {
# "divisions": [MakeDivisionJson(d) for d in divisions]
#}
#self.response.headers['Content-Type'] = 'application/json'
#json.dump(response, self.response.out)
class CalculatedStandingsHandler(webapp2.RequestHandler):
  def get(self):
    # The recalculation of standings from match results is not implemented
    # yet; render the raw match list with the schedule template for now.
    matches = league_website.RetrieveLeagueWebsite().Matches()
    template = JINJA_ENVIRONMENT.get_template('schedule.html')
    self.response.write(template.render({"matches": matches}))
def MakeMatchJson(match):
match_dict = {
"time": match.time.isoformat(),
"location": match.location,
"teams": [match.team_1, match.team_2],
"goals": [match.result_1, match.result_2]
}
return match_dict
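# A match serialises to JSON like (illustrative values only):
#   {"time": "2015-06-01T19:30:00", "location": "Field 2",
#    "teams": ["Reds", "Blues"], "goals": [2, 2]}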
class ScheduleHandler(webapp2.RequestHandler):
def get(self):
matches = league_website.RetrieveLeagueWebsite().Matches()
response = {
"matches": [MakeMatchJson(m) for m in matches]
}
self.response.headers['Content-Type'] = 'application/json'
json.dump(response, self.response.out)
app = webapp2.WSGIApplication([
('/', StandingsHandler),
('/standings', StandingsHandler),
('/calculated_standings', CalculatedStandingsHandler),
('/schedule', ScheduleHandler),
], debug=True)
|
{
"content_hash": "7ea9d5b7ba114ff0a699c8e411a9e84f",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 29.376623376623378,
"alnum_prop": 0.6865605658709107,
"repo_name": "fortuna/corporate-soccer-league",
"id": "04018e3e843b15ed6723480e6cc7781a8b46ce3d",
"size": "2863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_engine/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "206649"
},
{
"name": "Python",
"bytes": "13067"
}
],
"symlink_target": ""
}
|
import collectd
from pysphere import VIServer
RUN = 0
METRIC_TYPES = {
'datastorecapacity': ('z_dscapacity', 'current'),
'datastorefreespace': ('z_dsfreespace', 'current'),
'datastoreusagepercent': ('z_dsusage_pct', 'current'),
'zonedatacenterscount': ('z_datacenters_count', 'current'),
'zoneclusterscount': ('z_clusters_count', 'current'),
'zonehostscount': ('z_hosts_count', 'current'),
'zonememoryusage': ('z_memory_usage', 'current'),
'zonecpuusage': ('z_cpu_usage', 'current'),
'zonememoryusagepercent': ('z_memory_usage_pct', 'current'),
'zonecpuusagepercent': ('z_cpu_usage_pct', 'current'),
'zonetotalmemory': ('z_total_memory', 'current'),
'zonecputotal': ('z_cpu_total', 'current'),
'hoststatus': ('h_status', 'current'),
'hostmemoryusage': ('h_memory_usage', 'current'),
'hostcpuusage': ('h_cpu_usage', 'current'),
'hostmemoryusagepercent': ('h_memory_usage_pct', 'current'),
'hostcpuusagepercent': ('h_cpu_usage_pct', 'current'),
'hosttotalmemory': ('h_total_memory', 'current'),
'hostcputotal': ('h_cpu_total', 'current'),
'hostrunningvms': ('h_vm_running_count', 'current'),
'hoststoppedvms': ('h_vm_stopped_count', 'current'),
'hosttotalvms': ('h_vm_total_count', 'current'),
'zonerunningvms': ('z_vm_running_count', 'current'),
'zonestoppedvms': ('z_vm_stopped_count', 'current'),
'zonetotalvms': ('z_vm_total_count', 'current'),
'datacenterclusterscount': ('d_clusters_count', 'current'),
'datacenterhostscount': ('d_hosts_count', 'current'),
    'clusterhostscount': ('c_hosts_count', 'current'),
'datacenterrunningvms': ('d_vm_running_count', 'current'),
'datacenterstoppedvms': ('d_vm_stopped_count', 'current'),
'datacentertotalvms': ('d_vm_total_count', 'current'),
'datacentermemoryusage': ('d_memory_usage', 'current'),
'datacentercpuusage': ('d_cpu_usage', 'current'),
'datacentermemoryusagepercent': ('d_memory_usage_pct', 'current'),
'datacentercpuusagepercent': ('d_cpu_usage_pct', 'current'),
'datacentertotalmemory': ('d_total_memory', 'current'),
'datacentercputotal': ('d_cpu_total_count', 'current'),
'clusterrunningvms': ('c_vm_running_count', 'current'),
'clusterstoppedvms': ('c_vm_stopped_count', 'current'),
'clustertotalvms': ('c_vm_total_count', 'current'),
'clustermemoryusage': ('c_memory_usage', 'current'),
'clustercpuusage': ('c_cpu_usage', 'current'),
'clustermemoryusagepercent': ('c_memory_usage_percent', 'current'),
'clustercpuusagepercent': ('c_cpu_usage_percent', 'current'),
'clustertotalmemory': ('c_total_memory', 'current'),
'clustercputotal': ('c_cpu_total', 'current'),
}
METRIC_DELIM = '.'
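# Metric names are assembled as dotted paths, e.g.
#   <vcenter>.<datacenter>.<cluster>.<host>.hostcpuusage
# and read_callback() looks up the final component in METRIC_TYPES to pick
# the type_instance prefix and collectd type at dispatch time.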
def get_stats():
stats = dict()
v = VCENTERLIST.split()
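  # VCENTERLIST is a whitespace-separated list of vCenter hostnames taken
  # from the collectd configuration (see configure_callback below).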
for vcenter in v:
logger('verb', "get_stats calls vcenter %s user %s" % (vcenter, USERNAME))
server = VIServer()
try:
server.connect(vcenter, USERNAME, PASSWORD)
except Exception:
logger('warn', "failed to connect to %s" % (vcenter))
continue
# get datastores
for ds, dsname in server.get_datastores().items():
DatastoreCapacity = 0
DatastoreFreespace = 0
DatastoreUsagePercent = 0
try:
logger('verb', "get_stats calls Datastore metrics query on vcenter: %s for datastore: %s" % (vcenter, dsname))
props = server._retrieve_properties_traversal(property_names=['name', 'summary.capacity', 'summary.freeSpace'], from_node=ds, obj_type="Datastore")
for prop_set in props:
# mor = prop_set.Obj #in case you need it
for prop in prop_set.PropSet:
if prop.Name == "summary.capacity":
DatastoreCapacity = (prop.Val / 1048576)
elif prop.Name == "summary.freeSpace":
DatastoreFreespace = (prop.Val / 1048576)
except Exception:
logger('warn', "failed to get Datastore metrics value on vcenter: %s for datastore: %s" % (vcenter, dsname))
      # Guard against a zero capacity (e.g. when the property query failed).
      if DatastoreCapacity > 0:
        DatastoreUsagePercent = (((DatastoreCapacity - DatastoreFreespace) * 100) / DatastoreCapacity)
metricnameZoneDatastoreCapacity = METRIC_DELIM.join([vcenter.lower(), "datastores", dsname.lower(), 'datastorecapacity'])
metricnameZoneDatastoreFreespace = METRIC_DELIM.join([vcenter.lower(), "datastores", dsname.lower(), 'datastorefreespace'])
metricnameZoneDatastoreUsagePercent = METRIC_DELIM.join([vcenter.lower(), "datastores", dsname.lower(), 'datastoreusagepercent'])
try:
stats[metricnameZoneDatastoreCapacity] = DatastoreCapacity
stats[metricnameZoneDatastoreFreespace] = DatastoreFreespace
stats[metricnameZoneDatastoreUsagePercent] = DatastoreUsagePercent
except (TypeError, ValueError):
pass
ZoneDatacentersCount = 0
ZoneClustersCount = 0
ZoneHostsCount = 0
ZoneRunningVMS = 0
ZoneStoppedVMS = 0
ZoneTotalVMS = 0
ZoneMemoryUsage = 0
ZoneCpuUsage = 0
ZoneTotalMemory = 0
ZoneCpuTotal = 0
logger('verb', "get_stats calls get_datacenters query on vcenter: %s" % (vcenter))
datacenters = server.get_datacenters()
logger('verb', "get_stats completed get_datacenters query on vcenter: %s" % (vcenter))
ZoneDatacentersCount = len(datacenters)
for d, dname in server.get_datacenters().items():
if "." in dname:
dname = dname.split(".")[0]
DatacenterRunningVMS = 0
DatacenterStoppedVMS = 0
DatacenterTotalVMS = 0
DatacenterClustersCount = 0
DatacenterHostsCount = 0
DatacenterMemoryUsage = 0
DatacenterCpuUsage = 0
DatacenterTotalMemory = 0
DatacenterCpuTotal = 0
logger('verb', "get_stats calls get_clusters query on vcenter: %s for datacenter: %s" % (vcenter, dname))
clusters = server.get_clusters(d)
logger('verb', "get_stats completed get_clusters query on vcenter: %s for datacenter: %s" % (vcenter, dname))
DatacenterClustersCount = len(clusters)
ZoneClustersCount = ZoneClustersCount + DatacenterClustersCount
for c, cname in server.get_clusters(d).items():
if "." in cname:
cname = cname.split(".")[0]
ClusterMemoryUsage = 0
ClusterCpuUsage = 0
ClusterTotalMemory = 0
ClusterCpuTotal = 0
ClusterRunningVMS = 0
ClusterStoppedVMS = 0
ClusterTotalVMS = 0
logger('verb', "get_stats calls get_hosts query on vcenter: %s for cluster: %s" % (vcenter, cname))
hosts = server.get_hosts(c)
logger('verb', "get_stats completed get_hosts query on vcenter: %s for cluster: %s" % (vcenter, cname))
ClusterHostsCount = len(hosts)
DatacenterHostsCount = DatacenterHostsCount + ClusterHostsCount
ZoneHostsCount = ZoneHostsCount + DatacenterHostsCount
for h, hname in server.get_hosts(c).items():
          HostMemoryUsage = 0
          HostCpuUsage = 0
          HostTotalMemory = 0
          HostNumCpuCores = 0
          HostMhzPerCore = 0
          HostStatus = ''
          # Initialise the VM counters as well, so a failed query below
          # does not leave them undefined.
          HostRunningVMS = 0
          HostStoppedVMS = 0
          HostTotalVMS = 0
if "." in hname:
hname = hname.split(".")[0]
try:
logger('verb', "get_stats calls Host CPU and Memory metrics query on vcenter: %s for host: %s" % (vcenter, hname))
props = server._retrieve_properties_traversal(property_names=['name', 'summary.overallStatus', 'summary.quickStats.overallMemoryUsage', 'summary.quickStats.overallCpuUsage', 'summary.hardware.memorySize', 'summary.hardware.numCpuCores', 'summary.hardware.cpuMhz'], from_node=h, obj_type="HostSystem")
for prop_set in props:
# mor = prop_set.Obj #in case you need it
for prop in prop_set.PropSet:
if prop.Name == "summary.quickStats.overallMemoryUsage":
HostMemoryUsage = prop.Val
elif prop.Name == "summary.quickStats.overallCpuUsage":
HostCpuUsage = prop.Val
elif prop.Name == "summary.hardware.memorySize":
HostTotalMemory = (prop.Val / 1048576)
elif prop.Name == "summary.hardware.numCpuCores":
HostNumCpuCores = prop.Val
elif prop.Name == "summary.hardware.cpuMhz":
HostMhzPerCore = prop.Val
elif prop.Name == "summary.overallStatus":
HostStatus = prop.Val
if HostStatus == "green":
HostStatus = 0
elif HostStatus == "gray":
HostStatus = 1
elif HostStatus == "yellow":
HostStatus = 2
elif HostStatus == "red":
HostStatus = 3
except Exception:
logger('warn', "failed to get Host CPU and Memory metrics value on vcenter: %s for host: %s" % (vcenter, hname))
try:
logger('verb', "get_stats calls HostRunningVMS query on vcenter: %s for host: %s" % (vcenter, hname))
HostRunningVMS = len(server.get_registered_vms(h, status='poweredOn'))
except Exception:
logger('warn', "failed to get nb of running VMS value on %s" % (hname))
try:
logger('verb', "get_stats calls HostStoppedVMS query on vcenter: %s for host: %s" % (vcenter, hname))
HostStoppedVMS = len(server.get_registered_vms(h, status='poweredOff'))
except Exception:
logger('warn', "failed to get nb of stopped VMS value on %s" % (hname))
try:
logger('verb', "get_stats calls HostTotalVMS query on vcenter: %s for host: %s" % (vcenter, hname))
HostTotalVMS = len(server.get_registered_vms(h))
except Exception:
logger('warn', "failed to get all VMS count on %s" % (hname))
          HostCpuTotal = (HostNumCpuCores * HostMhzPerCore)
          # Avoid division by zero when the hardware query failed.
          HostMemoryUsagePercent = ((HostMemoryUsage * 100) / HostTotalMemory) if HostTotalMemory else 0
          HostCpuUsagePercent = ((HostCpuUsage * 100) / HostCpuTotal) if HostCpuTotal else 0
metricnameHostStatus = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hoststatus'])
metricnameHostMemoryUsagePercent = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hostmemoryusagepercent'])
metricnameHostCpuUsagePercent = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hostcpuusagepercent'])
metricnameHostMemoryUsage = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hostmemoryusage'])
metricnameHostCpuUsage = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hostcpuusage'])
metricnameHostTotalMemory = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hosttotalmemory'])
metricnameHostCpuTotal = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hostcputotal'])
metricnameHostRunningVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hostrunningvms'])
metricnameHostStoppedVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hoststoppedvms'])
metricnameHostTotalVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), hname.lower(), 'hosttotalvms'])
ClusterMemoryUsage = ClusterMemoryUsage + HostMemoryUsage
ClusterCpuUsage = ClusterCpuUsage + HostCpuUsage
ClusterTotalMemory = ClusterTotalMemory + HostTotalMemory
ClusterCpuTotal = ClusterCpuTotal + HostCpuTotal
ClusterRunningVMS = ClusterRunningVMS + HostRunningVMS
ClusterStoppedVMS = ClusterStoppedVMS + HostStoppedVMS
ClusterTotalVMS = ClusterTotalVMS + HostTotalVMS
          ClusterMemoryUsagePercent = ((ClusterMemoryUsage * 100) / ClusterTotalMemory) if ClusterTotalMemory else 0
          ClusterCpuUsagePercent = ((ClusterCpuUsage * 100) / ClusterCpuTotal) if ClusterCpuTotal else 0
try:
stats[metricnameHostStatus] = HostStatus
stats[metricnameHostMemoryUsage] = HostMemoryUsage
stats[metricnameHostCpuUsage] = HostCpuUsage
stats[metricnameHostTotalMemory] = HostTotalMemory
stats[metricnameHostCpuUsagePercent] = HostCpuUsagePercent
stats[metricnameHostMemoryUsagePercent] = HostMemoryUsagePercent
stats[metricnameHostCpuTotal] = HostCpuTotal
stats[metricnameHostRunningVMS] = HostRunningVMS
stats[metricnameHostStoppedVMS] = HostStoppedVMS
stats[metricnameHostTotalVMS] = HostTotalVMS
except (TypeError, ValueError):
pass
DatacenterRunningVMS = DatacenterRunningVMS + ClusterRunningVMS
DatacenterStoppedVMS = DatacenterStoppedVMS + ClusterStoppedVMS
DatacenterTotalVMS = DatacenterTotalVMS + ClusterTotalVMS
DatacenterMemoryUsage = DatacenterMemoryUsage + ClusterMemoryUsage
DatacenterCpuUsage = DatacenterCpuUsage + ClusterCpuUsage
DatacenterTotalMemory = DatacenterTotalMemory + ClusterTotalMemory
DatacenterCpuTotal = DatacenterCpuTotal + ClusterCpuTotal
        DatacenterMemoryUsagePercent = ((DatacenterMemoryUsage * 100) / DatacenterTotalMemory) if DatacenterTotalMemory else 0
        DatacenterCpuUsagePercent = ((DatacenterCpuUsage * 100) / DatacenterCpuTotal) if DatacenterCpuTotal else 0
metricnameClusterRunningVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clusterrunningvms'])
metricnameClusterStoppedVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clusterstoppedvms'])
metricnameClusterTotalVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clustertotalvms'])
metricnameClusterMemoryUsage = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clustermemoryusage'])
metricnameClusterCpuUsage = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clustercpuusage'])
metricnameClusterTotalMemory = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clustertotalmemory'])
metricnameClusterCpuTotal = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clustercputotal'])
metricnameClusterMemoryUsagePercent = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clustermemoryusagepercent'])
metricnameClusterCpuUsagePercent = METRIC_DELIM.join([vcenter.lower(), dname.lower(), cname.lower(), 'clustercpuusagepercent'])
try:
stats[metricnameClusterRunningVMS] = ClusterRunningVMS
stats[metricnameClusterStoppedVMS] = ClusterStoppedVMS
stats[metricnameClusterTotalVMS] = ClusterTotalVMS
stats[metricnameClusterMemoryUsage] = ClusterMemoryUsage
stats[metricnameClusterCpuUsage] = ClusterCpuUsage
stats[metricnameClusterMemoryUsagePercent] = ClusterMemoryUsagePercent
stats[metricnameClusterCpuUsagePercent] = ClusterCpuUsagePercent
stats[metricnameClusterTotalMemory] = ClusterTotalMemory
stats[metricnameClusterCpuTotal] = ClusterCpuTotal
except (TypeError, ValueError):
pass
# post datacenter metrics count here
ZoneRunningVMS = ZoneRunningVMS + DatacenterRunningVMS
ZoneStoppedVMS = ZoneStoppedVMS + DatacenterStoppedVMS
ZoneTotalVMS = ZoneTotalVMS + DatacenterTotalVMS
ZoneMemoryUsage = ZoneMemoryUsage + DatacenterMemoryUsage
ZoneCpuUsage = ZoneCpuUsage + DatacenterCpuUsage
ZoneTotalMemory = ZoneTotalMemory + DatacenterTotalMemory
ZoneCpuTotal = ZoneCpuTotal + DatacenterCpuTotal
      ZoneMemoryUsagePercent = ((ZoneMemoryUsage * 100) / ZoneTotalMemory) if ZoneTotalMemory else 0
      ZoneCpuUsagePercent = ((ZoneCpuUsage * 100) / ZoneCpuTotal) if ZoneCpuTotal else 0
metricnameDatacenterRunningVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacenterrunningvms'])
metricnameDatacenterStoppedVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacenterstoppedvms'])
metricnameDatacenterTotalVMS = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacentertotalvms'])
metricnameDatacenterMemoryUsage = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacentermemoryusage'])
metricnameDatacenterCpuUsage = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacentercpuusage'])
metricnameDatacenterMemoryUsagePercent = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacentermemoryusagepercent'])
metricnameDatacenterCpuUsagePercent = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacentercpuusagepercent'])
metricnameDatacenterTotalMemory = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacentertotalmemory'])
metricnameDatacenterCpuTotal = METRIC_DELIM.join([vcenter.lower(), dname.lower(), 'datacentercputotal'])
try:
stats[metricnameDatacenterRunningVMS] = DatacenterRunningVMS
stats[metricnameDatacenterStoppedVMS] = DatacenterStoppedVMS
stats[metricnameDatacenterTotalVMS] = DatacenterTotalVMS
stats[metricnameDatacenterMemoryUsage] = DatacenterMemoryUsage
stats[metricnameDatacenterCpuUsage] = DatacenterCpuUsage
stats[metricnameDatacenterMemoryUsagePercent] = DatacenterMemoryUsagePercent
stats[metricnameDatacenterCpuUsagePercent] = DatacenterCpuUsagePercent
stats[metricnameDatacenterTotalMemory] = DatacenterTotalMemory
stats[metricnameDatacenterCpuTotal] = DatacenterCpuTotal
except (TypeError, ValueError):
pass
# post zone metrics count here
metricnameZoneRunningVMS = METRIC_DELIM.join([vcenter.lower(), 'zonerunningvms'])
metricnameZoneStoppedVMS = METRIC_DELIM.join([vcenter.lower(), 'zonestoppedvms'])
metricnameZoneTotalVMS = METRIC_DELIM.join([vcenter.lower(), 'zonetotalvms'])
metricnameZoneMemoryUsage = METRIC_DELIM.join([vcenter.lower(), 'zonememoryusage'])
metricnameZoneCpuUsage = METRIC_DELIM.join([vcenter.lower(), 'zonecpuusage'])
metricnameZoneMemoryUsagePercent = METRIC_DELIM.join([vcenter.lower(), 'zonememoryusagepercent'])
metricnameZoneCpuUsagePercent = METRIC_DELIM.join([vcenter.lower(), 'zonecpuusagepercent'])
metricnameZoneTotalMemory = METRIC_DELIM.join([vcenter.lower(), 'zonetotalmemory'])
metricnameZoneCpuTotal = METRIC_DELIM.join([vcenter.lower(), 'zonecputotal'])
try:
stats[metricnameZoneRunningVMS] = ZoneRunningVMS
stats[metricnameZoneStoppedVMS] = ZoneStoppedVMS
stats[metricnameZoneTotalVMS] = ZoneTotalVMS
stats[metricnameZoneMemoryUsage] = ZoneMemoryUsage
stats[metricnameZoneCpuUsage] = ZoneCpuUsage
stats[metricnameZoneMemoryUsagePercent] = ZoneMemoryUsagePercent
stats[metricnameZoneCpuUsagePercent] = ZoneCpuUsagePercent
stats[metricnameZoneTotalMemory] = ZoneTotalMemory
stats[metricnameZoneCpuTotal] = ZoneCpuTotal
except (TypeError, ValueError):
pass
metricnameZoneDatacentersCount = METRIC_DELIM.join([vcenter.lower(), 'zonedatacenterscount'])
metricnameZoneClustersCount = METRIC_DELIM.join([vcenter.lower(), 'zoneclusterscount'])
metricnameZoneHostsCount = METRIC_DELIM.join([vcenter.lower(), 'zonehostscount'])
try:
stats[metricnameZoneDatacentersCount] = ZoneDatacentersCount
stats[metricnameZoneClustersCount] = ZoneClustersCount
stats[metricnameZoneHostsCount] = ZoneHostsCount
except (TypeError, ValueError):
pass
server.disconnect()
return stats
# callback configuration for module
def configure_callback(conf):
global NAME, VCENTERLIST, USERNAME, PASSWORD, VERBOSE_LOGGING, SKIP
NAME = 'Vcenter'
VCENTERLIST = ''
USERNAME = ''
PASSWORD = ''
VERBOSE_LOGGING = False
SKIP = 10
for node in conf.children:
if node.key == "Vcenter":
VCENTERLIST = node.values[0]
elif node.key == "Username":
USERNAME = node.values[0]
elif node.key == "Password":
PASSWORD = node.values[0]
elif node.key == "Verbose":
VERBOSE_LOGGING = bool(node.values[0])
elif node.key == "Skip":
SKIP = int(node.values[0])
else:
logger('warn', 'Unknown config key: %s' % node.key)
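# Illustrative collectd configuration for this plugin (hostnames and
# credentials below are placeholders):
#   <Plugin python>
#     Import "vcenter"
#     <Module vcenter>
#       Vcenter "vcenter01.example.com vcenter02.example.com"
#       Username "monitor"
#       Password "secret"
#       Verbose false
#       Skip 10
#     </Module>
#   </Plugin>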
def read_callback():
global RUN, SKIP
RUN += 1
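  # Only query vCenter on every SKIP-th read callback; the full inventory
  # walk in get_stats() is expensive relative to the collectd interval.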
if RUN % SKIP != 1:
return
logger('verb', "beginning read_callback")
info = get_stats()
if not info:
logger('warn', "%s: No data received" % NAME)
return
for key, value in info.items():
key_prefix = ''
key_root = key
logger('verb', "read_callback key %s" % (key))
logger('verb', "read_callback value %s" % (value))
    if key not in METRIC_TYPES:
try:
key_prefix, key_root = key.rsplit(METRIC_DELIM, 1)
except ValueError:
pass
if key_root not in METRIC_TYPES:
continue
key_root, val_type = METRIC_TYPES[key_root]
key_name = METRIC_DELIM.join([key_prefix, key_root])
logger('verb', "key_name %s" % (key_name))
val = collectd.Values(plugin=NAME, type=val_type)
val.type_instance = key_name
val.values = [value]
val.dispatch()
# logging function
def logger(t, msg):
if t == 'err':
collectd.error('%s: %s' % (NAME, msg))
elif t == 'warn':
collectd.warning('%s: %s' % (NAME, msg))
elif t == 'verb':
if VERBOSE_LOGGING:
collectd.info('%s: %s' % (NAME, msg))
else:
collectd.notice('%s: %s' % (NAME, msg))
# main
collectd.register_config(configure_callback)
collectd.register_read(read_callback)
|
{
"content_hash": "5ba8be154b363999843d09e25330f340",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 324,
"avg_line_length": 53.795505617977526,
"alnum_prop": 0.6046618488658674,
"repo_name": "llambiel/collectd-vcenter",
"id": "f53cf3042f5f7d12c03247dcd555ff4544da8c5f",
"size": "24094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vcenter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24094"
}
],
"symlink_target": ""
}
|
import json
import time
from bson import json_util
from tests import base
from girder import events
from girder.constants import AccessType
from girder.exceptions import ValidationException
from girder.models.user import User
from girder.models.token import Token
from girder_jobs.constants import JobStatus, REST_CREATE_JOB_TOKEN_SCOPE
from girder_jobs.models.job import Job
def setUpModule():
base.enabledPlugins.append('jobs')
base.startServer()
def tearDownModule():
base.stopServer()
class JobsTestCase(base.TestCase):
def setUp(self):
super().setUp()
self.users = [User().createUser(
'usr' + str(n), 'passwd', 'tst', 'usr', 'u%d@girder.test' % n)
for n in range(3)]
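        # Girder grants site-admin rights to the first user created, so
        # self.users[0] acts as the admin user in the tests below.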
self.jobModel = Job()
def testJobs(self):
self.job = None
def schedule(event):
self.job = event.info
if self.job['handler'] == 'my_handler':
self.job['status'] = JobStatus.RUNNING
self.job = self.jobModel.save(self.job)
self.assertEqual(self.job['args'], ('hello', 'world'))
self.assertEqual(self.job['kwargs'], {'a': 'b'})
events.bind('jobs.schedule', 'test', schedule)
# Create a job
job = self.jobModel.createJob(
title='Job Title', type='my_type', args=('hello', 'world'),
kwargs={'a': 'b'}, user=self.users[1], handler='my_handler',
public=False)
self.assertEqual(self.job, None)
self.assertEqual(job['status'], JobStatus.INACTIVE)
# Schedule the job, make sure our handler was invoked
self.jobModel.scheduleJob(job)
self.assertEqual(self.job['_id'], job['_id'])
self.assertEqual(self.job['status'], JobStatus.RUNNING)
# Since the job is not public, user 2 should not have access
path = '/job/%s' % job['_id']
resp = self.request(path, user=self.users[2])
self.assertStatus(resp, 403)
resp = self.request(path, user=self.users[2], method='PUT')
self.assertStatus(resp, 403)
resp = self.request(path, user=self.users[2], method='DELETE')
self.assertStatus(resp, 403)
# If no user is specified, we should get a 401 error
resp = self.request(path, user=None)
self.assertStatus(resp, 401)
# Make sure user who created the job can see it
resp = self.request(path, user=self.users[1])
self.assertStatusOk(resp)
# We should be able to update the job as the user who created it
resp = self.request(path, method='PUT', user=self.users[1], params={
'log': 'My log message\n'
})
self.assertStatusOk(resp)
# We should be able to create a job token and use that to update it too
token = self.jobModel.createJobToken(job)
resp = self.request(path, method='PUT', params={
'log': 'append message',
'token': token['_id']
})
self.assertStatusOk(resp)
# We shouldn't get the log back in this case
self.assertNotIn('log', resp.json)
# Do a fetch on the job itself to get the log
resp = self.request(path, user=self.users[1])
self.assertStatusOk(resp)
self.assertEqual(
resp.json['log'], ['My log message\n', 'append message'])
# Test overwriting the log and updating status
resp = self.request(path, method='PUT', params={
'log': 'overwritten log',
'overwrite': 'true',
'status': JobStatus.SUCCESS,
'token': token['_id']
})
self.assertStatusOk(resp)
self.assertNotIn('log', resp.json)
self.assertEqual(resp.json['status'], JobStatus.SUCCESS)
job = self.jobModel.load(job['_id'], force=True, includeLog=True)
self.assertEqual(job['log'], ['overwritten log'])
# We should be able to delete the job as the user who created it
resp = self.request(path, user=self.users[1], method='DELETE')
self.assertStatusOk(resp)
job = self.jobModel.load(job['_id'], force=True)
self.assertIsNone(job)
def testLegacyLogBehavior(self):
# Force save a job with a string log to simulate a legacy job record
job = self.jobModel.createJob(
title='legacy', type='legacy', user=self.users[1], save=False)
job['log'] = 'legacy log'
job = self.jobModel.save(job, validate=False)
self.assertEqual(job['log'], 'legacy log')
# Load the record, we should now get the log as a list
job = self.jobModel.load(job['_id'], force=True, includeLog=True)
self.assertEqual(job['log'], ['legacy log'])
def testListJobs(self):
job = self.jobModel.createJob(title='A job', type='t', user=self.users[1], public=False)
anonJob = self.jobModel.createJob(title='Anon job', type='t')
# Ensure timestamp for public job is strictly higher (ms resolution)
time.sleep(0.1)
publicJob = self.jobModel.createJob(
title='Anon job', type='t', public=True)
# User 1 should be able to see their own jobs
resp = self.request('/job', user=self.users[1], params={
'userId': self.users[1]['_id']
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(job['_id']))
# User 2 should not see user 1's jobs in the list
resp = self.request('/job', user=self.users[2], params={
'userId': self.users[1]['_id']
})
self.assertEqual(resp.json, [])
# Omitting a userId should assume current user
resp = self.request('/job', user=self.users[1])
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(job['_id']))
# Explicitly passing "None" should show anonymous jobs
resp = self.request('/job', user=self.users[0], params={
'userId': 'none'
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 2)
self.assertEqual(resp.json[0]['_id'], str(publicJob['_id']))
self.assertEqual(resp.json[1]['_id'], str(anonJob['_id']))
# Non-admins should only see public anon jobs
resp = self.request('/job', params={'userId': 'none'})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['_id'], str(publicJob['_id']))
def testListAllJobs(self):
self.jobModel.createJob(title='user 0 job', type='t', user=self.users[0], public=False)
self.jobModel.createJob(title='user 1 job', type='t', user=self.users[1], public=False)
self.jobModel.createJob(title='user 1 job', type='t', user=self.users[1], public=True)
self.jobModel.createJob(title='user 2 job', type='t', user=self.users[2])
self.jobModel.createJob(title='anonymous job', type='t')
self.jobModel.createJob(title='anonymous public job', type='t2', public=True)
# User 0, as a site admin, should be able to see all jobs
resp = self.request('/job/all', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 6)
# get with filter
resp = self.request('/job/all', user=self.users[0], params={
'types': json.dumps(['t']),
'statuses': json.dumps([0])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 5)
# get with unmet filter conditions
resp = self.request('/job/all', user=self.users[0], params={
'types': json.dumps(['nonexisttype'])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 0)
# User 1, as non site admin, should encounter http 403 (Forbidden)
resp = self.request('/job/all', user=self.users[1])
self.assertStatus(resp, 403)
# Not authenticated user should encounter http 401 (unauthorized)
resp = self.request('/job/all')
self.assertStatus(resp, 401)
def testFiltering(self):
job = self.jobModel.createJob(title='A job', type='t', user=self.users[1], public=True)
job['_some_other_field'] = 'foo'
job = self.jobModel.save(job)
resp = self.request('/job/%s' % job['_id'])
self.assertStatusOk(resp)
self.assertTrue('created' in resp.json)
self.assertTrue('_some_other_field' not in resp.json)
self.assertTrue('kwargs' not in resp.json)
self.assertTrue('args' not in resp.json)
resp = self.request('/job/%s' % job['_id'], user=self.users[0])
self.assertTrue('kwargs' in resp.json)
self.assertTrue('args' in resp.json)
self.jobModel.exposeFields(level=AccessType.READ, fields={'_some_other_field'})
self.jobModel.hideFields(level=AccessType.READ, fields={'created'})
resp = self.request('/job/%s' % job['_id'])
self.assertStatusOk(resp)
self.assertEqual(resp.json['_some_other_field'], 'foo')
self.assertTrue('created' not in resp.json)
def testJobProgressAndNotifications(self):
job = self.jobModel.createJob(title='a job', type='t', user=self.users[1], public=True)
path = '/job/%s' % job['_id']
resp = self.request(path)
self.assertEqual(resp.json['progress'], None)
self.assertEqual(resp.json['timestamps'], [])
resp = self.request(path, method='PUT', user=self.users[1], params={
'progressTotal': 100,
'progressCurrent': 3,
'progressMessage': 'Started',
'notify': 'false',
'status': JobStatus.QUEUED
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['progress'], {
'total': 100,
'current': 3,
'message': 'Started',
'notificationId': None
})
# The status update should make it so we now have a timestamp
self.assertEqual(len(resp.json['timestamps']), 1)
self.assertEqual(resp.json['timestamps'][0]['status'], JobStatus.QUEUED)
self.assertIn('time', resp.json['timestamps'][0])
# If the status does not change on update, no timestamp should be added
resp = self.request(path, method='PUT', user=self.users[1], params={
'status': JobStatus.QUEUED
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['timestamps']), 1)
self.assertEqual(resp.json['timestamps'][0]['status'], JobStatus.QUEUED)
# We passed notify=false, so we should only have the job creation notification
resp = self.request(path='/notification/stream', method='GET',
user=self.users[1], isJson=False,
params={'timeout': 0})
messages = self.getSseMessages(resp)
self.assertEqual(len(messages), 1)
# Update progress with notify=true (the default)
resp = self.request(path, method='PUT', user=self.users[1], params={
'progressCurrent': 50,
'progressMessage': 'Something bad happened',
'status': JobStatus.ERROR
})
self.assertStatusOk(resp)
self.assertNotEqual(resp.json['progress']['notificationId'], None)
# We should now see three notifications (job created + job status + progress)
resp = self.request(path='/notification/stream', method='GET',
user=self.users[1], isJson=False,
params={'timeout': 0})
messages = self.getSseMessages(resp)
job = self.jobModel.load(job['_id'], force=True)
self.assertEqual(len(messages), 3)
creationNotify = messages[0]
progressNotify = messages[1]
statusNotify = messages[2]
self.assertEqual(creationNotify['type'], 'job_created')
self.assertEqual(creationNotify['data']['_id'], str(job['_id']))
self.assertEqual(statusNotify['type'], 'job_status')
self.assertEqual(statusNotify['data']['_id'], str(job['_id']))
self.assertEqual(int(statusNotify['data']['status']), JobStatus.ERROR)
self.assertNotIn('kwargs', statusNotify['data'])
self.assertNotIn('log', statusNotify['data'])
self.assertEqual(progressNotify['type'], 'progress')
self.assertEqual(progressNotify['data']['title'], job['title'])
self.assertEqual(progressNotify['data']['current'], float(50))
self.assertEqual(progressNotify['data']['state'], 'error')
self.assertEqual(progressNotify['_id'], str(job['progress']['notificationId']))
def testDotsInKwargs(self):
kwargs = {
'$key.with.dots': 'value',
'foo': [{
'moar.dots': True
}]
}
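        # Dotted and '$'-prefixed keys are not legal MongoDB field names, so
        # the job model must encode them on save and decode them on load.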
job = self.jobModel.createJob(title='dots', type='x', user=self.users[0], kwargs=kwargs)
# Make sure we can update a job and notification creation works
self.jobModel.updateJob(job, status=JobStatus.QUEUED, notify=True)
self.assertEqual(job['kwargs'], kwargs)
resp = self.request('/job/%s' % job['_id'], user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['kwargs'], kwargs)
job = self.jobModel.load(job['_id'], force=True)
self.assertEqual(job['kwargs'], kwargs)
job = self.jobModel.filter(job, self.users[0])
self.assertEqual(job['kwargs'], kwargs)
job = self.jobModel.filter(job, self.users[1])
self.assertFalse('kwargs' in job)
def testLocalJob(self):
job = self.jobModel.createLocalJob(
title='local', type='local', user=self.users[0], kwargs={
'hello': 'world'
}, module='plugin_tests.local_job_impl')
self.jobModel.scheduleJob(job)
job = self.jobModel.load(job['_id'], force=True, includeLog=True)
self.assertEqual(job['log'], ['job ran!'])
job = self.jobModel.createLocalJob(
title='local', type='local', user=self.users[0], kwargs={
'hello': 'world'
}, module='plugin_tests.local_job_impl', function='fail')
self.jobModel.scheduleJob(job)
job = self.jobModel.load(job['_id'], force=True, includeLog=True)
self.assertEqual(job['log'], ['job failed'])
def testValidateCustomStatus(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
def validateStatus(event):
if event.info == 1234:
event.preventDefault().addResponse(True)
def validTransitions(event):
if event.info['status'] == 1234:
event.preventDefault().addResponse([JobStatus.INACTIVE])
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=1234) # Should fail
with events.bound('jobs.status.validate', 'test', validateStatus), \
events.bound('jobs.status.validTransitions', 'test', validTransitions):
self.jobModel.updateJob(job, status=1234) # Should work
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=4321) # Should fail
def testValidateCustomStrStatus(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
def validateStatus(event):
states = ['a', 'b', 'c']
if event.info in states:
event.preventDefault().addResponse(True)
def validTransitions(event):
if event.info['status'] == 'a':
event.preventDefault().addResponse([JobStatus.INACTIVE])
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status='a')
with events.bound('jobs.status.validate', 'test', validateStatus), \
events.bound('jobs.status.validTransitions', 'test', validTransitions):
self.jobModel.updateJob(job, status='a')
self.assertEqual(job['status'], 'a')
with self.assertRaises(ValidationException), \
events.bound('jobs.status.validate', 'test', validateStatus):
self.jobModel.updateJob(job, status='foo')
def testUpdateOtherFields(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
job = self.jobModel.updateJob(job, otherFields={'other': 'fields'})
self.assertEqual(job['other'], 'fields')
def testCancelJob(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
# add to the log
job = self.jobModel.updateJob(job, log='entry 1\n')
# Reload without the log
job = self.jobModel.load(id=job['_id'], force=True)
self.assertEqual(len(job.get('log', [])), 0)
# Cancel
job = self.jobModel.cancelJob(job)
self.assertEqual(job['status'], JobStatus.CANCELED)
# Reloading should still have the log and be canceled
job = self.jobModel.load(id=job['_id'], force=True, includeLog=True)
self.assertEqual(job['status'], JobStatus.CANCELED)
self.assertEqual(len(job.get('log', [])), 1)
def testCancelJobEndpoint(self):
job = self.jobModel.createJob(title='test', type='x', user=self.users[0])
# Ensure requires write perms
jobCancelUrl = '/job/%s/cancel' % job['_id']
resp = self.request(jobCancelUrl, user=self.users[1], method='PUT')
self.assertStatus(resp, 403)
# Try again with the right user
jobCancelUrl = '/job/%s/cancel' % job['_id']
resp = self.request(jobCancelUrl, user=self.users[0], method='PUT')
self.assertStatusOk(resp)
self.assertEqual(resp.json['status'], JobStatus.CANCELED)
def testJobsTypesAndStatuses(self):
self.jobModel.createJob(title='user 0 job', type='t1', user=self.users[0], public=False)
self.jobModel.createJob(title='user 1 job', type='t2', user=self.users[1], public=False)
self.jobModel.createJob(title='user 1 job', type='t3', user=self.users[1], public=True)
self.jobModel.createJob(title='user 2 job', type='t4', user=self.users[2])
self.jobModel.createJob(title='anonymous job', type='t5')
self.jobModel.createJob(title='anonymous public job', type='t6', public=True)
# User 1, as non site admin, should encounter http 403 (Forbidden)
resp = self.request('/job/typeandstatus/all', user=self.users[1])
self.assertStatus(resp, 403)
# Admin user gets all types and statuses
resp = self.request('/job/typeandstatus/all', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['types']), 6)
self.assertEqual(len(resp.json['statuses']), 1)
# standard user gets types and statuses of its own jobs
resp = self.request('/job/typeandstatus', user=self.users[1])
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['types']), 2)
self.assertEqual(len(resp.json['statuses']), 1)
def testDefaultParentId(self):
job = self.jobModel.createJob(title='Job', type='Job', user=self.users[0])
# If not specified parentId should be None
self.assertEqual(job['parentId'], None)
def testIsParentIdCorrect(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(
title='Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
# During initialization parent job should be set correctly
self.assertEqual(childJob['parentId'], parentJob['_id'])
def testSetParentCorrectly(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(title='Child Job', type='Child Job', user=self.users[0])
self.jobModel.setParentJob(childJob, parentJob)
# After setParentJob method is called parent job should be set correctly
self.assertEqual(childJob['parentId'], parentJob['_id'])
def testParentCannotBeEqualToChild(self):
childJob = self.jobModel.createJob(title='Child Job', type='Child Job', user=self.users[0])
# Cannot set a job as it's own parent
with self.assertRaises(ValidationException):
self.jobModel.setParentJob(childJob, childJob)
def testParentIdCannotBeOverridden(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
anotherParentJob = self.jobModel.createJob(
title='Another Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(
title='Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
with self.assertRaises(ValidationException):
# If parent job is set, cannot be overridden
self.jobModel.setParentJob(childJob, anotherParentJob)
def testListChildJobs(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(
title='Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
self.jobModel.createJob(
title='Another Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
# Should return a list with 2 jobs
self.assertEqual(len(list(self.jobModel.listChildJobs(parentJob))), 2)
# Should return an empty list
self.assertEqual(len(list(self.jobModel.listChildJobs(childJob))), 0)
def testListChildJobsRest(self):
parentJob = self.jobModel.createJob(
title='Parent Job', type='Parent Job', user=self.users[0])
childJob = self.jobModel.createJob(
title='Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
self.jobModel.createJob(
title='Another Child Job', type='Child Job', user=self.users[0], parentJob=parentJob)
resp = self.request('/job', user=self.users[0],
params={'parentId': str(parentJob['_id'])})
resp2 = self.request('/job', user=self.users[0],
params={'parentId': str(childJob['_id'])})
self.assertStatusOk(resp)
self.assertStatusOk(resp2)
# Should return a list with 2 jobs
self.assertEqual(len(resp.json), 2)
# Should return an empty list
self.assertEqual(len(resp2.json), 0)
def testCreateJobRest(self):
resp = self.request('/job', method='POST',
user=self.users[0],
params={'title': 'job', 'type': 'job'})
# If user does not have the necessary token status is 403
self.assertStatus(resp, 403)
token = Token().createToken(scope=REST_CREATE_JOB_TOKEN_SCOPE)
resp2 = self.request(
'/job', method='POST', token=token, params={'title': 'job', 'type': 'job'})
# If user has the necessary token status is 200
self.assertStatusOk(resp2)
def testJobStateTransitions(self):
job = self.jobModel.createJob(
title='user 0 job', type='t1', user=self.users[0], public=False)
# We can't move straight to SUCCESS
with self.assertRaises(ValidationException):
job = self.jobModel.updateJob(job, status=JobStatus.SUCCESS)
self.jobModel.updateJob(job, status=JobStatus.QUEUED)
self.jobModel.updateJob(job, status=JobStatus.RUNNING)
self.jobModel.updateJob(job, status=JobStatus.ERROR)
# We shouldn't be able to move backwards
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=JobStatus.QUEUED)
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=JobStatus.RUNNING)
with self.assertRaises(ValidationException):
self.jobModel.updateJob(job, status=JobStatus.INACTIVE)
def testJobSaveEventModification(self):
def customSave(event):
kwargs = json_util.loads(event.info['kwargs'])
kwargs['key2'] = 'newvalue'
event.info['kwargs'] = json_util.dumps(kwargs)
job = self.jobModel.createJob(title='A job', type='t', user=self.users[1], public=True)
job['kwargs'] = {'key1': 'value1', 'key2': 'value2'}
with events.bound('model.job.save', 'test', customSave):
job = self.jobModel.save(job)
self.assertEqual(job['kwargs']['key2'], 'newvalue')
|
{
"content_hash": "297f8a55c5282b26d7da682d5bf4da06",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 99,
"avg_line_length": 42.04054054054054,
"alnum_prop": 0.6127852780456445,
"repo_name": "Kitware/girder",
"id": "853dbf0104fd039216219cc602ee956392bafbe8",
"size": "24912",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "plugins/jobs/plugin_tests/jobs_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "26244"
},
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "1528"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "JavaScript",
"bytes": "1176017"
},
{
"name": "Jinja",
"bytes": "322"
},
{
"name": "Mako",
"bytes": "7571"
},
{
"name": "Pug",
"bytes": "137980"
},
{
"name": "Python",
"bytes": "2018697"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Shell",
"bytes": "3354"
},
{
"name": "Stylus",
"bytes": "48706"
}
],
"symlink_target": ""
}
|
"""
===================================
The :mod:`array_split.tests` Module
===================================
Module for running all :mod:`array_split` unit-tests, including :mod:`unittest` test-cases
and :mod:`doctest` tests for module doc-strings and sphinx (RST) documentation.
Execute as::
python -m array_split.tests
.. currentmodule:: array_split.tests
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
MultiPlatformAnd23Checker - Customised doctest output checking.
DocTestTestSuite - Loads all module and file doctests as single :mod:`unittest` suite.
load_tests - Returns suite of :mod:`doctest` and :mod:`unittest` tests.
"""
# pylint: disable=unused-import
from __future__ import absolute_import
import sys as _sys
import re as _re
import unittest as _unittest
import doctest as _doctest
import os.path
import array_split as _array_split
from array_split import split as _split
from .license import license as _license, copyright as _copyright, version as _version
from .split_test import SplitTest # noqa: F401,F403
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
_doctest_OutputChecker = _doctest.OutputChecker
class MultiPlatformAnd23Checker(_doctest_OutputChecker):
"""
    Overrides the :meth:`doctest.OutputChecker.check_output` method
    to remove the :samp:`'L'` suffix from integer literals.
    """
def check_output(self, want, got, optionflags):
"""
        For python 2, replaces "124L" with "124". For python 2 and 3,
        replaces :samp:`", dtype=int64)"` with :samp:`")"`.
See :meth:`doctest.OutputChecker.check_output`.
"""
if _sys.version_info[0] <= 2:
got = _re.sub("([0-9]+)L", "\\1", got)
got = _re.sub(", dtype=int64\\)", ")", got)
        return _doctest_OutputChecker.check_output(self, want, got, optionflags)
_doctest.OutputChecker = MultiPlatformAnd23Checker
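# Illustrative sketch (not part of the original module): the normalisation
# performed by :obj:`MultiPlatformAnd23Checker` can be exercised directly.
# The strings below are made-up sample doctest output.
def _demo_check_output():
    checker = MultiPlatformAnd23Checker()
    # The ", dtype=int64" suffix in the *got* string is stripped before
    # comparison, so this returns True on both python 2 and python 3.
    return checker.check_output(
        "array([4, 4])\n",
        "array([4, 4], dtype=int64)\n",
        _doctest.NORMALIZE_WHITESPACE
    )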
class DocTestTestSuite(_unittest.TestSuite):
"""
Adds :mod:`array_split` doctests as `unittest.TestCase` objects.
"""
def __init__(self):
"""
Uses :meth:`unittest.TestSuite.addTests` to add :obj:`doctest.DocFileSuite`
and :obj:`doctest.DocTestSuite` tests.
"""
readme_file_name = \
os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", "README.rst")
)
examples_rst_file_name = \
os.path.realpath(
os.path.join(
os.path.dirname(__file__),
"..",
"docs",
"source",
"examples",
"index.rst"
)
)
suite = _unittest.TestSuite()
if os.path.exists(readme_file_name):
suite.addTests(
_doctest.DocFileSuite(
readme_file_name,
module_relative=False,
optionflags=_doctest.NORMALIZE_WHITESPACE
)
)
if os.path.exists(examples_rst_file_name):
suite.addTests(
_doctest.DocFileSuite(
examples_rst_file_name,
module_relative=False,
optionflags=_doctest.NORMALIZE_WHITESPACE
)
)
suite.addTests(
_doctest.DocTestSuite(
_array_split,
optionflags=_doctest.NORMALIZE_WHITESPACE
)
)
suite.addTests(
_doctest.DocTestSuite(
_split,
optionflags=_doctest.NORMALIZE_WHITESPACE
)
)
_unittest.TestSuite.__init__(self, suite)
def load_tests(loader, tests, pattern): # pylint: disable=unused-argument
"""
Loads :mod:`array_split.split_test` tests and :obj:`DocTestTestSuite`
tests.
"""
suite = loader.loadTestsFromNames(["array_split.split_test", ])
suite.addTests(DocTestTestSuite())
return suite
__all__ = [s for s in dir() if not s.startswith('_')]
if __name__ == "__main__":
_unittest.main()
|
{
"content_hash": "b85d483d6ee5616f1dbd60c236a0790b",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 90,
"avg_line_length": 28.848275862068967,
"alnum_prop": 0.5646665072914177,
"repo_name": "array-split/array_split",
"id": "db8c8202ac93ff0c14b820f734baea578f6e5382",
"size": "4183",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "array_split/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148484"
}
],
"symlink_target": ""
}
|
class ConfigmanException(Exception):
pass
class ConfigFileMissingError(IOError, ConfigmanException):
pass
class ConfigFileOptionNameMissingError(ConfigmanException):
pass
class NotAnOptionError(ConfigmanException):
pass
class OptionError(ConfigmanException):
pass
class CannotConvertError(ConfigmanException):
pass
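# Illustrative sketch (not part of the original module): because every
# exception above derives from ConfigmanException, callers can trap any
# configman error with a single except clause (load_config is hypothetical):
#
#     try:
#         load_config()
#     except ConfigmanException:
#         handle_configuration_error()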
|
{
"content_hash": "6940b8dc5556ac2fe4c7980bf6887397",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 59,
"avg_line_length": 15.863636363636363,
"alnum_prop": 0.7936962750716332,
"repo_name": "peterbe/configman",
"id": "f7f6be4a73d0d4303360c24d28b5ebeff728367f",
"size": "2063",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "configman/config_exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "350364"
}
],
"symlink_target": ""
}
|
import six
import numbers
import logging
import numpy as np
from sklearn.metrics import log_loss
from sklearn.utils import check_consistent_length, check_array
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
import mxnet as mx
from mxnet.metric import EvalMetric
__all__ = ['Activation', 'Dense', 'SoftmaxOutput', 'Variable',
'BatchNormalization', 'Dropout', 'Sequential', 'Adam']
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
class LogLoss(object):
def __init__(self):
self.lb_ = None
@property
def __name__(self):
return 'log_loss'
def __call__(self, y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
if self.lb_ is None:
self.lb_ = LabelBinarizer()
T = self.lb_.fit_transform(y_true)
else:
T = self.lb_.transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
Y = np.clip(y_pred, eps, 1 - eps)
if not isinstance(Y, np.ndarray):
raise ValueError('y_pred should be an array of floats.')
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError('y_true and y_pred have different number of classes '
'%d, %d' % (T.shape[1], Y.shape[1]))
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
LOSS_MAP = {'categorical_crossentropy': mx.metric.np(LogLoss())}
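# Illustrative sketch (not part of the original module): LogLoss is a
# scikit-learn style metric callable (wrapped above via mx.metric.np so
# mxnet can use it as an EvalMetric). The probabilities below are made up.
def _example_log_loss():
    y_true = np.array([0, 1, 1, 0])
    y_pred = np.array([[0.9, 0.1],
                       [0.2, 0.8],
                       [0.3, 0.7],
                       [0.6, 0.4]])
    # Mean negative log-likelihood of the true labels.
    return LogLoss()(y_true, y_pred)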
class MXNetSymbol(object):
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(__name__)
self.args = args
self.kwargs = kwargs
@property
def symbol(self):
pass
def __call__(self, prev_symbol=None):
if prev_symbol:
return self.symbol(prev_symbol, *self.args, **self.kwargs)
return self.symbol(*self.args, **self.kwargs)
class Activation(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.Activation
def __call__(self, prev_symbol=None):
""" Overwrite to allow for passing of the name of the activation
directly. In addition, alllow for detection of output layers,
i.e. SoftmaxOutput
"""
assert len(self.args) == 1 # this should be the name of the activaiton
# pop off the activation
activation = self.args[0]
self.args = self.args[1:]
if not isinstance(activation, six.string_types):
raise ValueError('activation type must be a string')
if activation == 'softmax':
self.logger.debug('Detected SoftmaxOutput in activation.')
return mx.symbol.SoftmaxOutput(
prev_symbol, name='softmax', *self.args, **self.kwargs)
elif prev_symbol:
return self.symbol(
prev_symbol, *self.args, act_type=activation, **self.kwargs)
return self.symbol(*self.args, act_type=activation, **self.kwargs)
class LeakyReLU(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.LeakyReLU
@property
def act_type(self):
pass
def __call__(self, prev_symbol=None):
if prev_symbol:
return self.symbol(
prev_symbol, *self.args, act_type=self.act_type, **self.kwargs)
return self.symbol(*self.args, **self.kwargs)
class PReLU(LeakyReLU):
@property
def act_type(self):
return 'prelu'
class Dense(MXNetSymbol):
""" We are going to use the Keras naming convention. We need a base
layer class eventually.
"""
@property
def symbol(self):
return mx.symbol.FullyConnected
def __call__(self, prev_symbol=None):
""" Overwrite to allow for passing num_hidden directly. """
assert len(self.args) == 1 # this should be the number of hidden units
# pop off the activation
num_hidden = self.args[0]
self.args = self.args[1:]
        if not isinstance(num_hidden, numbers.Integral) or num_hidden <= 0:
            raise ValueError('number of hidden units must be a '
                             'positive integer.')
if prev_symbol:
# HACK: input_shape is used in keras and not mxnet. Lets pop
# it off for now and figure out a better inference later.
if 'input_shape' in self.kwargs:
del self.kwargs['input_shape']
return self.symbol(
prev_symbol, *self.args, num_hidden=num_hidden, **self.kwargs)
return self.symbol(*self.args, num_hidden=num_hidden, **self.kwargs)
class SoftmaxOutput(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.SoftmaxOutput
class Variable(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.Variable
class BatchNormalization(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.BatchNorm
class Dropout(MXNetSymbol):
@property
def symbol(self):
return mx.symbol.Dropout
class Sequential(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
self.prev_symbol = Variable('data')()
@property
def model(self):
return self._model
@model.setter
def model(self, value):
self._model = value
def compile(self, optimizer, loss, class_mode='categorical'):
""" for mxnet this is not necessary, but we have it here for
convenience.
"""
try:
self.loss = LOSS_MAP[loss]
except KeyError:
self.logger.debug('Loss function not found.')
self.loss = 'acc'
self.optimizer = optimizer
def visualize(self):
return mx.viz.plot_network(self.prev_symbol)
def add(self, symbol):
self.prev_symbol = symbol(self.prev_symbol)
def fit(self, X, y, nb_epoch=10, learning_rate=0.01, batch_size=128, validation_split=0.15):
self.model = mx.model.FeedForward(self.prev_symbol,
num_epoch=nb_epoch,
optimizer=self.optimizer,
numpy_batch_size=batch_size,
learning_rate=learning_rate)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=validation_split)
self.model.fit(X_train,
y_train,
eval_metric=self.loss,
eval_data=[X_test, y_test])
def predict(self, X):
return self.model.predict(X)
# direct imports
Adam = mx.optimizer.Adam
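# Illustrative sketch (not part of the original module): a minimal
# Keras-style model built from the wrappers above. The layer sizes,
# epoch count and random training data are made up for demonstration.
def _example_sequential():
    model = Sequential()
    model.add(Dense(64, input_shape=(20,)))
    model.add(Activation('relu'))
    model.add(Dense(2))
    model.add(Activation('softmax'))  # detected and mapped to SoftmaxOutput
    model.compile(optimizer=Adam(), loss='categorical_crossentropy')
    X = np.random.rand(256, 20).astype(np.float32)
    y = np.random.randint(0, 2, size=256)
    model.fit(X, y, nb_epoch=1)
    return model.predict(X)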
|
{
"content_hash": "0fbf260be0584a22634ba96ab1ea5be6",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 96,
"avg_line_length": 29.808333333333334,
"alnum_prop": 0.5835896002236511,
"repo_name": "joshloyal/Nettie",
"id": "2ff3580a9bff9139a670356e08a17d2a5a1021fb",
"size": "7154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/mxnet_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7937"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, get_object_or_404
from .models import Book
def index(request):
recommended_books = Book.objects.all()[:5]
context = {'recommended_books': recommended_books}
return render(request, 'index.html', context)
def detail(request, book_id):
book = get_object_or_404(Book, pk=book_id)
return render(request, 'detail.html', {'book': book})
def search_books(request):
    keywords = request.POST.get('drname', '').lower()
results = {}
results['drname'] = keywords
results['book'] = []
for book in Book.objects.all():
if keywords in book.title.lower():
results['book'].append(book)
    if len(results['book']) == 0:
        return render(request, 'search.html', {
            'not_found_message': 'The book you searched for was not found, but here are some recommended books',
            'book': Book.objects.all(),
        })
    return render(request, 'search.html', results)
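# Illustrative sketch (not part of the original views): the linear scan in
# search_books could instead be expressed with Django's ORM, which performs
# the case-insensitive match in the database:
#
#     matches = Book.objects.filter(title__icontains=keywords)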
|
{
"content_hash": "c2785f8cc346450bae78bd214e9e6d30",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 172,
"avg_line_length": 37.16,
"alnum_prop": 0.6620021528525296,
"repo_name": "tonyshenyy/equal-read-interface",
"id": "8320ccc5d909e25a2e01b837876f4cae565d30d0",
"size": "929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "equal_read/books/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "481"
},
{
"name": "HTML",
"bytes": "7567"
},
{
"name": "Python",
"bytes": "8427"
}
],
"symlink_target": ""
}
|
import argparse
import copy
import logging
import numpy as np # noqa
from caffe2.proto import caffe2_pb2, caffe2_legacy_pb2
from caffe.proto import caffe_pb2
from caffe2.python import core, utils, workspace
from google.protobuf import text_format
logging.basicConfig()
log = logging.getLogger("caffe_translator")
log.setLevel(logging.INFO)
def _StateMeetsRule(state, rule):
"""A function that reproduces Caffe's StateMeetsRule functionality."""
if rule.HasField('phase') and rule.phase != state.phase:
return False
if rule.HasField('min_level') and state.level < rule.min_level:
return False
if rule.HasField('max_level') and state.level > rule.max_level:
return False
curr_stages = set(list(state.stage))
    # All stages listed in rule.stage must be present, otherwise it's not a match.
if len(rule.stage) and any([s not in curr_stages for s in rule.stage]):
return False
    # None of the stages listed in rule.not_stage may be present, otherwise it's not a match.
if len(rule.not_stage) and any([s in curr_stages for s in rule.not_stage]):
return False
    # If no mismatch was found, the rule matches.
return True
def _ShouldInclude(net_state, layer):
"""A function that reproduces Caffe's inclusion and exclusion rule."""
ret = (len(layer.include) == 0)
# check exclude rules: if any exclusion is met, we shouldn't include.
ret &= not any([_StateMeetsRule(net_state, rule) for rule in layer.exclude])
if len(layer.include):
# check include rules: if any inclusion is met, we should include.
ret |= any([_StateMeetsRule(net_state, rule) for rule in layer.include])
return ret
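# Illustrative sketch (not part of the original translator): a layer whose
# prototxt contains
#     include { phase: TEST }
# is kept only when the net state passed to TranslateModel has phase TEST,
# e.g.:
#
#     state = caffe_pb2.NetState()
#     state.phase = caffe_pb2.TEST
#     net, params = TranslateModel(caffe_net, pretrained_net, net_state=state)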
class TranslatorRegistry(object):
registry_ = {}
@classmethod
def Register(cls, op_name):
"""A decorator for registering gradient mappings."""
def Wrapper(func):
cls.registry_[op_name] = func
return func
return Wrapper
@classmethod
def TranslateLayer(cls, layer, pretrained_blobs, is_test):
try:
caffe_ops, params = cls.registry_[layer.type](
layer, pretrained_blobs, is_test)
except KeyError:
raise KeyError('No translator registered for layer: %s yet.' %
str(layer))
if caffe_ops is None:
caffe_ops = []
if type(caffe_ops) is not list:
caffe_ops = [caffe_ops]
return caffe_ops, params
@classmethod
def TranslateModel(
cls,
caffe_net,
pretrained_net,
is_test=False,
net_state=None,
):
net_state = caffe_pb2.NetState() if net_state is None else net_state
net = caffe2_pb2.NetDef()
net.name = caffe_net.name
net_params = caffe2_pb2.TensorProtos()
if len(caffe_net.layers) > 0:
raise ValueError(
'I think something is wrong. This translation script '
'only accepts new style layers that are stored in the '
'layer field.'
)
for layer in caffe_net.layer:
if not _ShouldInclude(net_state, layer):
log.info('Current net state does not need layer {}'
.format(layer.name))
continue
log.info('Translate layer {}'.format(layer.name))
# Get pretrained one
            pretrained_layers = (
                [l for l in pretrained_net.layer if l.name == layer.name] +
                [l for l in pretrained_net.layers if l.name == layer.name]
            )
if len(pretrained_layers) > 1:
                raise ValueError(
                    'Found more than one pretrained layer named %s.' % layer.name)
elif len(pretrained_layers) == 1:
pretrained_blobs = [
utils.CaffeBlobToNumpyArray(blob)
for blob in pretrained_layers[0].blobs
]
else:
# No pretrained layer for the given layer name. We'll just pass
# no parameter blobs.
# print 'No pretrained layer for layer', layer.name
pretrained_blobs = []
operators, params = cls.TranslateLayer(
layer, pretrained_blobs, is_test)
net.op.extend(operators)
net_params.protos.extend(params)
return net, net_params
def TranslateModel(*args, **kwargs):
return TranslatorRegistry.TranslateModel(*args, **kwargs)
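# Illustrative sketch (not part of the original translator): support for an
# extra layer type can be added by registering a translator function. The
# "Identity" layer type below is hypothetical; Caffe2's Copy op is real.
@TranslatorRegistry.Register("Identity")
def _TranslateIdentityExample(layer, pretrained_blobs, is_test):
    return BaseTranslate(layer, "Copy"), []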
def ConvertTensorProtosToInitNet(net_params, input_name):
"""Takes the net_params returned from TranslateModel, and wrap it as an
init net that contain GivenTensorFill.
This is a very simple feature that only works with float tensors, and is
only intended to be used in an environment where you want a single
initialization file - for more complex cases, use a db to store the
parameters.
"""
init_net = caffe2_pb2.NetDef()
for tensor in net_params.protos:
if len(tensor.float_data) == 0:
raise RuntimeError(
"Only float tensors are supported in this util.")
op = core.CreateOperator(
"GivenTensorFill", [], [tensor.name],
arg=[
utils.MakeArgument("shape", list(tensor.dims)),
utils.MakeArgument("values", tensor.float_data)])
init_net.op.extend([op])
init_net.op.extend([core.CreateOperator("ConstantFill", [], [input_name], shape=[1])])
return init_net
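# Illustrative sketch (not part of the original translator): the typical
# flow pairing TranslateModel with ConvertTensorProtosToInitNet. The file
# name below is hypothetical; see also the __main__ block at the bottom.
def _example_write_init_net(caffe_net, pretrained_net):
    net, net_params = TranslateModel(caffe_net, pretrained_net, is_test=True)
    init_net = ConvertTensorProtosToInitNet(net_params, net.op[0].input[0])
    with open('init_net.pb', 'wb') as f:
        f.write(init_net.SerializeToString())
    return net, init_net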
def BaseTranslate(layer, caffe2_type):
"""A simple translate interface that maps the layer input and output."""
caffe2_op = caffe2_pb2.OperatorDef()
caffe2_op.type = caffe2_type
caffe2_op.input.extend(layer.bottom)
caffe2_op.output.extend(layer.top)
return caffe2_op
def AddArgument(op, key, value):
"""Makes an argument based on the value type."""
op.arg.extend([utils.MakeArgument(key, value)])
################################################################################
# Common translators for layers.
################################################################################
@TranslatorRegistry.Register("Input")
def TranslateInput(layer, pretrained_blobs, is_test):
return [], []
@TranslatorRegistry.Register("VideoData")
def TranslateVideoData(layer, pretrained_blobs, is_test):
return [], []
@TranslatorRegistry.Register("Data")
def TranslateData(layer, pretrained_blobs, is_test):
return [], []
# A function used in convolution, pooling and deconvolution to deal with
# conv pool specific parameters.
def _TranslateStridePadKernelHelper(param, caffe_op):
try:
if (len(param.stride) > 1 or len(param.kernel_size) > 1 or
len(param.pad) > 1):
raise NotImplementedError(
"Translator currently does not support non-conventional "
"pad/kernel/stride settings."
)
stride = param.stride[0] if len(param.stride) else 1
pad = param.pad[0] if len(param.pad) else 0
kernel = param.kernel_size[0] if len(param.kernel_size) else 0
except TypeError:
# This catches the case of a PoolingParameter, in which case we are
# having non-repeating pad, stride and kernel.
stride = param.stride
pad = param.pad
kernel = param.kernel_size
# Get stride
if param.HasField("stride_h") or param.HasField("stride_w"):
AddArgument(caffe_op, "stride_h", param.stride_h)
AddArgument(caffe_op, "stride_w", param.stride_w)
else:
AddArgument(caffe_op, "stride", stride)
# Get pad
if param.HasField("pad_h") or param.HasField("pad_w"):
if param.pad_h == param.pad_w:
AddArgument(caffe_op, "pad", param.pad_h)
else:
AddArgument(caffe_op, "pad_t", param.pad_h)
AddArgument(caffe_op, "pad_b", param.pad_h)
AddArgument(caffe_op, "pad_l", param.pad_w)
AddArgument(caffe_op, "pad_r", param.pad_w)
else:
AddArgument(caffe_op, "pad", pad)
# Get kernel
if param.HasField("kernel_h") or param.HasField("kernel_w"):
AddArgument(caffe_op, "kernel_h", param.kernel_h)
AddArgument(caffe_op, "kernel_w", param.kernel_w)
else:
AddArgument(caffe_op, "kernel", kernel)
@TranslatorRegistry.Register("Convolution3D")
def TranslateConvNd(layer, pretrained_blobs, is_test):
param = layer.convolution3d_param
caffe_op = BaseTranslate(layer, "Conv")
output = caffe_op.output[0]
caffe_op.input.append(output + '_w')
AddArgument(
caffe_op,
"kernels",
[param.kernel_depth, param.kernel_size, param.kernel_size])
AddArgument(
caffe_op,
"strides",
[param.temporal_stride, param.stride, param.stride])
temporal_pad = 0
spatial_pad = 0
if hasattr(param, 'temporal_pad'):
temporal_pad = param.temporal_pad
if hasattr(param, 'pad'):
spatial_pad = param.pad
AddArgument(caffe_op, "pads", [temporal_pad, spatial_pad, spatial_pad] * 2)
# weight
params = [
utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_w')]
# bias
if len(pretrained_blobs) == 2:
caffe_op.input.append(output + '_b')
params.append(
utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b'))
return caffe_op, params
@TranslatorRegistry.Register("Convolution")
def TranslateConv(layer, pretrained_blobs, is_test):
param = layer.convolution_param
caffe_op = BaseTranslate(layer, "Conv")
output = caffe_op.output[0]
caffe_op.input.append(output + '_w')
_TranslateStridePadKernelHelper(param, caffe_op)
# weight
params = [
utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_w')]
# bias
if len(pretrained_blobs) == 2:
caffe_op.input.append(output + '_b')
params.append(
utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b'))
# Group convolution option
if param.group != 1:
AddArgument(caffe_op, "group", param.group)
    # Get dilation - not tested. If you have a model that exercises this
    # path and it checks out, please contribute a test.
if len(param.dilation) > 0:
if len(param.dilation) == 1:
AddArgument(caffe_op, "dilation", param.dilation[0])
elif len(param.dilation) == 2:
AddArgument(caffe_op, "dilation_h", param.dilation[0])
AddArgument(caffe_op, "dilation_w", param.dilation[1])
return caffe_op, params
@TranslatorRegistry.Register("Deconvolution")
def TranslateDeconv(layer, pretrained_blobs, is_test):
param = layer.convolution_param
if param.group > 1:
raise NotImplementedError(
"Translator currently does not support group deconvolution."
)
caffe_op = BaseTranslate(layer, "ConvTranspose")
output = caffe_op.output[0]
_TranslateStridePadKernelHelper(param, caffe_op)
caffe_op.input.extend([output + '_w', output + '_b'])
AddArgument(caffe_op, "order", "NCHW")
weight = utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_w')
bias = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b'
)
return caffe_op, [weight, bias]
@TranslatorRegistry.Register("ReLU")
def TranslateRelu(layer, pretrained_blobs, is_test):
return BaseTranslate(layer, "Relu"), []
@TranslatorRegistry.Register("Pooling")
def TranslatePool(layer, pretrained_blobs, is_test):
param = layer.pooling_param
    if param.pool == caffe_pb2.PoolingParameter.MAX:
        caffe_op = BaseTranslate(layer, "MaxPool")
    elif param.pool == caffe_pb2.PoolingParameter.AVE:
        caffe_op = BaseTranslate(layer, "AveragePool")
    else:
        raise NotImplementedError("Unsupported pooling method.")
_TranslateStridePadKernelHelper(param, caffe_op)
AddArgument(caffe_op, "order", "NCHW")
try:
# In the Facebook port of Caffe, a torch_pooling field was added to
# map the pooling computation of Torch. Essentially, it uses
# floor((height + 2 * padding - kernel) / stride) + 1
# instead of
# ceil((height + 2 * padding - kernel) / stride) + 1
# which is Caffe's version.
# Torch pooling is actually the same as Caffe2 pooling, so we don't
# need to do anything.
is_torch_pooling = param.torch_pooling
except AttributeError:
is_torch_pooling = False
if not is_torch_pooling:
AddArgument(caffe_op, "legacy_pad",
caffe2_legacy_pb2.CAFFE_LEGACY_POOLING)
if param.global_pooling:
AddArgument(caffe_op, "global_pooling", 1)
return caffe_op, []
@TranslatorRegistry.Register("Pooling3D")
def TranslatePool3D(layer, pretrained_blobs, is_test):
param = layer.pooling3d_param
    if param.pool == caffe_pb2.Pooling3DParameter.MAX:
        caffe_op = BaseTranslate(layer, "MaxPool")
    elif param.pool == caffe_pb2.Pooling3DParameter.AVE:
        caffe_op = BaseTranslate(layer, "AveragePool")
    else:
        raise NotImplementedError("Unsupported pooling method.")
AddArgument(caffe_op, "order", "NCHW")
AddArgument(
caffe_op,
"kernels",
[param.kernel_depth, param.kernel_size, param.kernel_size])
AddArgument(
caffe_op,
"strides",
[param.temporal_stride, param.stride, param.stride])
temporal_pad = 0
spatial_pad = 0
if hasattr(param, 'temporal_pad'):
temporal_pad = param.temporal_pad
if hasattr(param, 'pad'):
spatial_pad = param.pad
AddArgument(caffe_op, "pads", [temporal_pad, spatial_pad, spatial_pad] * 2)
return caffe_op, []
@TranslatorRegistry.Register("LRN")
def TranslateLRN(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "LRN")
caffe_op.output.extend(['_' + caffe_op.output[0] + '_scale'])
param = layer.lrn_param
if param.norm_region != caffe_pb2.LRNParameter.ACROSS_CHANNELS:
raise ValueError(
"Does not support norm region other than across channels.")
AddArgument(caffe_op, "size", int(param.local_size))
AddArgument(caffe_op, "alpha", float(param.alpha))
AddArgument(caffe_op, "beta", float(param.beta))
AddArgument(caffe_op, "bias", float(param.k))
AddArgument(caffe_op, "order", "NCHW")
return caffe_op, []
@TranslatorRegistry.Register("InnerProduct")
def TranslateInnerProduct(layer, pretrained_blobs, is_test):
param = layer.inner_product_param
try:
if param.axis != 1 or param.transpose:
raise ValueError(
"We don't have testing case for non-default axis and transpose "
"cases yet so we are disabling it for now. If you have a model "
"with this, please do send us your model for us to update this "
"support, and you are more than welcome to send a PR for this.")
except AttributeError:
# We might be using an historic Caffe protobuf that does not have axis
# and transpose arguments, so we will silently pass.
pass
caffe_op = BaseTranslate(layer, "FC")
output = caffe_op.output[0]
caffe_op.input.extend([output + '_w', output + '_b'])
# To provide the old-style 4-dimensional blob (1, 1, dim_output, dim_input)
# case, we always explicitly reshape the pretrained blob.
if pretrained_blobs[0].ndim not in [2, 4]:
raise ValueError("Unexpected weight ndim.")
if (pretrained_blobs[0].ndim == 4 and
list(pretrained_blobs[0].shape[:2]) != [1, 1]):
raise ValueError(
"If pretrained blob has 4 dims (old-style Caffe), the first two "
"should be of value 1, but I got " + str(pretrained_blobs[0].shape))
weight = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[0].reshape(-1, pretrained_blobs[0].shape[-1]),
output + '_w'
)
bias = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b'
)
return caffe_op, [weight, bias]
@TranslatorRegistry.Register("Dropout")
def TranslateDropout(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Dropout")
caffe_op.output.extend(['_' + caffe_op.output[0] + '_mask'])
param = layer.dropout_param
AddArgument(caffe_op, "ratio", param.dropout_ratio)
    if is_test:
        AddArgument(caffe_op, "is_test", 1)
return caffe_op, []
@TranslatorRegistry.Register("Softmax")
def TranslateSoftmax(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Softmax")
return caffe_op, []
@TranslatorRegistry.Register("SoftmaxWithLoss")
def TranslateSoftmaxWithLoss(layer, pretrained_blobs, is_test):
softmax_op = core.CreateOperator(
"Softmax", [layer.bottom[0]],
layer.bottom[0] + "_translator_autogen_softmax")
xent_op = core.CreateOperator(
"LabelCrossEntropy",
[softmax_op.output[0], layer.bottom[1]],
layer.bottom[0] + "_translator_autogen_xent")
loss_op = core.CreateOperator(
"AveragedLoss",
xent_op.output[0],
layer.top[0])
return [softmax_op, xent_op, loss_op], []
@TranslatorRegistry.Register("Accuracy")
def TranslateAccuracy(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Accuracy")
if layer.accuracy_param.top_k != 1:
AddArgument(caffe_op, "top_k", layer.accuracy_param.top_k)
return caffe_op, []
@TranslatorRegistry.Register("Concat")
def TranslateConcat(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Concat")
caffe_op.output.extend(['_' + caffe_op.output[0] + '_dims'])
AddArgument(caffe_op, "order", "NCHW")
return caffe_op, []
@TranslatorRegistry.Register("TanH")
def TranslateTanH(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Tanh")
return caffe_op, []
@TranslatorRegistry.Register("InstanceNorm")
def TranslateInstanceNorm(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "InstanceNorm")
output = caffe_op.output[0]
weight = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[0].flatten(), output + '_w')
bias = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b')
caffe_op.input.extend([output + '_w', output + '_b'])
AddArgument(caffe_op, "order", "NCHW")
return caffe_op, [weight, bias]
@TranslatorRegistry.Register("BatchNorm")
def TranslateBatchNorm(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "SpatialBN")
output = caffe_op.output[0]
param = layer.batch_norm_param
AddArgument(caffe_op, "is_test", is_test)
AddArgument(caffe_op, "epsilon", param.eps)
AddArgument(caffe_op, "order", "NCHW")
caffe_op.input.extend(
[output + "_scale",
output + "_bias",
output + "_mean",
output + "_var"])
if not is_test:
caffe_op.output.extend(
[output + "_mean",
output + "_var",
output + "_saved_mean",
output + "_saved_var"])
n_channels = pretrained_blobs[0].shape[0]
if pretrained_blobs[2][0] != 0:
mean = utils.NumpyArrayToCaffe2Tensor(
(1. / pretrained_blobs[2][0]) * pretrained_blobs[0],
output + '_mean')
var = utils.NumpyArrayToCaffe2Tensor(
(1. / pretrained_blobs[2][0]) * pretrained_blobs[1],
output + '_var')
    else:
        raise RuntimeError("The pretrained BatchNorm scale factor is zero.")
pretrained_blobs[2][0] = 1
pretrained_blobs[2] = np.tile(pretrained_blobs[2], (n_channels, ))
scale = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[2],
output + '_scale')
bias = utils.NumpyArrayToCaffe2Tensor(
np.zeros_like(pretrained_blobs[2]),
output + '_bias')
return caffe_op, [scale, bias, mean, var]
@TranslatorRegistry.Register("Eltwise")
def TranslateElementWise(layer, pretrained_blobs, is_test):
param = layer.eltwise_param
# TODO(jiayq): if we have a protobuf that uses this, lift this constraint
# and verify that we can correctly translate.
if len(param.coeff) or param.operation != 1:
raise RuntimeError("This eltwise layer is not yet supported.")
caffe_op = BaseTranslate(layer, "Sum")
return caffe_op, []
@TranslatorRegistry.Register("Scale")
def TranslateScale(layer, pretrained_blobs, is_test):
mul_op = BaseTranslate(layer, "Mul")
scale_param = layer.scale_param
AddArgument(mul_op, "axis", scale_param.axis)
AddArgument(mul_op, "broadcast", True)
if len(mul_op.input) == 1:
# the scale parameter is in pretrained blobs
if scale_param.num_axes != 1:
raise RuntimeError("This path has not been verified yet.")
output = mul_op.output[0]
mul_op_param = output + '_w'
mul_op.input.append(mul_op_param)
weights = []
weights.append(utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[0].flatten(), mul_op_param))
add_op = None
if len(pretrained_blobs) == 1:
# No bias-term in Scale layer
pass
elif len(pretrained_blobs) == 2:
# Caffe Scale layer supports a bias term such that it computes
# (scale_param * X + bias), whereas Caffe2 Mul op doesn't.
# Include a separate Add op for the bias followed by Mul.
add_op = copy.deepcopy(mul_op)
add_op.type = "Add"
add_op_param = output + '_b'
internal_blob = output + "_internal"
del mul_op.output[:]
mul_op.output.append(internal_blob)
del add_op.input[:]
add_op.input.append(internal_blob)
add_op.input.append(add_op_param)
weights.append(utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), add_op_param))
else:
raise RuntimeError("Unexpected number of pretrained blobs in Scale")
caffe_ops = [mul_op]
if add_op:
caffe_ops.append(add_op)
assert len(caffe_ops) == len(weights)
return caffe_ops, weights
elif len(mul_op.input) == 2:
# TODO(jiayq): find a protobuf that uses this and verify.
raise RuntimeError("This path has not been verified yet.")
else:
raise RuntimeError("Unexpected number of inputs.")
@TranslatorRegistry.Register("Reshape")
def TranslateReshape(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Reshape")
caffe_op.output.append("_" + caffe_op.input[0] + "_dims")
reshape_param = layer.reshape_param
AddArgument(caffe_op, 'shape', reshape_param.shape.dim)
return caffe_op, []
@TranslatorRegistry.Register("Flatten")
def TranslateFlatten(layer, pretrained_blobs, is_test):
param = layer.flatten_param
if param.end_axis != -1:
raise NotImplementedError("flatten_param.end_axis not supported yet.")
if param.axis == 0:
caffe_op = BaseTranslate(layer, "FlattenToVec")
elif param.axis == 1:
caffe_op = BaseTranslate(layer, "Flatten")
else:
# This could be a Reshape op, but dim size is not known here.
raise NotImplementedError(
"Not supported yet for flatten_param.axis {}.".format(param.axis))
return caffe_op, []
@TranslatorRegistry.Register("Sigmoid")
def TranslateSigmoid(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Sigmoid")
return caffe_op, []
@TranslatorRegistry.Register("ROIPooling")
def TranslateROIPooling(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "RoIPool")
AddArgument(caffe_op, "order", "NCHW")
if is_test:
AddArgument(caffe_op, "is_test", is_test)
else:
# Only used for gradient computation
caffe_op.output.append(caffe_op.output[0] + '_argmaxes')
param = layer.roi_pooling_param
if param.HasField('pooled_h'):
AddArgument(caffe_op, 'pooled_h', param.pooled_h)
if param.HasField('pooled_w'):
AddArgument(caffe_op, 'pooled_w', param.pooled_w)
if param.HasField('spatial_scale'):
AddArgument(caffe_op, 'spatial_scale', param.spatial_scale)
return caffe_op, []
@TranslatorRegistry.Register("PReLU")
def TranslatePRelu(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "PRelu")
output = caffe_op.output[0]
caffe_op.input.extend([output + '_Slope'])
slope = utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_Slope')
return caffe_op, [slope]
@TranslatorRegistry.Register("Reduction")
def TranslateReduction(layer, pretrained_blobs, is_test):
param = layer.reduction_param
if param.operation == caffe_pb2.ReductionParameter.SUM:
caffe_op = BaseTranslate(layer, "ReduceBackSum")
elif param.operation == caffe_pb2.ReductionParameter.MEAN:
caffe_op = BaseTranslate(layer, "ReduceBackMean")
else:
raise NotImplementedError("Not yet supported")
if param.axis > 0:
# We can't figure out the number of dims to reduce from positive axis
# for back reduction since the shape info is not known here.
raise NotImplementedError("Not yet supported")
num_reduce_dim = -param.axis
AddArgument(caffe_op, "num_reduce_dim", num_reduce_dim)
return caffe_op, []
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Utilitity to convert pretrained caffe models to Caffe2 models.")
parser.add_argument("prototext", help="Caffe prototext.")
parser.add_argument("caffemodel", help="Caffe trained model.")
parser.add_argument("--init_net", help="Caffe2 initialization net.", default="init_net.pb")
parser.add_argument("--predict_net", help="Caffe2 prediction net.", default="predict_net.pb")
args = parser.parse_args()
caffenet = caffe_pb2.NetParameter()
caffenet_pretrained = caffe_pb2.NetParameter()
input_proto = args.prototext
input_caffemodel = args.caffemodel
output_init_net = args.init_net
output_predict_net = args.predict_net
    with open(input_proto, 'r') as f:
        text_format.Merge(f.read(), caffenet)
    with open(input_caffemodel, 'rb') as f:
        caffenet_pretrained.ParseFromString(f.read())
net, pretrained_params = TranslateModel(
caffenet, caffenet_pretrained, is_test=True
)
# Assume there is one input and one output
external_input = net.op[0].input[0]
external_output = net.op[-1].output[0]
net.external_input.extend([external_input])
net.external_input.extend([param.name for param in pretrained_params.protos])
net.external_output.extend([external_output])
init_net = ConvertTensorProtosToInitNet(pretrained_params, external_input)
for param in pretrained_params.protos:
workspace.FeedBlob(param.name, utils.Caffe2TensorToNumpyArray(param))
with open(output_predict_net, 'wb') as f:
f.write(net.SerializeToString())
with open(output_init_net, 'wb') as f:
f.write(init_net.SerializeToString())
|
{
"content_hash": "33cecd7763bfff0d8e853ed702217b66",
"timestamp": "",
"source": "github",
"line_count": 726,
"max_line_length": 97,
"avg_line_length": 37.37603305785124,
"alnum_prop": 0.6325409987101529,
"repo_name": "bwasti/caffe2",
"id": "7c7dd916ca48310789335b861cdb9776f3d6463f",
"size": "27228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe2/python/caffe_translator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4514"
},
{
"name": "C",
"bytes": "58731"
},
{
"name": "C++",
"bytes": "2743591"
},
{
"name": "CMake",
"bytes": "131386"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "455661"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Jupyter Notebook",
"bytes": "4615340"
},
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Metal",
"bytes": "29686"
},
{
"name": "Objective-C",
"bytes": "828"
},
{
"name": "Objective-C++",
"bytes": "147470"
},
{
"name": "Python",
"bytes": "2137478"
},
{
"name": "Shell",
"bytes": "20688"
}
],
"symlink_target": ""
}
|
import numpy as np
from traits.api import (HasTraits, Instance, Float, Enum, Int, Property,
cached_property, Any)
from traitsui.api import View, HSplit, ToolBar, Action, Item, VGroup
from pyface.api import ImageResource
from enable.api import ComponentEditor
from chaco.api import (create_line_plot, PlotAxis, VPlotContainer, DataRange1D,
LogMapper, OverlayPlotContainer)
from nidaqmx import DAQmxDefaults, ContinuousDAQmxSource
from experiment import (AbstractController, icon_dir, AbstractData,
                        AbstractParadigm)
from neurogen.util import db, dbtopa
from neurogen.calibration.util import psd_freq, psd, rms, tone_power_conv
from experiment.channel import FileChannel
ADC_FS = 200e3
class StandardCalSettings(AbstractParadigm):
kw = dict(log=True, context=True)
duration = Float(1, label='Recording duration (sec)', **kw)
input = Enum('ai0', ('ai0', 'ai1', 'ai2'),
label='Analog input (channel)', **kw)
expected_range = Float(10, label='Expected range (V)', **kw)
traits_view = View(
VGroup(
'duration',
'input',
'expected_range',
),
)
class StandardCalController(DAQmxDefaults, AbstractController):
adc_fs = ADC_FS
samples_acquired = Int(0)
acquired = Property(Float, depends_on='samples_acquired')
@cached_property
def _get_acquired(self):
return self.samples_acquired/self.adc_fs
def start(self, info=None):
self.model.component = None
self.complete = False
self.state = 'running'
self.initialize_context()
self.refresh_context()
duration = self.get_current_value('duration')
input = self.get_current_value('input')
input_line = '/{}/{}'.format(self.DEV, input)
expected_range = self.get_current_value('expected_range')
self.iface_adc = ContinuousDAQmxSource(fs=self.adc_fs,
input_line=input_line,
callback=self.poll,
callback_samples=self.adc_fs/8,
expected_range=expected_range)
self.samples_acquired = 0
self.target_samples = int(duration*self.adc_fs)
self.waveforms = []
self.iface_adc.setup()
self.iface_adc.start()
def stop_experiment(self, info=None):
self.iface_adc.clear()
self.complete = True
def poll(self, waveform):
self.waveforms.append(waveform)
self.samples_acquired += int(waveform.shape[-1])
if self.samples_acquired >= self.target_samples:
self.stop()
waveforms = np.concatenate(self.waveforms, axis=-1).ravel()
self.model.generate_plots(waveforms[..., :self.target_samples],
self.adc_fs)
class StandardCal(HasTraits):
paradigm = Instance(StandardCalSettings, ())
data = Instance(AbstractData, ())
component = Instance('enable.api.Component')
rms = Float(0, label='Overall RMS (mV)')
psd_rms = Float(0, label='PSD rms (mV)')
psd_rms_db = Float(0, label='PSD rms (dB re mV)')
def generate_plots(self, waveform, fs):
container = VPlotContainer(padding=70, spacing=70)
frequencies = psd_freq(waveform, fs)
psd_hanning = psd(waveform, fs, 'hanning')
self.rms = 1e3*rms(waveform, detrend=True)
self.psd_rms = psd_hanning[frequencies >= 500].mean()
self.psd_rms_db = db(psd_hanning[frequencies >= 500], 1e-3).mean()
time = np.arange(len(waveform))/fs
plot = create_line_plot((time, waveform*1e3), color='black')
axis = PlotAxis(component=plot, orientation='left',
title="Mic. signal (mV)")
plot.underlays.append(axis)
axis = PlotAxis(component=plot, orientation='bottom',
title="Time (sec)")
plot.underlays.append(axis)
container.insert(0, plot)
subcontainer = OverlayPlotContainer()
plot = create_line_plot((frequencies, db(psd_hanning, 1e-3)),
color='black')
index_range = DataRange1D(low_setting=10, high_setting=100e3)
index_mapper = LogMapper(range=index_range)
plot.index_mapper = index_mapper
axis = PlotAxis(component=plot, orientation='left',
title="Mic. spectrum (dB re mV)")
plot.underlays.append(axis)
axis = PlotAxis(component=plot, orientation='bottom',
title="Frequency (Hz)")
plot.underlays.append(axis)
subcontainer.add(plot)
container.insert(0, subcontainer)
self.component = container
traits_view = View(
HSplit(
Item('paradigm', style='custom', width=200,
enabled_when='handler.state!="running"'),
VGroup(
Item('handler.acquired', style='readonly',
label='Acquired (sec)'),
Item('rms', style='readonly'),
Item('psd_rms', style='readonly'),
Item('psd_rms_db', style='readonly'),
VGroup(
Item('component', editor=ComponentEditor(),
show_label=False),
),
),
show_labels=False,
show_border=True,
),
toolbar=ToolBar(
Action(name='Start', action='start',
image=ImageResource('1rightarrow', icon_dir),
enabled_when='handler.state!="running"'),
Action(name='Stop', action='stop',
image=ImageResource('Stop', icon_dir),
enabled_when='handler.state=="running"'),
),
resizable=True,
)
def launch_gui(**kwargs):
handler = StandardCalController()
StandardCal().edit_traits(handler=handler, **kwargs)
if __name__ == '__main__':
handler = StandardCalController()
StandardCal().configure_traits(handler=handler)
|
{
"content_hash": "14ff581c1e56a514389080bb9a1dd106",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 79,
"avg_line_length": 37.1566265060241,
"alnum_prop": 0.5768482490272373,
"repo_name": "bburan/cochlear",
"id": "dbed6267ba5f5f0f128fc93b3b4e09fbabbc289d",
"size": "6168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cochlear/calibration/rms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "261721"
}
],
"symlink_target": ""
}
|
import webbrowser
from xierpa3.components import Theme, Page, Text
from xierpa3.builders.htmlbuilder import HtmlBuilder
class HelloWorld(Theme):
u"""The **HelloWorld** site class implements a basic "Hello, world!",
showing the smallest possible web page"""
TITLE = u'The standard "Hello, world!" page.' # Use as title of window.
def baseComponents(self):
u"""Create a theme site with just one single template home page. Answer a list
of page instances that are used as templates for this site."""
# Create an instance (=object) of the text component to be placed on the page.
text = Text('Hello, world!')
# Create an instance (=object) of the page, containing the "text" Text instance.
# The page class is also the page name in the url: http://localhost:8060/index
# Components can be a single component or a list of components.
homePage = Page(class_=self.C.TEMPLATE_INDEX, components=text, title=self.TITLE)
# Answer a list of types of pages for this site.
return [homePage]
def make(self, root):
u"""The instance of this class builds the HTML file at the optional path *root*.
If not defined, then @Builder.C.DEFAULT_ROOTPATH@ is used, as in general builders
are associated where output should go to.
E.g. the default @HtmlBuilder.C.DEFAULT_ROOTPATH@ is defined as to the user extended
path of @~/Desktop/Xierpa3Examples/[component.name]@.
And for @CssBuilder@ it is @~/Desktop/Xierpa3Examples/[component.name]/css/style.css@."""
# H T M L
# Create the main HTML builder instance to build the HTML part of the site.
htmlBuilder = HtmlBuilder()
# Compile the site instance and its components into HTML code.
self.build(htmlBuilder)
# Save the resulting HTML file in "index.html"
# Answer the file path, so we can directly open the file with a browser.
return htmlBuilder.save(self, path=root + 'index.html')
if __name__ == '__main__':
# This construction "__name__ == '__main__'" makes this Python file only
# be executed when called in direct mode, such as "python make.py" in the terminal.
# Since no rootPath is added to make(), the file export is in builder.DEFAULT_ROOTPATH
# which typically is the user extended path of ~/Desktop/Xierpa3Examples/HelloWorld/
# TODO: for some reason there is double output of HTML right now, using make()
# TODO: Make(root) implements on other example classes
site = HelloWorld()
root = site.getRootDir()
filePath = site.make(root)
webbrowser.open('file://'+filePath)
|
{
"content_hash": "6f70535a6e94b85475c27ed944e2d6b0",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 101,
"avg_line_length": 53.98,
"alnum_prop": 0.6721007780659504,
"repo_name": "petrvanblokland/Xierpa3",
"id": "5c0f74e2b4ef91ffddd9299c7a05962e896577ed",
"size": "3422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xierpa3/sites/examples/helloworld/make.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41394"
},
{
"name": "JavaScript",
"bytes": "1507"
},
{
"name": "Python",
"bytes": "1349828"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.sites.models import Site
from django.utils.six.moves.urllib.parse import urljoin
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.site.urlresolvers import local_site_reverse
def get_server_url(local_site_name=None, local_site=None, request=None):
"""Return the URL for the root of the server.
This will construct a URL that points to the root of the server, factoring
in whether to use HTTP or HTTPS.
If ``local_site_name`` or ``local_site`` is provided, then the URL will be
the root to the LocalSite's root, rather than the server's root.
If ``request`` is provided, then the Local Site, if any, will be
inferred from the request.
"""
site = Site.objects.get_current()
siteconfig = SiteConfiguration.objects.get_current()
root = local_site_reverse('root', local_site_name=local_site_name,
local_site=local_site, request=request)
return '%s://%s%s' % (siteconfig.get('site_domain_method'),
site.domain, root)
def build_server_url(*args, **kwargs):
"""Build an absolute URL containing the full URL to the server.
All additional arguments passed will be appended as paths to the URL.
"""
return urljoin(get_server_url(**kwargs), *args)
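def _example_absolute_url():
    """Illustrative sketch (not part of the original module).

    Assuming a site domain of "reviews.example.com" served over HTTPS,
    this returns something like "https://reviews.example.com/r/42/diff/".
    The path components are made up.
    """
    return build_server_url('r/42/diff/')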
|
{
"content_hash": "a39aab5b0b9ef0cf7843e1bcdd502c28",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 37.5,
"alnum_prop": 0.6918518518518518,
"repo_name": "beol/reviewboard",
"id": "29c4fd89d2fb998b766f4e8408113c26a90fbddd",
"size": "1350",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "reviewboard/admin/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212721"
},
{
"name": "HTML",
"bytes": "179427"
},
{
"name": "JavaScript",
"bytes": "1463002"
},
{
"name": "Python",
"bytes": "3686127"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
}
|
try:
import json as _json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as _json
import requests as _requests
import urlparse as _urlparse
import random as _random
import base64 as _base64
from ConfigParser import ConfigParser as _ConfigParser
import os as _os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
'grant_type=client_credentials'):
    # This is a bandaid helper function until we get a full
    # KBase python auth client released
auth = _base64.b64encode(user_id + ':' + password)
headers = {'Authorization': 'Basic ' + auth}
ret = _requests.get(auth_svc, headers=headers, allow_redirects=True)
status = ret.status_code
    if 200 <= status <= 299:
tok = _json.loads(ret.text)
elif status == 403:
raise Exception('Authentication failed: Bad user_id/password ' +
'combination for user %s' % (user_id))
else:
raise Exception(ret.text)
return tok['access_token']
def _read_rcfile(file=_os.environ['HOME'] + '/.authrc'): # @ReservedAssignment
# Another bandaid to read in the ~/.authrc file if one is present
authdata = None
if _os.path.exists(file):
try:
with open(file) as authrc:
rawdata = _json.load(authrc)
# strip down whatever we read to only what is legit
authdata = {x: rawdata.get(x) for x in (
'user_id', 'token', 'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading authrc file %s: %s" % (file, e)
return authdata
def _read_inifile(file=_os.environ.get( # @ReservedAssignment
'KB_DEPLOYMENT_CONFIG', _os.environ['HOME'] +
'/.kbase_config')):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if _os.path.exists(file):
try:
config = _ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {x: config.get('authentication', x)
if config.has_option('authentication', x)
else None for x in ('user_id', 'token',
'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading INI file %s: %s" % (file, e)
return authdata
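# Illustrative sketch (not part of the original module): a minimal
# ~/.kbase_config file that _read_inifile understands. The credentials
# below are placeholders.
#
#     [authentication]
#     user_id = someuser
#     password = somepassword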
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
self.name = name
self.code = code
self.message = '' if message is None else message
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
class _JSONObjectEncoder(_json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return _json.JSONEncoder.default(self, obj)
class GenericClient(object):
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False,
trust_all_ssl_certificates=False,
use_url_lookup=True):
if url is None:
raise ValueError('A url is required')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
self.use_url_lookup = use_url_lookup
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in _os.environ:
self._headers['AUTHORIZATION'] = _os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
    def _call(self, url, method, params, json_rpc_context=None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_context:
arg_hash['context'] = json_rpc_context
body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
ret = _requests.post(url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
        if ret.status_code == _requests.codes.server_error:
            if _CT in ret.headers and ret.headers[_CT] == _AJ:
err = _json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
ret.encoding = 'utf-8'
resp = _json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
    def sync_call(self, service_method, param_list, service_version=None, json_rpc_context=None):
        if json_rpc_context and type(json_rpc_context) is not dict:
            raise ValueError('Method sync_call: argument json_rpc_context is not type dict as required.')
url = self.url
if self.use_url_lookup:
module_name = service_method.split('.')[0]
service_status_ret = self._call(self.url, 'ServiceWizard.get_service_status',
[{'module_name' : module_name,
'version' : service_version}], None)[0]
url = service_status_ret['url']
return self._call(url, service_method, param_list, json_rpc_context)
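# Illustrative sketch (not part of the original client): calling a dynamic
# service through the ServiceWizard lookup. The wizard URL, module and
# method names below are hypothetical placeholders.
def _example_sync_call(token):
    client = GenericClient(
        url='https://example.kbase.us/services/service_wizard',
        token=token)
    return client.sync_call('SomeModule.some_method', [{'param': 1}])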
|
{
"content_hash": "eaf1cac22ea37d58849ecf1670fe4332",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 105,
"avg_line_length": 41.00558659217877,
"alnum_prop": 0.5543596730245232,
"repo_name": "kbaseIncubator/onerepotest",
"id": "e14fcbc65792bf79fd70c84345dbcb8ae4ff358f",
"size": "7340",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/kbaseclients/GenericClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "13011"
},
{
"name": "JavaScript",
"bytes": "9155"
},
{
"name": "Makefile",
"bytes": "2811"
},
{
"name": "Perl",
"bytes": "20468"
},
{
"name": "Python",
"bytes": "61816"
},
{
"name": "Ruby",
"bytes": "770"
},
{
"name": "Shell",
"bytes": "1407"
}
],
"symlink_target": ""
}
|
from nose.tools import assert_true, assert_equal
import pickle as pkl
import sys
import os
import numpy as np
from sklearn.model_selection import train_test_split
sys.path.insert(1, os.path.abspath('..'))
from lopq.model import LOPQModel, eigenvalue_allocation, accumulate_covariance_estimators, compute_rotations_from_accumulators
from lopq.search import LOPQSearcher, LOPQSearcherLMDB
from lopq.eval import compute_all_neighbors, get_cell_histogram, get_recall
########################################
# Helpers
########################################
relpath = lambda x: os.path.abspath(os.path.join(os.path.dirname(__file__), x))
def load_oxford_data():
from lopq.utils import load_xvecs
data = load_xvecs(relpath('../../data/oxford/oxford_features.fvecs'))
return data
def make_random_model():
m = LOPQModel(V=5, M=4, subquantizer_clusters=10)
m.fit(np.random.RandomState(42).rand(200, 8), n_init=1)
return m
########################################
# Tests
########################################
def test_eigenvalue_allocation():
a = pkl.load(open(relpath('./testdata/test_eigenvalue_allocation_input.pkl')))
vals, vecs = np.linalg.eigh(a)
res = eigenvalue_allocation(4, vals)
expected = np.array([
63, 56, 52, 48, 44, 40, 36, 30, 26, 22, 18, 14, 10, 6, 3, 0,
62, 57, 53, 51, 45, 41, 39, 33, 32, 31, 29, 25, 21, 17, 13, 9,
61, 58, 54, 49, 47, 42, 38, 34, 28, 24, 20, 16, 12, 8, 5, 2,
60, 59, 55, 50, 46, 43, 37, 35, 27, 23, 19, 15, 11, 7, 4, 1
])
assert_true(np.equal(res, expected).all())
def test_eigenvalue_allocation_normalized_features():
eigenvalues = np.array([
2.02255824, 1.01940991, 0.01569471, 0.01355569, 0.01264379,
0.01137654, 0.01108961, 0.01054673, 0.01023358, 0.00989679,
0.00939045, 0.00900322, 0.00878857, 0.00870027, 0.00850136,
0.00825236, 0.00813437, 0.00800231, 0.00790201, 0.00782219,
0.00763405, 0.00752334, 0.00739174, 0.00728246, 0.00701366,
0.00697365, 0.00677283, 0.00669658, 0.00654397, 0.00647679,
0.00630645, 0.00621057
])
indices = eigenvalue_allocation(2, eigenvalues)
first_half = eigenvalues[indices[:16]]
second_half = eigenvalues[indices[16:]]
diff = np.abs(np.sum(np.log(first_half)) - np.sum(np.log(second_half)))
assert_true(diff < .1, "eigenvalue_allocation is not working correctly")
def test_accumulate_covariance_estimators():
data, centroids = pkl.load(open(relpath('./testdata/test_accumulate_covariance_estimators_input.pkl')))
expected = pkl.load(open(relpath('./testdata/test_accumulate_covariance_estimators_output.pkl')))
actual = accumulate_covariance_estimators(data, centroids)
# Summed residual outer products
assert_true(np.allclose(expected[0], actual[0]))
# Summed residuals
assert_true(np.allclose(expected[1], actual[1]))
# Assignment count per cluster
assert_true(np.array_equal(expected[2], actual[2]))
# Assignments over data
assert_true(np.array_equal(expected[3], actual[3]))
# Residual data
assert_true(np.allclose(expected[4], actual[4]))
def test_compute_rotations_from_accumulators():
A, mu, count, num_buckets = pkl.load(open(relpath('./testdata/test_compute_rotations_from_accumulators_input.pkl')))
expected = pkl.load(open(relpath('./testdata/test_compute_rotations_from_accumulators_output.pkl')))
actual = compute_rotations_from_accumulators(A, mu, count, num_buckets)
# Rotations
assert_true(np.allclose(expected[0], actual[0]))
# Mean residuals
assert_true(np.allclose(expected[1], actual[1]))
def test_reconstruction():
m = LOPQModel.load_proto(relpath('./testdata/random_test_model.lopq'))
code = ((0, 1), (0, 1, 2, 3))
r = m.reconstruct(code)
expected = [-2.27444688, 6.47126941, 4.5042611, 4.76683476, 0.83671082, 9.36027283, 8.11780532, 6.34846377]
assert_true(np.allclose(expected, r))
def test_oxford5k():
random_state = 40
data = load_oxford_data()
train, test = train_test_split(data, test_size=0.2, random_state=random_state)
# Compute distance-sorted neighbors in training set for each point in test set
nns = compute_all_neighbors(test, train)
# Fit model
m = LOPQModel(V=16, M=8)
m.fit(train, n_init=1, random_state=random_state)
# Assert correct code computation
assert_equal(m.predict(test[0]), ((3, 2), (14, 164, 83, 49, 185, 29, 196, 250)))
# Assert low number of empty cells
h = get_cell_histogram(train, m)
assert_equal(np.count_nonzero(h == 0), 6)
# Assert true NN recall on test set
searcher = LOPQSearcher(m)
searcher.add_data(train)
recall, _ = get_recall(searcher, test, nns)
assert_true(np.all(recall > [0.51, 0.92, 0.97, 0.97]))
# Test partial fitting with just coarse quantizers
m2 = LOPQModel(V=16, M=8, parameters=(m.Cs, None, None, None))
m2.fit(train, n_init=1, random_state=random_state)
searcher = LOPQSearcher(m2)
searcher.add_data(train)
recall, _ = get_recall(searcher, test, nns)
assert_true(np.all(recall > [0.51, 0.92, 0.97, 0.97]))
# Test partial fitting with coarse quantizers and rotations
m3 = LOPQModel(V=16, M=8, parameters=(m.Cs, m.Rs, m.mus, None))
m3.fit(train, n_init=1, random_state=random_state)
searcher = LOPQSearcher(m3)
searcher.add_data(train)
recall, _ = get_recall(searcher, test, nns)
assert_true(np.all(recall > [0.51, 0.92, 0.97, 0.97]))
def test_proto():
import os
filename = './temp_proto.lopq'
m = make_random_model()
m.export_proto(filename)
m2 = LOPQModel.load_proto(filename)
assert_equal(m.V, m2.V)
assert_equal(m.M, m2.M)
assert_equal(m.subquantizer_clusters, m2.subquantizer_clusters)
assert_true(np.allclose(m.Cs[0], m2.Cs[0]))
assert_true(np.allclose(m.Rs[0], m2.Rs[0]))
assert_true(np.allclose(m.mus[0], m2.mus[0]))
    assert_true(np.allclose(m.subquantizers[0][0], m2.subquantizers[0][0]))
os.remove(filename)
def test_mat():
import os
filename = './temp_mat.mat'
m = make_random_model()
m.export_mat(filename)
m2 = LOPQModel.load_mat(filename)
assert_equal(m.V, m2.V)
assert_equal(m.M, m2.M)
assert_equal(m.subquantizer_clusters, m2.subquantizer_clusters)
assert_true(np.allclose(m.Cs[0], m2.Cs[0]))
assert_true(np.allclose(m.Rs[0], m2.Rs[0]))
assert_true(np.allclose(m.mus[0], m2.mus[0]))
    assert_true(np.allclose(m.subquantizers[0][0], m2.subquantizers[0][0]))
os.remove(filename)
def searcher_instance_battery(searcher, q):
"""
Helper to perform battery of searcher tests.
"""
retrieved, visited = searcher.get_result_quota(q)
assert_equal(len(retrieved), 12)
assert_equal(visited, 3)
retrieved, visited = searcher.search(q)
assert_equal(len(retrieved), 10)
assert_equal(visited, 3)
retrieved, visited = searcher.get_result_quota(q, quota=20)
assert_equal(len(retrieved), 28)
assert_equal(visited, 5)
retrieved, visited = searcher.search(q, quota=20)
assert_equal(len(retrieved), 20)
assert_equal(visited, 5)
retrieved, visited = searcher.search(q, quota=20, limit=10)
assert_equal(len(retrieved), 10)
assert_equal(visited, 5)
def test_searcher():
    data = pkl.load(open(relpath('./testdata/test_searcher_data.pkl'), 'rb'))
m = LOPQModel.load_proto(relpath('./testdata/random_test_model.lopq'))
q = np.ones(8)
# Test add_data
searcher = LOPQSearcher(m)
searcher.add_data(data)
searcher_instance_battery(searcher, q)
# Test add_codes
searcher = LOPQSearcher(m)
codes = [m.predict(x) for x in data]
searcher.add_codes(codes)
searcher_instance_battery(searcher, q)
def test_searcher_lmdb():
import shutil
    data = pkl.load(open(relpath('./testdata/test_searcher_data.pkl'), 'rb'))
m = LOPQModel.load_proto(relpath('./testdata/random_test_model.lopq'))
lmbd_test_path = './test_lopq_lmbd'
q = np.ones(8)
# Test add_data
searcher = LOPQSearcherLMDB(m, lmbd_test_path)
searcher.add_data(data)
searcher_instance_battery(searcher, q)
# Clean up
shutil.rmtree(lmbd_test_path)
# Test add_codes
searcher = LOPQSearcherLMDB(m, lmbd_test_path)
codes = [m.predict(x) for x in data]
searcher.add_codes(codes)
searcher_instance_battery(searcher, q)
# Clean up
shutil.rmtree(lmbd_test_path)
def test_proto_partial():
import os
filename = './temp_proto_partial.lopq'
    c = (np.random.rand(8, 8), np.random.rand(8, 8))
m = LOPQModel(parameters=(c, None, None, None))
m.export_proto(filename)
m2 = LOPQModel.load_proto(filename)
assert_equal(m.V, m2.V)
assert_equal(m.M, m2.M)
assert_equal(m.subquantizer_clusters, m2.subquantizer_clusters)
assert_true(np.allclose(m.Cs[0], m2.Cs[0]))
assert_true(m.Rs == m2.Rs)
assert_true(m.mus == m2.mus)
    assert_true(m.subquantizers == m2.subquantizers)
os.remove(filename)
|
{
"content_hash": "491c634187c89846c1cf99fb66b2ee9e",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 126,
"avg_line_length": 31.027397260273972,
"alnum_prop": 0.6528697571743929,
"repo_name": "yahoo/lopq",
"id": "96832d0b6312e11226c4ae7633b2f7fc97d3b3ed",
"size": "9212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "102787"
},
{
"name": "Shell",
"bytes": "288"
}
],
"symlink_target": ""
}
|
"""Abstraction layer to resize images using PIL, ImageMagick, or a
public resizing proxy if neither is available.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import urllib
import subprocess
import os
import re
from tempfile import NamedTemporaryFile
from beets import logging
from beets import util
# Resizing methods
PIL = 1
IMAGEMAGICK = 2
WEBPROXY = 3
PROXY_URL = 'http://images.weserv.nl/'
log = logging.getLogger('beets')
def resize_url(url, maxwidth):
"""Return a proxied image URL that resizes the original image to
maxwidth (preserving aspect ratio).
"""
return '{0}?{1}'.format(PROXY_URL, urllib.urlencode({
'url': url.replace('http://', ''),
'w': bytes(maxwidth),
}))
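# For example (illustrative values; the query-parameter order may vary with
# dict ordering):
#   resize_url('http://example.com/art.jpg', 300)
#   # -> 'http://images.weserv.nl/?url=example.com%2Fart.jpg&w=300'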
def temp_file_for(path):
"""Return an unused filename with the same extension as the
specified path.
"""
ext = os.path.splitext(path)[1]
with NamedTemporaryFile(suffix=ext, delete=False) as f:
return f.name
def pil_resize(maxwidth, path_in, path_out=None):
"""Resize using Python Imaging Library (PIL). Return the output path
of resized image.
"""
path_out = path_out or temp_file_for(path_in)
from PIL import Image
log.debug(u'artresizer: PIL resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out))
try:
im = Image.open(util.syspath(path_in))
size = maxwidth, maxwidth
im.thumbnail(size, Image.ANTIALIAS)
im.save(path_out)
return path_out
except IOError:
log.error(u"PIL cannot create thumbnail for '{0}'",
util.displayable_path(path_in))
return path_in
def im_resize(maxwidth, path_in, path_out=None):
"""Resize using ImageMagick's ``convert`` tool.
Return the output path of resized image.
"""
path_out = path_out or temp_file_for(path_in)
log.debug(u'artresizer: ImageMagick resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out))
# "-resize widthxheight>" shrinks images with dimension(s) larger
# than the corresponding width and/or height dimension(s). The >
# "only shrink" flag is prefixed by ^ escape char for Windows
# compatibility.
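    # For maxwidth=300 (illustrative), the resulting invocation is roughly
    # equivalent to the shell command: convert in.jpg -resize '300x^>' out.jpg
    # (no shell quoting is needed here, since arguments are passed as a list).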
try:
util.command_output([
b'convert', util.syspath(path_in, prefix=False),
b'-resize', b'{0}x^>'.format(maxwidth),
util.syspath(path_out, prefix=False),
])
except subprocess.CalledProcessError:
log.warn(u'artresizer: IM convert failed for {0}',
util.displayable_path(path_in))
return path_in
return path_out
BACKEND_FUNCS = {
PIL: pil_resize,
IMAGEMAGICK: im_resize,
}
def pil_getsize(path_in):
from PIL import Image
try:
im = Image.open(util.syspath(path_in))
return im.size
except IOError as exc:
log.error(u"PIL could not read file {}: {}",
util.displayable_path(path_in), exc)
def im_getsize(path_in):
cmd = [b'identify', b'-format', b'%w %h',
util.syspath(path_in, prefix=False)]
try:
out = util.command_output(cmd)
except subprocess.CalledProcessError as exc:
log.warn('ImageMagick size query failed')
log.debug(
'`convert` exited with (status {}) when '
'getting size with command {}:\n{}',
exc.returncode, cmd, exc.output.strip()
)
return
try:
return tuple(map(int, out.split(b' ')))
except IndexError:
log.warn(u'Could not understand IM output: {0!r}', out)
BACKEND_GET_SIZE = {
PIL: pil_getsize,
IMAGEMAGICK: im_getsize,
}
class Shareable(type):
"""A pseudo-singleton metaclass that allows both shared and
non-shared instances. The ``MyClass.shared`` property holds a
lazily-created shared instance of ``MyClass`` while calling
``MyClass()`` to construct a new object works as usual.
"""
def __init__(cls, name, bases, dict):
super(Shareable, cls).__init__(name, bases, dict)
cls._instance = None
@property
def shared(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
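# A minimal sketch of the Shareable semantics (the class below is
# hypothetical, for illustration only):
#
#   class Cache(object):
#       __metaclass__ = Shareable
#
#   assert Cache.shared is Cache.shared  # lazily-created shared instance
#   assert Cache() is not Cache.shared   # plain construction still works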
class ArtResizer(object):
"""A singleton class that performs image resizes.
"""
__metaclass__ = Shareable
def __init__(self):
"""Create a resizer object with an inferred method.
"""
self.method = self._check_method()
log.debug(u"artresizer: method is {0}", self.method)
self.can_compare = self._can_compare()
def resize(self, maxwidth, path_in, path_out=None):
"""Manipulate an image file according to the method, returning a
        new path. For PIL or IMAGEMAGICK methods, resizes the image to a
temporary file. For WEBPROXY, returns `path_in` unmodified.
"""
if self.local:
func = BACKEND_FUNCS[self.method[0]]
return func(maxwidth, path_in, path_out)
else:
return path_in
def proxy_url(self, maxwidth, url):
"""Modifies an image URL according the method, returning a new
URL. For WEBPROXY, a URL on the proxy server is returned.
Otherwise, the URL is returned unmodified.
"""
if self.local:
return url
else:
return resize_url(url, maxwidth)
@property
def local(self):
"""A boolean indicating whether the resizing method is performed
locally (i.e., PIL or ImageMagick).
"""
return self.method[0] in BACKEND_FUNCS
def get_size(self, path_in):
"""Return the size of an image file as an int couple (width, height)
in pixels.
Only available locally
"""
if self.local:
func = BACKEND_GET_SIZE[self.method[0]]
return func(path_in)
def _can_compare(self):
"""A boolean indicating whether image comparison is available"""
return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7)
@staticmethod
def _check_method():
"""Return a tuple indicating an available method and its version."""
version = has_IM()
if version:
return IMAGEMAGICK, version
version = has_PIL()
if version:
return PIL, version
        return WEBPROXY, (0,)
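# Illustrative use of the shared resizer (assumes a local backend such as
# PIL or ImageMagick was detected; the path below is hypothetical):
#   resizer = ArtResizer.shared
#   out_path = resizer.resize(300, '/tmp/cover.jpg')
#   width, height = resizer.get_size(out_path)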
def has_IM():
"""Return Image Magick version or None if it is unavailable
Try invoking ImageMagick's "convert"."""
try:
out = util.command_output([b'identify', b'--version'])
if 'imagemagick' in out.lower():
pattern = r".+ (\d+)\.(\d+)\.(\d+).*"
match = re.search(pattern, out)
if match:
return (int(match.group(1)),
int(match.group(2)),
int(match.group(3)))
return (0,)
except (subprocess.CalledProcessError, OSError):
return None
def has_PIL():
"""Return Image Magick version or None if it is unavailable
Try importing PIL."""
try:
__import__('PIL', fromlist=[str('Image')])
return (0,)
except ImportError:
return None
|
{
"content_hash": "9403f127328a512f1f0cae2457824952",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 78,
"avg_line_length": 29.74590163934426,
"alnum_prop": 0.6025075778451364,
"repo_name": "parapente/beets",
"id": "1b6a5903e095e21420dc5e96bd1f10fcd7706941",
"size": "7929",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "beets/util/artresizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1693553"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
}
|
"""TensorArray: a dynamically sized array of Tensors.
@@TensorArray
"""
# Mixture of pep8 and non-pep8 names, so disable pylint bad-name
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_should_use
# TensorArray object accesses many of the hidden generated ops, but is
# in fact built to wrap these methods.
# pylint: disable=protected-access
class TensorArray(object):
"""Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.
This class is meant to be used with dynamic iteration primitives such as
`while_loop` and `map_fn`. It supports gradient back-propagation via special
"flow" control flow dependencies.
"""
def __init__(self,
dtype,
size=None,
dynamic_size=None,
clear_after_read=None,
tensor_array_name=None,
handle=None,
flow=None,
infer_shape=True,
element_shape=None,
colocate_with_first_write_call=True,
name=None):
"""Construct a new TensorArray or wrap an existing TensorArray handle.
A note about the parameter `name`:
The name of the `TensorArray` (even if passed in) is uniquified: each time
a new `TensorArray` is created at runtime it is assigned its own name for
the duration of the run. This avoids name collisions if a `TensorArray`
is created within a `while_loop`.
Args:
dtype: (required) data type of the TensorArray.
size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
Required if handle is not provided.
dynamic_size: (optional) Python bool: If true, writes to the TensorArray
can grow the TensorArray past its initial size. Default: False.
clear_after_read: Boolean (optional, default: True). If True, clear
TensorArray values after reading them. This disables read-many
semantics, but allows early release of memory.
tensor_array_name: (optional) Python string: the name of the TensorArray.
This is used when creating the TensorArray handle. If this value is
set, handle should be None.
handle: (optional) A `Tensor` handle to an existing TensorArray. If this
is set, tensor_array_name should be None.
flow: (optional) A float `Tensor` scalar coming from an existing
`TensorArray.flow`.
infer_shape: (optional, default: True) If True, shape inference
is enabled. In this case, all elements must have the same shape.
element_shape: (optional, default: None) A `TensorShape` object specifying
the shape constraints of each of the elements of the TensorArray.
Need not be fully defined.
colocate_with_first_write_call: If `True`, the TensorArray will be
colocated on the same device as the Tensor used on its first write
(write operations include `write`, `unstack`, and `split`). If `False`,
the TensorArray will be placed on the device determined by the
device context available during its initialization.
name: A name for the operation (optional).
Raises:
ValueError: if both handle and tensor_array_name are provided.
TypeError: if handle is provided but is not a Tensor.
"""
if handle is not None and tensor_array_name:
raise ValueError(
"Cannot construct with both handle and tensor_array_name")
if handle is not None and not isinstance(handle, ops.Tensor):
raise TypeError("Handle must be a Tensor")
if handle is None and size is None:
raise ValueError("Size must be provided if handle is not provided")
if handle is not None and size is not None:
raise ValueError("Cannot provide both a handle and size "
"at the same time")
if handle is not None and element_shape is not None:
raise ValueError("Cannot provide both a handle and element_shape "
"at the same time")
if handle is not None and dynamic_size is not None:
raise ValueError("Cannot provide both a handle and dynamic_size "
"at the same time")
if handle is not None and clear_after_read is not None:
raise ValueError("Cannot provide both a handle and clear_after_read "
"at the same time")
if clear_after_read is None:
clear_after_read = True
dynamic_size = dynamic_size or False
self._dtype = dtype
# Used to keep track of what tensors the TensorArray should be
# colocated with. We choose to colocate the TensorArray with the
# first tensor written to it.
self._colocate_with_first_write_call = colocate_with_first_write_call
if colocate_with_first_write_call:
self._colocate_with = []
else:
self._colocate_with = None
# Record the current static shape for the array elements. The element
# shape is defined either by `element_shape` or the shape of the tensor
    # of the first write. If `infer_shape` is true, all writes check for
# shape equality.
if element_shape is None:
self._infer_shape = infer_shape
self._element_shape = []
else:
self._infer_shape = True
self._element_shape = [tensor_shape.TensorShape(element_shape)]
with ops.name_scope(name, "TensorArray", [handle, size, flow]) as scope:
if handle is not None:
self._handle = handle
if flow is None:
raise ValueError("flow must not be None if handle is not None.")
self._flow = flow
else:
# Construct the TensorArray with an empty device. The first
# write into the TensorArray from a Tensor with a set device
# will retroactively set the device value of this op.
def create():
return gen_data_flow_ops._tensor_array_v3(
dtype=dtype,
size=size,
element_shape=element_shape,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
tensor_array_name=tensor_array_name,
name=scope)
if colocate_with_first_write_call:
with ops.device(None), ops.colocate_with(None, ignore_existing=True):
self._handle, self._flow = create()
else:
self._handle, self._flow = create()
@property
def flow(self):
"""The flow `Tensor` forcing ops leading to this TensorArray state."""
return self._flow
@property
def dtype(self):
"""The data type of this TensorArray."""
return self._dtype
@property
def handle(self):
"""The reference to the TensorArray."""
return self._handle
def _merge_element_shape(self, shape):
"""Changes the element shape of the array given a shape to merge with.
Args:
shape: A `TensorShape` object to merge with.
Raises:
ValueError: if the provided shape is incompatible with the current
element shape of the `TensorArray`.
"""
if self._element_shape:
if not shape.is_compatible_with(self._element_shape[0]):
raise ValueError(
"Inconsistent shapes: saw %s but expected %s "
"(and infer_shape=True)" % (shape, self._element_shape[0]))
self._element_shape[0] = self._element_shape[0].merge_with(shape)
else:
self._element_shape.append(shape)
@contextlib.contextmanager
def _maybe_colocate_with(self, value):
"""Colocate operations with an internal colocation group or `value`.
Args:
value: `Tensor`, the tensor to try to colocate with.
Yields:
Does not yield anything, but the new context is a colocation context.
If no internal colocation group is set, colocate with `value` and set
the internal colocation group to be value.
"""
if not self._colocate_with_first_write_call:
yield
else:
if not self._colocate_with:
self._colocate_with.append(value)
with ops.colocate_with(self._colocate_with[0]):
yield
def identity(self):
"""Returns a TensorArray with the same content and properties.
Returns:
A new TensorArray object with flow that ensures the control dependencies
from the contexts will become control dependencies for writes, reads, etc.
      Use this object for all subsequent operations.
"""
flow = array_ops.identity(self._flow)
ta = TensorArray(
dtype=self._dtype, handle=self._handle, flow=flow,
infer_shape=self._infer_shape,
colocate_with_first_write_call=self._colocate_with_first_write_call)
ta._element_shape = self._element_shape
ta._colocate_with = self._colocate_with
return ta
def grad(self, source, flow=None, name=None):
# tensor_array_grad requires a flow input when forward
# TensorArrays are dynamically sized. This forces the creation
# of the grad TensorArray only once the final forward array's size
# is fixed.
if flow is None:
flow = self.flow
with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
with ops.colocate_with(self._handle):
g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(
handle=self._handle, source=source, flow_in=flow, name=name)
with ops.control_dependencies([g_handle]):
flow = array_ops.identity(flow, name="gradient_flow")
g = TensorArray(
dtype=self._dtype,
handle=g_handle,
flow=flow,
infer_shape=self._infer_shape,
colocate_with_first_write_call=False)
g._element_shape = self._element_shape
return g
def read(self, index, name=None):
"""Read the value at location `index` in the TensorArray.
Args:
index: 0-D. int32 tensor with the index to read from.
name: A name for the operation (optional).
Returns:
The tensor at index `index`.
"""
value = gen_data_flow_ops._tensor_array_read_v3(
handle=self._handle,
index=index,
flow_in=self._flow,
dtype=self._dtype,
name=name)
if self._element_shape:
value.set_shape(self._element_shape[0].dims)
return value
@tf_should_use.should_use_result
def write(self, index, value, name=None):
"""Write `value` into index `index` of the TensorArray.
Args:
index: 0-D. int32 scalar with the index to write to.
value: N-D. Tensor of type `dtype`. The Tensor to write to this index.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the write occurs.
      Use this object for all subsequent operations.
Raises:
ValueError: if there are more writers than specified.
"""
with ops.name_scope(name, "TensorArrayWrite", [self._handle, index, value]):
value = ops.convert_to_tensor(value, name="value")
with self._maybe_colocate_with(value):
flow_out = gen_data_flow_ops._tensor_array_write_v3(
handle=self._handle,
index=index,
value=value,
flow_in=self._flow,
name=name)
ta = TensorArray(
dtype=self._dtype, handle=self._handle, flow=flow_out,
colocate_with_first_write_call=self._colocate_with_first_write_call)
ta._infer_shape = self._infer_shape
ta._element_shape = self._element_shape
ta._colocate_with = self._colocate_with
if ta._infer_shape:
ta._merge_element_shape(value.get_shape())
return ta
def stack(self, name=None):
"""Return the values in the TensorArray as a stacked `Tensor`.
All of the values must have been written and their shapes must all match.
If input shapes have rank-`R`, then output shape will have rank-`(R+1)`.
Args:
name: A name for the operation (optional).
Returns:
All the tensors in the TensorArray stacked into one tensor.
"""
with ops.colocate_with(self._handle):
with ops.name_scope(name, "TensorArrayStack", [self._handle]):
return self.gather(math_ops.range(0, self.size()), name=name)
def gather(self, indices, name=None):
"""Return selected values in the TensorArray as a packed `Tensor`.
    All of the selected values must have been written and their shapes
    must all match.
Args:
indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
the `TensorArray` is not dynamic, `max_value=size()`.
name: A name for the operation (optional).
Returns:
      The values in the `TensorArray` selected by `indices`, packed into one tensor.
"""
if self._element_shape:
element_shape = self._element_shape[0]
else:
element_shape = tensor_shape.TensorShape(None)
value = gen_data_flow_ops._tensor_array_gather_v3(
handle=self._handle,
indices=indices,
flow_in=self._flow,
dtype=self._dtype,
name=name,
element_shape=element_shape)
if self._element_shape and self._element_shape[0].dims is not None:
value.set_shape([None] + self._element_shape[0].dims)
return value
def concat(self, name=None):
"""Return the values in the TensorArray as a concatenated `Tensor`.
    All of the values must have been written, their ranks must match, and
    their shapes must all match for all dimensions except the first.
Args:
name: A name for the operation (optional).
Returns:
All the tensors in the TensorArray concatenated into one tensor.
"""
if self._element_shape and self._element_shape[0].dims is not None:
element_shape_except0 = (
tensor_shape.TensorShape(self._element_shape[0].dims[1:]))
else:
element_shape_except0 = tensor_shape.TensorShape(None)
value, _ = gen_data_flow_ops._tensor_array_concat_v3(
handle=self._handle,
flow_in=self._flow,
dtype=self._dtype,
name=name,
element_shape_except0=element_shape_except0)
if self._element_shape and self._element_shape[0].dims is not None:
value.set_shape([None] + self._element_shape[0].dims[1:])
return value
@tf_should_use.should_use_result
def unstack(self, value, name=None):
"""Unstack the values of a `Tensor` in the TensorArray.
If input value shapes have rank-`R`, then the output TensorArray will
contain elements whose shapes are rank-`(R-1)`.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to unstack.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the unstack occurs.
      Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
with ops.name_scope(name, "TensorArrayUnstack", [self._handle, value]):
num_elements = array_ops.shape(value)[0]
return self.scatter(
indices=math_ops.range(0, num_elements), value=value, name=name)
@tf_should_use.should_use_result
def scatter(self, indices, value, name=None):
"""Scatter the values of a `Tensor` in specific indices of a `TensorArray`.
Args:
indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
the `TensorArray` is not dynamic, `max_value=size()`.
value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the scatter occurs.
      Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
with ops.name_scope(name, "TensorArrayScatter",
[self._handle, value, indices]):
value = ops.convert_to_tensor(value, name="value")
with self._maybe_colocate_with(value):
flow_out = gen_data_flow_ops._tensor_array_scatter_v3(
handle=self._handle,
indices=indices,
value=value,
flow_in=self._flow,
name=name)
ta = TensorArray(
dtype=self._dtype, handle=self._handle, flow=flow_out,
colocate_with_first_write_call=self._colocate_with_first_write_call)
ta._infer_shape = self._infer_shape
ta._element_shape = self._element_shape
ta._colocate_with = self._colocate_with
if ta._infer_shape and context.in_graph_mode():
val_shape = flow_out.op.inputs[2].get_shape()
element_shape = tensor_shape.unknown_shape()
if val_shape.dims is not None:
element_shape = tensor_shape.TensorShape(val_shape.dims[1:])
ta._merge_element_shape(element_shape)
return ta
@tf_should_use.should_use_result
def split(self, value, lengths, name=None):
"""Split the values of a `Tensor` into the TensorArray.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.
lengths: 1-D. int32 vector with the lengths to use when splitting
`value` along its first dimension.
name: A name for the operation (optional).
Returns:
A new TensorArray object with flow that ensures the split occurs.
      Use this object for all subsequent operations.
Raises:
ValueError: if the shape inference fails.
"""
with ops.name_scope(name, "TensorArraySplit",
[self._handle, value, lengths]):
value = ops.convert_to_tensor(value, name="value")
with self._maybe_colocate_with(value):
lengths_64 = math_ops.to_int64(lengths)
flow_out = gen_data_flow_ops._tensor_array_split_v3(
handle=self._handle,
value=value,
lengths=lengths_64,
flow_in=self._flow,
name=name)
ta = TensorArray(
dtype=self._dtype, handle=self._handle, flow=flow_out,
colocate_with_first_write_call=self._colocate_with_first_write_call)
ta._infer_shape = self._infer_shape
ta._element_shape = self._element_shape
ta._colocate_with = self._colocate_with
if ta._infer_shape and context.in_graph_mode():
val_shape = flow_out.op.inputs[1].get_shape()
clengths = tensor_util.constant_value(flow_out.op.inputs[2])
element_shape = tensor_shape.unknown_shape()
if val_shape.dims is not None:
if clengths is not None and clengths.max() == clengths.min():
element_shape = tensor_shape.TensorShape([clengths[0]] +
val_shape.dims[1:])
ta._merge_element_shape(element_shape)
return ta
def size(self, name=None):
"""Return the size of the TensorArray."""
return gen_data_flow_ops._tensor_array_size_v3(
handle=self._handle, flow_in=self.flow, name=name)
@tf_should_use.should_use_result
def close(self, name=None):
"""Close the current TensorArray."""
return gen_data_flow_ops._tensor_array_close_v3(
handle=self._handle, name=name)
# pylint: enable=protected-access
|
{
"content_hash": "8f395caae46cf6d64d1d3d109671a0ba",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 80,
"avg_line_length": 38.855711422845694,
"alnum_prop": 0.6492856774459745,
"repo_name": "bowang/tensorflow",
"id": "08325ba7710d5f7007f5c55934f15ab5a4015536",
"size": "20078",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/tensor_array_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29732195"
},
{
"name": "CMake",
"bytes": "647266"
},
{
"name": "Go",
"bytes": "976912"
},
{
"name": "Java",
"bytes": "412231"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "276756"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26533729"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373122"
}
],
"symlink_target": ""
}
|
import serial #serial library
import sys #used to read argument from shell
ser = serial.Serial('/dev/ttyMCC', 115200) # open serial port
stringa = sys.argv[1] + ':' + sys.argv[2] + ':' + sys.argv[3]
ser.write(stringa.encode()) # write a string
if (sys.argv[1] == "i" and (sys.argv[3] == "a" or sys.argv[3] == "d")):
line = ser.readline()
print line
ser.close() # close port
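# Example invocation (illustrative): `python sgweb.py i 1 a` sends "i:1:a"
# over the serial port and, because argv[1] == "i" and argv[3] == "a",
# reads back and prints one reply line before the port is closed.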
|
{
"content_hash": "44f9cac9fc70446e1e9b9ae753f6290b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 71,
"avg_line_length": 43.77777777777778,
"alnum_prop": 0.6192893401015228,
"repo_name": "ghesio/SGWEB",
"id": "28532d01af589406629b18151c63813eb1b103c1",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SGWEB/sgweb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "3001"
},
{
"name": "CSS",
"bytes": "668"
},
{
"name": "JavaScript",
"bytes": "18746"
},
{
"name": "PHP",
"bytes": "43638"
},
{
"name": "Python",
"bytes": "1119"
},
{
"name": "Shell",
"bytes": "3563"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 12);
|
{
"content_hash": "6ebad01d50b27302148ea3d94ae55826",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 165,
"avg_line_length": 37.857142857142854,
"alnum_prop": 0.7056603773584905,
"repo_name": "antoinecarme/pyaf",
"id": "5c0a567a9fcc65efdc8eeb97280969f0633baa46",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_PolyTrend/cycle_30/ar_12/test_artificial_32_Difference_PolyTrend_30_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""Provide the RedditorListingMixin class."""
from ....const import urljoin
from ..generator import ListingGenerator
from .base import BaseListingMixin
from .gilded import GildedListingMixin
class RedditorListingMixin(BaseListingMixin, GildedListingMixin):
"""Adds additional methods pertaining to Redditor instances."""
@property
def comments(self):
r"""An instance of :class:`.SubListing` providing access to comments.
For example, to output the first line of all new comments by
``/u/spez`` try:
.. code:: python
for comment in reddit.redditor('spez').comments.new(limit=None):
print(comment.body.split('\n', 1)[0][:79])
"""
if self.__dict__.get('_comments') is None:
self._comments = SubListing(self._reddit, self._path, 'comments')
return self._comments
@property
def submissions(self):
"""An instance of :class:`.SubListing` providing access to submissions.
        For example, to output the titles of the top 100 all-time submissions
for ``/u/spez`` try:
.. code:: python
for submission in reddit.redditor('spez').submissions.top('all'):
print(submission.title)
"""
if self.__dict__.get('_submissions') is None:
self._submissions = SubListing(self._reddit, self._path,
'submitted')
return self._submissions
def downvoted(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has downvoted.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit, urljoin(self._path, 'downvoted'),
**generator_kwargs)
def gildings(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has gilded.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit,
urljoin(self._path, 'gilded/given'),
**generator_kwargs)
def hidden(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has hidden.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit, urljoin(self._path, 'hidden'),
**generator_kwargs)
def saved(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has saved.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit, urljoin(self._path, 'saved'),
**generator_kwargs)
def upvoted(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has upvoted.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit, urljoin(self._path, 'upvoted'),
**generator_kwargs)
class SubListing(BaseListingMixin):
"""Helper class for generating SubListing objects."""
def __init__(self, reddit, base_path, subpath):
"""Initialize a SubListing instance.
:param reddit: An instance of :class:`.Reddit`.
:param base_path: The path to the object up to this point.
:param subpath: The additional path to this sublisting.
"""
super(SubListing, self).__init__(reddit, None)
self._listing_use_sort = True
self._reddit = reddit
self._path = urljoin(base_path, subpath)
|
{
"content_hash": "0da07178682d213017e4ca7a5568bf35",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 38.98540145985402,
"alnum_prop": 0.6382699868938401,
"repo_name": "nmtake/praw",
"id": "0be84041a4ae0eba705f072c8e83b841d40e277c",
"size": "5341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "praw/models/listing/mixins/redditor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "367899"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
}
|
import sys, os
sys.path.append("../..")
# import facerec modules
from feature import Fisherfaces, SpatialHistogram, Identity
from distance import EuclideanDistance, ChiSquareDistance
from classifier import NearestNeighbor
from model import PredictableModel
from validation import KFoldCrossValidation
from visual import subplot
from util import minmax_normalize
from serialization import save_model, load_model
# import numpy, matplotlib and logging
import numpy as np
from PIL import Image
import matplotlib.cm as cm
import logging
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from lbp import LPQ, ExtendedLBP
def read_images(path, sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
        sz: A tuple with the size to resize the images to (optional).
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X,y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
try:
im = Image.open(os.path.join(subject_path, filename))
im = im.convert("L")
# resize to given size (if given)
if (sz is not None):
                        im = im.resize(sz, Image.ANTIALIAS)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c+1
return [X,y]
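# Illustrative on-disk layout (hypothetical paths): images/alice/1.png,
# images/bob/1.png; then X, y = read_images('images') yields X as a list of
# uint8 arrays and y as the matching subject labels, e.g. [0, 0, ..., 1].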
def run_rec():
# This is where we write the images, if an output_dir is given
# in command line:
out_dir = None
# Now read in the image data. This must be a valid path!
[X,y] = read_images('images')
# Then set up a handler for logging:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Define the Fisherfaces as Feature Extraction method:
feature = Fisherfaces()
# Define a 1-NN classifier with Euclidean Distance:
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# Define the model as the combination
my_model = PredictableModel(feature=feature, classifier=classifier)
# Compute the Fisherfaces on the given data (in X) and labels (in y):
my_model.compute(X, y)
# We then save the model, which uses Pythons pickle module:
save_model('model.pkl', my_model)
model = load_model('model.pkl')
# Then turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)
#E = []
#for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
# e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
# E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
#subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
# Perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(X, y)
# And print the result:
cv.print_results()
im = Image.open('search.png')
im = im.convert("L")
    predicted_label = model.predict(np.asarray(im, dtype=np.uint8))[0]
print(predicted_label)
return predicted_label
|
{
"content_hash": "6fe2b4c481cc158a591bef9202ed29b4",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 126,
"avg_line_length": 39.431372549019606,
"alnum_prop": 0.6496767777225261,
"repo_name": "revan/facerecserver",
"id": "f465148fc5decce4edd9d7d98e82fba711cdb806",
"size": "5661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_rec.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70350"
}
],
"symlink_target": ""
}
|
if __name__ == '__build__':
raise Exception
import string
__version__ = string.split('$Revision: 1.1.1.1 $')[1]
__date__ = string.join(string.split('$Date: 2007/02/15 19:25:21 $')[1:3], ' ')
__author__ = 'Tarn Weisner Burton <twburton@users.sourceforge.net>'
#
# Ported to PyOpenGL 2.0 by Tarn Weisner Burton 10May2001
#
# This code was created by Richard Campbell '99 (ported to Python/PyOpenGL by John Ferguson 2000)
#
# The port was based on the PyOpenGL tutorial module: dots.py
#
# If you've found this code useful, please let me know (email John Ferguson at hakuin@voicenet.com).
#
# See original source and C based tutorial at http://nehe.gamedev.net
#
# Note:
# -----
# This code is not a good example of Python and using OO techniques. It is a simple and direct
# exposition of how to use the Open GL API in Python via the PyOpenGL package. It also uses GLUT,
# which in my opinion is a high quality library in that it makes my work simpler. Due to using
# these APIs, this code is more like a C program using function based programming (which Python
# is in fact based upon, note the use of closures and lambda) than a "good" OO program.
#
# To run this code get and install OpenGL, GLUT, PyOpenGL (see http://www.python.org), and PyNumeric.
# Installing PyNumeric means having a C compiler that is configured properly, or so I found. For
# Win32 this assumes VC++, I poked through the setup.py for Numeric, and chased through disutils code
# and noticed what seemed to be hard coded preferences for VC++ in the case of a Win32 OS. However,
# I am new to Python and know little about distutils, so I may just be not using it right.
#
# BTW, since this is Python make sure you use tabs or spaces to indent, I had numerous problems since I
# was using editors that were not sensitive to Python.
#
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import sys
# Some api in the chain is translating the keystrokes to this octal string
# so instead of saying: ESCAPE = 27, we use the following.
ESCAPE = '\033'
# Number of the glut window.
window = 0
# A general OpenGL initialization function. Sets all of the initial parameters.
def InitGL(Width, Height): # We call this right after our OpenGL window is created.
glClearColor(0.0, 0.0, 0.0, 0.0) # This Will Clear The Background Color To Black
glClearDepth(1.0) # Enables Clearing Of The Depth Buffer
glDepthFunc(GL_LESS) # The Type Of Depth Test To Do
glEnable(GL_DEPTH_TEST) # Enables Depth Testing
glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading
glMatrixMode(GL_PROJECTION)
glLoadIdentity() # Reset The Projection Matrix
# Calculate The Aspect Ratio Of The Window
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
if Height == 0: # Prevent A Divide By Zero If The Window Is Too Small
Height = 1
glViewport(0, 0, Width, Height) # Reset The Current Viewport And Perspective Transformation
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# The main drawing function.
def DrawGLScene():
# Clear The Screen And The Depth Buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity() # Reset The View
# Move Left 1.5 units and into the screen 6.0 units.
glTranslatef(-1.5, 0.0, -6.0)
# Since we have smooth color mode on, this will be great for the Phish Heads :-).
# Draw a triangle
glBegin(GL_POLYGON) # Start drawing a polygon
glColor3f(1.0, 0.0, 0.0) # Red
glVertex3f(0.0, 1.0, 0.0) # Top
glColor3f(0.0, 1.0, 0.0) # Green
glVertex3f(1.0, -1.0, 0.0) # Bottom Right
glColor3f(0.0, 0.0, 1.0) # Blue
glVertex3f(-1.0, -1.0, 0.0) # Bottom Left
glEnd() # We are done with the polygon
# Move Right 3.0 units.
glTranslatef(3.0, 0.0, 0.0)
# Draw a square (quadrilateral)
glColor3f(0.3, 0.5, 1.0) # Bluish shade
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glVertex3f(-1.0, 1.0, 0.0) # Top Left
glVertex3f(1.0, 1.0, 0.0) # Top Right
glVertex3f(1.0, -1.0, 0.0) # Bottom Right
glVertex3f(-1.0, -1.0, 0.0) # Bottom Left
glEnd() # We are done with the polygon
# since this is double buffered, swap the buffers to display what just got drawn.
glutSwapBuffers()
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
# If escape is pressed, kill everything.
if args[0] == ESCAPE:
sys.exit()
def main():
global window
# For now we just pass glutInit one empty argument. I wasn't sure what should or could be passed in (tuple, list, ...)
# Once I find out the right stuff based on reading the PyOpenGL source, I'll address this.
glutInit(sys.argv)
# Select type of Display mode:
# Double buffer
# RGBA color
# Alpha components supported
# Depth buffer
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
# get a 640 x 480 window
glutInitWindowSize(640, 480)
# the window starts at the upper left corner of the screen
glutInitWindowPosition(0, 0)
# Okay, like the C version we retain the window id to use when closing, but for those of you new
# to Python (like myself), remember this assignment would make the variable local and not global
# if it weren't for the global declaration at the start of main.
window = glutCreateWindow("Jeff Molofee's GL Code Tutorial ... NeHe '99")
# Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
# set the function pointer and invoke a function to actually register the callback, otherwise it
# would be very much like the C version of the code.
glutDisplayFunc(DrawGLScene)
# Uncomment this line to get full screen.
#glutFullScreen()
# When we are doing nothing, redraw the scene.
glutIdleFunc(DrawGLScene)
# Register the function called when our window is resized.
glutReshapeFunc(ReSizeGLScene)
# Register the function called when the keyboard is pressed.
glutKeyboardFunc(keyPressed)
# Initialize our window.
InitGL(640, 480)
# Start Event Processing Engine
glutMainLoop()
# Print message to console, and kick off the main to get it rolling.
print "Hit ESC key to quit."
main()
|
{
"content_hash": "0589aceafa641c26cfcc9ae6403c1fdf",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 119,
"avg_line_length": 39.796407185628745,
"alnum_prop": 0.6953054468853446,
"repo_name": "lhl/vrdev",
"id": "2f6c62952ddf8f32f798ba2a89e6e2c9b47c4a23",
"size": "6722",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/NeHe/lesson3.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "GLSL",
"bytes": "195562"
},
{
"name": "JavaScript",
"bytes": "1624"
},
{
"name": "Jupyter Notebook",
"bytes": "1091162"
},
{
"name": "Python",
"bytes": "856749"
},
{
"name": "Shell",
"bytes": "832"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
import re
class GPServiceAccount():
"""Holds authentication details for connecting to the Globalization
Pipeline (GP) service instance. The service supports Globalization
    Pipeline Authentication and Identity and Access Management (IAM) authentication.
There are three options for creating a ``GPServiceAccount`` instance:
1. Provide the parameters.
Mandatory for both authentication mechanisms
``url``, ``instanceId``
For Globalization Pipeline authentication:
``userId`` and ``password``
For IAM authentication:
``apiKey``
    If both Globalization Pipeline and IAM authentication credentials are provided, the ``GPServiceAccount``
instance will be initialized with Globalization Pipeline authentication.
2. Use the user defined environment variables (no params required).
Mandatory for both authentication mechanisms:
``GPServiceAccount.GP_URL_ENV_VAR``,
``GPServiceAccount.GP_INSTANCE_ID_ENV_VAR``
For Globalization Pipeline authentication:
``GPServiceAccount.GP_USER_ID_ENV_VAR``,
``GPServiceAccount.GP_PASSWORD_ENV_VAR``
For IAM authentication:
``GPServiceAccount.GP_IAM_API_KEY_ENV_VAR``
    If both Globalization Pipeline and IAM authentication credentials are provided, the ``GPServiceAccount``
instance will be initialized with Globalization Pipeline authentication.
    3. Search the ``VCAP_SERVICES`` environment variable for all available
GP service instances. If a service instance name is provided, it will
be given precedence. (optional ``serviceInstanceName`` param)
"""
# check these user defined env vars if they are set
GP_URL_ENV_VAR = "GP_URL"
GP_INSTANCE_ID_ENV_VAR = "GP_INSTANCE_ID"
GP_USER_ID_ENV_VAR = "GP_USER_ID"
GP_PASSWORD_ENV_VAR = "GP_PASSWORD"
GP_IAM_API_KEY_ENV_VAR = "GP_IAM_API_KEY"
# check VCAP_SERVICES env var and extract data if necessary
# (see app's VCAP_SERVICES env var for json template)
__VCAP_SERVICES_ENV_VAR = "VCAP_SERVICES"
__NAME_KEY = 'name'
__CREDENTIALS_KEY = 'credentials'
__INSTANCE_ID_KEY = 'instanceId'
__URL_KEY = 'url'
__USER_ID_KEY = 'userId'
__PASSWORD_KEY = 'password'
__IAM_API_KEY_KEY = 'apikey'
__gpServiceNameRegex = re.compile('^g11n-pipeline|^gp-')
__url = None
__instanceId = None
__userId = None
__password = None
__apiKey = None
__iamEnabled = None
def __setcreds__(self, url, instanceId, userId, password):
self.__url = url
self.__instanceId = instanceId
self.__userId = userId
self.__password = password
self.__iamEnabled = False
def __setIamCreds__(self, url, instanceId, apiKey):
self.__url = url
self.__instanceId = instanceId
self.__apiKey = apiKey
self.__iamEnabled = True
def __init__(self, url=None, instanceId=None, userId=None, password=None,
serviceInstanceName=None, credentialsJson=None, apiKey=None):
credentialsSet = False
if url and instanceId:
logging.info('Trying to initialize by params')
if userId and password:
self.__setcreds__(url, instanceId, userId, password)
credentialsSet = True
logging.info('using user provided data to create GPServiceAccount supporting GP auth.')
elif apiKey:
self.__setIamCreds__(url, instanceId, apiKey)
credentialsSet = True
logging.info('using user provided data to create GPServiceAccount supporting IAM auth.')
if credentialsSet:
logging.info('Successfully completed initialization by params')
if not credentialsSet:
logging.info('Trying to initialize by user env')
(url, instanceId, userId, password, apiKey) = self.__get_user_env_vars()
if url and instanceId:
if userId and password:
self.__setcreds__(url, instanceId, userId, password)
credentialsSet = True
logging.info("""using user defined environment variables to
create GPServiceAccount""")
elif apiKey:
self.__setIamCreds__(url, instanceId, apiKey)
credentialsSet = True
logging.info("""using user defined environment variables to
create GPServiceAccount supporting IAM auth.""")
if credentialsSet:
logging.info('Successfully completed initialization by user env variables')
if not credentialsSet and credentialsJson is not None:
logging.info('Trying to initialize by credentials file')
(url, instanceId, userId, password, apiKey) = self.__get_credentials_from_file(credentialsJson)
if url and instanceId:
if userId and password:
self.__setcreds__(url, instanceId, userId, password)
credentialsSet = True
logging.info("""using user defined environment variables to
create GPServiceAccount""")
                elif apiKey:
                    self.__setIamCreds__(url, instanceId, apiKey)
                    credentialsSet = True
                    logging.info("""using credentials file to
                        create GPServiceAccount supporting IAM auth.""")
if credentialsSet:
logging.info('Successfully completed initialization by credentials file')
if not credentialsSet:
logging.info('Trying to initialize by vcap_services env var')
(url, instanceId, userId, password, apiKey) = \
self.__parse_vcap_services_env_var(serviceInstanceName)
if url and instanceId:
if userId and password:
self.__setcreds__(url, instanceId, userId, password)
credentialsSet = True
logging.info("""using VCAP_SERVICES environment variable to
create GPServiceAccount""")
elif apiKey:
self.__setIamCreds__(url, instanceId, apiKey)
credentialsSet = True
logging.info("""using VCAP_SERVICES environment variable to
create GPServiceAccount supporting IAM auth.""")
if credentialsSet:
logging.info('Successfully completed initialization by vcap_services env var')
# make sure that all the vars are set
if not credentialsSet:
logging.error("Failed to initialize service account by any of the available init methods.")
assert self.__url, ('url is not a string: <%s>' % self.__url)
assert self.__instanceId, ('instanceId is not a string: <%s>' %self.__instanceId)
if self.__iamEnabled:
assert self.__apiKey, ('apiKey is not a string: <%s>' %self.__apiKey)
            logging.info(('created GPServiceAccount supporting IAM auth using ' +
                          'url <%s>, instanceId <%s>, and IAM API Key <***>'),
                         self.__url, self.__instanceId)
else:
assert self.__userId, ('userId is not a string: <%s>' %self.__userId)
assert self.__password, ('password is not a string: <%s>' %self.__password)
logging.info(('created GPServiceAccount using url <%s>, ' + \
'instanceId <%s>, userId <%s>, and password <***>'), self.__url,
self.__instanceId, self.__userId)
def get_url(self):
"""Return the ``url`` being used by this ``GPServiceAccount``"""
return self.__url
def get_instance_id(self):
"""Return the ``instanceId`` being used by this ``GPServiceAccount``"""
return self.__instanceId
def get_user_id(self):
"""Return the ``userId`` being used by this ``GPServiceAccount``"""
return self.__userId
def get_password(self):
"""Return the ``password`` being used by this ``GPServiceAccount``"""
return self.__password
def get_api_key(self):
"""Return the ``IAM API Key`` being used by this ``GPServiceAccount``"""
return self.__apiKey
def is_iam_enabled(self):
"""Return true if the ``GPServiceAccount`` being used is IAM enabled"""
return self.__iamEnabled
def __get_credentials_from_file(self, credsFile):
credsJson = open(credsFile, "r")
credentials = json.load(credsJson)
if credentials and "credentials" in credentials:
# credentials: { ... }
credentials = credentials["credentials"]
if credentials:
return (credentials["url"], credentials["instanceId"], credentials["userId"], credentials["password"], credentials["apikey"])
return (None, None, None, None, None)
def __get_user_env_vars(self):
"""Return the user defined environment variables"""
return (os.environ.get(self.GP_URL_ENV_VAR),
os.environ.get(self.GP_INSTANCE_ID_ENV_VAR),
os.environ.get(self.GP_USER_ID_ENV_VAR),
os.environ.get(self.GP_PASSWORD_ENV_VAR),
os.environ.get(self.GP_IAM_API_KEY_ENV_VAR))
def __parse_vcap_services_env_var(self, serviceInstanceName=None):
"""Parse the ``VCAP_SERVICES`` env var and search for the necessary
values
"""
vcapServices = os.environ.get(self.__VCAP_SERVICES_ENV_VAR)
if not vcapServices:
return (None, None, None, None, None)
parsedVcapServices = json.loads(vcapServices)
gpServicesInstances = []
for serviceName in parsedVcapServices:
if self.__gpServiceNameRegex.match(serviceName):
serviceInstances = parsedVcapServices.get(serviceName)
for serviceInstance in serviceInstances:
gpServicesInstances.append(serviceInstance)
if not gpServicesInstances:
return (None, None, None, None, None)
targetGPServiceInstance = None
# use first service if no name is provided
if not serviceInstanceName:
targetGPServiceInstance = gpServicesInstances[0]
else:
# search for service name
for gpServiceInstance in gpServicesInstances:
if gpServiceInstance.get(self.__NAME_KEY)== serviceInstanceName:
targetGPServiceInstance = gpServiceInstance
break
# service was not found
if not targetGPServiceInstance:
return (None, None, None, None, None)
credentials = targetGPServiceInstance.get(self.__CREDENTIALS_KEY)
if not credentials:
return (None, None, None, None, None)
url = credentials.get(self.__URL_KEY)
instanceId = credentials.get(self.__INSTANCE_ID_KEY)
userId = credentials.get(self.__USER_ID_KEY)
password = credentials.get(self.__PASSWORD_KEY)
apiKey = credentials.get(self.__IAM_API_KEY_KEY)
return (url, instanceId, userId, password, apiKey)
|
{
"content_hash": "36801fef1c318f0fa0c2e2878582cada",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 137,
"avg_line_length": 45.44015444015444,
"alnum_prop": 0.5861160676353131,
"repo_name": "IBM-Bluemix/gp-python-client",
"id": "321a1abda5b824e118e38a3a7e5a513efe6d50c8",
"size": "12357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpclient/gpserviceaccount.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "68867"
}
],
"symlink_target": ""
}
|
import sys
from os.path import abspath, dirname, join, normpath
BASE = dirname(abspath(__file__))
SRC = normpath(join(BASE, '..', '..', '..', '..', 'src'))
# must generate data next to testdoc.html to get relative sources correct
INPUT = join(BASE, 'libdoc.txt')
OUTPUT = join(BASE, 'libdoc.js')
sys.path.insert(0, SRC)
from robotide.lib.robot.libdoc import LibraryDocumentation
from robotide.lib.robot.libdocpkg.htmlwriter import LibdocModelWriter
with open(OUTPUT, 'w') as output:
libdoc = LibraryDocumentation(INPUT)
LibdocModelWriter(output, libdoc).write_data()
print(OUTPUT)
|
{
"content_hash": "d20bd12bdfcdc2b2db679a5d73c391aa",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 29.7,
"alnum_prop": 0.7289562289562289,
"repo_name": "fingeronthebutton/RIDE",
"id": "7c35d053059d1ac5a9b11219d2d00bd5ee8a67b2",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robotide/lib/robot/htmldata/testdata/create_libdoc_data.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21370"
},
{
"name": "HTML",
"bytes": "110675"
},
{
"name": "JavaScript",
"bytes": "41401"
},
{
"name": "Python",
"bytes": "2902622"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
def _fill_diagonal(shape, value):
result = np.zeros(shape)
np.fill_diagonal(result, value)
return (result,)
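# A quick sanity check of the reference above (hypothetical values):
# _fill_diagonal([3, 3], 2.5) returns ([[2.5, 0., 0.],
#                                       [0., 2.5, 0.],
#                                       [0., 0., 2.5]],) as a 1-tuple,
# matching the single-output convention assertReferenceChecks expects.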
class TestFillerOperator(hu.HypothesisTestCase):
@given(**hu.gcs)
def test_shape_error(self, gc, dc):
op = core.CreateOperator(
'GaussianFill',
[],
'out',
shape=32, # illegal parameter
mean=0.0,
std=1.0,
)
exception = False
try:
workspace.RunOperatorOnce(op)
except Exception:
exception = True
self.assertTrue(exception, "Did not throw exception on illegal shape")
op = core.CreateOperator(
'ConstantFill',
[],
'out',
shape=[], # scalar
value=2.0,
)
self.assertTrue(workspace.RunOperatorOnce(op))
self.assertEqual(workspace.FetchBlob('out'), [2.0])
@given(
shape=hu.dims().flatmap(
lambda dims: hu.arrays(
[dims], dtype=np.int64,
elements=st.integers(min_value=0, max_value=20)
)
),
a=st.integers(min_value=0, max_value=100),
b=st.integers(min_value=0, max_value=100),
**hu.gcs
)
def test_uniform_int_fill_op_blob_input(self, shape, a, b, gc, dc):
net = core.Net('test_net')
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
shape_blob = net.Const(shape, dtype=np.int64)
a_blob = net.Const(a, dtype=np.int32)
b_blob = net.Const(b, dtype=np.int32)
uniform_fill = net.UniformIntFill([shape_blob, a_blob, b_blob],
1, input_as_shape=1)
workspace.RunNetOnce(net)
blob_out = workspace.FetchBlob(uniform_fill)
if b < a:
new_shape = shape[:]
new_shape[0] = 0
np.testing.assert_array_equal(new_shape, blob_out.shape)
else:
np.testing.assert_array_equal(shape, blob_out.shape)
self.assertTrue((blob_out >= a).all())
self.assertTrue((blob_out <= b).all())
@given(
**hu.gcs
)
def test_uniform_fill_using_arg(self, gc, dc):
net = core.Net('test_net')
shape = [2**3, 5]
# uncomment this to test filling large blob
# shape = [2**30, 5]
min_v = -100
max_v = 100
output_blob = net.UniformIntFill(
[],
['output_blob'],
shape=shape,
min=min_v,
max=max_v,
)
workspace.RunNetOnce(net)
output_data = workspace.FetchBlob(output_blob)
np.testing.assert_array_equal(shape, output_data.shape)
min_data = np.min(output_data)
max_data = np.max(output_data)
self.assertGreaterEqual(min_data, min_v)
self.assertLessEqual(max_data, max_v)
self.assertNotEqual(min_data, max_data)
@given(
shape=st.sampled_from(
[
[3, 3],
[5, 5, 5],
[7, 7, 7, 7],
]
),
**hu.gcs
)
def test_diagonal_fill_op_float(self, shape, gc, dc):
value = 2.5
op = core.CreateOperator(
'DiagonalFill',
[],
'out',
shape=shape, # scalar
value=value,
)
for device_option in dc:
op.device_option.CopyFrom(device_option)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [shape, value], _fill_diagonal)
@given(**hu.gcs)
def test_diagonal_fill_op_int(self, gc, dc):
value = 2
shape = [3, 3]
op = core.CreateOperator(
'DiagonalFill',
[],
'out',
shape=shape,
dtype=core.DataType.INT32,
value=value,
)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [shape, value], _fill_diagonal)
@given(**hu.gcs)
def test_gaussian_fill_op(self, gc, dc):
op = core.CreateOperator(
'GaussianFill',
[],
'out',
shape=[17, 3, 3], # sample odd dimensions
mean=0.0,
std=1.0,
)
for device_option in dc:
op.device_option.CopyFrom(device_option)
        assert workspace.RunOperatorOnce(op), \
            "GaussianFill op did not run successfully"
blob_out = workspace.FetchBlob('out')
        assert np.count_nonzero(blob_out) > 0, \
            "All generated elements are zeros. Is the random generator functioning correctly?"
@given(**hu.gcs)
def test_msra_fill_op(self, gc, dc):
op = core.CreateOperator(
'MSRAFill',
[],
'out',
shape=[15, 5, 3], # sample odd dimensions
)
for device_option in dc:
op.device_option.CopyFrom(device_option)
        assert workspace.RunOperatorOnce(op), \
            "MSRAFill op did not run successfully"
blob_out = workspace.FetchBlob('out')
        assert np.count_nonzero(blob_out) > 0, \
            "All generated elements are zeros. Is the random generator functioning correctly?"
if __name__ == "__main__":
import unittest
unittest.main()
|
{
"content_hash": "ea4ed2bb21c6a4bc5230d18f647ae867",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 80,
"avg_line_length": 29.341836734693878,
"alnum_prop": 0.5348635019996523,
"repo_name": "davinwang/caffe2",
"id": "4673574f4a63c21231d8b6757963fe67f83f7aaa",
"size": "6422",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "caffe2/python/operator_test/filler_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5415"
},
{
"name": "C",
"bytes": "315653"
},
{
"name": "C++",
"bytes": "4667189"
},
{
"name": "CMake",
"bytes": "137169"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "664949"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "1225"
},
{
"name": "Metal",
"bytes": "36752"
},
{
"name": "Objective-C",
"bytes": "7159"
},
{
"name": "Objective-C++",
"bytes": "239139"
},
{
"name": "Python",
"bytes": "2845652"
},
{
"name": "Shell",
"bytes": "25992"
}
],
"symlink_target": ""
}
|
from marshmallow import fields, Schema
class ExplorePermalinkPostSchema(Schema):
formData = fields.Dict(
required=True,
allow_none=False,
description="Chart form data",
)
urlParams = fields.List(
fields.Tuple(
(
fields.String(required=True, allow_none=True, description="Key"),
fields.String(required=True, allow_none=True, description="Value"),
),
required=False,
allow_none=True,
description="URL Parameter key-value pair",
),
required=False,
allow_none=True,
description="URL Parameters",
)
|
{
"content_hash": "a5f97fd54ecc2000108de8d8512c9be7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 29.043478260869566,
"alnum_prop": 0.5733532934131736,
"repo_name": "airbnb/caravel",
"id": "e1f9d069b853f4d7a7c2fecb04cb3a4b0bcdb69b",
"size": "1453",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "superset/explore/permalink/schemas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57416"
},
{
"name": "HTML",
"bytes": "112618"
},
{
"name": "JavaScript",
"bytes": "406496"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "588212"
},
{
"name": "Shell",
"bytes": "980"
}
],
"symlink_target": ""
}
|
import base64
try:
import cPickle as pickle
except ImportError:
import pickle
from datetime import date, datetime, timedelta
from django.core.mail import send_mail
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models import Q
from django_extensions.db.fields import UUIDField
from activity import utils
# TODO look for fields with built-in serialization support
# TODO add some decorator/function to auto add activity to form
# TODO add proxy model
class ActivityQuerySet(models.query.QuerySet):
def mark_for_update(self):
return self.update(data_for_template_cached=None)
def for_user(self, user):
return self.filter(Q(public=True) | Q(to_user=user))
def by_user(self, user):
return self.filter(user=user)
def by_object(self, obj, activity_class, content_type=None, num=''):
if not content_type:
content_type = ContentType.objects.get_for_model(activity_class)
return self.filter(**{
'content_type': content_type,
'obj%s_id' % str(num): obj.pk
})
def by_type(self, activity_type):
content_type = ContentType.objects.get(model=activity_type)
return self.filter(content_type=content_type)
def send_by_email(
self, email, template_name='activity/activity_email.txt',
subject=_("New activity on site"), **kwargs
):
'''Send activity items in queryset to given email'''
data = kwargs
data.update({'email': email, 'activity': self})
body = render_to_string(template_name, data)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email])
class ActivityManager(models.Manager):
"""Contain extra difficult queries"""
def get_query_set(self):
return ActivityQuerySet(self.model)
def __getattr__(self, attr, *args):
try:
return getattr(self.__class__, attr, *args)
except AttributeError:
return getattr(self.get_query_set(), attr, *args)
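    # The __getattr__ proxy above makes the chainable queryset helpers
    # available straight off the manager, e.g. (hypothetical usage):
    #   Activity.objects.by_user(author).for_user(viewer).mark_for_update()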
class Activity(models.Model):
"""Store user activity in different apps. Like Facebook"""
NONE = 0
ADD = 1
REMOVE = 2
ACTION_CHOICES = (
(NONE, _('none')),
(ADD, _('added')),
(REMOVE, _('removed')),
)
id = UUIDField(primary_key=True)
user = models.ForeignKey(User, related_name="activity")
time = models.DateTimeField(blank=False, null=False, auto_now_add=True)
public = models.BooleanField(default=True)
# if this field is set, activity feed will be shown only to this user
to_user = models.ForeignKey(
User, blank=True, null=True, related_name="activity_for_user"
)
action = models.IntegerField(blank=False, null=False)
# Need to make effective future grouping by object
obj_id = models.CharField(blank=True, null=True, max_length=40)
obj2_id = models.CharField(blank=True, null=True, max_length=40)
obj3_id = models.CharField(blank=True, null=True, max_length=40)
obj4_id = models.CharField(blank=True, null=True, max_length=40)
obj5_id = models.CharField(blank=True, null=True, max_length=40)
content_type = models.ForeignKey(ContentType)
data_for_template_cached = models.TextField(blank=True, null=True)
objects = ActivityManager()
def render_action(self):
return dict(self.ACTION_CHOICES)[self.action]
def save(self, force_insert=False, force_update=False, *args, **kwargs):
if not force_update and self.__class__.__name__ != "Activity":
self.content_type = ContentType.objects.get_for_model(self)
return super(Activity, self).save(
force_insert, force_update, *args, **kwargs
)
def get_or_create_data_for_template(self):
if not self.data_for_template_cached:
current_type_model_name = self.content_type.model
pickled = pickle.dumps(
getattr(self, current_type_model_name).data_for_template(self),
protocol=pickle.HIGHEST_PROTOCOL
)
self.data_for_template_cached = base64.encodestring(pickled)
self.save(force_update=True)
return pickle.loads(base64.decodestring(self.data_for_template_cached))
def data_for_template(self, activity):
return {'activity': self}
def render(self, content_type=".html"):
"""Render current activity """
current_type_model_name = self.content_type.model
current_type_model_class = self.content_type.model_class()
return hasattr(current_type_model_class, 'render_html') \
and getattr(self, current_type_model_name).render_html() \
or render_to_string(
"activity/%s%s" % (current_type_model_name, content_type),
self.get_or_create_data_for_template()
)
def render_email(self):
return self.render('_email.txt').strip(' \n')
class Meta:
ordering = ('-time',)
verbose_name, verbose_name_plural = "activity", "activity"
def __unicode__(self):
return u"Activity"
def mark_for_update(self):
self.data_for_template_cached = None
self.save()
@property
def pretty_date(self):
today = date.today()
if self.time.date() == today:
return _('Today')
elif self.time.date() == today - timedelta(days=1):
return _('Yesterday')
else:
return False
class NotifySettings(models.Model):
"""Activity notification settings for each user"""
HOUR = 60 * 60
HOUR6 = 60 * 60 * 6
HOUR12 = 60 * 60 * 12
DAY = 60 * 60 * 24
WEEK = 60 * 60 * 24 * 7
FREQUENCY_CHOICES = (
(HOUR, _('every hour')),
(HOUR6, _('4 times per day')),
        (HOUR12, _('2 times per day')),
(DAY, _('every day')),
(WEEK, _('every week')),
)
id = UUIDField(primary_key=True)
user = models.OneToOneField(User, related_name="notify_settings")
frequency = models.IntegerField(
choices=FREQUENCY_CHOICES, default=DAY, verbose_name=_('frequency')
)
immediately = models.ManyToManyField(ContentType, blank=True, null=True)
last_sended = models.DateTimeField(blank=True, null=True)
class Meta:
ordering = ['user']
def __unicode__(self):
return u"%s's notify settings" % self.user
def can_send(self, send_time=None):
        '''Check whether a notification can be sent to the user yet.'''
if not self.last_sended:
return True
if not send_time:
send_time = datetime.now()
return self.last_sended + timedelta(seconds=self.frequency) < send_time
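    # e.g. with the default DAY frequency, a user last notified at
    # 2016-01-01 09:00 becomes eligible again from 2016-01-02 09:00
    # onwards (hypothetical timestamps).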
@receiver(
post_save, sender=User,
dispatch_uid="activities.update_activity_with_updated_user_data"
)
def update_activity_with_updated_user_data(sender, instance, **kwargs):
Activity.objects.by_user(instance).mark_for_update()
@receiver(
post_save, sender=User,
dispatch_uid='activities.attach_notify_settings_to_user'
)
def attach_notify_settings_to_user(sender, instance, created, **kwargs):
if created:
# TODO add ability to customize default immediately settings
notify_settings = NotifySettings(user=instance)
notify_settings.save()
utils.autodiscover()
|
{
"content_hash": "bcaaec320b37fed38479b135ec315525",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 79,
"avg_line_length": 33.458149779735685,
"alnum_prop": 0.6422646477946017,
"repo_name": "django-stars/dash2011",
"id": "f013d8254862b6c32ac729d86b4df1c5dd6b6604",
"size": "7595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presence/apps/activity/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "4760"
},
{
"name": "Python",
"bytes": "62786"
}
],
"symlink_target": ""
}
|
import os
import logging
import datetime
import math
import re
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlencode
import aniso8601
from flask import Flask, json, render_template
from flask_ask import Ask, request, session, question, statement
ENDPOINT = "https://tidesandcurrents.noaa.gov/api/datagetter"
SESSION_CITY = "city"
SESSION_DATE = "date"
# NOAA station codes
STATION_CODE_SEATTLE = "9447130"
STATION_CODE_SAN_FRANCISCO = "9414290"
STATION_CODE_MONTEREY = "9413450"
STATION_CODE_LOS_ANGELES = "9410660"
STATION_CODE_SAN_DIEGO = "9410170"
STATION_CODE_BOSTON = "8443970"
STATION_CODE_NEW_YORK = "8518750"
STATION_CODE_VIRGINIA_BEACH = "8638863"
STATION_CODE_WILMINGTON = "8658163"
STATION_CODE_CHARLESTON = "8665530"
STATION_CODE_BEAUFORT = "8656483"
STATION_CODE_MYRTLE_BEACH = "8661070"
STATION_CODE_MIAMI = "8723214"
STATION_CODE_TAMPA = "8726667"
STATION_CODE_NEW_ORLEANS = "8761927"
STATION_CODE_GALVESTON = "8771341"
STATIONS = {}
STATIONS["seattle"] = STATION_CODE_SEATTLE
STATIONS["san francisco"] = STATION_CODE_SAN_FRANCISCO
STATIONS["monterey"] = STATION_CODE_MONTEREY
STATIONS["los angeles"] = STATION_CODE_LOS_ANGELES
STATIONS["san diego"] = STATION_CODE_SAN_DIEGO
STATIONS["boston"] = STATION_CODE_BOSTON
STATIONS["new york"] = STATION_CODE_NEW_YORK
STATIONS["virginia beach"] = STATION_CODE_VIRGINIA_BEACH
STATIONS["wilmington"] = STATION_CODE_WILMINGTON
STATIONS["charleston"] = STATION_CODE_CHARLESTON
STATIONS["beaufort"] = STATION_CODE_BEAUFORT
STATIONS["myrtle beach"] = STATION_CODE_MYRTLE_BEACH
STATIONS["miami"] = STATION_CODE_MIAMI
STATIONS["tampa"] = STATION_CODE_TAMPA
STATIONS["new orleans"] = STATION_CODE_NEW_ORLEANS
STATIONS["galveston"] = STATION_CODE_GALVESTON
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
class TideInfo(object):
def __init__(self):
self.first_high_tide_time = None
self.first_high_tide_height = None
self.low_tide_time = None
self.low_tide_height = None
self.second_high_tide_time = None
self.second_high_tide_height = None
@ask.launch
def launch():
welcome_text = render_template('welcome')
help_text = render_template('help')
return question(welcome_text).reprompt(help_text)
@ask.intent('OneshotTideIntent',
mapping={'city': 'City', 'date': 'Date'},
convert={'date': 'date'},
            default={'city': 'seattle', 'date': datetime.date.today})
def one_shot_tide(city, date):
if city.lower() not in STATIONS:
return supported_cities()
return _make_tide_request(city, date)
@ask.intent('DialogTideIntent',
mapping={'city': 'City', 'date': 'Date'},
convert={'date': 'date'})
def dialog_tide(city, date):
if city is not None:
if city.lower() not in STATIONS:
return supported_cities()
if SESSION_DATE not in session.attributes:
session.attributes[SESSION_CITY] = city
return _dialog_date(city)
date = aniso8601.parse_date(session.attributes[SESSION_DATE])
return _make_tide_request(city, date)
elif date is not None:
if SESSION_CITY not in session.attributes:
session.attributes[SESSION_DATE] = date.isoformat()
return _dialog_city(date)
city = session.attributes[SESSION_CITY]
return _make_tide_request(city, date)
else:
return _dialog_no_slot()
@ask.intent('SupportedCitiesIntent')
def supported_cities():
cities = ", ".join(sorted(STATIONS.keys()))
list_cities_text = render_template('list_cities', cities=cities)
list_cities_reprompt_text = render_template('list_cities_reprompt')
return question(list_cities_text).reprompt(list_cities_reprompt_text)
@ask.intent('AMAZON.HelpIntent')
def help():
help_text = render_template('help')
list_cities_reprompt_text = render_template('list_cities_reprompt')
return question(help_text).reprompt(list_cities_reprompt_text)
@ask.intent('AMAZON.StopIntent')
def stop():
bye_text = render_template('bye')
return statement(bye_text)
@ask.intent('AMAZON.CancelIntent')
def cancel():
bye_text = render_template('bye')
return statement(bye_text)
@ask.session_ended
def session_ended():
return "{}", 200
@app.template_filter()
def humanize_date(dt):
# http://stackoverflow.com/a/20007730/1163855
    ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
month_and_day_of_week = dt.strftime('%A %B')
day_of_month = ordinal(dt.day)
year = dt.year if dt.year != datetime.datetime.now().year else ""
formatted_date = "{} {} {}".format(month_and_day_of_week, day_of_month, year)
    formatted_date = re.sub(r'\s+', ' ', formatted_date)
return formatted_date
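# A quick illustration of the filter above (2016-07-04 fell on a Monday;
# the year appears only because it is assumed to differ from the current one):
# humanize_date(datetime.date(2016, 7, 4)) -> "Monday July 4th 2016"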
@app.template_filter()
def humanize_time(dt):
morning_threshold = 12
afternoon_threshold = 17
evening_threshold = 20
hour_24 = dt.hour
if hour_24 < morning_threshold:
period_of_day = "in the morning"
elif hour_24 < afternoon_threshold:
period_of_day = "in the afternoon"
elif hour_24 < evening_threshold:
period_of_day = "in the evening"
else:
period_of_day = " at night"
the_time = dt.strftime('%I:%M')
formatted_time = "{} {}".format(the_time, period_of_day)
return formatted_time
@app.template_filter()
def humanize_height(height):
round_down_threshold = 0.25
round_to_half_threshold = 0.75
is_negative = False
if height < 0:
height = abs(height)
is_negative = True
remainder = height % 1
if remainder < round_down_threshold:
remainder_text = ""
feet = int(math.floor(height))
elif remainder < round_to_half_threshold:
remainder_text = "and a half"
feet = int(math.floor(height))
    else:
        remainder_text = ""
        feet = int(math.ceil(height))
if is_negative:
feet *= -1
formatted_height = "{} {} feet".format(feet, remainder_text)
    formatted_height = re.sub(r'\s+', ' ', formatted_height)
return formatted_height
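# Hypothetical spot checks of the rounding rules above:
# humanize_height(3.1) -> "3 feet" (remainder < 0.25 rounds down)
# humanize_height(3.5) -> "3 and a half feet"
# humanize_height(3.8) -> "4 feet" (remainder >= 0.75 rounds up)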
def _dialog_no_slot():
if SESSION_CITY in session.attributes:
date_dialog2_text = render_template('date_dialog2')
return question(date_dialog2_text).reprompt(date_dialog2_text)
else:
return supported_cities()
def _dialog_date(city):
date_dialog_text = render_template('date_dialog', city=city)
date_dialog_reprompt_text = render_template('date_dialog_reprompt')
return question(date_dialog_text).reprompt(date_dialog_reprompt_text)
def _dialog_city(date):
session.attributes[SESSION_DATE] = date
session.attributes_encoder = _json_date_handler
city_dialog_text = render_template('city_dialog', date=date)
city_dialog_reprompt_text = render_template('city_dialog_reprompt')
return question(city_dialog_text).reprompt(city_dialog_reprompt_text)
def _json_date_handler(obj):
if isinstance(obj, datetime.date):
return obj.isoformat()
def _make_tide_request(city, date):
station = STATIONS.get(city.lower())
noaa_api_params = {
'station': station,
'product': 'predictions',
'datum': 'MLLW',
'units': 'english',
'time_zone': 'lst_ldt',
'format': 'json'
}
if date == datetime.date.today():
noaa_api_params['date'] = 'today'
else:
noaa_api_params['begin_date'] = date.strftime('%Y%m%d')
noaa_api_params['range'] = 24
url = ENDPOINT + "?" + urlencode(noaa_api_params)
resp_body = urlopen(url).read()
if len(resp_body) == 0:
statement_text = render_template('noaa_problem')
else:
noaa_response_obj = json.loads(resp_body)
predictions = noaa_response_obj['predictions']
tideinfo = _find_tide_info(predictions)
statement_text = render_template('tide_info', date=date, city=city, tideinfo=tideinfo)
return statement(statement_text).simple_card("Tide Pooler", statement_text)
def _find_tide_info(predictions):
"""
Algorithm to find the 2 high tides for the day, the first of which is smaller and occurs
mid-day, the second of which is larger and typically in the evening.
"""
last_prediction = None
first_high_tide = None
second_high_tide = None
low_tide = None
first_tide_done = False
for prediction in predictions:
if last_prediction is None:
last_prediction = prediction
continue
        if float(last_prediction['v']) < float(prediction['v']):
if not first_tide_done:
first_high_tide = prediction
else:
second_high_tide = prediction
else: # we're decreasing
if not first_tide_done and first_high_tide is not None:
first_tide_done = True
elif second_high_tide is not None:
break # we're decreasing after having found the 2nd tide. We're done.
if first_tide_done:
low_tide = prediction
last_prediction = prediction
fmt = '%Y-%m-%d %H:%M'
parse = datetime.datetime.strptime
tideinfo = TideInfo()
tideinfo.first_high_tide_time = parse(first_high_tide['t'], fmt)
tideinfo.first_high_tide_height = float(first_high_tide['v'])
tideinfo.second_high_tide_time = parse(second_high_tide['t'], fmt)
tideinfo.second_high_tide_height = float(second_high_tide['v'])
tideinfo.low_tide_time = parse(low_tide['t'], fmt)
tideinfo.low_tide_height = float(low_tide['v'])
return tideinfo
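# The `predictions` argument mirrors the NOAA datagetter payload, roughly
# (values hypothetical; note 't' and 'v' arrive as strings):
# [{"t": "2016-07-04 04:30", "v": "2.341"},
#  {"t": "2016-07-04 04:36", "v": "2.356"}, ...]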
if __name__ == '__main__':
if 'ASK_VERIFY_REQUESTS' in os.environ:
verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
if verify == 'false':
app.config['ASK_VERIFY_REQUESTS'] = False
app.run(debug=True)
|
{
"content_hash": "bc86291ca89807c666130c83eb89a4d3",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 94,
"avg_line_length": 32.946308724832214,
"alnum_prop": 0.6561417804033408,
"repo_name": "johnwheeler/flask-ask",
"id": "6c54fdaf701a4bddb0829b7949b9fb184b503b06",
"size": "9818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/tidepooler/tidepooler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "82356"
}
],
"symlink_target": ""
}
|
"""Switch implementation for Wireless Sensor Tags (wirelesstag.net)."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant.components.switch import (
PLATFORM_SCHEMA,
SwitchEntity,
SwitchEntityDescription,
)
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DOMAIN as WIRELESSTAG_DOMAIN, WirelessTagBaseSensor
SWITCH_TYPES: tuple[SwitchEntityDescription, ...] = (
SwitchEntityDescription(
key="temperature",
name="Arm Temperature",
),
SwitchEntityDescription(
key="humidity",
name="Arm Humidity",
),
SwitchEntityDescription(
key="motion",
name="Arm Motion",
),
SwitchEntityDescription(
key="light",
name="Arm Light",
),
SwitchEntityDescription(
key="moisture",
name="Arm Moisture",
),
)
SWITCH_KEYS: list[str] = [desc.key for desc in SWITCH_TYPES]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SWITCH_KEYS)]
)
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up switches for a Wireless Sensor Tags."""
platform = hass.data[WIRELESSTAG_DOMAIN]
tags = platform.load_tags()
monitored_conditions = config[CONF_MONITORED_CONDITIONS]
entities = [
WirelessTagSwitch(platform, tag, description)
for tag in tags.values()
for description in SWITCH_TYPES
if description.key in monitored_conditions
and description.key in tag.allowed_monitoring_types
]
add_entities(entities, True)
class WirelessTagSwitch(WirelessTagBaseSensor, SwitchEntity):
"""A switch implementation for Wireless Sensor Tags."""
def __init__(self, api, tag, description: SwitchEntityDescription):
"""Initialize a switch for Wireless Sensor Tag."""
super().__init__(api, tag)
self.entity_description = description
self._name = f"{self._tag.name} {description.name}"
def turn_on(self, **kwargs: Any) -> None:
"""Turn on the switch."""
self._api.arm(self)
def turn_off(self, **kwargs: Any) -> None:
"""Turn on the switch."""
self._api.disarm(self)
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._state
def updated_state_value(self):
"""Provide formatted value."""
return self.principal_value
@property
def principal_value(self):
"""Provide actual value of switch."""
attr_name = f"is_{self.entity_description.key}_sensor_armed"
return getattr(self._tag, attr_name, False)
|
{
"content_hash": "d4b8f937f4703cc9df32dbdcbb97e2bb",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 71,
"avg_line_length": 29.009345794392523,
"alnum_prop": 0.6630154639175257,
"repo_name": "w1ll1am23/home-assistant",
"id": "9829cffd2b585063bb27f817e41a59c474653359",
"size": "3104",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/wirelesstag/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import datetime
from collections import OrderedDict
from litmos.litmos import LitmosType
from litmos.api import API
from litmos.course_module import CourseModule
class Course(LitmosType):
SCHEMA = OrderedDict([
('Id', ''),
('Name', ''),
('Description', ''),
('Code', ''),
('Active', ''),
('ForSale', ''),
('OriginalId', ''),
('EcommerceShortDescription', ''),
('EcommerceLongDescription', ''),
('CourseCodeForBulkImport', ''),
('Price', ''),
('AccessTillDate', ''),
('AccessTillDays', '')
])
def modules(self):
return CourseModule._parse_response(
API.get_sub_resource(
self.__class__.name(),
self.Id,
'modules'
)
)
def module_complete(self, module_id, attributes):
attributes['CourseId'] = self.Id
iso_8601_date = attributes['UpdatedAt']
updated_at_datetime = datetime.datetime.strptime(iso_8601_date, '%Y-%m-%dT%H:%M:%S.%fZ')
epoch_datetime = int(updated_at_datetime.timestamp() * 1000)
attributes['UpdatedAt'] = "/Date({0})/".format(epoch_datetime)
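        # e.g. a hypothetical "2017-03-01T09:30:00.000Z" becomes
        # "/Date(1488360600000)/" when the local timezone is UTC -- the
        # legacy ASP.NET JSON date format (note that .timestamp() interprets
        # the parsed naive datetime in local time).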
return API.update_sub_resource(
'results',
None,
'modules',
module_id,
attributes
)
|
{
"content_hash": "9d6f1cae800015063d0fdf2f968e3781",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 96,
"avg_line_length": 26.346153846153847,
"alnum_prop": 0.5255474452554745,
"repo_name": "charliequinn/python-litmos-api",
"id": "b0f7369fd72a71cb9e050a3ada5a9b82fd69a176",
"size": "1370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/litmos/course.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "55750"
}
],
"symlink_target": ""
}
|
import rpc
import objects
from oslo_service import service
from oslo_config import cfg
from oslo_log import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Service(service.Service):
def __init__(self, manager, rpc_api):
super(Service, self).__init__()
LOG.info('init service')
# policy.init()
# rpc.init()
# self.host
self.manager = manager
self.timers = []
self.rpc_endpoints = [rpc_api]
serializer = objects.FabObjectSerializer()
self.rpc_server = rpc.get_server(rpc_api.target, self.rpc_endpoints, serializer)
def start(self, *args, **kwargs):
super(Service, self).start()
LOG.info('start service')
self.rpc_server.start()
self.tg.add_dynamic_timer(self.periodic_tasks,
initial_delay=0,
periodic_interval_max=120)
def periodic_tasks(self, raise_on_error=False):
ctxt = {}
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def wait(self):
LOG.info('wait service')
def stop(self):
LOG.info('stop service')
super(Service, self).stop()
try:
            self.rpc_server.stop()
            self.rpc_server.wait()
except Exception:
pass
|
{
"content_hash": "88e244add89bbf2723ca3d32dc9f1473",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 88,
"avg_line_length": 25.62264150943396,
"alnum_prop": 0.5773195876288659,
"repo_name": "fabrickit/fabkit",
"id": "456816d8618d58f716fa6dca4b1340c8270d6749",
"size": "1375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/agent/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4979"
},
{
"name": "CoffeeScript",
"bytes": "65442"
},
{
"name": "HTML",
"bytes": "40630"
},
{
"name": "JavaScript",
"bytes": "2315"
},
{
"name": "Mako",
"bytes": "988"
},
{
"name": "Python",
"bytes": "256382"
},
{
"name": "Shell",
"bytes": "2697"
}
],
"symlink_target": ""
}
|
import os
import re
import string
import requests
import numpy as np
import collections
import random
import pickle
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a session
sess = tf.Session()
# Set RNN Parameters
min_word_freq = 5 # Trim the less frequent words off
rnn_size = 128 # RNN Model size, has to equal embedding size
epochs = 10 # Number of epochs to cycle through data
batch_size = 100 # Train on this many examples at once
learning_rate = 0.001 # Learning rate
training_seq_len = 50 # how long of a word group to consider
embedding_size = rnn_size
save_every = 500 # How often to save model checkpoints
eval_every = 50 # How often to evaluate the test sentences
prime_texts = ['thou art more', 'to be or not to', 'wherefore art thou']
# Download/store Shakespeare data
data_dir = 'temp'
data_file = 'shakespeare.txt'
model_path = 'shakespeare_model'
full_model_dir = os.path.join(data_dir, model_path)
# Declare punctuation to remove, everything except hyphens and apostrophes
punctuation = string.punctuation
punctuation = ''.join([x for x in punctuation if x not in ['-', "'"]])
# Make Model Directory
if not os.path.exists(full_model_dir):
os.makedirs(full_model_dir)
# Make data directory
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print('Loading Shakespeare Data')
# Check if file is downloaded.
if not os.path.isfile(os.path.join(data_dir, data_file)):
print('Not found, downloading Shakespeare texts from www.gutenberg.org')
shakespeare_url = 'http://www.gutenberg.org/cache/epub/100/pg100.txt'
# Get Shakespeare text
response = requests.get(shakespeare_url)
shakespeare_file = response.content
# Decode binary into string
s_text = shakespeare_file.decode('utf-8')
# Drop first few descriptive paragraphs.
s_text = s_text[7675:]
# Remove newlines
s_text = s_text.replace('\r\n', '')
s_text = s_text.replace('\n', '')
# Write to file
with open(os.path.join(data_dir, data_file), 'w') as out_conn:
out_conn.write(s_text)
else:
# If file has been saved, load from that file
with open(os.path.join(data_dir, data_file), 'r') as file_conn:
s_text = file_conn.read().replace('\n', '')
# Clean text
print('Cleaning Text')
s_text = re.sub(r'[{}]'.format(punctuation), ' ', s_text)
s_text = re.sub(r'\s+', ' ', s_text).strip().lower()
# Build word vocabulary function
def build_vocab(text, min_word_freq):
word_counts = collections.Counter(text.split(' '))
# limit word counts to those more frequent than cutoff
word_counts = {key:val for key, val in word_counts.items() if val>min_word_freq}
# Create vocab --> index mapping
words = word_counts.keys()
vocab_to_ix_dict = {key:(ix+1) for ix, key in enumerate(words)}
# Add unknown key --> 0 index
vocab_to_ix_dict['unknown']=0
# Create index --> vocab mapping
ix_to_vocab_dict = {val:key for key,val in vocab_to_ix_dict.items()}
return(ix_to_vocab_dict, vocab_to_ix_dict)
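# A tiny illustration of build_vocab (hypothetical input):
# build_vocab('to be or not to be', 0) maps each of {'to', 'be', 'or', 'not'}
# to a nonzero index and reserves index 0 for 'unknown'.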
# Build Shakespeare vocabulary
print('Building Shakespeare Vocab')
ix2vocab, vocab2ix = build_vocab(s_text, min_word_freq)
vocab_size = len(ix2vocab) + 1
print('Vocabulary Length = {}'.format(vocab_size))
# Sanity Check
assert(len(ix2vocab) == len(vocab2ix))
# Convert text to word vectors
s_text_words = s_text.split(' ')
s_text_ix = []
for x in s_text_words:
    try:
        s_text_ix.append(vocab2ix[x])
    except KeyError:
        # words trimmed by the frequency cutoff map to the 'unknown' index
        s_text_ix.append(0)
s_text_ix = np.array(s_text_ix)
# Define LSTM RNN Model
class LSTM_Model():
def __init__(self, rnn_size, batch_size, learning_rate,
training_seq_len, vocab_size, infer_sample=False):
self.rnn_size = rnn_size
self.vocab_size = vocab_size
self.infer_sample = infer_sample
self.learning_rate = learning_rate
if infer_sample:
self.batch_size = 1
self.training_seq_len = 1
else:
self.batch_size = batch_size
self.training_seq_len = training_seq_len
self.lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
self.initial_state = self.lstm_cell.zero_state(self.batch_size, tf.float32)
self.x_data = tf.placeholder(tf.int32, [self.batch_size, self.training_seq_len])
self.y_output = tf.placeholder(tf.int32, [self.batch_size, self.training_seq_len])
with tf.variable_scope('lstm_vars'):
# Softmax Output Weights
W = tf.get_variable('W', [self.rnn_size, self.vocab_size], tf.float32, tf.random_normal_initializer())
b = tf.get_variable('b', [self.vocab_size], tf.float32, tf.constant_initializer(0.0))
# Define Embedding
embedding_mat = tf.get_variable('embedding_mat', [self.vocab_size, self.rnn_size],
tf.float32, tf.random_normal_initializer())
embedding_output = tf.nn.embedding_lookup(embedding_mat, self.x_data)
rnn_inputs = tf.split(1, self.training_seq_len, embedding_output)
rnn_inputs_trimmed = [tf.squeeze(x, [1]) for x in rnn_inputs]
# If we are inferring (generating text), we add a 'loop' function
# Define how to get the i+1 th input from the i th output
def inferred_loop(prev, count):
# Apply hidden layer
prev_transformed = tf.matmul(prev, W) + b
# Get the index of the output (also don't run the gradient)
prev_symbol = tf.stop_gradient(tf.argmax(prev_transformed, 1))
# Get embedded vector
output = tf.nn.embedding_lookup(embedding_mat, prev_symbol)
return(output)
decoder = tf.nn.seq2seq.rnn_decoder
outputs, last_state = decoder(rnn_inputs_trimmed,
self.initial_state,
self.lstm_cell,
loop_function=inferred_loop if infer_sample else None)
# Non inferred outputs
output = tf.reshape(tf.concat(1, outputs), [-1, self.rnn_size])
# Logits and output
self.logit_output = tf.matmul(output, W) + b
self.model_output = tf.nn.softmax(self.logit_output)
loss_fun = tf.nn.seq2seq.sequence_loss_by_example
loss = loss_fun([self.logit_output],[tf.reshape(self.y_output, [-1])],
[tf.ones([self.batch_size * self.training_seq_len])],
self.vocab_size)
self.cost = tf.reduce_sum(loss) / (self.batch_size * self.training_seq_len)
self.final_state = last_state
gradients, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tf.trainable_variables()), 4.5)
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.train_op = optimizer.apply_gradients(zip(gradients, tf.trainable_variables()))
def sample(self, sess, words=ix2vocab, vocab=vocab2ix, num=10, prime_text='thou art'):
state = sess.run(self.lstm_cell.zero_state(1, tf.float32))
word_list = prime_text.split()
for word in word_list[:-1]:
x = np.zeros((1, 1))
x[0, 0] = vocab[word]
feed_dict = {self.x_data: x, self.initial_state:state}
[state] = sess.run([self.final_state], feed_dict=feed_dict)
out_sentence = prime_text
word = word_list[-1]
for n in range(num):
x = np.zeros((1, 1))
x[0, 0] = vocab[word]
feed_dict = {self.x_data: x, self.initial_state:state}
[model_output, state] = sess.run([self.model_output, self.final_state], feed_dict=feed_dict)
sample = np.argmax(model_output[0])
if sample == 0:
break
word = words[sample]
out_sentence = out_sentence + ' ' + word
return(out_sentence)
with tf.variable_scope('lstm_model') as scope:
# Define LSTM Model
lstm_model = LSTM_Model(rnn_size, batch_size, learning_rate,
training_seq_len, vocab_size)
scope.reuse_variables()
test_lstm_model = LSTM_Model(rnn_size, batch_size, learning_rate,
training_seq_len, vocab_size, infer_sample=True)
# Create model saver
saver = tf.train.Saver(tf.all_variables())
# Create batches for each epoch
num_batches = int(len(s_text_ix)/(batch_size * training_seq_len)) + 1
# Split up text indices into subarrays, of equal size
batches = np.array_split(s_text_ix, num_batches)
# Reshape each split into [batch_size, training_seq_len]
batches = [np.resize(x, [batch_size, training_seq_len]) for x in batches]
# Initialize all variables
init = tf.initialize_all_variables()
sess.run(init)
# Train model
train_loss = []
iteration_count = 1
for epoch in range(epochs):
# Shuffle word indices
random.shuffle(batches)
# Create targets from shuffled batches
targets = [np.roll(x, -1, axis=1) for x in batches]
# Run a through one epoch
print('Starting Epoch #{} of {}.'.format(epoch+1, epochs))
# Reset initial LSTM state every epoch
state = sess.run(lstm_model.initial_state)
for ix, batch in enumerate(batches):
training_dict = {lstm_model.x_data: batch, lstm_model.y_output: targets[ix]}
c, h = lstm_model.initial_state
training_dict[c] = state.c
training_dict[h] = state.h
temp_loss, state, _ = sess.run([lstm_model.cost, lstm_model.final_state, lstm_model.train_op],
feed_dict=training_dict)
train_loss.append(temp_loss)
# Print status every 10 gens
if iteration_count % 10 == 0:
summary_nums = (iteration_count, epoch+1, ix+1, num_batches+1, temp_loss)
print('Iteration: {}, Epoch: {}, Batch: {} out of {}, Loss: {:.2f}'.format(*summary_nums))
# Save the model and the vocab
if iteration_count % save_every == 0:
# Save model
model_file_name = os.path.join(full_model_dir, 'model')
saver.save(sess, model_file_name, global_step = iteration_count)
print('Model Saved To: {}'.format(model_file_name))
# Save vocabulary
dictionary_file = os.path.join(full_model_dir, 'vocab.pkl')
with open(dictionary_file, 'wb') as dict_file_conn:
pickle.dump([vocab2ix, ix2vocab], dict_file_conn)
if iteration_count % eval_every == 0:
for sample in prime_texts:
print(test_lstm_model.sample(sess, ix2vocab, vocab2ix, num=10, prime_text=sample))
iteration_count += 1
# Plot loss over time
plt.plot(train_loss, 'k-')
plt.title('Sequence to Sequence Loss')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
|
{
"content_hash": "f2f9ed91bc4113668904d85fdadf68c5",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 114,
"avg_line_length": 39.96715328467153,
"alnum_prop": 0.6194868048580038,
"repo_name": "benjaminoh1/tensorflowcookbook",
"id": "62ef2cd901a1ba11d4edb0e59899c0227ac0fddf",
"size": "11123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Chapter 09/implementing_lstm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "313397"
}
],
"symlink_target": ""
}
|
"""
Schema file for retrieving all the contents of the `editor` models.
"""
import graphene
from django.conf import settings
from graphene_django.debug import DjangoDebug
from graphene_django.types import DjangoObjectType
from gollahalli_cms.editor.models import ContentModel, EducationModel, ProjectsModel, TutorialsModel, ExperienceModel, SkillsModel, \
SkillsContentModel, PublicationsModel, PublicationsContentModel, MetaContentModel
class ContentSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `ContentModel`
"""
class Meta:
model = ContentModel
description = "Main content schema."
def resolve_cv(self, info, **kwargs):
"""
Resolves CV URL
Parameters
----------
args
context
info
"""
if self.cv and hasattr(self.cv, 'url'):
return settings.SHARE_URL[:-1] + self.cv.url
def resolve_file(self, info, **kwargs):
"""
Resolves file URL
Parameters
----------
self
args
context
info
"""
if self.file and hasattr(self.file, 'url'):
return settings.SHARE_URL[:-1] + self.file.url
def resolve_image(self, info, **kwargs):
"""
Resolves image URL
Parameters
----------
args
context
info
"""
if self.image and hasattr(self.image, 'url'):
            return settings.SHARE_URL[:-1] + self.image.url
class EducationSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `EducationModel`
"""
class Meta:
model = EducationModel
description = "Education schema"
def resolve_file(self, info, **kwargs):
"""
Resolves file URL
Parameters
----------
self
args
context
info
"""
if self.file and hasattr(self.file, 'url'):
return settings.SHARE_URL[:-1] + self.file.url
def resolve_image(self, info, **kwargs):
"""
Resolves image URL
Parameters
----------
args
context
info
"""
if self.image and hasattr(self.image, 'url'):
            return settings.SHARE_URL[:-1] + self.image.url
class ProjectsSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `ProjectsModel`
"""
class Meta:
model = ProjectsModel
description = "Number of projects"
def resolve_file(self, info, **kwargs):
"""
Resolves file URL
Parameters
----------
self
args
context
info
"""
if self.file and hasattr(self.file, 'url'):
return settings.SHARE_URL[:-1] + self.file.url
def resolve_image(self, info, **kwargs):
"""
Resolves image URL
Parameters
----------
args
context
info
"""
if self.image and hasattr(self.image, 'url'):
            return settings.SHARE_URL[:-1] + self.image.url
class TutorialsSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `TutorialsModel`
"""
class Meta:
model = TutorialsModel
description = "All tutorial developed"
def resolve_file(self, info, **kwargs):
"""
Resolves file URL
Parameters
----------
self
args
context
info
"""
if self.file and hasattr(self.file, 'url'):
return settings.SHARE_URL[:-1] + self.file.url
def resolve_image(self, info, **kwargs):
"""
Resolves image URL
Parameters
----------
args
context
info
"""
if self.image and hasattr(self.image, 'url'):
            return settings.SHARE_URL[:-1] + self.image.url
class ExperienceSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `ExperienceModel`
"""
class Meta:
model = ExperienceModel
description = "All experiences"
class SkillsSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `SkillsModel`
"""
class Meta:
model = SkillsModel
description = "All skills"
class SkillsContentSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `SkillsContentModel`
"""
class Meta:
model = SkillsContentModel
description = "All skills contents"
def resolve_file(self, info, **kwargs):
"""
Resolves file URL
Parameters
----------
self
args
context
info
"""
if self.file and hasattr(self.file, 'url'):
return settings.SHARE_URL[:-1] + self.file.url
def resolve_image(self, info, **kwargs):
"""
Resolves image URL
Parameters
----------
args
context
info
"""
if self.image and hasattr(self.image, 'url'):
            return settings.SHARE_URL[:-1] + self.image.url
class PublicationsSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `PublicationsModel`
"""
class Meta:
model = PublicationsModel
description = "Type of publications"
class PublicationContentSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `PublicationsContentModel`
"""
class Meta:
model = PublicationsContentModel
description = "All publication contents"
def resolve_file(self, info, **kwargs):
"""
Resolves file URL
Parameters
----------
self
args
context
info
"""
if self.file and hasattr(self.file, 'url'):
return settings.SHARE_URL[:-1] + self.file.url
def resolve_image(self, info, **kwargs):
"""
Resolves image URL
Parameters
----------
args
context
info
"""
if self.image and hasattr(self.image, 'url'):
            return settings.SHARE_URL[:-1] + self.image.url
class MetaContentSchema(DjangoObjectType):
"""
Schema DjangoObjectType for `MetaContentModel`
"""
class Meta:
model = MetaContentModel
description = "All meta tags, headers and footers."
# ----------------------------------------------------------------------------
# Query object
# ----------------------------------------------------------------------------
class Query:
"""
Query all the contents.
"""
all_contents = graphene.List(ContentSchema)
all_education = graphene.List(EducationSchema)
all_projects = graphene.List(ProjectsSchema)
all_tutorials = graphene.List(TutorialsSchema)
all_experience = graphene.List(ExperienceSchema)
all_skills = graphene.List(SkillsSchema)
all_skills_content = graphene.List(SkillsContentSchema)
all_publications = graphene.List(PublicationsSchema)
all_publications_content = graphene.List(PublicationContentSchema)
all_meta_content = graphene.List(MetaContentSchema)
debug = graphene.Field(DjangoDebug, name='__debug')
def resolve_all_contents(self, info, **kwargs):
"""
Returns all contents of `ContentModel`
"""
return ContentModel.objects.all()
    def resolve_all_education(self, info, **kwargs):
"""
Returns all contents of `EducationModel`
"""
# We can easily optimize query count in the resolve method
return EducationModel.objects.select_related('education').all()
def resolve_all_projects(self, info, **kwargs):
"""
Returns all contents of `ProjectsModel`
"""
return ProjectsModel.objects.select_related('projects').all()
def resolve_all_tutorials(self, info, **kwargs):
"""
Returns all contents of `TutorialsModel`
"""
return TutorialsModel.objects.select_related('tutorials').all()
def resolve_all_experience(self, info, **kwargs):
"""
Returns all contents of `ExperienceModel`
"""
return ExperienceModel.objects.select_related('experience').all()
def resolve_all_skills(self, info, **kwargs):
"""
Returns all contents of `SkillsModel`
"""
return SkillsModel.objects.select_related('skills').all()
def resolve_all_skills_content(self, info, **kwargs):
"""
Returns all contents of `SkillsContentModel`
"""
return SkillsContentModel.objects.select_related('skills_content').all()
def resolve_all_publications(self, info, **kwargs):
"""
Returns all contents of `PublicationsModel`
"""
return PublicationsModel.objects.select_related('publications').all()
def resolve_all_publications_content(self, info, **kwargs):
"""
Returns all contents of `PublicationsContentModel`
"""
return PublicationsContentModel.objects.select_related('publications_content').all()
def resolve_all_meta_content(self, info, **kwargs):
"""
Returns all content for `MetaContentModel`
"""
return MetaContentModel.objects.all()
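# A minimal sketch of a client query against this Query mixin, assuming
# graphene's default camelCasing of the fields above (the selected
# sub-fields are hypothetical and depend on the model definitions):
#
# query {
#   allContents { id }
#   allMetaContent { id }
# }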
|
{
"content_hash": "79f9873ca644e8f4ab7bbe9d46a5b78b",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 133,
"avg_line_length": 24.2434554973822,
"alnum_prop": 0.5704567541302236,
"repo_name": "akshaybabloo/gollahalli-com",
"id": "4fa4380ef75af9ddbbe26e19d891227b588cae7a",
"size": "9261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gollahalli_cms/editor/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "575955"
},
{
"name": "HTML",
"bytes": "174680"
},
{
"name": "JavaScript",
"bytes": "626303"
},
{
"name": "Python",
"bytes": "222676"
},
{
"name": "TeX",
"bytes": "244"
},
{
"name": "XSLT",
"bytes": "6993"
}
],
"symlink_target": ""
}
|
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import networks_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestNetworksClient(base.TestCase):
FAKE_NETWORK = {
"bridge": None,
"vpn_public_port": None,
"dhcp_start": None,
"bridge_interface": None,
"share_address": None,
"updated_at": None,
"id": "34d5ae1e-5659-49cf-af80-73bccd7d7ad3",
"cidr_v6": None,
"deleted_at": None,
"gateway": None,
"rxtx_base": None,
"label": u'30d7',
"priority": None,
"project_id": None,
"vpn_private_address": None,
"deleted": None,
"vlan": None,
"broadcast": None,
"netmask": None,
"injected": None,
"cidr": None,
"vpn_public_address": None,
"multi_host": None,
"enable_dhcp": None,
"dns2": None,
"created_at": None,
"host": None,
"mtu": None,
"gateway_v6": None,
"netmask_v6": None,
"dhcp_server": None,
"dns1": None
}
network_id = "34d5ae1e-5659-49cf-af80-73bccd7d7ad3"
FAKE_NETWORKS = [FAKE_NETWORK]
def setUp(self):
super(TestNetworksClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = networks_client.NetworksClient(
fake_auth, 'compute', 'regionOne')
def _test_list_networks(self, bytes_body=False):
expected = {"networks": self.FAKE_NETWORKS}
serialized_body = json.dumps(expected)
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.list_networks()
self.assertEqual(expected, resp)
def test_list_networks_with_str_body(self):
self._test_list_networks()
def test_list_networks_with_bytes_body(self):
self._test_list_networks(bytes_body=True)
def _test_show_network(self, bytes_body=False):
expected = {"network": self.FAKE_NETWORKS}
serialized_body = json.dumps(expected)
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_network(self.network_id)
self.assertEqual(expected, resp)
def test_show_network_with_str_body(self):
self._test_show_network()
def test_show_network_with_bytes_body(self):
self._test_show_network(bytes_body=True)
|
{
"content_hash": "70eab821831cafcc2d608dd8edb48a33",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 31.74468085106383,
"alnum_prop": 0.6055630026809652,
"repo_name": "pczerkas/tempest",
"id": "cbeaefc4104fe067474ee3400f1fe7795931a685",
"size": "3615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/tests/services/compute/test_networks_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2787983"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
import logging
from .ga import ga_event
from collections import defaultdict, OrderedDict
from flask import render_template, request, redirect, abort, flash, jsonify
from flask_security import current_user, login_required
from sqlalchemy import desc
from pmg import app, db
from pmg.api.client import load_from_api
from pmg.models import Committee, CommitteeMeeting, SavedSearch
logger = logging.getLogger(__name__)
@app.route("/email-alerts/", methods=["GET", "POST"])
def email_alerts():
"""
Allow a user to manage their notification alerts.
"""
next_url = request.values.get("next", "")
if current_user.is_authenticated() and request.method == "POST":
ids = request.form.getlist("committees")
current_user.committee_alerts = Committee.query.filter(
Committee.id.in_(ids)
).all()
current_user.subscribe_daily_schedule = bool(
request.form.get("subscribe_daily_schedule")
)
db.session.commit()
# register a google analytics event
ga_event("user", "change-alerts")
if next_url:
return redirect(next_url)
return ""
committees = load_from_api(
"v2/committees", return_everything=True, params={"monitored": True}
)["results"]
if current_user.is_authenticated():
subscriptions = set(c.id for c in current_user.committee_alerts)
else:
subscriptions = set()
provincial_committees = OrderedDict()
for committee in committees:
house = committee["house"]
house_name = house["name"]
if house["sphere"] == "provincial":
if house_name not in provincial_committees:
provincial_committees[house_name] = []
provincial_committees[house_name].append(committee)
saved_searches = defaultdict(list)
if current_user.is_authenticated():
for ss in current_user.saved_searches:
saved_searches[ss.search].append(ss)
return render_template(
"user_management/email_alerts.html",
committees=committees,
after_signup=bool(next_url),
subscriptions=subscriptions,
next_url=next_url,
saved_searches=saved_searches,
provincial_committees=provincial_committees,
)
@app.route("/user/committee/alerts/add/<int:committee_id>", methods=["POST"])
def user_add_committee_alert(committee_id):
if current_user.is_authenticated() and request.method == "POST":
current_user.committee_alerts.append(Committee.query.get(committee_id))
db.session.commit()
ga_event("user", "add-alert", "cte-alert-box")
flash("We'll send you email alerts for updates on this committee.", "success")
return redirect(request.headers.get("referer", "/"))
@app.route("/user/committee/alerts/remove/<int:committee_id>", methods=["POST"])
def user_remove_committee_alert(committee_id):
if current_user.is_authenticated() and request.method == "POST":
current_user.committee_alerts.remove(Committee.query.get(committee_id))
db.session.commit()
ga_event("user", "remove-alert", "cte-alert-box")
flash("We won't send you email alerts for this committee.", "warning")
return redirect(request.headers.get("referer", "/"))
@app.route("/user/follow/committee/<int:committee_id>", methods=["POST"])
def user_follow_committee(committee_id):
if current_user.is_authenticated() and request.method == "POST":
follow_committee(committee_id)
return redirect(request.headers.get("referer", "/"))
@app.route("/user/unfollow/committee/<int:committee_id>", methods=["POST"])
def user_unfollow_committee(committee_id):
if current_user.is_authenticated() and request.method == "POST":
committee = Committee.query.get(committee_id)
if committee in current_user.following:
current_user.unfollow_committee(committee)
if committee in current_user.committee_alerts:
current_user.committee_alerts.remove(committee)
db.session.commit()
ga_event("user", "unfollow-committee", "cte-follow-committee")
return redirect(request.headers.get("referer", "/"))
@app.route("/user/megamenu/")
def user_megamenu():
if current_user.is_authenticated():
return render_template("_megamenu.html", **get_megamenu())
else:
abort(404)
@app.route("/committee-subscriptions/", methods=["GET", "POST"])
def committee_subscriptions():
"""
Manage subscriptions to premium content.
"""
premium_committees = load_from_api("committee/premium", return_everything=True)[
"results"
]
return render_template(
"user_management/committee_subscriptions.html",
premium_committees=premium_committees,
)
@app.route("/user/saved-search/", methods=["POST"])
@login_required
def create_search():
saved_search = SavedSearch.find_or_create(
current_user,
request.form.get("q"),
content_type=request.form.get("content_type") or None,
committee_id=request.form.get("committee_id") or None,
)
db.session.commit()
return jsonify(id=saved_search.id)
@app.route("/user/saved-search/<int:id>/delete", methods=["POST"])
@login_required
def remove_search(id):
saved_search = SavedSearch.query.get(id)
if not saved_search or current_user != saved_search.user:
abort(404)
db.session.delete(saved_search)
db.session.commit()
return ""
def get_megamenu():
user_following = None
recent_meetings = None
user_follows_committees = False
if current_user and current_user.is_authenticated():
user_following = sorted(current_user.following, key=lambda cte: cte.name)[:20]
if user_following:
user_follows_committees = True
recent_meetings = current_user.get_followed_committee_meetings().limit(10)
if not user_following:
user_following = Committee.query.filter(
Committee.id.in_(Committee.POPULAR_COMMITTEES)
).all()
if not recent_meetings:
recent_meetings = (
CommitteeMeeting.query.filter(
CommitteeMeeting.committee_id.in_(Committee.POPULAR_COMMITTEES)
)
.order_by(desc(CommitteeMeeting.date))
.limit(10)
)
return {
"user_follows_committees": user_follows_committees,
"user_following": user_following,
"recent_meetings": recent_meetings,
}
def follow_committee(committee_id):
committee = Committee.query.get(committee_id)
if committee not in current_user.following:
current_user.follow_committee(committee)
if committee not in current_user.committee_alerts:
current_user.committee_alerts.append(committee)
db.session.commit()
ga_event("user", "follow-committee", "cte-follow-committee")
|
{
"content_hash": "5e0d0ccc6d590cb141bd5f46ff121d72",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 86,
"avg_line_length": 32.0046511627907,
"alnum_prop": 0.6593518383955821,
"repo_name": "Code4SA/pmg-cms-2",
"id": "e2fe7bb3f62ecb7ebbe20f4b62a52b3bc116e9ce",
"size": "6881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pmg/user_management.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "204619"
},
{
"name": "HTML",
"bytes": "361071"
},
{
"name": "JavaScript",
"bytes": "109536"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "469838"
}
],
"symlink_target": ""
}
|
import pytest
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import pytorch_lightning as pl
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.utilities import memory
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
class CustomClassificationModelDP(ClassificationModel):
def _step(self, batch, batch_idx):
x, y = batch
logits = self(x)
return {"logits": logits, "y": y}
def training_step(self, batch, batch_idx):
out = self._step(batch, batch_idx)
loss = F.cross_entropy(out["logits"], out["y"])
return loss
def validation_step(self, batch, batch_idx):
return self._step(batch, batch_idx)
def test_step(self, batch, batch_idx):
return self._step(batch, batch_idx)
def validation_step_end(self, outputs):
self.log("val_acc", self.valid_acc(outputs["logits"], outputs["y"]))
def test_step_end(self, outputs):
self.log("test_acc", self.test_acc(outputs["logits"], outputs["y"]))
@RunIf(min_gpus=2)
def test_multi_gpu_early_stop_dp(tmpdir):
"""Make sure DDP works. with early stopping"""
tutils.set_random_master_port()
dm = ClassifDataModule()
model = CustomClassificationModelDP()
trainer_options = dict(
default_root_dir=tmpdir,
callbacks=[EarlyStopping(monitor="val_acc")],
max_epochs=50,
limit_train_batches=10,
limit_val_batches=10,
gpus=[0, 1],
accelerator="dp",
)
tpipes.run_model_test(trainer_options, model, dm)
@RunIf(min_gpus=2)
def test_multi_gpu_model_dp(tmpdir):
tutils.set_random_master_port()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=10,
limit_val_batches=10,
gpus=[0, 1],
accelerator="dp",
progress_bar_refresh_rate=0,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model)
# test memory helper functions
memory.get_memory_profile("min_max")
class ReductionTestModel(BoringModel):
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
def test_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
def add_outputs(self, output, device):
output.update(
{
"reduce_int": torch.tensor(device.index, dtype=torch.int, device=device),
"reduce_float": torch.tensor(device.index, dtype=torch.float, device=device),
}
)
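        # Note (added for clarity): each DP replica records its own GPU index
        # here; Lightning mean-reduces these tensors across replicas, which is
        # exactly what training_epoch_end below checks (mean of 0 and 1).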
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
self.add_outputs(output, batch.device)
return output
def validation_step(self, batch, batch_idx):
output = super().validation_step(batch, batch_idx)
self.add_outputs(output, batch.device)
return output
def test_step(self, batch, batch_idx):
output = super().test_step(batch, batch_idx)
self.add_outputs(output, batch.device)
return output
def training_epoch_end(self, outputs):
assert outputs[0]["loss"].shape == torch.Size([])
assert outputs[0]["reduce_int"].item() == 0 # mean([0, 1]) = 0
assert outputs[0]["reduce_float"].item() == 0.5 # mean([0., 1.]) = 0.5
def test_dp_raise_exception_with_batch_transfer_hooks(tmpdir, monkeypatch):
"""
Test that an exception is raised when overriding batch_transfer_hooks in DP model.
"""
monkeypatch.setattr("torch.cuda.device_count", lambda: 2)
class CustomModel(BoringModel):
def transfer_batch_to_device(self, batch, device):
batch = batch.to(device)
return batch
trainer_options = dict(default_root_dir=tmpdir, max_steps=7, gpus=[0, 1], accelerator="dp")
trainer = Trainer(**trainer_options)
model = CustomModel()
with pytest.raises(MisconfigurationException, match=r"Overriding `transfer_batch_to_device` is not .* in DP"):
trainer.fit(model)
class CustomModel(BoringModel):
def on_before_batch_transfer(self, batch, dataloader_idx):
batch += 1
return batch
trainer = Trainer(**trainer_options)
model = CustomModel()
with pytest.raises(MisconfigurationException, match=r"Overriding `on_before_batch_transfer` is not .* in DP"):
trainer.fit(model)
class CustomModel(BoringModel):
def on_after_batch_transfer(self, batch, dataloader_idx):
batch += 1
return batch
trainer = Trainer(**trainer_options)
model = CustomModel()
with pytest.raises(MisconfigurationException, match=r"Overriding `on_after_batch_transfer` is not .* in DP"):
trainer.fit(model)
@RunIf(min_gpus=2)
def test_dp_training_step_dict(tmpdir):
"""This test verifies that dp properly reduces dictionaries"""
model = ReductionTestModel()
model.training_step_end = None
model.validation_step_end = None
model.test_step_end = None
trainer = pl.Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
gpus=2,
accelerator="dp",
)
trainer.fit(model)
|
{
"content_hash": "ce902ba978d3070ddb193f45c4ef4047",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 114,
"avg_line_length": 31.22826086956522,
"alnum_prop": 0.6555864949530108,
"repo_name": "williamFalcon/pytorch-lightning",
"id": "efaf761cb711611f697d933d38d0e357d8ff3a18",
"size": "6332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/accelerators/test_dp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "511511"
},
{
"name": "Shell",
"bytes": "1731"
}
],
"symlink_target": ""
}
|
try:
from http.client import BadStatusLine
except ImportError:
from httplib import BadStatusLine
import pytest
from selenium.common.exceptions import (
NoSuchElementException,
NoSuchFrameException,
TimeoutException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class TestFrameSwitching(object):
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver doesn't do anything fishy when it navigates to a page with frames.
#
# ----------------------------------------------------------------------------------------------
def testShouldAlwaysFocusOnTheTopMostFrameAfterANavigationEvent(self, driver, pages):
pages.load("frameset.html")
driver.find_element(By.TAG_NAME, "frameset") # Test passes if this does not throw.
def testShouldNotAutomaticallySwitchFocusToAnIFrameWhenAPageContainingThemIsLoaded(self, driver, pages):
pages.load("iframes.html")
driver.find_element(By.ID, "iframe_page_heading")
def testShouldOpenPageWithBrokenFrameset(self, driver, pages):
pages.load("framesetPage3.html")
frame1 = driver.find_element(By.ID, "first")
driver.switch_to.frame(frame1)
driver.switch_to.default_content()
frame2 = driver.find_element(By.ID, "second")
driver.switch_to.frame(frame2) # IE9 can not switch to this broken frame - it has no window.
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver can switch to frames as expected.
#
# ----------------------------------------------------------------------------------------------
def testShouldBeAbleToSwitchToAFrameByItsIndex(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(1)
assert driver.find_element(By.ID, "pageNumber").text == "2"
def testShouldBeAbleToSwitchToAnIframeByItsIndex(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToAFrameByItsName(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("fourth")
assert driver.find_element(By.TAG_NAME, "frame").get_attribute("name") == "child1"
def testShouldBeAbleToSwitchToAnIframeByItsName(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame("iframe1-name")
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToAFrameByItsID(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("fifth")
assert driver.find_element(By.NAME, "windowOne").text == "Open new window"
def testShouldBeAbleToSwitchToAnIframeByItsID(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame("iframe1")
assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"
def testShouldBeAbleToSwitchToFrameWithNameContainingDot(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("sixth.iframe1")
assert "Page number 3" in driver.find_element(By.TAG_NAME, "body").text
def testShouldBeAbleToSwitchToAFrameUsingAPreviouslyLocatedWebElement(self, driver, pages):
pages.load("frameset.html")
frame = driver.find_element(By.TAG_NAME, "frame")
driver.switch_to.frame(frame)
assert driver.find_element(By.ID, "pageNumber").text == "1"
def testShouldBeAbleToSwitchToAnIFrameUsingAPreviouslyLocatedWebElement(self, driver, pages):
pages.load("iframes.html")
frame = driver.find_element(By.TAG_NAME, "iframe")
driver.switch_to.frame(frame)
element = driver.find_element(By.NAME, "id-name1")
assert element.get_attribute("value") == "name"
def testShouldEnsureElementIsAFrameBeforeSwitching(self, driver, pages):
pages.load("frameset.html")
frame = driver.find_element(By.TAG_NAME, "frameset")
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame(frame)
def testFrameSearchesShouldBeRelativeToTheCurrentlySelectedFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("second")
assert driver.find_element(By.ID, "pageNumber").text == "2"
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("third"))
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_name("third"))
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame("second")
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_name("second"))
assert driver.find_element(By.ID, "pageNumber").text == "2"
def testShouldSelectChildFramesByChainedCalls(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.frame(driver.find_element_by_name("child2"))
assert driver.find_element(By.ID, "pageNumber").text == "11"
def testShouldThrowFrameNotFoundExceptionLookingUpSubFramesWithSuperFrameNames(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("second"))
def testShouldThrowAnExceptionWhenAFrameCannotBeFound(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.switch_to.frame(driver.find_element_by_name("Nothing here"))
def testShouldThrowAnExceptionWhenAFrameCannotBeFoundByIndex(self, driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchFrameException):
driver.switch_to.frame(27)
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.parent_frame()
driver.switch_to.frame(driver.find_element_by_name("first"))
assert driver.find_element(By.ID, "pageNumber").text == "1"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFrameFromASecondLevelFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("fourth"))
driver.switch_to.frame(driver.find_element_by_name("child1"))
driver.switch_to.parent_frame()
driver.switch_to.frame(driver.find_element_by_name("child2"))
assert driver.find_element(By.ID, "pageNumber").text == "11"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testSwitchingToParentFrameFromDefaultContextIsNoOp(self, driver, pages):
pages.load("xhtmlTest.html")
driver.switch_to.parent_frame()
assert driver.title == "XHTML Test Page"
@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFromAnIframe(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.switch_to.parent_frame()
driver.find_element(By.ID, "iframe_page_heading")
# ----------------------------------------------------------------------------------------------
#
# General frame handling behavior tests
#
# ----------------------------------------------------------------------------------------------
def testShouldContinueToReferToTheSameFrameOnceItHasBeenSelected(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(2)
checkbox = driver.find_element(By.XPATH, "//input[@name='checky']")
checkbox.click()
checkbox.submit()
# TODO(simon): this should not be needed, and is only here because IE's submit returns too
# soon.
WebDriverWait(driver, 3).until(EC.text_to_be_present_in_element((By.XPATH, '//p'), 'Success!'))
@pytest.mark.xfail_marionette(raises=TimeoutException)
def testShouldFocusOnTheReplacementWhenAFrameFollowsALinkToA_TopTargetedPage(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(0)
driver.find_element(By.LINK_TEXT, "top").click()
expectedTitle = "XHTML Test Page"
WebDriverWait(driver, 3).until(EC.title_is(expectedTitle))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "only-exists-on-xhtmltest")))
def testShouldAllowAUserToSwitchFromAnIframeBackToTheMainContentOfThePage(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.switch_to.default_content()
driver.find_element(By.ID, "iframe_page_heading")
def testShouldAllowTheUserToSwitchToAnIFrameAndRemainFocusedOnIt(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(0)
driver.find_element(By.ID, "submitButton").click()
assert self.getTextOfGreetingElement(driver) == "Success!"
def getTextOfGreetingElement(self, driver):
return WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "greeting"))).text
def testShouldBeAbleToClickInAFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame("third")
# This should replace frame "third" ...
driver.find_element(By.ID, "submitButton").click()
# driver should still be focused on frame "third" ...
assert self.getTextOfGreetingElement(driver) == "Success!"
# Make sure it was really frame "third" which was replaced ...
driver.switch_to.default_content()
driver.switch_to.frame("third")
assert self.getTextOfGreetingElement(driver) == "Success!"
def testShouldBeAbleToClickInAFrameThatRewritesTopWindowLocation(self, driver, pages):
pages.load("click_tests/issue5237.html")
driver.switch_to.frame(driver.find_element_by_id("search"))
driver.find_element(By.ID, "submit").click()
driver.switch_to.default_content()
WebDriverWait(driver, 3).until(EC.title_is("Target page for issue 5237"))
def testShouldBeAbleToClickInASubFrame(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_id("sixth"))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
# This should replace frame "iframe1" inside frame "sixth" ...
driver.find_element(By.ID, "submitButton").click()
# driver should still be focused on frame "iframe1" inside frame "sixth" ...
        assert self.getTextOfGreetingElement(driver) == "Success!"
# Make sure it was really frame "iframe1" inside frame "sixth" which was replaced ...
driver.switch_to.default_content()
driver.switch_to.frame(driver.find_element_by_id("sixth"))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
assert driver.find_element(By.ID, "greeting").text == "Success!"
def testShouldBeAbleToFindElementsInIframesByXPath(self, driver, pages):
pages.load("iframes.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
element = driver.find_element(By.XPATH, "//*[@id = 'changeme']")
assert element is not None
@pytest.mark.xfail_phantomjs
def testGetCurrentUrlReturnsTopLevelBrowsingContextUrl(self, driver, pages):
pages.load("frameset.html")
assert "frameset.html" in driver.current_url
driver.switch_to.frame(driver.find_element_by_name("second"))
assert "frameset.html" in driver.current_url
@pytest.mark.xfail_phantomjs
def testGetCurrentUrlReturnsTopLevelBrowsingContextUrlForIframes(self, driver, pages):
pages.load("iframes.html")
assert "iframes.html" in driver.current_url
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
assert "iframes.html" in driver.current_url
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUs(self, driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
WebDriverWait(driver, 3).until_not(
EC.presence_of_element_located((By.ID, "iframe1")))
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "iframe1")))
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithFrameIndex(self, driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
iframe = 0
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
# we should be in the frame now
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithWebelement(self, driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
iframe = driver.find_element(By.ID, "iframe1")
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
# we should be in the frame now
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
driver.switch_to.default_content()
addIFrame = driver.find_element(By.ID, "addBackFrame")
addIFrame.click()
iframe = driver.find_element(By.ID, "iframe1")
WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
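    # Pattern note (added): EC.frame_to_be_available_and_switch_to_it accepts
    # a frame index, a name/id string, or a WebElement, so the three "deleted
    # frame" tests above differ only in the locator passed to the same wait.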
@pytest.mark.xfail_chrome(raises=NoSuchElementException)
@pytest.mark.xfail_marionette(raises=WebDriverException)
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldNotBeAbleToDoAnythingTheFrameIsDeletedFromUnderUs(self, driver, pages):
pages.load("frame_switching_tests/deletingFrame.html")
driver.switch_to.frame(driver.find_element_by_id("iframe1"))
killIframe = driver.find_element(By.ID, "killIframe")
killIframe.click()
with pytest.raises(NoSuchFrameException):
driver.find_element(By.ID, "killIframe").click()
def testShouldReturnWindowTitleInAFrameset(self, driver, pages):
pages.load("frameset.html")
driver.switch_to.frame(driver.find_element_by_name("third"))
assert "Unique title" == driver.title
def testJavaScriptShouldExecuteInTheContextOfTheCurrentFrame(self, driver, pages):
pages.load("frameset.html")
assert driver.execute_script("return window == window.top")
driver.switch_to.frame(driver.find_element(By.NAME, "third"))
assert driver.execute_script("return window != window.top")
def testShouldNotSwitchMagicallyToTheTopWindow(self, driver, pages):
pages.load("frame_switching_tests/bug4876.html")
driver.switch_to.frame(0)
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))
for i in range(20):
try:
input = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))
submit = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "submitButton")))
input.clear()
                import random
                input.send_keys("rand%s" % int(random.random() * 10000))
submit.click()
finally:
url = driver.execute_script("return window.location.href")
# IE6 and Chrome add "?"-symbol to the end of the URL
if (url.endswith("?")):
url = url[:len(url) - 1]
assert pages.url("frame_switching_tests/bug4876_iframe.html") == url
def testGetShouldSwitchToDefaultContext(self, driver, pages):
pages.load("iframes.html")
driver.find_element(By.ID, "iframe1")
driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
driver.find_element(By.ID, "cheese") # Found on formPage.html but not on iframes.html.
pages.load("iframes.html") # This must effectively switch_to.default_content(), too.
driver.find_element(By.ID, "iframe1")
|
{
"content_hash": "a8aaeb097533ced3c33bf5f7ce83450c",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 112,
"avg_line_length": 47.3048128342246,
"alnum_prop": 0.6647637350214787,
"repo_name": "TikhomirovSergey/selenium",
"id": "51153c196d79c7dad92c5c35244074262d82f2fa",
"size": "18480",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/test/selenium/webdriver/common/frame_switching_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "307"
},
{
"name": "C",
"bytes": "62040"
},
{
"name": "C#",
"bytes": "2712020"
},
{
"name": "C++",
"bytes": "1900856"
},
{
"name": "CSS",
"bytes": "26244"
},
{
"name": "HTML",
"bytes": "1840976"
},
{
"name": "Java",
"bytes": "4490360"
},
{
"name": "JavaScript",
"bytes": "4943302"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Objective-C",
"bytes": "4376"
},
{
"name": "Python",
"bytes": "725777"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "722511"
},
{
"name": "Shell",
"bytes": "1305"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
}
|
"""Markdown renderers"""
from django.utils.deconstruct import deconstructible
import markdown
@deconstructible
class BaseRenderer(markdown.Markdown):
# TODO test renderer and extensions
def __init__(self, *args, **kwargs):
"""Default markdown renderer
It's required that you add one renderer for each markdown field.
We need to subclass `markdown.Markdown` in order to provide
serialization and the `__call__` method.
"""
self.__args = args
self.__kwargs = kwargs
super(BaseRenderer, self).__init__(*args, **kwargs)
def __call__(self, text):
"""Convert markdown to serialized XHTML or HTML
Note that this method is not guaranteed to return clean html.
"""
return self.convert(text)
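def _example_usage():
    """Illustrative sketch (added; not part of the original module).

    Extension names are passed straight through to `markdown.Markdown`;
    'markdown.extensions.extra' is a standard python-markdown extension
    chosen purely for demonstration.
    """
    renderer = BaseRenderer(extensions=['markdown.extensions.extra'])
    return renderer('**hello**')  # -> '<p><strong>hello</strong></p>'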
|
{
"content_hash": "8f7230f8532e26a5f11c25d6e7a9b631",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 72,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.64375,
"repo_name": "AmatanHead/collective-blog",
"id": "30f54d97f44f98022a04c210d29e061b297900ce",
"size": "800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s_markdown/renderer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132224"
},
{
"name": "HTML",
"bytes": "457421"
},
{
"name": "JavaScript",
"bytes": "315569"
},
{
"name": "Python",
"bytes": "225674"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
}
|
"""
This script is used to read KRTD cameras files and a PLY file and produce a
homography file. It fits a plane to the landmarks in the PLY and then
extracts the sequence of homographies induced by that plane (assuming
no radial distortion).
"""
from optparse import OptionParser
import numpy as np
import numpy.linalg as npla
import os.path
from camera_io import *
from landmark_io import *
from homography_io import *
def homography_from_plane(camera1, camera2, plane):
"""Computes the homography induced by a plane between images of
camera1 and camera2. The resulting homography maps a point from
camera1 to camera2 via the specified plane.
"""
K1, R1, t1, _ = camera1
K2, R2, t2, _ = camera2
R3 = R2 * R1.T
t3 = t2 - R2 * R1.T * t1
n = np.matrix(plane[:3]).T
d = float(plane[3])
n = R1 * n
d -= float(t1.T * n)
return K2 * (R3 - t3 * n.T / d) * K1.I
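# Derivation note (added): with the world plane written as n.X + d = 0, the
# code above first re-expresses it in camera-1 coordinates (n' = R1 n,
# d' = d - t1.n'), then applies the standard plane-induced homography
# H = K2 (R - t n'^T / d') K1^-1 for the relative pose
# (R, t) = (R2 R1^T, t2 - R2 R1^T t1).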
def estimate_plane(points):
"""Estimate a plane and fit a set of 3D point
"""
C = np.mean(points, 0)
N = npla.svd(np.cov(points.T))[2][2]
d = np.dot(N,C)
return np.hstack((N,-d))
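def _example_estimate_plane():
    """Illustrative self-check (added; not part of the original script).

    Points drawn from the plane z = 2 should recover a normal close to
    (0, 0, 1) and, since the plane is returned as (nx, ny, nz, d) with
    n.X + d = 0, an offset close to -2 (the whole vector may be negated).
    """
    pts = np.array([[0., 0., 2.], [1., 0., 2.], [0., 1., 2.], [3., 5., 2.]])
    return estimate_plane(pts)  # approximately [0, 0, 1, -2] up to sign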
def main():
usage = "usage: %prog [options] ply_file krtd_glob out_homogs"
description = "Read a PLY and set of KRTDs and produce a homography file"
parser = OptionParser(usage=usage, description=description)
(options, args) = parser.parse_args()
ply_filename = args[0]
krtd_glob = args[1]
out_homog_file = args[2]
    L, _ = load_landmark_ply_file(ply_filename)
C = load_camera_krtd_glob(krtd_glob)
plane = estimate_plane(L)
cams = sorted(C.iteritems())
cam0 = cams[0][1]
homogs = []
for f, cam in cams:
print f
H = homography_from_plane(cam, cam0, plane)
homogs.append(H)
homogs = [("%d %d"%(i,i), H) for i, H in enumerate(homogs)]
write_homography_file(homogs, out_homog_file)
if __name__ == "__main__":
main()
|
{
"content_hash": "847f6b7fd53ce3bce2901ea7d86a0585",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 77,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.6322314049586777,
"repo_name": "linus-sherrill/maptk",
"id": "f0261293294d294d632d9a77026186c99cafb3c2",
"size": "3534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/krtd_and_ply_to_homog.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1746"
},
{
"name": "C++",
"bytes": "1585333"
},
{
"name": "CMake",
"bytes": "64390"
},
{
"name": "Python",
"bytes": "54703"
},
{
"name": "Ruby",
"bytes": "25362"
},
{
"name": "Shell",
"bytes": "2908"
}
],
"symlink_target": ""
}
|
"""Django views implementing the XBlock workbench.
This code is in the Workbench layer.
"""
import logging
import mimetypes
from xblock.core import XBlock, XBlockAside
from xblock.django.request import django_to_webob_request, webob_to_django_response
from xblock.exceptions import NoSuchUsage
from xblock.plugin import PluginMissingError
from django.conf import settings
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from .models import XBlockState
from .runtime import WorkbenchRuntime
from .runtime_util import reset_global_state
from .scenarios import get_scenarios
log = logging.getLogger(__name__)
# We don't really have authentication and multiple students, just accept their
# id on the URL.
def get_student_id(request):
"""Get the student_id from the given request."""
student_id = request.GET.get('student', 'student_1')
return student_id
# ---- Views -----
def index(_request):
"""Render `index.html`"""
the_scenarios = sorted(get_scenarios().items())
the_scenarios = [
(class_name, scenario)
for class_name, scenario in the_scenarios
if class_name.split('.')[0] not in settings.EXCLUDED_XBLOCKS
]
return render(_request, 'workbench/index.html', {
'scenarios': [(desc, scenario.description) for desc, scenario in the_scenarios]
})
@ensure_csrf_cookie
def show_scenario(request, scenario_id, view_name='student_view', template='workbench/block.html'):
"""
Render the given `scenario_id` for the given `view_name`, on the provided `template`.
`view_name` defaults to 'student_view'.
`template` defaults to 'block.html'.
"""
student_id = get_student_id(request)
log.info("Start show_scenario %r for student %s", scenario_id, student_id)
try:
scenario = get_scenarios()[scenario_id]
except KeyError as ex:
raise Http404 from ex
usage_id = scenario.usage_id
runtime = WorkbenchRuntime(student_id)
block = runtime.get_block(usage_id)
render_context = {
'activate_block_id': request.GET.get('activate_block_id', None)
}
frag = block.render(view_name, render_context)
log.info("End show_scenario %s", scenario_id)
return render(request, template, {
'scenario': scenario,
'block': block,
'body': frag.body_html(),
'head_html': frag.head_html(),
'foot_html': frag.foot_html(),
'student_id': student_id,
})
def user_list(_request):
"""
This will return a list of all users in the database
"""
# We'd really like to do .distinct, but sqlite+django does not support this;
# hence the hack with sorted(set(...))
users = sorted(
user_id[0]
for user_id in set(XBlockState.objects.values_list('user_id'))
)
return JsonResponse(users, safe=False)
def handler(request, usage_id, handler_slug, suffix='', authenticated=True):
"""The view function for authenticated handler requests."""
if authenticated:
student_id = get_student_id(request)
log.info("Start handler %s/%s for student %s", usage_id, handler_slug, student_id)
else:
student_id = "none"
log.info("Start handler %s/%s", usage_id, handler_slug)
runtime = WorkbenchRuntime(student_id)
try:
block = runtime.get_block(usage_id)
except NoSuchUsage as ex:
raise Http404 from ex
request = django_to_webob_request(request)
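    # Note (added): each path_info_pop() moves one leading URL segment from
    # PATH_INFO into SCRIPT_NAME; the two pops strip the routing prefix so
    # the XBlock's handler sees only the trailing part of the path (the
    # exact segments depend on the URLconf).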
request.path_info_pop()
request.path_info_pop()
result = block.runtime.handle(block, handler_slug, request, suffix)
log.info("End handler %s/%s", usage_id, handler_slug)
return webob_to_django_response(result)
def aside_handler(request, aside_id, handler_slug, suffix='', authenticated=True):
"""The view function for authenticated handler requests."""
if authenticated:
student_id = get_student_id(request)
log.info("Start handler %s/%s for student %s", aside_id, handler_slug, student_id)
else:
student_id = "none"
log.info("Start handler %s/%s", aside_id, handler_slug)
runtime = WorkbenchRuntime(student_id)
try:
block = runtime.get_aside(aside_id)
except NoSuchUsage as ex:
raise Http404 from ex
request = django_to_webob_request(request)
request.path_info_pop()
request.path_info_pop()
result = block.runtime.handle(block, handler_slug, request, suffix)
log.info("End handler %s/%s", aside_id, handler_slug)
return webob_to_django_response(result)
def package_resource(_request, block_type, resource):
"""
Wrapper for `pkg_resources` that tries to access a resource and, if it
is not found, raises an Http404 error.
"""
try:
xblock_class = XBlock.load_class(block_type)
except PluginMissingError:
try:
xblock_class = XBlockAside.load_class(block_type)
except PluginMissingError as ex:
raise Http404 from ex
try:
content = xblock_class.open_local_resource(resource)
except Exception as ex:
raise Http404 from ex
mimetype, _ = mimetypes.guess_type(resource)
return HttpResponse(content, content_type=mimetype)
@csrf_exempt
def reset_state(request):
"""Delete all state and reload the scenarios."""
log.info("RESETTING ALL STATE")
reset_global_state()
referrer_url = request.META['HTTP_REFERER']
return redirect(referrer_url)
|
{
"content_hash": "98cf88463bc54fc447b51eb51602d0ab",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 99,
"avg_line_length": 31.322033898305083,
"alnum_prop": 0.6731601731601732,
"repo_name": "stvstnfrd/xblock-sdk",
"id": "57c9907338c3a9e68c1604667f8c5c194d20c4d2",
"size": "5544",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workbench/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14419"
},
{
"name": "Dockerfile",
"bytes": "681"
},
{
"name": "HTML",
"bytes": "8020"
},
{
"name": "JavaScript",
"bytes": "237802"
},
{
"name": "Makefile",
"bytes": "2918"
},
{
"name": "Python",
"bytes": "146395"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import django_resized.forms
class Migration(migrations.Migration):
dependencies = [
('deal', '0019_auto_20141023_0309'),
]
operations = [
migrations.AlterField(
model_name='deal',
name='thumbnail',
field=django_resized.forms.ResizedImageField(null=True, upload_to=b'upload_image/', blank=True),
),
]
|
{
"content_hash": "003b48c63885589c91a6056f7e352445",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 108,
"avg_line_length": 24.31578947368421,
"alnum_prop": 0.6298701298701299,
"repo_name": "raytung/Slice",
"id": "4e3e661cd191d08b99491675353ed40805c6a856",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deal/migrations/0020_auto_20141023_0316.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "317788"
},
{
"name": "JavaScript",
"bytes": "67473"
},
{
"name": "Perl",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "165203"
}
],
"symlink_target": ""
}
|
import configparser
import os
import shutil
import click
from ..util import (get_config_path, get_db_path, initialize_message,
successful_update_message, DB_NAME)
from .. import errors
def list_config():
config_path = get_config_path()
if not os.path.isfile(config_path):
initialize_message()
exit(1)
click.echo('Config path: ' + config_path)
db_path = get_db_path()
if not os.path.isfile(db_path):
click.secho('The data file does not exist.', fg='red')
exit(1)
click.echo('Data path: ' + db_path)
def update_config():
try:
old_db_path = get_db_path()
except errors.MissingConfigError:
initialize_message()
exit(1)
config = configparser.ConfigParser()
config.read(get_config_path())
new_db_dir = click.prompt('New (absolute) directory for the data')
while not os.path.isdir(new_db_dir):
click.secho('Invalid directory.', fg='red')
new_db_dir = click.prompt('New (absolute) directory for the data')
new_db_path = os.path.join(new_db_dir, DB_NAME)
shutil.move(old_db_path, new_db_path)
config['database']['path'] = new_db_path
with open(get_config_path(), 'w') as config_file:
config.write(config_file)
successful_update_message()
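# Illustrative result (added): after a successful move, the file written at
# get_config_path() would contain something like
#
#   [database]
#   path = /new/absolute/dir/<DB_NAME>
#
# where <DB_NAME> stands for the module-level data-file name imported above.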
|
{
"content_hash": "21ed012a11d9c66faa983d8a0927acc1",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 74,
"avg_line_length": 26.775510204081634,
"alnum_prop": 0.6310975609756098,
"repo_name": "dguo/churn",
"id": "3cb92dee2bfbbb9d122d39992d8276d0a98c85e5",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/subcommands/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "175"
},
{
"name": "Python",
"bytes": "48959"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from BinPy import *
# <codecell>
# Usage of IC 7404:
ic = IC_7404()
print(ic.__doc__)
# <codecell>
# The Pin configuration is:
inp = {1: 1, 3: 0, 5: 0, 7: 0, 9: 0, 11: 0, 13: 1, 14: 1}
# Pin initialization
# Powering up the IC - using -- ic.setIC({14: 1, 7: 0})
ic.setIC({14: 1, 7: 0})
# Setting the inputs of the ic
ic.setIC(inp)
# Draw the IC with the current configuration
ic.drawIC()
# <codecell>
# Run the IC with the current configuration using -- print ic.run() --
# Note that ic.run() returns a dict of pin states similar to the input dict
print (ic.run())
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# Run the IC
print (ic.run())
# <codecell>
# Connector Outputs
c = Connector()
# Set the output connector to a particular pin of the ic
ic.setOutput(8, c)
print(c)
|
{
"content_hash": "c658a8dce749a38a5687a8fabca548f9",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 71,
"avg_line_length": 15.18918918918919,
"alnum_prop": 0.6574733096085409,
"repo_name": "rajathkumarmp/BinPy",
"id": "9749b23d1bf0a66daf33cefb0c9e925cc15cc31d",
"size": "1235",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "BinPy/examples/source/ic/Series_7400/IC7404.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "694067"
},
{
"name": "Shell",
"bytes": "2956"
}
],
"symlink_target": ""
}
|
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.tests import TranslatedRevisionFactory
class LocalizationDashTests(TestCase):
"""Tests for the Localization Dashboard.
The L10n Dash shares a lot of code with the Contributor Dash, so
this also covers much of the latter, such as the readout template,
most of the view mechanics, and the Unreviewed Changes readout
itself.
"""
@staticmethod
def _assert_readout_contains(doc, slug, contents):
"""Assert `doc` contains `contents` within the `slug` readout."""
html = doc('a#' + slug).closest('details').html()
assert contents in html, "'" + contents + "' is not in the following: " + html
def test_render(self):
"""Assert main dash and all the readouts render and don't crash."""
# Put some stuff in the DB so at least one row renders for each readout:
unreviewed = TranslatedRevisionFactory(
document__locale='de',
reviewed=None,
is_approved=False,
is_ready_for_localization=True)
response = self.client.get(reverse('dashboards.localization', locale='de'), follow=False)
eq_(200, response.status_code)
doc = pq(response.content)
self._assert_readout_contains(doc, 'unreviewed', unreviewed.document.title)
|
{
"content_hash": "6978d3275959b847b3ac1978b419d545",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 97,
"avg_line_length": 38.91891891891892,
"alnum_prop": 0.6784722222222223,
"repo_name": "mythmon/kitsune",
"id": "e61774597b81737c07fb1d139a06f9b91f0c7e33",
"size": "1440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kitsune/dashboards/tests/test_templates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "281386"
},
{
"name": "HTML",
"bytes": "624493"
},
{
"name": "JavaScript",
"bytes": "750034"
},
{
"name": "Python",
"bytes": "2721930"
},
{
"name": "Shell",
"bytes": "10281"
},
{
"name": "Smarty",
"bytes": "2062"
}
],
"symlink_target": ""
}
|
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks, wait_until
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
super(NodeNetworkLimitedTest, self).setup_network()
self.disconnect_all()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generate(292)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
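        # 1036 == NODE_BLOOM (1 << 2) | NODE_WITNESS (1 << 3) |
        #         NODE_NETWORK_LIMITED (1 << 10) = 4 + 8 + 1024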
assert_equal(node1.firstAddrnServices, 1036)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
        # node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generate(10)
        # connect node1 (non pruned) with node0 (pruned) and check if they can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
|
{
"content_hash": "6bfb9560ff6b3e118e3eb40c10020678",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 121,
"avg_line_length": 41.26548672566372,
"alnum_prop": 0.6626635213381943,
"repo_name": "merelcoin/merelcoin",
"id": "ef9c71dc02d105d8ec71c6c19662d55cee29a9db",
"size": "4877",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "test/functional/p2p_node_network_limited.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "694223"
},
{
"name": "C++",
"bytes": "6032230"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "196429"
},
{
"name": "Makefile",
"bytes": "2491551"
},
{
"name": "NSIS",
"bytes": "6834"
},
{
"name": "Objective-C",
"bytes": "6153"
},
{
"name": "Objective-C++",
"bytes": "6588"
},
{
"name": "Python",
"bytes": "1474453"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Roff",
"bytes": "2559606"
},
{
"name": "Shell",
"bytes": "886663"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import json
import contoml
from prettytoml.errors import TOMLError, DuplicateKeysError, DuplicateTablesError, InvalidTOMLFileError
def test_loading_toml_without_trailing_newline():
toml_text = '[main]\nname = "azmy"'
toml = contoml.loads(toml_text)
assert toml['main']['name'] == 'azmy'
def test_array_edge_cases():
# Parsing an empty array value
toml_text = """[section]
key = []"""
toml = contoml.loads(toml_text)
assert 'section' in toml
assert len(toml['section']['key']) == 0
def test_loading_an_empty_toml_source():
toml_text = ''
contoml.loads(toml_text)
# Should not fail
def test_parsing_section_with_indentation_and_comment_lines():
toml = """[main]
listen = ":8966"
redis_host = "localhost:6379"
redis_password = ""
[influxdb]
host = "localhost:8086"
db = "agentcontroller"
user = "ac"
password = "acctrl"
[handlers]
binary = "python2.7"
cwd = "./handlers"
[handlers.env]
PYTHONPATH = "/opt/jumpscale7/lib:../client"
SYNCTHING_URL = "http://localhost:8384/"
SYNCTHING_SHARED_FOLDER_ID = "jumpscripts"
#SYNCTHING_API_KEY = ""
REDIS_ADDRESS = "localhost"
REDIS_PORT = "6379"
#REDIS_PASSWORD = ""
"""
f = contoml.loads(toml)
assert f['handlers']['env']['REDIS_ADDRESS'] == 'localhost'
assert 'REDIS_PASSWORD' not in f['handlers']['env']
f['handlers']['env']['REDIS_PASSWORD'] = 'MYPASSWORD'
expected = """[main]
listen = ":8966"
redis_host = "localhost:6379"
redis_password = ""
[influxdb]
host = "localhost:8086"
db = "agentcontroller"
user = "ac"
password = "acctrl"
[handlers]
binary = "python2.7"
cwd = "./handlers"
[handlers.env]
PYTHONPATH = "/opt/jumpscale7/lib:../client"
SYNCTHING_URL = "http://localhost:8384/"
SYNCTHING_SHARED_FOLDER_ID = "jumpscripts"
#SYNCTHING_API_KEY = ""
REDIS_ADDRESS = "localhost"
REDIS_PORT = "6379"
REDIS_PASSWORD = "MYPASSWORD"
#REDIS_PASSWORD = ""
"""
assert expected == f.dumps()
def test_loading_complex_file_1():
toml = """
[main]
gid = 1
nid = 10
max_jobs = 100
message_id_file = "./.mid"
history_file = "./.history"
agent_controllers = ["http://localhost:8966/"]
[cmds]
[cmds.execute_js_py]
binary = "python2.7"
cwd = "./jumpscripts"
script = "{domain}/{name}.py"
[cmds.sync]
#syncthing extension
binary = "python2.7"
cwd = "./extensions/sync"
script = "{name}.py"
[cmds.sync.env]
PYTHONPATH = "../"
JUMPSCRIPTS_HOME = "../../jumpscripts"
SYNCTHING_URL = "http://localhost:8384"
[channel]
cmds = [0] # long polling from agent 0
[logging]
[logging.db]
type = "DB"
log_dir = "./logs"
levels = [2, 4, 7, 8, 9] # (all error messages) empty for all
[logging.ac]
type = "AC"
flush_int = 300 # seconds (5min)
batch_size = 1000 # max batch size, force flush if reached this count.
agent_controllers = [] # to all agents
levels = [2, 4, 7, 8, 9] # (all error messages) empty for all
[logging.console]
type = "console"
levels = [2, 4, 7, 8, 9]
[stats]
interval = 60 # seconds
agent_controllers = []
"""
contoml.loads(toml)
def test_weird_edge_case_1():
toml_text = """l = "t"
creativity = "on vacation"
"""
f = contoml.loads(toml_text)
assert f['']['l'] == 't'
def test_accessing_deeply_nested_dicts():
t = """[cmds]
[cmds.sync]
#syncthing extension
binary = "python2.7"
cwd = "./extensions/sync"
script = "{name}.py"
[cmds.sync.env]
PYTHONPATH = "../"
JUMPSCRIPTS_HOME = "../../jumpscripts"
SYNCTHING_URL = "http://localhost:8384"
"""
f = contoml.loads(t)
assert f['cmds']['sync']['env']['SYNCTHING_URL'] == 'http://localhost:8384'
f['cmds']['sync']['env']['SYNCTHING_URL'] = 'Nowhere'
expected_toml = """[cmds]
[cmds.sync]
#syncthing extension
binary = "python2.7"
cwd = "./extensions/sync"
script = "{name}.py"
[cmds.sync.env]
PYTHONPATH = "../"
JUMPSCRIPTS_HOME = "../../jumpscripts"
SYNCTHING_URL = "Nowhere"
"""
assert expected_toml == f.dumps()
def test_table_with_pound_in_title():
toml = """["key#group"]
answer = 42"""
parsed = contoml.loads(toml)
assert parsed.primitive['key#group']['answer'] == 42
def test_fails_to_parse_bad_escape_characters():
toml = r"""
invalid-escape = r"This string has a bad \a escape character."
"""
try:
contoml.loads(toml)
assert False, "Should raise an exception before getting here"
except TOMLError:
pass
def test_parsing_multiline_strings_correctly():
toml = r'''multiline_empty_one = """"""
multiline_empty_two = """
"""
multiline_empty_three = """\
"""
multiline_empty_four = """\
\
\
"""
equivalent_one = "The quick brown fox jumps over the lazy dog."
equivalent_two = """
The quick brown \
fox jumps over \
the lazy dog."""
equivalent_three = """\
The quick brown \
fox jumps over \
the lazy dog.\
"""
'''
parsed = contoml.loads(toml)
assert parsed['']['multiline_empty_one'] == parsed['']['multiline_empty_two'] == \
parsed['']['multiline_empty_three'] == parsed['']['multiline_empty_four']
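    # TOML note (added): inside a basic multiline string a line-ending
    # backslash trims the newline and any leading whitespace that follows,
    # and a newline immediately after the opening delimiter is dropped;
    # that is why all four "empty" variants parse to the empty string and
    # the three "equivalent" strings parse identically.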
def test_unicode_string_literals():
toml = u'answer = "δ"\n'
parsed = contoml.loads(toml)
assert parsed['']['answer'] == u"δ"
def test_one_entry_array_of_tables():
t = '''[[people]]
first_name = "Bruce"
last_name = "Springsteen"
'''
parsed = contoml.loads(t)
assert parsed['people'][0]['first_name'] == 'Bruce'
assert parsed['people'][0]['last_name'] == 'Springsteen'
def non_empty(iterable):
return tuple(filter(bool, iterable))
|
{
"content_hash": "9a6b2eb504a0c650f315d1cfc9952993",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 103,
"avg_line_length": 21.68421052631579,
"alnum_prop": 0.603502080443828,
"repo_name": "Jumpscale/python-consistent-toml",
"id": "f23d8b348fea8df72476f1e89302edcc20f0a261",
"size": "5795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_contoml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33702"
}
],
"symlink_target": ""
}
|
import pickle, base64
import subprocess, csv
import optparse, tempfile, re, traceback
from datetime import date
from time import localtime, strftime, sleep, time
from threading import Thread
from Queue import Queue
import urllib # for passing strings across gpssh
import shutil # for copying
import pg # Database interaction
from gppylib.gplog import * # Greenplum logging facility
from gppylib.commands import base # Greenplum layer for worker pools
from gppylib.commands import unix # Greenplum layer for unix interaction
from gppylib.commands.gp import GpCreateDBIdFile
from gppylib.db import dbconn
from gppylib.gpversion import GpVersion
from gppylib.gparray import GpArray, GpDB
from gppylib.gphostcache import GpHostCache
from gppylib.gpcoverage import GpCoverage
from gppylib.operations.gpMigratorUtil import *
libdir = os.path.join(sys.path[0], 'lib/')
logger = get_default_logger()
MIGRATIONUSER = 'gpmigrator'
LOCKEXT = '.gpmigrator_orig'
WORKDIR = 'gpmigrator'
BACKUPDIR = 'backup'
UPGRADEDIR = 'upgrade'
PARALLELISM = 16
#============================================================
def make_conn(ug, user, db, options, port, sockdir):
retries = 5
for i in range(retries):
try:
logger.debug("making database connection: user = %s, "
"dbname = %s port = %i"
% (user, db, port))
conn = pg.connect(user=user,
dbname=db,
opt=options,
port=port)
break
except pg.InternalError, e:
            if 'too many clients already' in str(e) and i < retries - 1:
logger.warning('Max Connection reached, attempt %d / %d' % (i+1, retries))
sleep(2)
continue
raise ConnectionError(str(e))
return conn
#============================================================
def cli_help(execname):
    help_path = os.path.join(sys.path[0], '..', 'docs', 'cli_help', execname + '_help')
f = None
try:
try:
            f = open(help_path)
return f.read(-1)
except:
return ''
finally:
if f: f.close()
#============================================================
def usage(execname):
print cli_help(execname) or __doc__
#============================================================
class ConnectionError(StandardError): pass
class UpgradeError(StandardError): pass
class CmdError(StandardError):
def __init__(self, cmd, stdout, stderr):
self.cmd = cmd
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return self.stderr
#============================================================
def is_supported_version(version, upgrade=True):
'''
Checks that a given GpVersion Object is supported for upgrade/downgrade.
We do not support:
Versions < PREVIOUS MINOR RELEASE
Versions > main
Versions with unusual "builds" (eg 3.4.0.0_EAP1)
'''
if upgrade:
upstr = "upgrade"
else:
upstr = "downgrade"
# Acceptable builds are 'dev', 'filerep', and any build that consists
# entirely of digits
build = version.getVersionBuild()
if not re.match(r"(dev|filerep|\d+|build|Preview_v1)", build):
raise UpgradeError(
"HAWQ '%s' is not supported for %s"
% (str(version), upstr))
if version >= "1.0.0.0" and version <= 'main':
return True
if version < "1.0.0.0":
raise UpgradeError(
"HAWQ '%s' is not supported for %s"
% (str(version), upstr))
else:
raise UpgradeError(
"To %s HAWQ '%s' use the %s tool "
"shipped with that release"
% (upstr, str(version), upstr))
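# Illustrative behaviour (added; the version strings below are schematic --
# the exact constructor format is defined by gppylib.gpversion.GpVersion):
#
#   is_supported_version(GpVersion('1.2.0.0 build 1'))  -> True
#   is_supported_version(GpVersion('0.9.0.0 build 1'))  -> UpgradeError
#   a version with an unusual build tag (e.g. _EAP1)    -> UpgradeError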
#============================================================
class GpUpgradeCmd(base.Command):
def __init__(self, name, cmd, ctxt=base.LOCAL, remoteHost=None):
cmdStr = ' '.join(cmd)
base.Command.__init__(self, name, cmdStr, ctxt, remoteHost)
#============================================================
class GPUpgradeBase(object):
def __init__(self):
self.cmd = 'MASTER' # Command being run
self.array = None
self.dbs = {} # dict of databases in the array
self.dbup = None
self.debug = False
self.faultinjection = None
self.hostcache = None
        self.interrupted = False # SIGINT received
self.logdir = None
self.logfile = None
self.masterdir = None # Master Data Directory
self.masterport = 5432 # Master Port
self.mirrors = [] # list of mirrors
        self.newenv = None # environment: new gp env
self.newhome = None # New gphome/bin directory
        self.oldenv = None # environment: old gp env
self.oldhome = None # Old gphome/bin directory
self.option = None # Argument passed to cmd
self.path = None # Default path
self.pool = None # worker pool
self.user = None # db admin user
self.workdir = None # directory: masterdir/gpmigrator
self.checkonly = False # checkcat only; do not run upgrade
#------------------------------------------------------------
def RunCmd(self, cmd, env=None, utility=False, supressDebug=False, shell=False):
'''
Runs a single command on this machine
'''
if type(cmd) in [type('string'), type(u'unicode')]:
cmdstr = cmd
cmd = cmd.split(' ')
elif type(cmd) == type(['list']):
cmdstr = ' '.join(cmd)
else:
logger.warn("Unknown RunCmd datatype '%s'" % str(type(cmd)))
cmdstr = str(cmd)
if self.debug and not supressDebug:
logger.debug("ENV: " + str(env))
logger.debug("CMD: " + cmdstr)
# If utility mode has been specified add role to env
if env == None:
env = {'PATH': self.path}
if utility:
env['PGOPTIONS'] = '-c gp_session_role=utility'
try:
pipe = subprocess.Popen(cmd, env=env,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
            result = pipe.communicate()
except OSError, e:
raise CmdError(cmdstr, '', str(e))
if self.debug and not supressDebug:
logger.debug(result[1])
if pipe.returncode:
raise CmdError(' '.join(cmd), result[0], result[1])
else:
            return result[0].strip()
#------------------------------------------------------------
def SetupEnv(self, gphome, masterdir):
'''
Sets up environment variables for Greenplum Administration
'''
home = os.environ.get('HOME')
lpath = os.environ.get('LD_LIBRARY_PATH')
dypath = os.environ.get('DYLD_LIBRARY_PATH')
# Add $GPHOME/bin to the path for this environment
path = '%s/bin:%s/ext/python/bin:%s' % (gphome, gphome, self.path)
if lpath:
lpath = '%s/lib:%s/ext/python/lib:%s' % (gphome, gphome, lpath)
else:
lpath = '%s/lib:%s/ext/python/lib' % (gphome, gphome)
if dypath:
dypath = '%s/lib:%s/ext/python/lib:%s' % (gphome, gphome, dypath)
else:
dypath = '%s/lib:%s/ext/python/lib' % (gphome, gphome)
env = {}
env['HOME'] = home
env['USER'] = self.user
env['LOGNAME'] = self.user
env['GPHOME'] = gphome
env['PATH'] = path
env['LD_LIBRARY_PATH'] = lpath
env['DYLD_LIBRARY_PATH'] = dypath
env['PYTHONPATH'] = os.path.join(gphome, 'lib', 'python')
env['PYTHONHOME'] = os.path.join(gphome, 'ext', 'python')
if masterdir:
env['MASTER_DATA_DIRECTORY'] = masterdir
env['PGPORT'] = str(self.masterport)
return env
#------------------------------------------------------------
def Select(self, qry, db='template0', port=None, forceutility=False, forceUseEnvUser=False):
        # forceUseEnvUser: user will be GPMIGRATOR if we're in lockdown mode (gpmigrator);
        # otherwise it'll be the env user. Setting forceUseEnvUser will always use the
        # env user, even when we're in gpmigrator and in lockdown.
'''
Execute a SQL query and return an array of result rows
+ Single columns will be returned as an array of values
+ Multiple columns will be returned as an array of tuples
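          e.g. (illustrative): "select datname from pg_database" yields
          ['template1', ...]; multi-column rows come back as the dicts
          produced by dictresult()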
'''
#
        # If we are executing from one of the segments then we don't
        # actually know where the data directory for the segment is,
        # so we can't check if we are in lockdown mode
#
# Fortunately we never actually lock the segments, so we can
# always run as current user.
#
if self.cmd == 'MASTER':
if not self.dbup:
raise UpgradeError('database is down')
[env, utility, upgrade] = self.dbup
locked = os.path.join(env['MASTER_DATA_DIRECTORY'],
'pg_hba.conf'+LOCKEXT)
if os.path.exists(locked) and not forceUseEnvUser:
user = MIGRATIONUSER
else:
user = env['USER']
else:
            # If we ever lock down the segments this needs to change: some
            # method would be needed to determine whether the segment in
            # question is locked
user = self.user
utility = True
        if port is None:
port = self.masterport
elif port != self.masterport:
utility = True
if utility or forceutility:
options = '-c gp_session_role=utility'
else:
options = ''
if self.__dict__.get('sock_dir'):
sockdir = self.sock_dir
else:
sockdir = '/tmp/'
conn = make_conn(self, user, db, options, port, sockdir)
curs = None
logger.debug('executing query ' + qry)
try:
curs = conn.query(qry)
except Exception, e:
conn.close()
logger.fatal('Error executing SQL')
raise ConnectionError('%s\nsql> %s' % (str(e), qry))
rows = []
        for row in curs.dictresult():
            if len(row) == 1:
                rows.append(row.values()[0])
            else:
                rows.append(row)
conn.close()
return rows
#------------------------------------------------------------
def Update(self, qry, db='template0', port=None, forceutility=False, upgradeMode=False, modSysTbl=False, forceUseEnvUser=False, defSysTbl=False):
        # forceUseEnvUser: user will be GPMIGRATOR if we're in lockdown mode (gpmigrator);
        # otherwise it'll be the env user. Setting forceUseEnvUser will always use the
        # env user, even when we're in gpmigrator and in lockdown.
'''
Execute a SQL query expecting no output
'''
# If we are executing from one of the segments then we don't
# have the current database state, so we have to know what to
# do via other means.
#
# The assumption is:
# A) utility = true
# B) pguser = MIGRATIONUSER
#
# If we ever need to run as the non-upgrade user from a segment
# this code will obviously need to change
#
if self.cmd == 'MASTER':
if not self.dbup:
raise UpgradeError('database is down')
[env, utility, upgrade] = self.dbup
locked = os.path.join(env['MASTER_DATA_DIRECTORY'],
'pg_hba.conf'+LOCKEXT)
if os.path.exists(locked) and not forceUseEnvUser:
user = MIGRATIONUSER
else:
user = env['USER']
else:
            # If we ever lock down the segments this needs to change: some
            # method would be needed to determine whether the segment in
            # question is locked
user = self.user
utility = True
        if port is None:
port = self.masterport
options = ' '
if modSysTbl and defSysTbl: options += ' -c allow_system_table_mods=all'
elif modSysTbl: options += ' -c allow_system_table_mods=dml'
elif defSysTbl: options += ' -c allow_system_table_mods=ddl'
if utility or forceutility: options += ' -c gp_session_role=utility'
if upgradeMode: options += ' -c gp_maintenance_conn=true'
if self.__dict__.get('sock_dir'):
sockdir = self.sock_dir
else:
sockdir = '/tmp/'
conn = make_conn(self, user, db, options, port, sockdir)
logger.debug('executing query ' + qry)
try:
conn.query(qry)
except Exception, e:
logger.fatal('Error executing SQL')
raise ConnectionError('%s\nsql> %s' % (str(e), qry))
finally:
conn.close()
#------------------------------------------------------------
    def CheckUp(self, tryconn=False):
'''
Checks if one of the postmasters is up and running.
Return the environment of the running server
'''
olddir = self.oldenv['MASTER_DATA_DIRECTORY']
newdir = self.newenv['MASTER_DATA_DIRECTORY']
env = None
if olddir and os.path.exists('%s/postmaster.pid' % olddir):
logger.debug('CheckUp: found old postmaster running')
env = self.oldenv
if newdir and os.path.exists('%s/postmaster.pid' % newdir):
logger.debug('CheckUp: found new postmaster running')
env = self.newenv
if not env:
logger.info('Postmaster not running')
raise UpgradeError("Postmaster failed to start")
if tryconn:
try:
logger.debug('trying to establish connection to database')
logger.debug('user = %s, db = template0, port = %i, dir = %s'
% (self.user, self.masterport, self.sock_dir))
conn = make_conn(self, self.user, 'template0', '', self.masterport,
self.sock_dir)
except Exception:
raise UpgradeError("Found a postmaster.pid but postmaster " +
"running")
conn.close()
return env
#------------------------------------------------------------
def CheckDown(self, warn=False):
'''
Checks that neither postmaster is running and that the database
was cleanly shutdown.
'''
if self.cmd == 'MASTER':
datadirs = [ self.masterdir ]
else:
datadirs = self.datadirs
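        # pg_controldata output contains a line like
        #   "Database cluster state:              shut down"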
shutdown_re = re.compile('Database cluster state: *(.*)')
for oldseg in datadirs:
(d, content) = os.path.split(oldseg)
newseg = os.path.join(d, WORKDIR, UPGRADEDIR, content)
            # Only check the upgrade directory if it's not legacy
if self.datadirs:
dirs = [oldseg, newseg]
else:
dirs = [oldseg]
for dir in dirs:
if dir == oldseg: env = self.oldenv
else: env = self.newenv
# upgrade directory might not actually exist
if not os.path.isdir(dir):
continue
pid = os.path.join(dir, 'postmaster.pid')
if os.path.exists(pid):
raise UpgradeError("Greenplum process running: " + pid)
shutdown = self.RunCmd('pg_controldata ' + dir, env=env)
for line in shutdown.split('\n'):
m = shutdown_re.match(line)
if m:
if m.group(1) == 'shut down':
break
msg = 'pg_controldata: "Database cluster state: %s"\n' % m.group(1)
msg += 'Greenplum segment %s did not shutdown cleanly' % dir
if warn:
logger.warn(msg)
else:
raise UpgradeError(msg)
if self.cmd == 'MASTER' and self.datadirs:
self.CallSlaves('CHKDOWN')
return True
#------------------------------------------------------------
    # In hawq2.0 the start command changed, so utility mode and upgrade mode
    # now conflict. Upgrade mode also changed to start only the master,
    # because the segment catalogs will be re-initialized.
def Startup(self, env, utility=False, upgrade=False):
'''
Starts up the specified database
'''
if self.dbup:
raise UpgradeError('database already started')
self.dbup = [env, utility, upgrade]
isu = ""
if utility:
isu = " in utility mode"
elif upgrade:
isu = " in upgrade mode"
if (env == self.oldenv):
logger.info('Starting old Greenplum postmaster%s' % isu)
else:
logger.info('Starting new Greenplum postmaster%s' % isu)
try:
if (env == self.oldenv):
cmd = "gpstart -a"
else:
                # Upgrade mode in hawq2.0 only starts the master node.
if utility or upgrade:
cmd = "hawq start master -a"
else:
cmd = "hawq start cluster -a"
if utility:
cmd = cmd +' -m'
env['GPSTART_INTERNAL_MASTER_ONLY'] = '1'
if upgrade: cmd = cmd +' -U upgrade'
cmd += ' -l %s' % self.logdir
locked = os.path.join(env['MASTER_DATA_DIRECTORY'],
'pg_hba.conf'+LOCKEXT)
try:
if os.path.exists(locked):
env['PGUSER'] = MIGRATIONUSER
logger.debug("lockfile: '%s' exists" % locked)
else:
logger.debug("lockfile: '%s' does not exist" % locked)
logger.debug("Starting cluster with env = %s" % str(env))
pid = subprocess.Popen(cmd, preexec_fn=os.setpgrp,
env=env, shell=True,
stdout=self.logfile,
stderr=self.logfile,
close_fds=True)
finally:
if os.path.exists(locked):
del env['PGUSER']
# Ignore interrupt requests until startup is done
error = None
retcode = None
            while retcode is None:
                try:
                    retcode = pid.wait()
                except KeyboardInterrupt, e:
                    if not self.interrupted:
                        logger.fatal('***************************************')
                        logger.fatal('SIGINT-: Upgrade Aborted')
                        logger.fatal('***************************************')
                        logger.info('Performing clean abort')
                        self.interrupted = True
                        error = e
                    else:
                        logger.info('SIGINT-: Still processing startup')
            if retcode < 0:
                raise UpgradeError("Startup terminated by signal")
today = date.today().strftime('%Y%m%d')
logname = os.path.join(self.logdir, 'gpstart_%s.log' % today)
if retcode == 1:
logger.warn('***************************************')
logger.warn('Warnings generated starting cluster')
logger.warn('Check %s for detailed warnings' % logname)
logger.warn('***************************************')
if retcode > 1:
logger.fatal('***************************************')
logger.fatal('Startup failed with error code %d' % retcode)
logger.fatal('Check %s for detailed warnings' % logname)
logger.fatal('***************************************')
raise UpgradeError('Startup failed')
            # If we received an interrupt, re-raise it now that the startup is done
if error:
raise error
except OSError, e:
logger.fatal(str(e))
raise UpgradeError('Startup failed')
        self.CheckUp()
#------------------------------------------------------------
def Shutdown(self):
'''
Stops the specified database
'''
if not self.dbup:
return
[env, utility, upgrade] = self.dbup
dir = env['MASTER_DATA_DIRECTORY']
if not os.path.exists('%s/postmaster.pid' % dir):
logger.warn(
'Shutdown skipped - %s/postmaster.pid not found' % dir)
return
if (env == self.oldenv):
logger.info('Shutting down old Greenplum postmaster')
else:
logger.info('Shutting down new Greenplum postmaster')
locked = os.path.join(env['MASTER_DATA_DIRECTORY'],
'pg_hba.conf'+LOCKEXT)
try:
# Note on arguments to gpstop:
# This code has gone back and forth on -s vs -f for shutdown:
#
# -f aborts active connections. This is a good thing. If
# a user or script snuck in a connection before we were able
# to establish the lockdown then we want to abort that
# connection otherwise it will cause the upgrade to fail and
# that is bad.
#
# Prior versions would sometimes issue a kill for fast
# shutdown, this is a problem since we need the database
# shutdown cleanly with no pending xlog transactions.
# Because of that we switched to -s.
#
# -s causes problems because it is a stop that will fail if a
# session is connected and we want to abort active sessions.
# Because of that we switched back to -f.
#
# The belief is currently that the current version of gpstop
# should be good with passing -f. To help safeguard this belief
# there is a check when we set the catalog version to ensure
# that the database shutdown cleanly.
#
# If this needs to be changed again please read the above,
# consider what happens if you try to upgrade with an active
            # connection open to the database, and proceed cautiously.
if (env == self.oldenv):
if utility: cmd = 'gpstop -a -f -m'
else: cmd = 'gpstop -a -f'
else:
if utility or upgrade: cmd = 'hawq stop master -a -M fast'
else: cmd = 'hawq stop cluster -a -M fast'
cmd += ' -l %s' % self.logdir
locked = os.path.join(env['MASTER_DATA_DIRECTORY'],
'pg_hba.conf'+LOCKEXT)
try:
if os.path.exists(locked):
env['PGUSER'] = MIGRATIONUSER
logger.debug('handling locked shutdown')
logger.debug('shutting down with env: %s' % str(env))
logger.debug('shutting down with command: %s' % cmd)
pid = subprocess.Popen(cmd, preexec_fn=os.setpgrp,
env=env, shell=True,
stdout=self.logfile,
stderr=self.logfile,
close_fds=True)
finally:
if os.path.exists(locked):
del env['PGUSER']
self.dbup = None
# Ignore interrupt requests until shutdown is done
error = None
retcode = None
            while retcode is None:
                try:
                    retcode = pid.wait()
                except KeyboardInterrupt, e:
                    if not self.interrupted:
                        logger.fatal('***************************************')
                        logger.fatal('SIGINT-: Upgrade Aborted')
                        logger.fatal('***************************************')
                        logger.info('Performing clean abort')
                        self.interrupted = True
                        error = e
                    else:
                        logger.info('SIGINT-: Still processing shutdown')
            if retcode < 0:
                raise UpgradeError("Shutdown terminated by signal")
today = date.today().strftime('%Y%m%d')
logname = os.path.join(self.logdir, 'gpstop_%s.log' % today)
if retcode == 1:
logger.warn('***************************************')
logger.warn('Warnings generated stopping cluster')
logger.warn('Check %s for detailed warnings' % logname)
logger.warn('***************************************')
if retcode > 1:
logger.fatal('***************************************')
logger.fatal('Shutdown failed with error code %d' % retcode)
logger.fatal('Check %s for detailed error' % logname)
logger.fatal('***************************************')
raise UpgradeError('Shutdown failed')
            # If we received an interrupt, re-raise it now that the shutdown is done
if error:
raise error
except OSError, e:
logger.fatal(str(e))
raise UpgradeError('Shutdown failed')
        self.CheckDown()
#------------------------------------------------------------
def PreUpgradeCheck(self):
'''
Check master/segment binary version, GUC, free space, catalog
'''
self.CheckBinaryVersion()
self.CheckGUCs()
# XXX: cannot rely on gp_toolkit to check free disk space
# self.CheckFreeSpace()
# XXX: gpcheckcat not yet designed for HAWQ
# self.CheckCatalog()
#------------------------------------------------------------
def CheckBinaryVersion(self):
'''
Validate that the correct binary is installed in all segments
'''
logger.info('Checking Segment binary version')
        hosts = self.Select("select distinct hostname from gp_segment_configuration")
masterversion = self.getversion(self.newhome, self.newenv)
cmdStr = '%s/bin/pg_ctl --hawq-version' % self.newhome
for uh in hosts:
cmd = base.Command(uh, cmdStr, base.REMOTE, uh)
self.pool.addCommand(cmd)
self.pool.join()
items = self.pool.getCompletedItems()
for i in items:
if i.results.rc:
logger.error("error on host %s with error: %s" % (i.remoteHost, i.results.stderr))
raise UpgradeError('Cannot verify segment GPDB binary')
if not i.results.stdout:
logger.error("could not find version string from host %s with command: %s" % (i.remoteHost, cmdStr))
raise UpgradeError('Cannot verify segment GPDB binary')
version_string = i.results.stdout.strip()
if version_string != masterversion:
logger.error("version string on host %s: '%s' does not match expected: '%s'" % (i.remoteHost, version_string, masterversion))
raise UpgradeError('Master/Segment binary mismatch')
#------------------------------------------------------------
def CheckGUCs(self):
'''
Validate that the GUCs are set properly
- gp_external_enable_exec must be set to true
'''
logger.info('Checking GUCs')
        disable_exec_ext = self.Select("select current_setting('gp_external_enable_exec')")
        if disable_exec_ext[0] == 'off':
logger.fatal('***************************************')
logger.fatal("gp_external_enable_exec is set to 'false'.")
logger.fatal("Please set gp_external_enable_exec to 'true'.")
logger.fatal('***************************************')
raise UpgradeError('Invalid GUC value')
#------------------------------------------------------------
def CheckFreeSpace(self):
'''
        Validate that we have enough space for the upgrade.
        Right now we require 2GB, which is an arbitrary threshold.
'''
logger.info('Checking Free Space')
nospacesegs = self.Select("select dfsegment from gp_toolkit.gp_disk_free where dfspace::float/1024/1024 < 2")
        if len(nospacesegs) > 0:
logger.fatal('***************************************')
            logger.fatal('The following segment IDs have less than 2GB of free space.')
logger.fatal('Please make sure that there is at least 2GB of free space on each segment before upgrade.')
logger.fatal(nospacesegs)
logger.fatal('***************************************')
raise UpgradeError('Insufficient Space on Segment')
        df = unix.DiskFree.get_disk_free_info_local('gpmigrator_check_freespace', self.masterdir)
        mdf = float(df[3]) / 1024 / 1024
        if mdf < 2:
logger.fatal('***************************************')
logger.fatal('The Master data directory has only %sGB of free space.' % mdf)
logger.fatal('Please make sure that there is at least 2GB of free space on the master.')
logger.fatal('***************************************')
raise UpgradeError('Insufficient Space on Master')
#------------------------------------------------------------
def CheckCatalog(self):
'''
Validate that the created catalog looks good
'''
logger.info('Checking Catalog')
self.CheckUp()
[env, utility, upgrade] = self.dbup
locked = os.path.join(env['MASTER_DATA_DIRECTORY'],
'pg_hba.conf'+LOCKEXT)
if os.path.exists(locked):
user = MIGRATIONUSER
else:
user = env['USER']
exec_cmd = libdir + '/gpcheckcat -p %d -U %s -B %i ' % \
(self.masterport, user, PARALLELISM)
oids = sorted(self.dbs.keys())
for dboid in oids:
db = self.dbs[dboid]
            # Because hawq1.x allows users to connect to template0, we can't skip it here
#if db == 'template0':
#continue
if db == 'gpperfmon':
self.Checkgpperfmon()
cmd = exec_cmd + db
logger.info('... Checking ' + db)
logger.debug("CMD: " + cmd)
outfilename = os.path.join(self.workdir, 'gpcheckcat_%s.log' % db)
outfilename = outfilename.replace(' ', '_')
outfile = open(outfilename, 'w')
pipe = subprocess.Popen(cmd, shell=True, stdout=outfile, stderr=outfile,close_fds=True)
retcode = pipe.wait()
if retcode < 0:
raise UpgradeError('Catalog Check terminated by signal')
if retcode > 0:
raise UpgradeError('Catalog Check Failed - see %s for details' % outfilename)
#------------------------------------------------------------
def Checkgpperfmon(self):
'''
gpperfmon validation check
1. move outstanding flat file data to data.<time> dir
2. dry run the upgrade script and then rollback
'''
try:
logger.info("Checking gpperfmon")
perfdatadir = os.path.join(self.masterdir, "gpperfmon", "data")
            if os.path.isdir(perfdatadir):
perfdatabkup = os.path.join(self.masterdir, "gpperfmon", "data.%s" % time())
os.rename(perfdatadir, perfdatabkup)
# test run gpperfmon upgrade script
rolname = self.Select("select rolname from pg_authid where oid=10")[0]
fname = os.path.join(self.newhome, "lib/gpperfmon/gpperfmon42.sql")
sql = open(fname, 'r').read()
sql = "BEGIN;\nSET SESSION AUTHORIZATION %s;\n%s\nROLLBACK;" % (rolname, sql)
self.Update(sql, db='gpperfmon')
except BaseException, e:
sys.stderr.write(traceback.format_exc())
sys.stderr.write(str(e))
raise e
#------------------------------------------------------------
def PerformPostUpgrade(self):
'''
Handles various post upgrade tasks including:
- Populates the hawq_toolkit schema
- Performs updates for the gpperfmon database
'''
try:
# Get the admin role and all the databases
logger.info("Installing hawq_toolkit")
rolname = self.Select("select rolname from pg_authid where oid=10")[0]
# Read the toolkit sql file into memory
fname = '%s/share/postgresql/gp_toolkit.sql' % self.newhome
sql = open(fname, 'r').read()
# Set our role to the admin role then execute the sql script
sql = "SET SESSION AUTHORIZATION %s;\n%s" % (rolname, sql)
oids = sorted(self.dbs.keys())
for dboid in oids:
db = self.dbs[dboid]
if db == 'template0':
continue
self.Update(sql, db=db)
except BaseException, e:
sys.stderr.write(traceback.format_exc())
sys.stderr.write(str(e))
raise e
#------------------------------------------------------------
    def getversion(self, home, env):
binary = os.path.join(home, 'bin', 'pg_ctl')
if not os.path.exists(binary):
raise UpgradeError(binary + ' not found')
try:
return self.RunCmd('pg_ctl --hawq-version', env=env)
except CmdError:
pass
conf = '%s/include/pg_config.h' % home
if os.path.exists(conf):
cmd = 'grep PG_VERSION_STR ' + conf
try:
                return self.RunCmd(cmd, env=env)
except Exception, e:
logger.fatal(str(e))
raise UpgradeError('Unable to determine version of %s' % home)
|
{
"content_hash": "165ae321548178702495003c9db4c90d",
"timestamp": "",
"source": "github",
"line_count": 893,
"max_line_length": 149,
"avg_line_length": 39.370660694288915,
"alnum_prop": 0.4981227601114967,
"repo_name": "cwelton/incubator-hawq",
"id": "73c5637e1a66f0f92876b9b9457ca9e9f3f15fbb",
"size": "36037",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/bin/gppylib/operations/gpMigratorUtil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5196"
},
{
"name": "Batchfile",
"bytes": "11532"
},
{
"name": "C",
"bytes": "32524222"
},
{
"name": "C++",
"bytes": "4920731"
},
{
"name": "CMake",
"bytes": "82508"
},
{
"name": "DTrace",
"bytes": "1154"
},
{
"name": "Groff",
"bytes": "30181"
},
{
"name": "HTML",
"bytes": "69676"
},
{
"name": "Java",
"bytes": "1883275"
},
{
"name": "Lex",
"bytes": "196336"
},
{
"name": "Makefile",
"bytes": "405870"
},
{
"name": "Objective-C",
"bytes": "10954"
},
{
"name": "PLSQL",
"bytes": "188742"
},
{
"name": "PLpgSQL",
"bytes": "2418721"
},
{
"name": "Perl",
"bytes": "987905"
},
{
"name": "Protocol Buffer",
"bytes": "43646"
},
{
"name": "Python",
"bytes": "111333"
},
{
"name": "SQLPL",
"bytes": "164386"
},
{
"name": "Shell",
"bytes": "150873"
},
{
"name": "Smarty",
"bytes": "244244"
},
{
"name": "Thrift",
"bytes": "9459"
},
{
"name": "Yacc",
"bytes": "440756"
}
],
"symlink_target": ""
}
|
"""Tests for the Netgear config flow."""
from unittest.mock import Mock, patch
from pynetgear import DEFAULT_USER
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.netgear.const import (
CONF_CONSIDER_HOME,
DOMAIN,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from homeassistant.config_entries import SOURCE_SSDP, SOURCE_USER
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from tests.common import MockConfigEntry
URL = "http://routerlogin.net"
URL_SSL = "https://routerlogin.net"
SERIAL = "5ER1AL0000001"
ROUTER_INFOS = {
"Description": "Netgear Smart Wizard 3.0, specification 1.6 version",
"SignalStrength": "-4",
"SmartAgentversion": "3.0",
"FirewallVersion": "net-wall 2.0",
"VPNVersion": None,
"OthersoftwareVersion": "N/A",
"Hardwareversion": "N/A",
"Otherhardwareversion": "N/A",
"FirstUseDate": "Sunday, 30 Sep 2007 01:10:03",
"DeviceMode": "0",
"ModelName": "RBR20",
"SerialNumber": SERIAL,
"Firmwareversion": "V2.3.5.26",
"DeviceName": "Desk",
"DeviceNameUserSet": "true",
"FirmwareDLmethod": "HTTPS",
"FirmwareLastUpdate": "2019_10.5_18:42:58",
"FirmwareLastChecked": "2020_5.3_1:33:0",
"DeviceModeCapability": "0;1",
}
TITLE = f"{ROUTER_INFOS['ModelName']} - {ROUTER_INFOS['DeviceName']}"
TITLE_INCOMPLETE = ROUTER_INFOS["ModelName"]
HOST = "10.0.0.1"
SERIAL_2 = "5ER1AL0000002"
PORT = 80
SSL = False
USERNAME = "Home_Assistant"
PASSWORD = "password"
SSDP_URL = f"http://{HOST}:{PORT}/rootDesc.xml"
SSDP_URL_SSL = f"https://{HOST}:{PORT}/rootDesc.xml"
@pytest.fixture(name="service")
def mock_controller_service():
"""Mock a successful service."""
with patch(
"homeassistant.components.netgear.async_setup_entry", return_value=True
), patch("homeassistant.components.netgear.router.Netgear") as service_mock:
service_mock.return_value.get_info = Mock(return_value=ROUTER_INFOS)
service_mock.return_value.port = 80
service_mock.return_value.ssl = False
yield service_mock
@pytest.fixture(name="service_5555")
def mock_controller_service_5555():
"""Mock a successful service."""
with patch(
"homeassistant.components.netgear.async_setup_entry", return_value=True
), patch("homeassistant.components.netgear.router.Netgear") as service_mock:
service_mock.return_value.get_info = Mock(return_value=ROUTER_INFOS)
service_mock.return_value.port = 5555
service_mock.return_value.ssl = True
yield service_mock
@pytest.fixture(name="service_incomplete")
def mock_controller_service_incomplete():
"""Mock a successful service."""
router_infos = ROUTER_INFOS.copy()
router_infos.pop("DeviceName")
with patch(
"homeassistant.components.netgear.async_setup_entry", return_value=True
), patch("homeassistant.components.netgear.router.Netgear") as service_mock:
service_mock.return_value.get_info = Mock(return_value=router_infos)
service_mock.return_value.port = 80
service_mock.return_value.ssl = False
yield service_mock
@pytest.fixture(name="service_failed")
def mock_controller_service_failed():
"""Mock a failed service."""
with patch("homeassistant.components.netgear.router.Netgear") as service_mock:
service_mock.return_value.login_try_port = Mock(return_value=None)
service_mock.return_value.get_info = Mock(return_value=None)
yield service_mock
async def test_user(hass, service):
"""Test user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# Have to provide all config
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == TITLE
assert result["data"].get(CONF_HOST) == HOST
assert result["data"].get(CONF_PORT) == PORT
assert result["data"].get(CONF_SSL) == SSL
assert result["data"].get(CONF_USERNAME) == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
async def test_user_connect_error(hass, service_failed):
"""Test user step with connection failure."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# Have to provide all config
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "config"}
async def test_user_incomplete_info(hass, service_incomplete):
"""Test user step with incomplete device info."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# Have to provide all config
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == TITLE_INCOMPLETE
assert result["data"].get(CONF_HOST) == HOST
assert result["data"].get(CONF_PORT) == PORT
assert result["data"].get(CONF_SSL) == SSL
assert result["data"].get(CONF_USERNAME) == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
async def test_abort_if_already_setup(hass, service):
"""Test we abort if the router is already setup."""
MockConfigEntry(
domain=DOMAIN,
data={CONF_PASSWORD: PASSWORD},
unique_id=SERIAL,
).add_to_hass(hass)
# Should fail, same SERIAL (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp_already_configured(hass):
"""Test ssdp abort when the router is already configured."""
MockConfigEntry(
domain=DOMAIN,
data={CONF_PASSWORD: PASSWORD},
unique_id=SERIAL,
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
            ssdp_location=SSDP_URL_SSL,
upnp={
ssdp.ATTR_UPNP_MODEL_NUMBER: "RBR20",
ssdp.ATTR_UPNP_PRESENTATION_URL: URL,
ssdp.ATTR_UPNP_SERIAL: SERIAL,
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_ssdp(hass, service):
"""Test ssdp step."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location=SSDP_URL,
upnp={
ssdp.ATTR_UPNP_MODEL_NUMBER: "RBR20",
ssdp.ATTR_UPNP_PRESENTATION_URL: URL,
ssdp.ATTR_UPNP_SERIAL: SERIAL,
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == TITLE
assert result["data"].get(CONF_HOST) == HOST
assert result["data"].get(CONF_PORT) == PORT_80
assert result["data"].get(CONF_SSL) == SSL
assert result["data"].get(CONF_USERNAME) == DEFAULT_USER
assert result["data"][CONF_PASSWORD] == PASSWORD
async def test_ssdp_port_5555(hass, service_5555):
"""Test ssdp step with port 5555."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
            ssdp_location=SSDP_URL_SSL,
upnp={
ssdp.ATTR_UPNP_MODEL_NUMBER: MODELS_PORT_5555[0],
ssdp.ATTR_UPNP_PRESENTATION_URL: URL_SSL,
ssdp.ATTR_UPNP_SERIAL: SERIAL,
},
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == TITLE
assert result["data"].get(CONF_HOST) == HOST
assert result["data"].get(CONF_PORT) == PORT_5555
assert result["data"].get(CONF_SSL) is True
assert result["data"].get(CONF_USERNAME) == DEFAULT_USER
assert result["data"][CONF_PASSWORD] == PASSWORD
async def test_options_flow(hass, service):
"""Test specifying non default settings using options flow."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_PASSWORD: PASSWORD},
unique_id=SERIAL,
title=TITLE,
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_CONSIDER_HOME: 1800,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_CONSIDER_HOME: 1800,
}
|
{
"content_hash": "91ea72e8f803e31057c2f55dbd0c1439",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 82,
"avg_line_length": 33.74772036474164,
"alnum_prop": 0.6364045753399982,
"repo_name": "rohitranjan1991/home-assistant",
"id": "33c634e250a8ffb094fb570a5d4ad50d4efb48b0",
"size": "11103",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/netgear/test_config_flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from ..defs import (task_name_sep, task_state_to_int, task_int_to_state)
from ...util import option_list
from ...io import findfile
from .base import (BaseTask, task_classes)
from desiutil.log import get_logger
import sys, re, os, copy
# NOTE: only one class in this file should have a name that starts with "Task".
class TaskStarFit(BaseTask):
"""Class containing the properties of one extraction task.
"""
def __init__(self):
super(TaskStarFit, self).__init__()
        # then put in the specifics of this class
# _cols must have a state
self._type = "starfit"
self._cols = [
"night",
"spec",
"expid",
"state"
]
self._coltypes = [
"integer",
"integer",
"integer",
"integer"
]
# _name_fields must also be in _cols
self._name_fields = ["night","spec","expid"]
self._name_formats = ["08d","d","08d"]
def _paths(self, name):
"""See BaseTask.paths.
"""
props = self.name_split(name)
return [ findfile("stdstars", night=props["night"], expid=props["expid"],
groupname=None, nside=None, camera=None, band=None,
spectrograph=props["spec"]) ]
def _deps(self, name, db, inputs):
"""See BaseTask.deps.
"""
from .base import task_classes
props = self.name_split(name)
# we need all the cameras for the fit of standard stars
deptasks = dict()
for band in ["b","r","z"] :
props_and_band = props.copy()
props_and_band["band"] = band
deptasks[band+"-frame"]=task_classes["extract"].name_join(props_and_band)
deptasks[band+"-fiberflat"]=task_classes["fiberflatnight"].name_join(props_and_band)
deptasks[band+"-sky"]=task_classes["sky"].name_join(props_and_band)
return deptasks
def _run_max_procs(self):
# This is a serial task.
return 1
def _run_time(self, name, procs, db):
# Run time on one proc on machine with scale factor == 1.0
return 35.0
def _run_max_mem_proc(self, name, db):
# Per-process memory requirements
return 5.0
def _run_defaults(self):
"""See BaseTask.run_defaults.
"""
import glob
log = get_logger()
opts = {}
starmodels = None
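        # Take the last template file in sorted order (i.e. the newest
        # version); fall back to the generic star templates if no stdstar
        # templates are found under DESI_BASIS_TEMPLATES.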
if "DESI_BASIS_TEMPLATES" in os.environ:
filenames = sorted(glob.glob(os.environ["DESI_BASIS_TEMPLATES"]+"/stdstar_templates_*.fits"))
if len(filenames) > 0 :
starmodels = filenames[-1]
else:
filenames = sorted(glob.glob(os.environ["DESI_BASIS_TEMPLATES"]+"/star_templates_*.fits"))
log.warning('Unable to find stdstar templates in {}; using star templates instead'.format(
os.getenv('DESI_BASIS_TEMPLATES')))
if len(filenames) > 0 :
starmodels = filenames[-1]
else:
msg = 'Unable to find stdstar or star templates in {}'.format(
os.getenv('DESI_BASIS_TEMPLATES'))
log.error(msg)
raise RuntimeError(msg)
else:
log.error("DESI_BASIS_TEMPLATES not set!")
raise RuntimeError("could not find the stellar templates")
opts["starmodels"] = starmodels
opts["delta-color"] = 0.2
opts["color"] = "G-R"
return opts
def _option_list(self, name, opts):
"""Build the full list of options.
This includes appending the filenames and incorporating runtime
options.
"""
from .base import task_classes, task_type
log = get_logger()
deps = self.deps(name)
options = {}
### options["ncpu"] = 1
options["outfile"] = self.paths(name)[0]
options["frames"] = []
options["skymodels"] = []
options["fiberflats"] = []
# frames skymodels fiberflats
props = self.name_split(name)
for band in ["b", "r", "z"] :
props_and_band = props.copy()
props_and_band["band"] = band
task = task_classes["extract"].name_join(props_and_band)
frame_filename = task_classes["extract"].paths(task)[0]
task = task_classes["fiberflatnight"].name_join(props_and_band)
fiberflat_filename = task_classes["fiberflatnight"].paths(task)[0]
task = task_classes["sky"].name_join(props_and_band)
sky_filename = task_classes["sky"].paths(task)[0]
# check all files exist
if os.path.isfile(frame_filename) \
and os.path.isfile(fiberflat_filename) \
and os.path.isfile(sky_filename) :
options["frames"].append(frame_filename)
options["skymodels"].append(sky_filename)
options["fiberflats"].append(fiberflat_filename)
else :
log.warning("missing band {} for {}".format(band,name))
options.update(opts)
return option_list(options)
def _run_cli(self, name, opts, procs, db):
"""See BaseTask.run_cli.
"""
entry = "desi_fit_stdstars"
optlist = self._option_list(name, opts)
com = "{} {}".format(entry, " ".join(optlist))
return com
def _run(self, name, opts, comm, db):
"""See BaseTask.run.
"""
from ...scripts import stdstars
optlist = self._option_list(name, opts)
args = stdstars.parse(optlist)
stdstars.main(args)
return
def postprocessing(self, db, name, cur):
"""For successful runs, postprocessing on DB"""
        # run getready on all fluxcalib tasks with the same night, expid and spec
props = self.name_split(name)
log = get_logger()
tt="fluxcalib"
cmd = "select name from {} where night={} and expid={} and spec={} and state=0".format(tt,props["night"],props["expid"],props["spec"])
cur.execute(cmd)
tasks = [ x for (x,) in cur.fetchall() ]
log.debug("checking {}".format(tasks))
for task in tasks :
task_classes[tt].getready( db=db,name=task,cur=cur)
|
{
"content_hash": "98f59b5f33e3eeb6db5bdf75c460866c",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 142,
"avg_line_length": 33.37435897435898,
"alnum_prop": 0.5534726490473264,
"repo_name": "desihub/desispec",
"id": "ceea560a7ab0e79f8eeb56ac70023f7d806371e5",
"size": "6596",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/desispec/pipeline/tasks/starfit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "761"
},
{
"name": "Python",
"bytes": "4219435"
},
{
"name": "Shell",
"bytes": "17927"
}
],
"symlink_target": ""
}
|
"""
Created on Thu Aug 6 18:21:36 2015
@author: wirkert
"""
import numpy as np
class Msi():
""" a multi spectral image stack consisting of:
image: a rows x columns x nrWavelengths dimensional array
properties: additional, application dependent properties
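    Example (illustrative):
        >>> msi = Msi(image=np.zeros((5, 5, 3)))
        >>> msi.get_wavelengths()  # defaults to np.arange(3)
        array([0, 1, 2])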
"""
def __init__(self, image=None, properties=None):
if image is None:
image = np.array([])
if properties is None:
properties = {}
self._image = image
self._properties = properties
self._assure_basic_properties()
self._test_image()
def get_image(self):
return self._image
def set_image(self, image, wavelengths=None):
"""
Put a new image into this msi
Args:
image: the rows x columns x nrWavelengths dimensional array
np.array.
wavelengths: a np.array of size nrWavelengths. If the number of
wavelengths hasn't change this is not needed.
"""
self._image = image
if wavelengths is not None:
self.set_wavelengths(wavelengths)
self._assure_basic_properties()
self._test_image()
def get_wavelengths(self):
""" shortcut to get the wavelengths property
The wavelengths are given in [m] units and need not be sorted. """
if 'wavelengths' not in self.get_properties():
return None
return self._properties['wavelengths']
def set_wavelengths(self, wavelengths):
""" shortcut to set the wavelengths property """
w_prop = {"wavelengths":wavelengths}
self.add_property(w_prop)
self._test_image()
def get_properties(self):
return self._properties
def add_property(self, newProperty):
""" add a new property(ies) to the existing properties """
self._properties.update(newProperty)
self._test_image()
def set_mask(self, mask):
"""" applies a masked to the Msi. After this call, the image is of
type MaskedArray. If the image was already masked, the existing
masked will be "or ed" with the new mask. mask is a boolean array of
the same shape as self.get_image()
Args:
mask: a mask of the same size as the image. 1s stand for pixels
masked out, 0s for pixels not masked."""
if not isinstance(self.get_image(), np.ma.MaskedArray):
self.set_image(np.ma.masked_array(self.get_image(), mask,
fill_value=999999))
else:
self.get_image()[mask] = np.ma.masked
def __eq__(self, other):
"""
        override the == operator
        Two Msi instances are the same if they contain the same image and properties.
Note: properties not implemented yet!
"""
if isinstance(other, Msi):
samesame = np.array_equal(other.get_image(), self.get_image())
return samesame
return NotImplemented
def __ne__(self, other):
""" != operator implemented by inverting to =="""
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def _assure_basic_properties(self):
"""
helper method to automatically add the basic properties:
wavelength
        to the msi if not added explicitly. basic wavelengths will just be
        integers from 0 to nrWavelengths - 1
"""
if self._image.size > 0 and (
("wavelengths" not in self._properties.keys() or
self._properties["wavelengths"].size == 0)):
self._properties["wavelengths"] = np.arange(self._image.shape[-1])
if self._image.size == 0 and "wavelengths" not in self._properties.keys():
self._properties["wavelengths"] = np.array([])
def _test_image(self):
"""
helper method which tests for the integrity of the msi.
E.g. the number of wavelengths must match the number of bands.
"""
# either both image and wavelength property are empty
if self._image.size == 0 and len(self._properties["wavelengths"]) != 0:
raise RuntimeError("dimension of image and wavelength mismatch: " +
"image size is zero, but wavelengths are set")
        # or the number of bands must match the number of wavelengths
        elif self._image.shape[-1] != len(self._properties["wavelengths"]):
            raise RuntimeError("dimension of image and wavelength mismatch: " +
                               "image size and wavelengths do not match")
|
{
"content_hash": "74af5647ea4d823757130c5cbdb04eda",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 82,
"avg_line_length": 36.23622047244095,
"alnum_prop": 0.5884398087787919,
"repo_name": "fmilano/mitk",
"id": "5638d44ad7ce8e8e8bfb04c396cd4c83372fb041",
"size": "4626",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Modules/Biophotonics/python/iMC/msi/msi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2668761"
},
{
"name": "C++",
"bytes": "25270216"
},
{
"name": "CSS",
"bytes": "52056"
},
{
"name": "Java",
"bytes": "350330"
},
{
"name": "JavaScript",
"bytes": "47660"
},
{
"name": "Makefile",
"bytes": "742"
},
{
"name": "Objective-C",
"bytes": "509788"
},
{
"name": "Perl",
"bytes": "982"
},
{
"name": "Python",
"bytes": "7545"
},
{
"name": "Shell",
"bytes": "6507"
},
{
"name": "TeX",
"bytes": "1204"
},
{
"name": "XSLT",
"bytes": "15769"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
import mock
import autosklearn.util.smac
from autosklearn.util import Backend
class SMACTest(unittest.TestCase):
@mock.patch.object(Backend, 'write_txt_file')
def test_write_instance_file(self, MockedBackend):
# Holdout
backend = MockedBackend(None, None)
autosklearn.util.smac._write_instance_file(
'holdout', None, 'path', backend, 'tmp_dir'
)
self.assertEqual(len(backend.method_calls), 2)
self.assertEqual(backend.method_calls[0][1],
('tmp_dir/instances.txt', 'holdout path', 'Instances'))
self.assertEqual(backend.method_calls[1][1],
('tmp_dir/test_instances.txt', 'test path',
'Test instances'))
# Nested CV
backend = MockedBackend(None, None)
autosklearn.util.smac._write_instance_file(
'nested-cv', {'inner_folds': 2, 'outer_folds': 2},
'path', backend, 'tmp_dir'
)
self.assertEqual(len(backend.method_calls), 4)
self.assertEqual(backend.method_calls[2][1],
('tmp_dir/instances.txt', 'nested-cv:2/2 path',
'Instances'))
self.assertEqual(backend.method_calls[3][1],
('tmp_dir/test_instances.txt', 'test path',
'Test instances'))
# CV
backend = MockedBackend(None, None)
autosklearn.util.smac._write_instance_file(
'cv', {'folds': 2},
'path', backend, 'tmp_dir'
)
self.assertEqual(len(backend.method_calls), 6)
self.assertEqual(backend.method_calls[4][1],
('tmp_dir/instances.txt', 'cv:2 path',
'Instances'))
self.assertEqual(backend.method_calls[5][1],
('tmp_dir/test_instances.txt', 'test path',
'Test instances'))
# Partial-CV
backend = MockedBackend(None, None)
autosklearn.util.smac._write_instance_file(
'partial-cv', {'folds': 2},
'path', backend, 'tmp_dir'
)
self.assertEqual(len(backend.method_calls), 8)
self.assertEqual(backend.method_calls[6][1],
('tmp_dir/instances.txt',
'partial-cv:0/2 path\npartial-cv:1/2 path',
'Instances'))
self.assertEqual(backend.method_calls[7][1],
('tmp_dir/test_instances.txt', 'test path',
'Test instances'))
|
{
"content_hash": "8cdc72faf8b8425138ccdd2cc8e45850",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 40.015151515151516,
"alnum_prop": 0.5297235895494131,
"repo_name": "hmendozap/auto-sklearn",
"id": "258e225866132c67993e70e68d237c11e57f5d14",
"size": "2641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/util/test_smac.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6722"
},
{
"name": "Makefile",
"bytes": "6791"
},
{
"name": "Python",
"bytes": "1207634"
},
{
"name": "Shell",
"bytes": "851"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='cisco_pass',
packages=['cisco_pass'],
version='0.1',
description='Generate Cisco password strings. Decrypt type7. Devices: IOS, NXOS, ACE, CatOS',
author='Roger Caldwell',
author_email='roger@monkey.net',
url='https://github.com/rcaldwel/cisco_pass',
download_url='https://github.com/rcaldwel/cisco_pass/tarball/0.1',
keywords=['cisco', 'password', 'crypt'],
install_requires=['passlib'],
classifiers=[],
)
|
{
"content_hash": "9a56caea5e17b99cddeeb83c1ea1d373",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 99,
"avg_line_length": 33,
"alnum_prop": 0.6666666666666666,
"repo_name": "rcaldwel/cisco_pass",
"id": "b16fd0b126c9eafcfe3ed602d8b7e78ddb7d28c1",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2629"
}
],
"symlink_target": ""
}
|
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
BASE_TABLE_NAME = 'instance_extra'
NEW_COLUMN_NAME = 'vcpu_model'
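# This migration adds a nullable Text column for the vCPU model to both the
# real and shadow instance_extra tables; downgrade drops the column again.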
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
if hasattr(table.c, NEW_COLUMN_NAME):
getattr(table.c, NEW_COLUMN_NAME).drop()
|
{
"content_hash": "4ce62379ab70ce4a81a7c371bc180ac6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 68,
"avg_line_length": 28.482758620689655,
"alnum_prop": 0.6634382566585957,
"repo_name": "affo/nova",
"id": "223e4593346cfcc9e57ea8e1464e6eff96093560",
"size": "1400",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/migrate_repo/versions/276_vcpu_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "15659662"
},
{
"name": "Shell",
"bytes": "20716"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.util.dirutil import safe_mkdir
from pants.contrib.go.targets.go_target import GoTarget
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoCompile(GoWorkspaceTask):
"""Compiles a Go package into either a library binary or executable binary.
GoCompile will populate the "bin/" and "pkg/" directories of each target's Go
workspace (see GoWorkspaceTask) with executables and library binaries respectively.
"""
@classmethod
def register_options(cls, register):
super(GoCompile, cls).register_options(register)
register('--build-flags', default='', fingerprint=True,
help='Build flags to pass to Go compiler.')
@classmethod
def product_types(cls):
return ['exec_binary', 'deployable_archives']
def execute(self):
self.context.products.safe_create_data('exec_binary', lambda: {})
with self.invalidated(self.context.targets(self.is_go),
invalidate_dependents=True,
topological_order=True) as invalidation_check:
# Maps each local/remote library target to its compiled binary.
lib_binary_map = {}
go_exec_binary = self.context.products.get_data('exec_binary')
go_deployable_archive = self.context.products.get('deployable_archives')
for vt in invalidation_check.all_vts:
gopath = self.get_gopath(vt.target)
if not isinstance(vt.target, GoTarget):
continue
if not vt.valid:
self.ensure_workspace(vt.target)
self._sync_binary_dep_links(vt.target, gopath, lib_binary_map)
self._go_install(vt.target, gopath)
if self.is_binary(vt.target):
binary_path = os.path.join(gopath, 'bin', os.path.basename(vt.target.address.spec_path))
go_exec_binary[vt.target] = binary_path
go_deployable_archive.add(vt.target, os.path.dirname(binary_path)).append(os.path.basename(binary_path))
else:
lib_binary_map[vt.target] = os.path.join(gopath, 'pkg', self.goos_goarch,
vt.target.import_path + '.a')
def _go_install(self, target, gopath):
args = self.get_options().build_flags.split() + [target.import_path]
result, go_cmd = self.go_dist.execute_go_cmd(
'install', gopath=gopath, args=args,
workunit_factory=self.context.new_workunit,
workunit_name='install {}'.format(target.address.spec),
workunit_labels=[WorkUnitLabel.COMPILER])
if result != 0:
raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
def _sync_binary_dep_links(self, target, gopath, lib_binary_map):
"""Syncs symlinks under gopath to the library binaries of target's transitive dependencies.
:param Target target: Target whose transitive dependencies must be linked.
:param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links
to library binaries.
:param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the
path of the compiled binary (the ".a" file) of the
library.
Required links to binary dependencies under gopath's "pkg/" dir are either created if
non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing
links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target
are deleted.
"""
required_links = set()
for dep in target.closure():
if dep == target:
continue
if not isinstance(dep, GoTarget):
continue
lib_binary = lib_binary_map[dep]
lib_binary_link = os.path.join(gopath, os.path.relpath(lib_binary, self.get_gopath(dep)))
safe_mkdir(os.path.dirname(lib_binary_link))
if os.path.islink(lib_binary_link):
if os.stat(lib_binary).st_mtime > os.lstat(lib_binary_link).st_mtime:
# The binary under the link was updated after the link was created. Refresh
# the link so the mtime (modification time) of the link is greater than the
# mtime of the binary. This stops Go from needlessly re-compiling the library.
os.unlink(lib_binary_link)
os.symlink(lib_binary, lib_binary_link)
else:
os.symlink(lib_binary, lib_binary_link)
required_links.add(lib_binary_link)
self.remove_unused_links(os.path.join(gopath, 'pkg'), required_links)
|
{
"content_hash": "ebb5895baab3c11934c2885b4161dfec",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 114,
"avg_line_length": 47.63,
"alnum_prop": 0.6632374553852614,
"repo_name": "pombredanne/pants",
"id": "3072d5fe98f945721ec9f7ac8932bb2cec37ac10",
"size": "4910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/go/src/python/pants/contrib/go/tasks/go_compile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "446241"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5091198"
},
{
"name": "Scala",
"bytes": "84585"
},
{
"name": "Shell",
"bytes": "58748"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
}
|
"""The tg2app package"""
|
{
"content_hash": "81d0ca9d122ca4580b55c02d3af2dc73",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.64,
"repo_name": "ralphbean/moksha",
"id": "b9eb912a4a7e340735ac85ea573915544e491685",
"size": "49",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "moksha/tests/quickstarts/tg2app/tg2app/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1249457"
},
{
"name": "Python",
"bytes": "731300"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import six
from . import defs
def _populate_options(opts):
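    """Uppercase all string keys in opts and overlay them onto a copy of
    DEFAULT_OPTIONS, so any option not supplied falls back to its default."""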
assert isinstance(opts, dict)
    opts = dict((k.upper(), v) for k, v in six.iteritems(opts)
                if isinstance(k, six.string_types))
newopts = defs.DEFAULT_OPTIONS.copy()
newopts.update(opts)
return newopts
class Options(object):
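    """Read-only option container: values are reachable as opts['NAME'],
    opts.NAME, or opts.get('NAME', default); option names must be uppercase."""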
def __init__(self, opts=None):
opts = opts or {}
self.opts = _populate_options(opts)
def __getitem__(self, name):
assert name == name.upper()
return self.opts[name]
def __getattr__(self, name):
assert name == name.upper()
try:
return self.opts[name]
except KeyError:
msg = "'{}' object has no attribute '{}'"
raise AttributeError(msg.format(type(self).__name__, name))
def get(self, name, default=None):
assert name == name.upper()
return self.opts.get(name, default)
|
{
"content_hash": "11cdb211d1e01437f4718d06ce6376d0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 71,
"avg_line_length": 26.86842105263158,
"alnum_prop": 0.5768854064642507,
"repo_name": "colossalbit/cssypy",
"id": "f1d136ca4ed939a76b8c6412019162e606623766",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cssypy/optionsdict.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "306596"
}
],
"symlink_target": ""
}
|
from django import forms
from .models import (
Compound,
CompoundSummary,
CompoundProperty,
CompoundTarget
)
from django.forms.models import BaseInlineFormSet
from mps.forms import BootstrapForm
from django.forms.models import inlineformset_factory
class CompoundForm(BootstrapForm):
"""Form for Compounds"""
class Meta(object):
model = Compound
widgets = {
'name': forms.Textarea(attrs={'rows': 1}),
'tags': forms.Textarea(attrs={'rows': 1}),
'smiles': forms.Textarea(attrs={'size': 50, 'rows': 3}),
'inchikey': forms.Textarea(attrs={'size': 50, 'rows': 1}),
'molecular_formula': forms.Textarea(attrs={'size': 50, 'rows': 3}),
'ro5_violations': forms.TextInput(attrs={'size': 2}),
'drug_class': forms.Textarea(attrs={'rows': 1}),
'clearance': forms.Textarea(attrs={'size': 50, 'rows': 3}),
'absorption': forms.Textarea(attrs={'size': 50, 'rows': 3}),
'pk_metabolism': forms.Textarea(attrs={'size': 50, 'rows': 3}),
'preclinical': forms.Textarea(attrs={'size': 50, 'rows': 3}),
'clinical': forms.Textarea(attrs={'size': 50, 'rows': 3}),
'post_marketing': forms.Textarea(attrs={'size': 50, 'rows': 3}),
}
exclude = (
'created_by',
'created_on',
'modified_on',
'modified_by',
'signed_off_by',
'signed_off_date',
'last_update'
)
    # Guard against duplicate external identifiers
def clean(self):
"""Clean Compound Form
Ensures that chemblid, inchikey, and pubchemid, and drugbank_id are unique
"""
super(CompoundForm, self).clean()
chemblid = self.cleaned_data.get('chemblid', '')
inchikey = self.cleaned_data.get('inchikey', '')
pubchemid = self.cleaned_data.get('pubchemid', '')
drugbank_id = self.cleaned_data.get('drugbank_id', '')
# If this instance does not have a primary key, then it is new and this is NOT an update
if not self.instance.pk:
if chemblid and Compound.objects.filter(chemblid=chemblid).exists():
raise forms.ValidationError('A compound with the given ChEMBL ID already exists')
if inchikey and Compound.objects.filter(inchikey=inchikey).exists():
raise forms.ValidationError('A compound with the given InChI Key already exists')
if pubchemid and Compound.objects.filter(pubchemid=pubchemid).exists():
raise forms.ValidationError('A compound with the given PubChem ID already exists')
if drugbank_id and Compound.objects.filter(drugbank_id=drugbank_id).exists():
raise forms.ValidationError('A compound with the given DrugBank ID already exists')
return self.cleaned_data
# SUBJECT TO DEPRECATION
class CompoundSummaryInlineFormset(BaseInlineFormSet):
"""Form for Summary inlines"""
class Meta(object):
model = CompoundSummary
exclude = ('',)
# SUBJECT TO DEPRECATION
class CompoundPropertyInlineFormset(BaseInlineFormSet):
"""Form for property inlines"""
class Meta(object):
model = CompoundProperty
exclude = ('',)
class CompoundTargetForm(BootstrapForm):
"""Form for Target inlines"""
class Meta(object):
model = CompoundTarget
exclude = ('',)
class CompoundTargetInlineFormset(BaseInlineFormSet):
"""Formset for Target inlines"""
class Meta(object):
model = CompoundTarget
exclude = ('',)
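# Inline formset linking CompoundTarget rows to a parent Compound; it renders
# one blank extra row for adding a new target.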
CompoundTargetFormset = inlineformset_factory(
Compound,
CompoundTarget,
form=CompoundTargetForm,
formset=CompoundTargetInlineFormset,
extra=1,
exclude=[],
widgets={
'name': forms.Textarea(attrs={'size': 25, 'rows': 1}),
'uniprot_id': forms.TextInput(attrs={'size': 10}),
'pharmacological_action': forms.TextInput(attrs={'size': 7}),
'organism': forms.TextInput(attrs={'size': 7}),
'type': forms.TextInput(attrs={'size': 11}),
}
)
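# Hedged usage sketch (added for illustration; the view wiring below is an
# assumption, not part of this module):
#
# formset = CompoundTargetFormset(request.POST or None, instance=compound)
# if formset.is_valid():
#     formset.save()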
|
{
"content_hash": "5fea6eca78a1b4168fcbdfc34be24146",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 99,
"avg_line_length": 36.38053097345133,
"alnum_prop": 0.614205789345658,
"repo_name": "UPDDI/mps-database-server",
"id": "c4b4f531b388f5d62d47ba3e68a9bea0b8238da0",
"size": "4111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compounds/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14194"
},
{
"name": "HTML",
"bytes": "1128291"
},
{
"name": "JavaScript",
"bytes": "701549"
},
{
"name": "Python",
"bytes": "1735408"
},
{
"name": "Shell",
"bytes": "1535"
},
{
"name": "TSQL",
"bytes": "41508"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Drill'
copyright = u'2012, Dan Watson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Drilldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Drill.tex', u'Drill Documentation',
u'Dan Watson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'drill', u'Drill Documentation',
[u'Dan Watson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Drill', u'Drill Documentation',
u'Dan Watson', 'Drill', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "ce04942b5c48b6ab248c351aa8fdd90e",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 31.882096069868997,
"alnum_prop": 0.700041090261608,
"repo_name": "dcwatson/drill",
"id": "3f2a787e990df34e93a6b5741b004acdfd917bc7",
"size": "7717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "30020"
}
],
"symlink_target": ""
}
|
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Rds20140815CreateReadOnlyDBInstanceRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.ClientToken = None
self.DBInstanceClass = None
self.DBInstanceDescription = None
self.DBInstanceId = None
self.DBInstanceStorage = None
self.EngineVersion = None
self.InstanceNetworkType = None
self.PayType = None
self.PrivateIpAddress = None
self.RegionId = None
self.VPCId = None
self.VSwitchId = None
self.ZoneId = None
def getapiname(self):
return 'rds.aliyuncs.com.CreateReadOnlyDBInstance.2014-08-15'
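# Hedged usage sketch (added for illustration; the values are placeholders,
# and the request-sending helpers live in RestApi, not in this file):
def _example_build_request():
    req = Rds20140815CreateReadOnlyDBInstanceRequest()
    req.DBInstanceId = 'rm-xxxxxxxxxxxxxxxx'
    req.DBInstanceClass = 'rds.mysql.s1.small'
    req.RegionId = 'cn-hangzhou'
    return req.getapiname()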
|
{
"content_hash": "aa2eae7799b32343f6690e01af13a681",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 63,
"avg_line_length": 30.217391304347824,
"alnum_prop": 0.7251798561151079,
"repo_name": "wanghe4096/website",
"id": "6df4f730e49ea6190c2f7d2062096e68f84c768b",
"size": "695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aliyun/api/rest/Rds20140815CreateReadOnlyDBInstanceRequest.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "121965"
},
{
"name": "HTML",
"bytes": "163477"
},
{
"name": "JavaScript",
"bytes": "227130"
},
{
"name": "Lua",
"bytes": "5653"
},
{
"name": "Python",
"bytes": "325945"
},
{
"name": "Shell",
"bytes": "1359"
}
],
"symlink_target": ""
}
|
from functools import wraps
from pulsar.api import ImproperlyConfigured
from pulsar.apps import wsgi
from lux.models import Schema
from lux.openapi import OperationInfo
from lux.utils.data import compact_dict
class route(wsgi.route):
"""Extend pulsar wsgi route decorator for openapi information
It adds the openapi namedtuple to the route parameters dictionary
"""
def __init__(self, rule=None, body_schema=None, path_schema=None,
query_schema=None, header_schema=None, default_response=200,
default_response_schema=None,
responses=None, **kwargs):
if isinstance(rule, type(Schema)):
rule = rule()
if isinstance(rule, Schema):
if path_schema:
raise ImproperlyConfigured(
'both rule and path_schema are provided as schema'
)
path_schema = rule
rule = path_schema.rule()
kwargs['openapi'] = OperationInfo(
path=path_schema,
body=body_schema,
query=query_schema,
header=header_schema,
responses=responses,
default_response=default_response,
default_response_schema=default_response_schema
)
super().__init__(rule, **kwargs)
def __call__(self, method):
api = self.parameters['openapi']
if api.body or api.responses[api.default_response]:
# the callable must accept the schema as second parameter
@wraps(method)
def _(router, request):
return method(router, request, **compact_dict(
body_schema=api.body,
query_schema=api.query,
schema=api.schema
))
return super().__call__(_)
return super().__call__(method)
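# Hedged usage sketch (illustration only): applying the decorator to a REST
# router method. RestRouter and ArticleSchema below are assumptions and are
# not defined in this module.
#
# class ArticleCRUD(RestRouter):
#     @route('articles', body_schema=ArticleSchema,
#            default_response_schema=ArticleSchema)
#     def post(self, request, body_schema=None, schema=None):
#         ...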
|
{
"content_hash": "55bc68cb26cd0b7b44dcc795a8284c25",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 33.464285714285715,
"alnum_prop": 0.5789754535752402,
"repo_name": "quantmind/lux",
"id": "5441b6ebc417861611c981e8f7b6d6c60111ec6a",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lux/ext/rest/route.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "906"
},
{
"name": "HTML",
"bytes": "5107"
},
{
"name": "JavaScript",
"bytes": "219127"
},
{
"name": "Makefile",
"bytes": "422"
},
{
"name": "Mako",
"bytes": "1050"
},
{
"name": "PLpgSQL",
"bytes": "140"
},
{
"name": "Python",
"bytes": "615221"
},
{
"name": "Shell",
"bytes": "196"
}
],
"symlink_target": ""
}
|
"""pyversioncheck - Module to help with checking versions"""
import types
import rfc822
import urllib
import sys
# Verbose options
VERBOSE_SILENT=0 # Single-line reports per package
VERBOSE_NORMAL=1 # Single-line reports per package, more info if outdated
VERBOSE_EACHFILE=2 # Report on each URL checked
VERBOSE_CHECKALL=3 # Check each URL for each package
# Test directory
## urllib bug: _TESTDIR="ftp://ftp.cwi.nl/pub/jack/python/versiontestdir/"
_TESTDIR="http://www.cwi.nl/~jack/versiontestdir/"
def versioncheck(package, url, version, verbose=0):
ok, newversion, fp = checkonly(package, url, version, verbose)
if verbose > VERBOSE_NORMAL:
return ok
if ok < 0:
print '%s: No correctly formatted current version file found'%(package)
elif ok == 1:
print '%s: up-to-date (version %s)'%(package, version)
else:
print '%s: version %s installed, version %s found:' % \
(package, version, newversion)
if verbose > VERBOSE_SILENT:
while 1:
line = fp.readline()
if not line: break
sys.stdout.write('\t'+line)
return ok
def checkonly(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print '%s:'%package
if type(url) == types.StringType:
ok, newversion, fp = _check1version(package, url, version, verbose)
else:
for u in url:
ok, newversion, fp = _check1version(package, u, version, verbose)
if ok >= 0 and verbose < VERBOSE_CHECKALL:
break
return ok, newversion, fp
def _check1version(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print ' Checking %s'%url
try:
fp = urllib.urlopen(url)
except IOError, arg:
if verbose >= VERBOSE_EACHFILE:
print ' Cannot open:', arg
return -1, None, None
msg = rfc822.Message(fp, seekable=0)
newversion = msg.getheader('current-version')
if not newversion:
if verbose >= VERBOSE_EACHFILE:
print ' No "Current-Version:" header in URL or URL not found'
return -1, None, None
version = version.lower().strip()
newversion = newversion.lower().strip()
if version == newversion:
if verbose >= VERBOSE_EACHFILE:
print ' Version identical (%s)'%newversion
return 1, version, fp
else:
if verbose >= VERBOSE_EACHFILE:
print ' Versions different (installed: %s, new: %s)'% \
(version, newversion)
return 0, newversion, fp
def _test():
print '--- TEST VERBOSE=1'
print '--- Testing existing and identical version file'
versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=1)
print '--- Testing existing package with new version'
versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=1)
print '--- Testing package with non-existing version file'
versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=1)
print '--- Test package with 2 locations, first non-existing second ok'
versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
versioncheck('VersionTestPackage', versfiles, '1.0', verbose=1)
print '--- TEST VERBOSE=2'
print '--- Testing existing and identical version file'
versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=2)
print '--- Testing existing package with new version'
versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=2)
print '--- Testing package with non-existing version file'
versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=2)
print '--- Test package with 2 locations, first non-existing second ok'
versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
versioncheck('VersionTestPackage', versfiles, '1.0', verbose=2)
if __name__ == '__main__':
_test()
|
{
"content_hash": "113f80ee3dc312a093ce9e89f95404fe",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 84,
"avg_line_length": 42.33673469387755,
"alnum_prop": 0.6218365871294288,
"repo_name": "hwu25/AppPkg",
"id": "1a9d10e3ab41e042737eb0f7d979cab13905b82d",
"size": "4149",
"binary": false,
"copies": "10",
"ref": "refs/heads/trunk",
"path": "Applications/Python/Python-2.7.2/Tools/versioncheck/pyversioncheck.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "13199099"
},
{
"name": "C++",
"bytes": "105080"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "Lua",
"bytes": "249"
},
{
"name": "Makefile",
"bytes": "12852"
},
{
"name": "Objective-C",
"bytes": "1374661"
},
{
"name": "Python",
"bytes": "16366233"
},
{
"name": "Shell",
"bytes": "9469"
},
{
"name": "Visual Basic",
"bytes": "494"
}
],
"symlink_target": ""
}
|
import unittest
import uuid
from swiftclient import client
from swift.common.storage_policy import POLICIES
from swift.common.manager import Manager
from swift.common.direct_client import direct_delete_account, \
direct_get_object, direct_head_container, ClientException
from test.probe.common import kill_servers, reset_environment, \
get_to_final_state
class TestAccountReaper(unittest.TestCase):
def setUp(self):
(self.pids, self.port2server, self.account_ring, self.container_ring,
self.object_ring, self.policy, self.url, self.token,
self.account, self.configs) = reset_environment()
def tearDown(self):
kill_servers(self.port2server, self.pids)
def test_sync(self):
all_objects = []
# upload some containers
for policy in POLICIES:
container = 'container-%s-%s' % (policy.name, uuid.uuid4())
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy': policy.name})
obj = 'object-%s' % uuid.uuid4()
body = 'test-body'
client.put_object(self.url, self.token, container, obj, body)
all_objects.append((policy, container, obj))
Manager(['container-updater']).once()
headers = client.head_account(self.url, self.token)
self.assertEqual(int(headers['x-account-container-count']),
len(POLICIES))
self.assertEqual(int(headers['x-account-object-count']),
len(POLICIES))
self.assertEqual(int(headers['x-account-bytes-used']),
len(POLICIES) * len(body))
part, nodes = self.account_ring.get_nodes(self.account)
for node in nodes:
direct_delete_account(node, part, self.account)
Manager(['account-reaper']).once()
get_to_final_state()
for policy, container, obj in all_objects:
cpart, cnodes = self.container_ring.get_nodes(
self.account, container)
for cnode in cnodes:
try:
direct_head_container(cnode, cpart, self.account,
container)
except ClientException as err:
self.assertEquals(err.http_status, 404)
else:
self.fail('Found un-reaped /%s/%s on %r' %
(self.account, container, node))
object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
part, nodes = object_ring.get_nodes(self.account, container, obj)
for node in nodes:
try:
direct_get_object(node, part, self.account,
container, obj)
except ClientException as err:
self.assertEquals(err.http_status, 404)
else:
self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
(self.account, container, obj, node, policy))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "b8f434470b7b858b87f735668d5bbecc",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 38.81481481481482,
"alnum_prop": 0.5623409669211196,
"repo_name": "kalrey/swift",
"id": "028157029a6354023e8c7f1414d8e90dccb25b1a",
"size": "3711",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/probe/test_account_reaper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from ...errors.saklientexception import SaklientException
from ..client import Client
from .resource import Resource
from .icon import Icon
from .iface import Iface
from .serverplan import ServerPlan
from .serverinstance import ServerInstance
from .isoimage import IsoImage
from ..enums.eserverinstancestatus import EServerInstanceStatus
from ..enums.eavailability import EAvailability
from ...util import Util
import saklient
# module saklient.cloud.resources.server
class Server(Resource):
    ## Represents a single server resource and provides methods to read its attributes and operate on it.
# (instance field) m_id
# (instance field) m_name
# (instance field) m_description
# (instance field) m_tags
# (instance field) m_icon
# (instance field) m_plan
# (instance field) m_ifaces
# (instance field) m_instance
# (instance field) m_availability
## @private
# @return {str}
def _api_path(self):
return "/server"
## @private
# @return {str}
def _root_key(self):
return "Server"
## @private
# @return {str}
def _root_key_m(self):
return "Servers"
## @private
# @return {str}
def _class_name(self):
return "Server"
## @private
# @return {str}
def _id(self):
return self.get_id()
    ## Sends the resource information currently held by this local object to the API, creating a new resource or overwriting the existing one.
#
# @return {saklient.cloud.resources.server.Server} this
def save(self):
return self._save()
    ## Re-fetches the latest resource information.
#
# @return {saklient.cloud.resources.server.Server} this
def reload(self):
return self._reload()
## @ignore
# @param {saklient.cloud.client.Client} client
# @param {any} obj
# @param {bool} wrapped=False
def __init__(self, client, obj, wrapped=False):
super(Server, self).__init__(client)
Util.validate_type(client, "saklient.cloud.client.Client")
Util.validate_type(wrapped, "bool")
self.api_deserialize(obj, wrapped)
    ## Returns true when the server is up.
#
# @return {bool}
def is_up(self):
return self.get_instance().is_up()
    ## Returns true when the server is down.
#
# @return {bool}
def is_down(self):
return self.get_instance().is_down()
    ## Boots the server.
#
# @return {saklient.cloud.resources.server.Server} this
def boot(self):
self._client.request("PUT", self._api_path() + "/" + Util.url_encode(self._id()) + "/power")
return self.reload()
    ## Shuts down the server.
#
# @return {saklient.cloud.resources.server.Server} this
def shutdown(self):
self._client.request("DELETE", self._api_path() + "/" + Util.url_encode(self._id()) + "/power")
return self.reload()
    ## Forcibly stops the server.
#
# @return {saklient.cloud.resources.server.Server} this
def stop(self):
self._client.request("DELETE", self._api_path() + "/" + Util.url_encode(self._id()) + "/power", {
'Force': True
})
return self.reload()
    ## Forcibly reboots the server.
#
# @return {saklient.cloud.resources.server.Server} this
def reboot(self):
self._client.request("PUT", self._api_path() + "/" + Util.url_encode(self._id()) + "/reset")
return self.reload()
    ## Waits until the server is up.
#
# @param {int} timeoutSec=180
# @return {bool}
def sleep_until_up(self, timeoutSec=180):
Util.validate_type(timeoutSec, "int")
return self.sleep_until(EServerInstanceStatus.up, timeoutSec)
    ## Waits until the server is down.
#
# @param {int} timeoutSec=180
    # @return {bool} Returns true on success, or false on timeout or error.
def sleep_until_down(self, timeoutSec=180):
Util.validate_type(timeoutSec, "int")
return self.sleep_until(EServerInstanceStatus.down, timeoutSec)
    ## Waits until the server transitions to the specified status.
#
# @ignore
# @param {str} status
# @param {int} timeoutSec=180
# @return {bool}
def sleep_until(self, status, timeoutSec=180):
Util.validate_type(status, "str")
Util.validate_type(timeoutSec, "int")
step = 10
while (0 < timeoutSec):
self.reload()
s = None
inst = self.instance
if inst is not None:
s = inst.status
if s is None:
s = EServerInstanceStatus.down
if s == status:
return True
timeoutSec -= step
if 0 < timeoutSec:
Util.sleep(step)
return False
    ## Changes the server plan.
    #
    # Note that the resource ID changes when the operation succeeds.
#
# @param {saklient.cloud.resources.serverplan.ServerPlan} planTo
# @return {saklient.cloud.resources.server.Server} this
def change_plan(self, planTo):
Util.validate_type(planTo, "saklient.cloud.resources.serverplan.ServerPlan")
path = self._api_path() + "/" + Util.url_encode(self._id()) + "/to/plan/" + Util.url_encode(planTo._id())
result = self._client.request("PUT", path)
self.api_deserialize(result, True)
return self
    ## Adds one interface to the server and returns it.
    #
    # @return {saklient.cloud.resources.iface.Iface} The newly added interface
def add_iface(self):
model = Util.create_class_instance("saklient.cloud.models.Model_Iface", [self._client])
res = model.create()
res.server_id = self._id()
return res.save()
    ## Inserts an ISO image into the server.
#
# @param {saklient.cloud.resources.isoimage.IsoImage} iso
# @return {saklient.cloud.resources.server.Server} this
def insert_iso_image(self, iso):
Util.validate_type(iso, "saklient.cloud.resources.isoimage.IsoImage")
path = self._api_path() + "/" + Util.url_encode(self._id()) + "/cdrom"
q = {
'CDROM': {
'ID': iso._id()
}
}
self._client.request("PUT", path, q)
self.reload()
return self
    ## Ejects the ISO image inserted in the server.
#
# @return {saklient.cloud.resources.server.Server} this
def eject_iso_image(self):
path = self._api_path() + "/" + Util.url_encode(self._id()) + "/cdrom"
self._client.request("DELETE", path)
self.reload()
return self
# (instance field) n_id = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {str}
def get_id(self):
return self.m_id
## ID
id = property(get_id, None, None)
# (instance field) n_name = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {str}
def get_name(self):
return self.m_name
## (This method is generated in Translator_default#buildImpl)
#
# @param {str} v
# @return {str}
def set_name(self, v):
Util.validate_type(v, "str")
self.m_name = v
self.n_name = True
return self.m_name
    ## Name
name = property(get_name, set_name, None)
# (instance field) n_description = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {str}
def get_description(self):
return self.m_description
## (This method is generated in Translator_default#buildImpl)
#
# @param {str} v
# @return {str}
def set_description(self, v):
Util.validate_type(v, "str")
self.m_description = v
self.n_description = True
return self.m_description
    ## Description
description = property(get_description, set_description, None)
# (instance field) n_tags = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {str[]}
def get_tags(self):
self.n_tags = True
return self.m_tags
## (This method is generated in Translator_default#buildImpl)
#
# @param {str[]} v
# @return {str[]}
def set_tags(self, v):
Util.validate_type(v, "list")
self.m_tags = v
self.n_tags = True
return self.m_tags
    ## Array of tag strings
tags = property(get_tags, set_tags, None)
# (instance field) n_icon = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {saklient.cloud.resources.icon.Icon}
def get_icon(self):
return self.m_icon
## (This method is generated in Translator_default#buildImpl)
#
# @param {saklient.cloud.resources.icon.Icon} v
# @return {saklient.cloud.resources.icon.Icon}
def set_icon(self, v):
Util.validate_type(v, "saklient.cloud.resources.icon.Icon")
self.m_icon = v
self.n_icon = True
return self.m_icon
    ## Icon
icon = property(get_icon, set_icon, None)
# (instance field) n_plan = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {saklient.cloud.resources.serverplan.ServerPlan}
def get_plan(self):
return self.m_plan
## (This method is generated in Translator_default#buildImpl)
#
# @param {saklient.cloud.resources.serverplan.ServerPlan} v
# @return {saklient.cloud.resources.serverplan.ServerPlan}
def set_plan(self, v):
Util.validate_type(v, "saklient.cloud.resources.serverplan.ServerPlan")
if not self.is_new:
raise SaklientException("immutable_field", "Immutable fields cannot be modified after the resource creation: " + "saklient.cloud.resources.server.Server#plan")
self.m_plan = v
self.n_plan = True
return self.m_plan
    ## Plan
plan = property(get_plan, set_plan, None)
# (instance field) n_ifaces = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {saklient.cloud.resources.iface.Iface[]}
def get_ifaces(self):
return self.m_ifaces
    ## Interfaces
ifaces = property(get_ifaces, None, None)
# (instance field) n_instance = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {saklient.cloud.resources.serverinstance.ServerInstance}
def get_instance(self):
return self.m_instance
    ## Instance information
instance = property(get_instance, None, None)
# (instance field) n_availability = False
## (This method is generated in Translator_default#buildImpl)
#
# @return {str}
def get_availability(self):
return self.m_availability
    ## Availability {@link EAvailability}
availability = property(get_availability, None, None)
## (This method is generated in Translator_default#buildImpl)
#
# @param {any} r
def api_deserialize_impl(self, r):
self.is_new = r is None
if self.is_new:
r = {
}
self.is_incomplete = False
if Util.exists_path(r, "ID"):
self.m_id = None if Util.get_by_path(r, "ID") is None else str(Util.get_by_path(r, "ID"))
else:
self.m_id = None
self.is_incomplete = True
self.n_id = False
if Util.exists_path(r, "Name"):
self.m_name = None if Util.get_by_path(r, "Name") is None else str(Util.get_by_path(r, "Name"))
else:
self.m_name = None
self.is_incomplete = True
self.n_name = False
if Util.exists_path(r, "Description"):
self.m_description = None if Util.get_by_path(r, "Description") is None else str(Util.get_by_path(r, "Description"))
else:
self.m_description = None
self.is_incomplete = True
self.n_description = False
if Util.exists_path(r, "Tags"):
if Util.get_by_path(r, "Tags") is None:
self.m_tags = []
else:
self.m_tags = []
for t in Util.get_by_path(r, "Tags"):
v1 = None
v1 = None if t is None else str(t)
self.m_tags.append(v1)
else:
self.m_tags = None
self.is_incomplete = True
self.n_tags = False
if Util.exists_path(r, "Icon"):
self.m_icon = None if Util.get_by_path(r, "Icon") is None else Icon(self._client, Util.get_by_path(r, "Icon"))
else:
self.m_icon = None
self.is_incomplete = True
self.n_icon = False
if Util.exists_path(r, "ServerPlan"):
self.m_plan = None if Util.get_by_path(r, "ServerPlan") is None else ServerPlan(self._client, Util.get_by_path(r, "ServerPlan"))
else:
self.m_plan = None
self.is_incomplete = True
self.n_plan = False
if Util.exists_path(r, "Interfaces"):
if Util.get_by_path(r, "Interfaces") is None:
self.m_ifaces = []
else:
self.m_ifaces = []
for t in Util.get_by_path(r, "Interfaces"):
v2 = None
v2 = None if t is None else Iface(self._client, t)
self.m_ifaces.append(v2)
else:
self.m_ifaces = None
self.is_incomplete = True
self.n_ifaces = False
if Util.exists_path(r, "Instance"):
self.m_instance = None if Util.get_by_path(r, "Instance") is None else ServerInstance(self._client, Util.get_by_path(r, "Instance"))
else:
self.m_instance = None
self.is_incomplete = True
self.n_instance = False
if Util.exists_path(r, "Availability"):
self.m_availability = None if Util.get_by_path(r, "Availability") is None else str(Util.get_by_path(r, "Availability"))
else:
self.m_availability = None
self.is_incomplete = True
self.n_availability = False
## @ignore
# @param {bool} withClean=False
# @return {any}
def api_serialize_impl(self, withClean=False):
Util.validate_type(withClean, "bool")
missing = []
ret = {
}
if withClean or self.n_id:
Util.set_by_path(ret, "ID", self.m_id)
if withClean or self.n_name:
Util.set_by_path(ret, "Name", self.m_name)
else:
if self.is_new:
missing.append("name")
if withClean or self.n_description:
Util.set_by_path(ret, "Description", self.m_description)
if withClean or self.n_tags:
Util.set_by_path(ret, "Tags", [])
for r1 in self.m_tags:
v = None
v = r1
(ret["Tags"] if "Tags" in ret else None).append(v)
if withClean or self.n_icon:
Util.set_by_path(ret, "Icon", (None if self.m_icon is None else self.m_icon.api_serialize(withClean)) if withClean else ({
'ID': "0"
} if self.m_icon is None else self.m_icon.api_serialize_id()))
if withClean or self.n_plan:
Util.set_by_path(ret, "ServerPlan", (None if self.m_plan is None else self.m_plan.api_serialize(withClean)) if withClean else ({
'ID': "0"
} if self.m_plan is None else self.m_plan.api_serialize_id()))
else:
if self.is_new:
missing.append("plan")
if withClean or self.n_ifaces:
Util.set_by_path(ret, "Interfaces", [])
for r2 in self.m_ifaces:
v = None
v = (None if r2 is None else r2.api_serialize(withClean)) if withClean else ({
'ID': "0"
} if r2 is None else r2.api_serialize_id())
(ret["Interfaces"] if "Interfaces" in ret else None).append(v)
if withClean or self.n_instance:
Util.set_by_path(ret, "Instance", (None if self.m_instance is None else self.m_instance.api_serialize(withClean)) if withClean else ({
'ID': "0"
} if self.m_instance is None else self.m_instance.api_serialize_id()))
if withClean or self.n_availability:
Util.set_by_path(ret, "Availability", self.m_availability)
if len(missing) > 0:
raise SaklientException("required_field", "Required fields must be set before the Server creation: " + ", ".join(missing))
return ret
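# Hedged usage sketch (illustration only; the authorization call and the
# server ID below are placeholder assumptions, see the saklient docs):
#
# api = saklient.cloud.api.API.authorize('<TOKEN>', '<SECRET>')
# server = api.server.get_by_id('112900000000')
# if server.is_down():
#     server.boot()
#     server.sleep_until_up(timeoutSec=180)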
|
{
"content_hash": "b49203e5699008be79143fce2b502a8b",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 171,
"avg_line_length": 32.967806841046276,
"alnum_prop": 0.57113213304852,
"repo_name": "hnakamur/saklient.python",
"id": "ec6230e988f44e2b54bd2eb65b18c39346ab66d6",
"size": "17243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saklient/cloud/resources/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "539448"
},
{
"name": "Shell",
"bytes": "874"
}
],
"symlink_target": ""
}
|
"""
tinycss
-------
A CSS parser, and nothing else.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
import sys
from .version import VERSION
__version__ = VERSION
from .css21 import CSS21Parser
from .page3 import CSSPage3Parser
PARSER_MODULES = {
'page3': CSSPage3Parser,
}
def make_parser(*features, **kwargs):
"""Make a parser object with the chosen features.
:param features:
Positional arguments are base classes the new parser class will extend.
The string ``'page3'`` is accepted as short for
:class:`~page3.CSSPage3Parser`.
:param kwargs:
Keyword arguments are passed to the parser’s constructor.
:returns:
An instance of a new subclass of :class:`CSS21Parser`
"""
if features:
bases = tuple(PARSER_MODULES.get(f, f) for f in features)
parser_class = type('CustomCSSParser', bases + (CSS21Parser,), {})
else:
parser_class = CSS21Parser
return parser_class(**kwargs)
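# Hedged example (added for illustration): build a parser with the page3
# feature and parse a small stylesheet. The CSS snippet is illustrative only.
def _example():
    parser = make_parser('page3')
    stylesheet = parser.parse_stylesheet('@page { margin: 1cm }')
    return stylesheet.rules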
|
{
"content_hash": "25e71cfb0c2296d0941ab770da04ecd1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 24.209302325581394,
"alnum_prop": 0.6493756003842459,
"repo_name": "jbhamilton/Which-CSS",
"id": "9eca2b1b4648687759ddde847349b13eb4c9d0da",
"size": "1058",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "codebase/tinycss/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "64185"
},
{
"name": "JavaScript",
"bytes": "3157"
},
{
"name": "PHP",
"bytes": "128412"
},
{
"name": "Python",
"bytes": "230738"
}
],
"symlink_target": ""
}
|
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import threading
import zlib
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.platform import resource_loader
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
from tensorflow.tensorboard.plugins import REGISTERED_PLUGINS
class TensorboardServerTest(tf.test.TestCase):
_only_use_meta_graph = False # Server data contains only a GraphDef
# Number of scalar-containing events to make.
_SCALAR_COUNT = 99
def setUp(self):
self._GenerateTestData()
self._multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE)
server.ReloadMultiplexer(self._multiplexer, {self.get_temp_dir(): None})
# 0 to pick an unused port.
self._server = server.BuildServer(
self._multiplexer, 'localhost', 0, '/foo/logdir/argument')
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
self._connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
def tearDown(self):
self._connection.close()
self._server.shutdown()
self._server.server_close()
  def _get(self, path, headers=None):
    """Perform a GET request for the given path."""
    self._connection.request('GET', path, None, headers or {})
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
data = response.read()
if response.getheader('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def testBasicStartup(self):
"""Start the server up and then shut it down immediately."""
pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 400)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': '/foo/logdir/argument'})
def testRuns(self):
"""Test the format of the /data/runs endpoint."""
run_json = self._getJson('/data/runs')
# Don't check the actual timestamp since it's time-dependent.
self.assertTrue(isinstance(run_json['run1']['firstEventTimestamp'],
numbers.Number))
del run_json['run1']['firstEventTimestamp']
self.assertEqual(run_json, {'run1': {
'compressedHistograms': ['histogram'],
'scalars': ['simple_values'],
'histograms': ['histogram'],
'images': ['image'],
'audio': ['audio'],
# if only_use_meta_graph, the graph is extracted from the metagraph
'graph': True,
'meta_graph': self._only_use_meta_graph,
'run_metadata': ['test run']}})
def testApplicationPaths_getCached(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/',): # TODO(jart): '/app.js' in open source
connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Cache-Control'),
'private, max-age=3600', msg=path)
connection.close()
def testDataPaths_disableAllCaching(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/data/runs',
'/data/logdir',
'/data/scalars?run=run1&tag=simple_values',
'/data/scalars?run=run1&tag=simple_values&format=csv',
'/data/images?run=run1&tag=image',
'/data/individualImage?run=run1&tag=image&index=0',
'/data/audio?run=run1&tag=audio',
'/data/run_metadata?run=run1&tag=test%20run'):
connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Expires'), '0', msg=path)
response.read()
connection.close()
def testHistograms(self):
"""Test the format of /data/histograms."""
self.assertEqual(
self._getJson('/data/histograms?tag=histogram&run=run1'),
[[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
def testSampleScalars(self):
"""Test the sample_count parameter of /data/scalars."""
for i in xrange(10, self._SCALAR_COUNT, 10):
samples = self._getJson('/data/scalars?sample_count=%d' % i)
values = samples['run1']['simple_values']
# Verify that we got the right amount of values and that we got the
# endpoints.
self.assertEqual(len(values), i)
self.assertEqual(values[0], [100, 10, 1])
self.assertEqual(values[-1], [9900, 990, 99])
def testSampleScalarsWithLargeSampleCount(self):
"""Test using a large sample_count."""
samples = self._getJson('/data/scalars?sample_count=999999')
values = samples['run1']['simple_values']
self.assertEqual(len(values), self._SCALAR_COUNT)
def testImages(self):
"""Test listing images and retrieving an individual image."""
image_json = self._getJson('/data/images?tag=image&run=run1')
image_query = image_json[0]['query']
# We don't care about the format of the image query.
del image_json[0]['query']
self.assertEqual(image_json, [{
'wall_time': 0,
'step': 0,
'height': 1,
'width': 1
}])
response = self._get('/data/individualImage?%s' % image_query)
self.assertEqual(response.status, 200)
def testAudio(self):
"""Test listing audio and retrieving an individual audio clip."""
audio_json = self._getJson('/data/audio?tag=audio&run=run1')
audio_query = audio_json[0]['query']
# We don't care about the format of the audio query.
del audio_json[0]['query']
self.assertEqual(audio_json, [{
'wall_time': 0,
'step': 0,
'content_type': 'audio/wav'
}])
response = self._get('/data/individualAudio?%s' % audio_query)
self.assertEqual(response.status, 200)
def testGraph(self):
"""Test retrieving the graph definition."""
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs')
self.assertEqual(response.status, 200)
graph_pbtxt = response.read()
# Parse the graph from pbtxt into a graph message.
graph = tf.GraphDef()
graph = text_format.Parse(graph_pbtxt, graph)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0].name, 'a')
self.assertEqual(graph.node[1].name, 'b')
# Make sure the second node has an attribute that was filtered out because
# it was too large and was added to the "too large" attributes list.
self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
[b'very_large_attr'])
def testProjectorRunsWithEmbeddings(self):
"""Test the format of /runs endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
run_json = self._getJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['run1'])
def testProjectorInfo(self):
"""Test the format of /info endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
info_json = self._getJson('/data/plugin/projector/info?run=run1')
self.assertEqual(info_json['tensors'], {
'var1': {
'shape': [1, 2],
'name': 'var1',
'metadataFile': None,
'bookmarksFile': None,
},
'var2': {
'shape': [10, 10],
'name': 'var2',
'metadataFile': None,
'bookmarksFile': None,
},
'var3': {
'shape': [100, 100],
'name': 'var3',
'metadataFile': None,
'bookmarksFile': None,
}
})
def testProjectorTensor(self):
"""Test the format of /tensor endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
tensor_tsv = (self._get('/data/plugin/projector/tensor?run=run1&name=var1')
.read())
self.assertEqual(tensor_tsv, b'6.0\t6.0')
def testAcceptGzip_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptAnyEncoding_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': '*'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptDoodleEncoding_doesNotCompressResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'doodle'})
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader('Content-Encoding'))
graph = text_format.Parse(response.read(), tf.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptGzip_doesNotCompressImage(self):
response = self._get('/data/individualImage?run=run1&tag=image&index=0',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), None)
def testRunMetadata(self):
"""Test retrieving the run metadata information."""
response = self._get('/data/run_metadata?run=run1&tag=test%20run')
self.assertEqual(response.status, 200)
run_metadata_pbtxt = response.read()
# Parse from pbtxt into a message.
run_metadata = tf.RunMetadata()
text_format.Parse(run_metadata_pbtxt, run_metadata)
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
def _GenerateTestData(self):
"""Generates the test data directory.
The test data has a single run named run1 which contains:
- a histogram
- an image at timestamp and step 0
- scalar events containing the value i at step 10 * i and wall time
100 * i, for i in [1, _SCALAR_COUNT).
- a graph definition
"""
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
run1_path = os.path.join(temp_dir, 'run1')
os.makedirs(run1_path)
writer = tf.train.SummaryWriter(run1_path)
histogram_value = tf.HistogramProto(min=0,
max=2,
num=3,
sum=6,
sum_squares=5,
bucket_limit=[0, 1, 2],
bucket=[1, 1, 1])
# Add a simple graph event.
graph_def = tf.GraphDef()
node1 = graph_def.node.add()
node1.name = 'a'
node2 = graph_def.node.add()
node2.name = 'b'
node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute
meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
if self._only_use_meta_graph:
writer.add_meta_graph(meta_graph_def)
else:
writer.add_graph(graph_def)
# Add a simple run metadata event.
run_metadata = tf.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# 1x1 transparent GIF.
encoded_image = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
image_value = tf.Summary.Image(height=1,
width=1,
colorspace=1,
encoded_image_string=encoded_image)
audio_value = tf.Summary.Audio(sample_rate=44100,
length_frames=22050,
num_channels=2,
encoded_audio_string=b'',
content_type='audio/wav')
writer.add_event(tf.Event(wall_time=0,
step=0,
summary=tf.Summary(value=[
tf.Summary.Value(tag='histogram',
histo=histogram_value),
tf.Summary.Value(tag='image',
image=image_value),
tf.Summary.Value(tag='audio',
audio=audio_value)
])))
    # Write _SCALAR_COUNT simple values.
for i in xrange(1, self._SCALAR_COUNT + 1):
writer.add_event(tf.Event(
# We use different values for wall time, step, and the value so we can
# tell them apart.
wall_time=100 * i,
step=10 * i,
summary=tf.Summary(value=[tf.Summary.Value(tag='simple_values',
simple_value=i)])))
writer.flush()
writer.close()
if 'projector' in REGISTERED_PLUGINS:
self._GenerateProjectorTestData(run1_path)
def _GenerateProjectorTestData(self, run_path):
# Write a projector config file in run1.
config_path = os.path.join(run_path, 'projector_config.pbtxt')
config = ProjectorConfig()
config_pbtxt = text_format.MessageToString(config)
with tf.gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
# Write a checkpoint with some dummy variables.
with tf.Graph().as_default():
sess = tf.Session()
checkpoint_path = os.path.join(run_path, 'model')
tf.get_variable(
'var1', [1, 2], initializer=tf.constant_initializer(6.0))
tf.get_variable('var2', [10, 10])
tf.get_variable('var3', [100, 100])
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
saver.save(sess, checkpoint_path)
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
# Tests new ability to use only the MetaGraphDef
_only_use_meta_graph = True # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(tf.test.TestCase):
def testRunName(self):
logdir_string = 'lol:/cat'
expected = {'/cat': 'lol'}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
logdir_string = '/lol:/cat'
expected = {'/lol:/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testMultipleDirectories(self):
logdir_string = '/a,/b'
expected = {'/a': None, '/b': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testNormalizesPaths(self):
logdir_string = '/lol/.//cat/../cat'
expected = {'/lol/cat': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testAbsolutifies(self):
logdir_string = 'lol/cat'
expected = {os.path.realpath('lol/cat'): None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testRespectsGCSPath(self):
logdir_string = 'gs://foo/path'
expected = {'gs://foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotExpandUserInGCSPath(self):
logdir_string = 'gs://~/foo/path'
expected = {'gs://~/foo/path': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
def testDoesNotNormalizeGCSPath(self):
logdir_string = 'gs://foo/./path//..'
expected = {'gs://foo/./path//..': None}
self.assertEqual(server.ParseEventFilesSpec(logdir_string), expected)
class TensorBoardAssetsTest(tf.test.TestCase):
def testTagFound(self):
tag = resource_loader.load_resource('tensorboard/TAG')
self.assertTrue(tag)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "bed1f521fb31a77b933b074e00c1aeef",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 97,
"avg_line_length": 38.68162393162393,
"alnum_prop": 0.6244821300336961,
"repo_name": "juharris/tensorflow",
"id": "0132085f5d0892e71a9e2fc36965cea567e26e93",
"size": "18792",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/tensorboard/backend/server_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156005"
},
{
"name": "C++",
"bytes": "9229239"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "783708"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1773496"
},
{
"name": "Protocol Buffer",
"bytes": "112087"
},
{
"name": "Python",
"bytes": "6699482"
},
{
"name": "Shell",
"bytes": "185658"
},
{
"name": "TypeScript",
"bytes": "410434"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User
from django.db import models
from tastypie.utils import now, aware_datetime
class Note(models.Model):
author = models.ForeignKey(User, blank=True, null=True,
on_delete=models.CASCADE)
title = models.CharField(max_length=100)
slug = models.SlugField()
content = models.TextField(blank=True)
is_active = models.BooleanField(default=True)
created = models.DateTimeField(default=now)
updated = models.DateTimeField(default=now)
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
self.updated = now()
return super(Note, self).save(*args, **kwargs)
def what_time_is_it(self):
return aware_datetime(2010, 4, 1, 0, 48)
def get_absolute_url(self):
return '/some/fake/path/%s/' % self.pk
@property
def my_property(self):
return 'my_property'
|
{
"content_hash": "69667462706d347bf07516d3591e96bf",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 59,
"avg_line_length": 30.225806451612904,
"alnum_prop": 0.6467449306296692,
"repo_name": "Perkville/django-tastypie",
"id": "4db7cc6466024b69e1eb9c24add6017ed91c0f1a",
"size": "937",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/profilingtests/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "988"
},
{
"name": "Python",
"bytes": "830931"
},
{
"name": "Shell",
"bytes": "1253"
}
],
"symlink_target": ""
}
|
class Scene(object):
def enter(self):
pass
class Engine(object):
def __init__(self, scene_map):
pass
def play(self):
pass
class Death(Scene):
def enter(self):
pass
class CentralCorridor(Scene):
def enter(self):
pass
class LaserWeaponArmory(Scene):
def enter(self):
pass
class TheBridge(Scene):
def enter(self):
pass
class EscapePod(Scene):
def enter(self):
pass
class Map(object):
    def __init__(self, start_scene):
        self.start_scene = start_scene
def next_scene(self, scene_name):
pass
def opening_scene(self):
print self.start_scene
a_map = Map("Central_corridor")
var = a_map.opening_scene()
print var
a_game= Engine(a_map)
a_game.play()
|
{
"content_hash": "18ad5855221fb83b4f575c98a306ef5a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 34,
"avg_line_length": 11.813559322033898,
"alnum_prop": 0.6743185078909613,
"repo_name": "anbet/99-Prolog-Problems",
"id": "f0820fbbcd811d31239d08e5e235fca6d65787b9",
"size": "697",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ex43_classes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21600"
}
],
"symlink_target": ""
}
|
"""Python binding of I2C wrapper of LetMeCreate library."""
import ctypes
_LIB = ctypes.CDLL('libletmecreate_core.so')
def init():
"""Initialise I2C bus on both mikrobus.
The current I2C bus is set to MIKROBUS_1. The timeout is disabled for
    both buses.
Note: If an error occurs during the initialisation, an exception is thrown.
"""
ret = _LIB.i2c_init()
if ret < 0:
raise Exception("i2c init failed")
def select_bus(mikrobus_index):
"""Selects the I2C bus.
mikrobus_index: must be 0 (MIKROBUS_1) or 1 (MIKROBUS_2). If any other value
is given, this function does nothing.
"""
_LIB.i2c_select_bus(mikrobus_index)
def get_current_bus():
"""Returns the current I2C bus active: 0 for MIKROBUS_1 or 1 for MIKROBUS_2.
Note: An exception is thrown if it fails to get the current bus.
"""
current_bus = ctypes.c_uint8(0)
ret = _LIB.i2c_get_current_bus(ctypes.byref(current_bus))
if ret < 0:
raise Exception("i2c get current bus failed")
return current_bus.value
def write(slave_address, data):
"""Sends data to I2C slave on the current mikrobus.
slave_address: 7-bit or 12-bit address of I2C slave.
data: A list of bytes.
Note: If an error occurs while sending data, an exception is thrown.
"""
arr = (ctypes.c_uint8 * len(data))(*data)
ret = _LIB.i2c_write(slave_address, arr, len(data))
if ret < 0:
raise Exception("i2c write failed")
def read(slave_address, length):
"""Returns a list of bytes from the I2C slave on the current mikrobus.
slave_address: 7-bit or 12-bit address of I2C slave.
length: Number of bytes to read.
Note: If an error occurs while receiving data, an exception is thrown.
"""
arr = (ctypes.c_uint8 * length)()
ret = _LIB.i2c_read(slave_address, arr, length)
if ret < 0:
raise Exception("i2c read failed")
return [arr[i] for i in range(length)]
def write_byte(slave_address, data):
"""Send one byte to I2C slave on the current mikrobus.
slave_address: 7-bit or 12-bit address of I2C slave.
data: 8-bit integer to send.
Note: If an error occurs while sending data, an exception is thrown.
"""
ret = _LIB.i2c_write_byte(slave_address, data)
if ret < 0:
raise Exception("i2c write byte failed")
def read_byte(slave_address):
"""Reads one byte from I2C slave on the current mikrobus.
slave_address: 7-bit or 12-bit address of I2C slave.
Note: If an error occurs while receiving data, an exception is thrown.
"""
data = ctypes.c_uint8(0)
ret = _LIB.i2c_read_byte(slave_address, ctypes.byref(data))
if ret < 0:
raise Exception("i2c write byte failed")
return data.value
def get_timeout():
"""Returns the timeout in milliseconds of the current bus."""
return _LIB.i2c_get_timeout()
def set_timeout(timeout):
"""Set the timeout in milliseconds of the current bus.
    Setting the timeout to 0 disables it.
"""
_LIB.i2c_set_timeout(timeout)
def release():
"""Release I2C bus on both mikrobus.
Note: An exception is thrown if it fails to release I2C buses.
"""
ret = _LIB.i2c_release()
if ret < 0:
raise Exception("i2c release failed")
|
{
"content_hash": "f93c14190b0a592947bfebbf8098e97b",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 80,
"avg_line_length": 27.291666666666668,
"alnum_prop": 0.6564885496183206,
"repo_name": "francois-berder/PyLetMeCreate",
"id": "bf8e6a1c4bf3c7819932288a98228360f91dbe10",
"size": "3298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "letmecreate/core/i2c.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "85404"
}
],
"symlink_target": ""
}
|
import os
from django.conf import settings
def configure_settings():
"""
Configures settings for manage.py and for run_tests.py.
"""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
if not settings.configured:
        # Determine the database settings depending on whether a test_db var is set in CI mode
test_db = os.environ.get('DB', None)
if test_db is None:
db_config = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'ambition',
'USER': 'ambition',
'PASSWORD': 'ambition',
'HOST': 'db',
'TEST': {
'CHARSET': 'UTF8',
}
}
elif test_db == 'postgres':
db_config = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'entity',
}
else:
raise RuntimeError('Unsupported test DB {0}'.format(test_db))
installed_apps = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'entity',
'entity.tests',
]
settings.configure(
TEST_RUNNER='django_nose.NoseTestSuiteRunner',
NOSE_ARGS=['--nocapture', '--nologcapture', '--verbosity=1'],
DATABASES={
'default': db_config,
},
MIDDLEWARE_CLASSES={},
INSTALLED_APPS=installed_apps,
ROOT_URLCONF='entity.urls',
DEBUG=False,
DDF_FILL_NULLABLE_FIELDS=False,
)
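# Hedged example of how an entry point might call this helper; the stock
# manage.py pattern below is an assumption about the surrounding project:
def run_from_command_line(argv):
    configure_settings()
    from django.core.management import execute_from_command_line
    execute_from_command_line(argv)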
|
{
"content_hash": "84581ef5896534a8ce022e9e7fe7591b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 96,
"avg_line_length": 31.4,
"alnum_prop": 0.4956572090330052,
"repo_name": "ambitioninc/django-entity",
"id": "c8d73ae431aed988910a53d646a4bc6036fd06f9",
"size": "1727",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170632"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0006_merge'),
]
operations = [
migrations.AddField(
model_name='abstractnode',
name='keenio_read_key',
field=models.CharField(blank=True, max_length=1000, null=True),
),
migrations.AlterField(
model_name='institution',
name='domains',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=255), blank=True, db_index=True, null=True, size=None),
),
migrations.AlterField(
model_name='institution',
name='email_domains',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=255), blank=True, db_index=True, null=True, size=None),
),
migrations.AlterField(
model_name='institution',
name='login_url',
field=models.URLField(blank=True, null=True),
),
migrations.AlterField(
model_name='institution',
name='logout_url',
field=models.URLField(blank=True, null=True),
),
]
|
{
"content_hash": "8724ffba1be0772d5d81139e88fac098",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 154,
"avg_line_length": 33.48717948717949,
"alnum_prop": 0.599540581929556,
"repo_name": "mluo613/osf.io",
"id": "792f2261dbac9480f6fb1feca91d6ea82898a764",
"size": "1376",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "osf/migrations/0007_auto_20161017_1039.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176516"
},
{
"name": "HTML",
"bytes": "181210"
},
{
"name": "JavaScript",
"bytes": "2015658"
},
{
"name": "Jupyter Notebook",
"bytes": "19626"
},
{
"name": "Mako",
"bytes": "748050"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "8492180"
},
{
"name": "Shell",
"bytes": "379"
}
],
"symlink_target": ""
}
|