"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
module = 'eth_axis_tx'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
DATA_WIDTH = 8
KEEP_ENABLE = (DATA_WIDTH>8)
KEEP_WIDTH = int(DATA_WIDTH/8)
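# tkeep is only meaningful for interfaces wider than one byte (DATA_WIDTH > 8)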
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:0])
s_eth_hdr_valid = Signal(bool(0))
s_eth_dest_mac = Signal(intbv(0)[48:])
s_eth_src_mac = Signal(intbv(0)[48:])
s_eth_type = Signal(intbv(0)[16:])
s_eth_payload_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
s_eth_payload_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
s_eth_payload_axis_tvalid = Signal(bool(0))
s_eth_payload_axis_tlast = Signal(bool(0))
s_eth_payload_axis_tuser = Signal(bool(0))
m_axis_tready = Signal(bool(0))
# Outputs
m_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
m_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
m_axis_tvalid = Signal(bool(0))
m_axis_tlast = Signal(bool(0))
m_axis_tuser = Signal(bool(0))
s_eth_hdr_ready = Signal(bool(0))
s_eth_payload_axis_tready = Signal(bool(0))
busy = Signal(bool(0))
# sources and sinks
source_pause = Signal(bool(0))
sink_pause = Signal(bool(0))
source = eth_ep.EthFrameSource()
source_logic = source.create_logic(
clk,
rst,
eth_hdr_ready=s_eth_hdr_ready,
eth_hdr_valid=s_eth_hdr_valid,
eth_dest_mac=s_eth_dest_mac,
eth_src_mac=s_eth_src_mac,
eth_type=s_eth_type,
eth_payload_tdata=s_eth_payload_axis_tdata,
eth_payload_tkeep=s_eth_payload_axis_tkeep,
eth_payload_tvalid=s_eth_payload_axis_tvalid,
eth_payload_tready=s_eth_payload_axis_tready,
eth_payload_tlast=s_eth_payload_axis_tlast,
eth_payload_tuser=s_eth_payload_axis_tuser,
pause=source_pause,
name='source'
)
sink = axis_ep.AXIStreamSink()
sink_logic = sink.create_logic(
clk,
rst,
tdata=m_axis_tdata,
tkeep=m_axis_tkeep,
tvalid=m_axis_tvalid,
tready=m_axis_tready,
tlast=m_axis_tlast,
tuser=m_axis_tuser,
pause=sink_pause,
name='sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_eth_hdr_valid=s_eth_hdr_valid,
s_eth_hdr_ready=s_eth_hdr_ready,
s_eth_dest_mac=s_eth_dest_mac,
s_eth_src_mac=s_eth_src_mac,
s_eth_type=s_eth_type,
s_eth_payload_axis_tdata=s_eth_payload_axis_tdata,
s_eth_payload_axis_tkeep=s_eth_payload_axis_tkeep,
s_eth_payload_axis_tvalid=s_eth_payload_axis_tvalid,
s_eth_payload_axis_tready=s_eth_payload_axis_tready,
s_eth_payload_axis_tlast=s_eth_payload_axis_tlast,
s_eth_payload_axis_tuser=s_eth_payload_axis_tuser,
m_axis_tdata=m_axis_tdata,
m_axis_tkeep=m_axis_tkeep,
m_axis_tvalid=m_axis_tvalid,
m_axis_tready=m_axis_tready,
m_axis_tlast=m_axis_tlast,
m_axis_tuser=m_axis_tuser,
busy=busy
)
@always(delay(4))
def clkgen():
clk.next = not clk
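# Three drain strategies used below: wait_normal simply waits until the
# interfaces go idle, while the pause variants toggle the source/sink pause
# signals to exercise flow control while frames are in flight.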
def wait_normal():
while s_eth_payload_axis_tvalid or m_axis_tvalid or s_eth_hdr_valid:
yield clk.posedge
def wait_pause_source():
while s_eth_payload_axis_tvalid or m_axis_tvalid or s_eth_hdr_valid:
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
source_pause.next = True
yield clk.posedge
source_pause.next = False
def wait_pause_sink():
while s_eth_payload_axis_tvalid or m_axis_tvalid or s_eth_hdr_valid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
for payload_len in range(1,KEEP_WIDTH*2+2):
yield clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = eth_ep.EthFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.payload = bytearray(range(payload_len))
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = eth_ep.EthFrame()
check_frame.parse_axis(rx_frame)
assert check_frame == test_frame
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = eth_ep.EthFrame()
check_frame.parse_axis(rx_frame)
assert check_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
check_frame = eth_ep.EthFrame()
check_frame.parse_axis(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = eth_ep.EthFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.payload = bytearray(range(payload_len))
test_frame2 = eth_ep.EthFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.payload = bytearray(range(payload_len))
test_frame1.payload.user = 1
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = eth_ep.EthFrame()
check_frame.parse_axis(rx_frame)
assert check_frame == test_frame1
assert rx_frame.user[-1]
yield sink.wait()
rx_frame = sink.recv()
check_frame = eth_ep.EthFrame()
check_frame.parse_axis(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
{
"content_hash": "2d3201c839c8204aa0dc72c4d5e64556",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 77,
"avg_line_length": 30.332278481012658,
"alnum_prop": 0.5833072509128847,
"repo_name": "alexforencich/xfcp",
"id": "5f305fbce7c575e297de08342f2759d143441b1e",
"size": "9607",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/eth/tb/test_eth_axis_tx.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "259479"
},
{
"name": "Python",
"bytes": "3200270"
},
{
"name": "Shell",
"bytes": "14435"
},
{
"name": "Tcl",
"bytes": "29878"
},
{
"name": "Verilog",
"bytes": "4179456"
}
],
"symlink_target": ""
}
|
"""
Services templatetags
"""
from coffin import template
from maker.core.rendering import render_to_string
from jinja2 import contextfunction, Markup
from django.template import RequestContext
register = template.Library()
@contextfunction
def services_ticket_list(context, tickets, skip_group=False,
tick_group=None, nomass=False, group_by=None,
by_assigned=False, by_status=False, noheader=False):
"Print a list of tickets"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('services/tags/ticket_list',
{'tickets': tickets,
'tick_group': tick_group,
'skip_group': skip_group,
'by_assigned': by_assigned,
'by_status': by_status,
'group_by': group_by,
'noheader': noheader,
'nomass': nomass},
context_instance=RequestContext(request),
response_format=response_format))
register.object(services_ticket_list)
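# Typical Jinja2 usage once registered as a global (a sketch; the exact call
# depends on how coffin exposes registered objects in templates):
# {{ services_ticket_list(tickets, group_by='queue') }}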
@contextfunction
def services_service_list(context, services, skip_group=False):
"Print a list of services"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('services/tags/service_list',
{'services': services, 'skip_group': skip_group},
context_instance=RequestContext(request),
response_format=response_format))
register.object(services_service_list)
@contextfunction
def services_queue_list(context, queues, skip_group=False):
"Print a list of queues"
request = context['request']
response_format = 'html'
if 'response_format' in context:
response_format = context['response_format']
return Markup(render_to_string('services/tags/queue_list',
{'queues': queues, 'skip_group': skip_group},
context_instance=RequestContext(request),
response_format=response_format))
register.object(services_queue_list)
|
{
"content_hash": "830d003b1127637f972d8182d6950963",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 38.15151515151515,
"alnum_prop": 0.56791104050834,
"repo_name": "alejo8591/maker",
"id": "ec7f5414028e113fd61052c16b968e018bed4318",
"size": "2570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/templatetags/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1578070"
},
{
"name": "Perl",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2863599"
},
{
"name": "Shell",
"bytes": "3561"
}
],
"symlink_target": ""
}
|
"""Config flow to configure the Freebox integration."""
import logging
from aiofreepybox.exceptions import AuthorizationError, HttpRequestError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import DOMAIN # pylint: disable=unused-import
from .router import get_api
_LOGGER = logging.getLogger(__name__)
class FreeboxFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize Freebox config flow."""
self._host = None
self._port = None
def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
vol.Required(CONF_PORT, default=user_input.get(CONF_PORT, "")): int,
}
),
errors=errors or {},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return self._show_setup_form(user_input, errors)
self._host = user_input[CONF_HOST]
self._port = user_input[CONF_PORT]
# Check if already configured
await self.async_set_unique_id(self._host)
self._abort_if_unique_id_configured()
return await self.async_step_link()
async def async_step_link(self, user_input=None):
"""Attempt to link with the Freebox router.
Given a configured host, will ask the user to press the button
to connect to the router.
"""
if user_input is None:
return self.async_show_form(step_id="link")
errors = {}
fbx = await get_api(self.hass, self._host)
try:
# Open connection and check authentication
await fbx.open(self._host, self._port)
# Check permissions
await fbx.system.get_config()
await fbx.lan.get_hosts_list()
await self.hass.async_block_till_done()
# Close connection
await fbx.close()
return self.async_create_entry(
title=self._host,
data={CONF_HOST: self._host, CONF_PORT: self._port},
)
except AuthorizationError as error:
_LOGGER.error(error)
errors["base"] = "register_failed"
except HttpRequestError:
_LOGGER.error("Error connecting to the Freebox router at %s", self._host)
errors["base"] = "connection_failed"
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Unknown error connecting with Freebox router at %s", self._host
)
errors["base"] = "unknown"
return self.async_show_form(step_id="link", errors=errors)
async def async_step_import(self, user_input=None):
"""Import a config entry."""
return await self.async_step_user(user_input)
async def async_step_discovery(self, discovery_info):
"""Initialize step from discovery."""
return await self.async_step_user(discovery_info)
|
{
"content_hash": "6da0d1cb9b40295ccc6c5e2a6b51299a",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 88,
"avg_line_length": 31.71171171171171,
"alnum_prop": 0.5946022727272727,
"repo_name": "tchellomello/home-assistant",
"id": "0589dfb2ef10fff713c5075eab9658443bf5ac87",
"size": "3520",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/freebox/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
}
|
"""Support for esphome numbers."""
from __future__ import annotations
import math
from typing import cast
from aioesphomeapi import NumberInfo, NumberState
import voluptuous as vol
from homeassistant.components.number import NumberEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import EsphomeEntity, esphome_state_property, platform_async_setup_entry
ICON_SCHEMA = vol.Schema(cv.icon)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up esphome numbers based on a config entry."""
await platform_async_setup_entry(
hass,
entry,
async_add_entities,
component_key="number",
info_type=NumberInfo,
entity_type=EsphomeNumber,
state_type=NumberState,
)
# https://github.com/PyCQA/pylint/issues/3150 for all @esphome_state_property
# pylint: disable=invalid-overridden-method
class EsphomeNumber(EsphomeEntity[NumberInfo, NumberState], NumberEntity):
"""A number implementation for esphome."""
@property
def icon(self) -> str | None:
"""Return the icon."""
if not self._static_info.icon:
return None
return cast(str, ICON_SCHEMA(self._static_info.icon))
@property
def min_value(self) -> float:
"""Return the minimum value."""
return super()._static_info.min_value
@property
def max_value(self) -> float:
"""Return the maximum value."""
return super()._static_info.max_value
@property
def step(self) -> float:
"""Return the increment/decrement step."""
return super()._static_info.step
@esphome_state_property
def value(self) -> float | None:
"""Return the state of the entity."""
if math.isnan(self._state.state):
return None
if self._state.missing_state:
return None
return self._state.state
async def async_set_value(self, value: float) -> None:
"""Update the current value."""
await self._client.number_command(self._static_info.key, value)
|
{
"content_hash": "8f7f247ff710053aca7f7204805e8b3d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 29.564102564102566,
"alnum_prop": 0.6738941890719862,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "1a90cdbeb24cc21d260fe6e750bcb25f5218e233",
"size": "2306",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/esphome/number.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""Contains classes for the feeds"""
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from package.models import Package
class RssLatestPackagesFeed(Feed):
"""RSS Feed for the packages"""
title = "Latest Django packages added"
link = "/packages/latest/"
description = "The last 15 packages added"
def items(self):
"""Returns 15 most recently created repositories"""
return Package.objects.all().order_by("-created")[:15]
def item_title(self, item):
"""Get title of the repository"""
return item.title
def item_description(self, item):
"""Get description of the repository"""
return item.repo_description
def item_pubdate(self, item):
"""Get publication date"""
return item.created
class AtomLatestPackagesFeed(RssLatestPackagesFeed):
"""Atom feed for the packages"""
feed_type = Atom1Feed
subtitle = RssLatestPackagesFeed.description
|
{
"content_hash": "3623217bd5412ec29101c241ba16ea14",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 62,
"avg_line_length": 31.71875,
"alnum_prop": 0.6768472906403941,
"repo_name": "cartwheelweb/packaginator",
"id": "52cd16069c4734ac324fd3c575e37b336724e041",
"size": "1015",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/feeds/feeds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24198"
},
{
"name": "JavaScript",
"bytes": "245128"
},
{
"name": "Perl",
"bytes": "470"
},
{
"name": "Python",
"bytes": "433671"
}
],
"symlink_target": ""
}
|
import spotipy
import spotipy.oauth2 as oauth2
from models import artist, track, history, user
import time
from calendar import timegm
class spotify:
def __init__(self, config):
self.PATTERN = '%Y-%m-%dT%H:%M:%S.%fZ'
self.oauth = oauth2.SpotifyOAuth(
config['client_id'],
config['client_secret'],
config['redirect_uri']
)
self.token = config['access_token']
self.refresh = config['refresh_token']
self.api_url = '{0}/{1}/'.format(config['api_base_url'], config['api_version'])
self.check_token()
self.sp = spotipy.Spotify(auth=self.token)
# elems
self.artists = []
self.tracks = []
self.historics = []
self.user_id = config['user_id']
def check_token(self):
resp = self.oauth.refresh_access_token(self.refresh)
self.token = resp['access_token']
self.refresh = resp['refresh_token']
def get_user_profile(self):
result = self.sp.me()
self.user_id = result['id']
return user.User(
self.user_id,
result['display_name'],
result['followers']['total'],
result['uri']
)
def get_history(self):
results = self.sp._get('me/player/recently-played', limit=50)
tracks, artists, historics = [], [], []
for item in results['items']:
# fetch artists related to track
for a in item['track']['artists']:
artists.append(
artist.Artist(
a['id'],
a['uri'],
a['name']
)
)
# fetch track information
tracks.append(
track.Track(
item['track']['id'],
item['track']['uri'],
item['track']['duration_ms'] // 1000,
item['track']['artists'][0]['id'], # FIXME create relational table to avoid choosing only the first artist related to a track
item['track']['name']
)
)
# fetch item information
historics.append(
history.History(
item['track']['id'],
timegm(time.strptime(item['played_at'], self.PATTERN)),
self.user_id
)
)
# save data
self.artists, self.tracks, self.historics = artists, tracks, historics
# return historics
return self.historics
def get_history_related_artists(self):
return self.artists
def get_history_related_tracks(self):
return self.tracks
|
{
"content_hash": "cf9ab8a1443e88cc60ef3e63e75952aa",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 146,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.4996355685131195,
"repo_name": "babastienne/spotistory",
"id": "c6e573aec31bc2971aa2f97e7d7cb56380f7831b",
"size": "2769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spotistory/spotify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "133"
},
{
"name": "Python",
"bytes": "9323"
},
{
"name": "TSQL",
"bytes": "1073"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django.db import models
from django.contrib.auth.models import User
class Brief(models.Model):
brief = models.CharField('七嘴八舌', max_length=1000)
thumbnail = models.ImageField('配图', upload_to='brief', null=True, blank=True)
user = models.ForeignKey(User, default=1)
createtime = models.DateTimeField(auto_now_add=True)
updatetime = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'albums_brief'
verbose_name = "简语"
verbose_name_plural = "简语"
def __unicode__(self):
return self.brief
def get_absolute_url(self):
return reverse('brief_detail', kwargs={'pk': self.pk})
|
{
"content_hash": "17709cdfd5a73acb4d0ce8577077eb05",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 81,
"avg_line_length": 30.608695652173914,
"alnum_prop": 0.6775568181818182,
"repo_name": "madre/PersonalWeb",
"id": "39689220770eb105dc125687ded32c17ace250ab",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brief/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "79689"
},
{
"name": "Nginx",
"bytes": "1140"
},
{
"name": "Python",
"bytes": "55561"
}
],
"symlink_target": ""
}
|
from os import sys
import synth_common
from synth_common import s_to_ns
trace = synth_common.create_trace()
trace.add_chrome_metadata(os_name="Android")
track1 = 1234
track2 = 4567
gpu_track = 7890
trace.add_process_track_descriptor(track1, pid=0)
trace.add_process_track_descriptor(track2, pid=2)
trace.add_process_track_descriptor(gpu_track, pid=4)
frame_period = s_to_ns(1.0 / 60)
trace.add_track_event_slice("VSync", ts=s_to_ns(3), dur=10, track=gpu_track)
trace.add_track_event_slice(
"VSync", ts=s_to_ns(3) + frame_period, dur=10, track=gpu_track)
# Frame skipped, but modified rail mode won't go back to foreground_idle
trace.add_track_event_slice(
"VSync", ts=s_to_ns(3) + frame_period * 3, dur=10, track=gpu_track)
# Larger gap now when mode will go to foreground_idle
trace.add_track_event_slice(
"VSync", ts=s_to_ns(3) + frame_period * 12, dur=10, track=gpu_track)
trace.add_track_event_slice(
"VSync", ts=s_to_ns(3) + frame_period * 13, dur=10, track=gpu_track)
trace.add_track_event_slice(
"VSync", ts=s_to_ns(3) + frame_period * 14, dur=10, track=gpu_track)
trace.add_track_event_slice(
"InputLatency::GestureScrollBegin", ts=s_to_ns(3), dur=10)
trace.add_track_event_slice(
"InputLatency::GestureScrollEnd", ts=s_to_ns(3) + frame_period * 4, dur=10)
trace.add_rail_mode_slice(
ts=0,
dur=s_to_ns(1),
track=track1,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_RESPONSE)
trace.add_rail_mode_slice(
ts=s_to_ns(1),
dur=s_to_ns(2),
track=track1,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_LOAD)
trace.add_rail_mode_slice(
ts=s_to_ns(3),
dur=-1,
track=track1,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)
trace.add_rail_mode_slice(
ts=0,
dur=s_to_ns(1),
track=track2,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_ANIMATION)
trace.add_rail_mode_slice(
ts=s_to_ns(1),
dur=s_to_ns(2.5),
track=track2,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)
trace.add_rail_mode_slice(
ts=s_to_ns(2.5),
dur=s_to_ns(1),
track=track2,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_ANIMATION)
trace.add_rail_mode_slice(
ts=s_to_ns(3.5),
dur=s_to_ns(1),
track=track2,
mode=trace.prototypes.ChromeRAILMode.RAIL_MODE_IDLE)
sys.stdout.buffer.write(trace.trace.SerializeToString())
|
{
"content_hash": "f993a8f00ee512cd080413f104933048",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 30.81578947368421,
"alnum_prop": 0.695132365499573,
"repo_name": "google/perfetto",
"id": "e7b612e819da2688ff2a159b3d4398016355d208",
"size": "2966",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/trace_processor/chrome/modified_rail_modes_with_input.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "58347"
},
{
"name": "C++",
"bytes": "10532953"
},
{
"name": "CSS",
"bytes": "6080"
},
{
"name": "Dockerfile",
"bytes": "6650"
},
{
"name": "HTML",
"bytes": "15653"
},
{
"name": "Java",
"bytes": "12441"
},
{
"name": "JavaScript",
"bytes": "115174"
},
{
"name": "Makefile",
"bytes": "10869"
},
{
"name": "Meson",
"bytes": "1635"
},
{
"name": "Python",
"bytes": "969677"
},
{
"name": "SCSS",
"bytes": "116843"
},
{
"name": "Shell",
"bytes": "79903"
},
{
"name": "Starlark",
"bytes": "222184"
},
{
"name": "TypeScript",
"bytes": "1740641"
}
],
"symlink_target": ""
}
|
import argparse
import distutils.dir_util
import os
import re
import shutil
import subprocess
import sys
##########################################################################
# Globals
##########################################################################
Corefx_url = 'https://github.com/dotnet/corefx.git'
# This should be factored out of build.sh
Unix_name_map = {
'Linux': 'Linux',
'Darwin': 'OSX',
'FreeBSD': 'FreeBSD',
'OpenBSD': 'OpenBSD',
'NetBSD': 'NetBSD',
'SunOS': 'SunOS'
}
Is_windows = (os.name == 'nt')
##########################################################################
# Delete protocol
##########################################################################
def del_rw(action, name, exc):
os.chmod(name, 0651)
os.remove(name)
##########################################################################
# Argument Parser
##########################################################################
description = 'Tool to facilitate running CoreFx tests from the CoreCLR repo'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-arch', dest='arch', default='x64')
parser.add_argument('-build_type', dest='build_type', default='Debug')
parser.add_argument('-clr_root', dest='clr_root', default=None)
parser.add_argument('-fx_root', dest='fx_root', default=None)
parser.add_argument('-fx_branch', dest='fx_branch', default='master')
parser.add_argument('-fx_commit', dest='fx_commit', default=None)
parser.add_argument('-env_script', dest='env_script', default=None)
##########################################################################
# Helper Functions
##########################################################################
def validate_args(args):
""" Validate all of the arguments parsed.
Args:
args (argparser.ArgumentParser): Args parsed by the argument parser.
Returns:
(arch, build_type, clr_root, fx_root, fx_branch, fx_commit, env_script)
(str, str, str, str, str, str, str)
Notes:
If the arguments are valid then return them all in a tuple. If not, raise
an exception stating x argument is incorrect.
"""
arch = args.arch
build_type = args.build_type
clr_root = args.clr_root
fx_root = args.fx_root
fx_branch = args.fx_branch
fx_commit = args.fx_commit
env_script = args.env_script
def validate_arg(arg, check):
""" Validate an individual arg
Args:
arg (str|bool): argument to be validated
check (lambda: x-> bool): test that returns either True or False
: based on whether the check passes.
Returns:
is_valid (bool): Is the argument valid?
"""
helper = lambda item: item is not None and check(item)
if not helper(arg):
raise Exception('Argument: %s is not valid.' % (arg))
valid_archs = ['x86', 'x64', 'arm', 'arm64']
valid_build_types = ['Debug', 'Checked', 'Release']
arch = next((a for a in valid_archs if a.lower() == arch.lower()), arch)
build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)
validate_arg(arch, lambda item: item in valid_archs)
validate_arg(build_type, lambda item: item in valid_build_types)
validate_arg(fx_branch, lambda item: True)
if fx_commit is None:
fx_commit = 'HEAD'
if clr_root is None:
clr_root = nth_dirname(os.path.abspath(sys.argv[0]), 3)
else:
clr_root = os.path.normpath(clr_root)
validate_arg(clr_root, lambda item: os.path.isdir(clr_root))
if fx_root is None:
fx_root = os.path.join(clr_root, '_', 'fx')
else:
fx_root = os.path.normpath(fx_root)
if env_script is not None:
validate_arg(env_script, lambda item: os.path.isfile(env_script))
env_script = os.path.abspath(env_script)
args = (arch, build_type, clr_root, fx_root, fx_branch, fx_commit, env_script)
log('Configuration:')
log(' arch: %s' % arch)
log(' build_type: %s' % build_type)
log(' clr_root: %s' % clr_root)
log(' fx_root: %s' % fx_root)
log(' fx_branch: %s' % fx_branch)
log(' fx_commit: %s' % fx_commit)
log(' env_script: %s' % env_script)
return args
def nth_dirname(path, n):
""" Find the Nth parent directory of the given path
Args:
path (str): path name containing at least N components
n (int): num of basenames to remove
Returns:
outpath (str): path with the last n components removed
Notes:
If n is 0, path is returned unmodified
"""
assert n >= 0
for i in range(0, n):
path = os.path.dirname(path)
return path
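# e.g. (hypothetical path) nth_dirname('/repo/tests/scripts/run.py', 3)
# returns '/repo'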
def dotnet_rid_os(dotnet_path):
""" Determine the OS identifier from the RID as reported by dotnet
Args:
dotnet_path (str): path to folder containing dotnet(.exe)
Returns:
rid_os (str): OS component of RID as reported by dotnet
"""
dotnet_info = subprocess.check_output([os.path.join(dotnet_path, 'dotnet'), '--info'])
m = re.search('^\s*RID:\s+([^-]*)-(\S*)\s*$', dotnet_info, re.MULTILINE)
return m.group(1)
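# e.g. an info line such as ' RID:     win7-x64' yields 'win7' (sample
# output; the exact text varies across dotnet versions)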
def log(message):
""" Print logging information
Args:
message (str): message to be printed
"""
print '[%s]: %s' % (sys.argv[0], message)
##########################################################################
# Main
##########################################################################
def main(args):
global Corefx_url
global Unix_name_map
testing = False
arch, build_type, clr_root, fx_root, fx_branch, fx_commit, env_script = validate_args(
args)
clr_os = 'Windows_NT' if Is_windows else Unix_name_map[os.uname()[0]]
core_root = os.path.join(clr_root,
'bin',
'Product',
'%s.%s.%s' % (clr_os, arch, build_type))
# corefx creates both files that are read-only and files that include non-ascii
# characters. Using onerror=del_rw allows us to delete all of the read-only files.
# To delete the files with non-ascii characters, when rmtree fails due to those
# files, we then will call rd on Windows.
if not testing and os.path.exists(fx_root):
if Is_windows:
while True:
res = subprocess.check_output(['tasklist'])
if not 'VBCSCompiler.exe' in res:
break
os.chdir(fx_root)
os.system('git clean -fxd')
os.chdir(clr_root)
shutil.rmtree(fx_root, onerror=del_rw)
# Clone the corefx branch
command = 'git clone -b %s --single-branch %s %s' % (
fx_branch, Corefx_url, fx_root)
log(command)
if testing:
if not os.path.exists(fx_root):
os.makedirs(fx_root)
returncode = 0
else:
returncode = os.system(command)
# Change directory to the corefx root
cwd = os.getcwd()
log('[cd] ' + fx_root)
os.chdir(fx_root)
# Checkout the appropriate corefx commit
command = "git checkout %s" % fx_commit
log(command)
returncode = 0 if testing else os.system(command)
if not returncode == 0:
sys.exit(returncode)
# On Unix, corefx build.sh requires HOME to be set, and it isn't by default
# under our CI system, so set it now.
if not Is_windows:
fx_home = os.path.join(fx_root, 'tempHome')
if not os.path.exists(fx_home):
os.makedirs(fx_home)
os.putenv('HOME', fx_home)
log('HOME=' + fx_home)
# Determine the RID to specify to the corefx build scripts. This seems to
# be way harder than it ought to be.
if testing:
rid_os = dotnet_rid_os('')
else:
if Is_windows:
rid_os = "win7"
else:
rid_os = dotnet_rid_os(os.path.join(clr_root, 'Tools', 'dotnetcli'))
# Gather up some arguments to pass to both build and build-tests.
config_args = '-Release -RuntimeOS=%s -ArchGroup=%s' % (rid_os, arch)
# Run the primary (non-test) corefx build
command = ' '.join(('build.cmd' if Is_windows else './build.sh', config_args))
log(command)
returncode = 0 if testing else os.system(command)
if returncode != 0:
sys.exit(returncode)
# Copy the coreclr runtime we wish to run tests against. This is the recommended
# hack until a full-stack test solution is ready. This assumes there is a single
# directory under <fx_root>/bin/runtime into which we copy coreclr binaries. We
# assume the appropriate coreclr has already been built.
fx_runtime_dir = os.path.join(fx_root, 'bin', 'runtime')
overlay_dest = os.path.join(fx_runtime_dir, os.listdir(fx_runtime_dir)[0])
log('[overlay] %s -> %s' % (core_root, overlay_dest))
if not testing:
distutils.dir_util.copy_tree(core_root, overlay_dest)
# Build the build-tests command line.
if Is_windows:
command = 'build-tests.cmd'
if env_script is not None:
command = ('cmd /c %s&&' % env_script) + command
else:
command = './build-tests.sh'
if env_script is not None:
command = ('. %s;' % env_script) + command
command = ' '.join((
command,
config_args,
'--',
'/p:WithoutCategories=IgnoreForCI'
))
if not Is_windows:
command += ' /p:TestWithLocalNativeLibraries=true'
# Run the corefx test build and run the tests themselves.
log(command)
returncode = 0 if testing else os.system(command)
sys.exit(returncode)
##########################################################################
# setup for Main
##########################################################################
if __name__ == '__main__':
Args = parser.parse_args(sys.argv[1:])
main(Args)
|
{
"content_hash": "e55502fa4c0505e355a25e1c1ea4a913",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 100,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.5534439416792358,
"repo_name": "neurospeech/coreclr",
"id": "f0111da19cf5e6ede6588e2b8dbf3a1eccac12f7",
"size": "10580",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/scripts/run-corefx-tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "939880"
},
{
"name": "Awk",
"bytes": "5652"
},
{
"name": "Batchfile",
"bytes": "32979"
},
{
"name": "C",
"bytes": "5984302"
},
{
"name": "C#",
"bytes": "106444917"
},
{
"name": "C++",
"bytes": "65205641"
},
{
"name": "CMake",
"bytes": "515969"
},
{
"name": "Groff",
"bytes": "526151"
},
{
"name": "Makefile",
"bytes": "2314"
},
{
"name": "Objective-C",
"bytes": "229080"
},
{
"name": "Perl",
"bytes": "21087"
},
{
"name": "PowerShell",
"bytes": "4332"
},
{
"name": "Python",
"bytes": "8165"
},
{
"name": "Shell",
"bytes": "27831"
}
],
"symlink_target": ""
}
|
from .client import CatalogServiceClient
from .async_client import CatalogServiceAsyncClient
__all__ = (
"CatalogServiceClient",
"CatalogServiceAsyncClient",
)
|
{
"content_hash": "c3a9360072fb7f85bbafb92e6582e6e0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.7692307692307693,
"repo_name": "googleapis/python-recommendations-ai",
"id": "dcec94d439ff2a9ad23a15aa450afda81d6f8e80",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/recommendationengine_v1beta1/services/catalog_service/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1316965"
},
{
"name": "Shell",
"bytes": "30696"
}
],
"symlink_target": ""
}
|
import unittest2
class TestClient(unittest2.TestCase):
def setUp(self):
KLASS = self._getTargetClass()
self.original_cnxn_class = KLASS._connection_class
KLASS._connection_class = _MockConnection
def tearDown(self):
KLASS = self._getTargetClass()
KLASS._connection_class = self.original_cnxn_class
def _getTargetClass(self):
from gcloud.client import Client
return Client
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
from gcloud._testing import _Monkey
from gcloud import client
CREDENTIALS = object()
FUNC_CALLS = []
def mock_get_credentials():
FUNC_CALLS.append('get_credentials')
return CREDENTIALS
with _Monkey(client, get_credentials=mock_get_credentials):
client_obj = self._makeOne()
self.assertTrue(isinstance(client_obj.connection, _MockConnection))
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertEqual(FUNC_CALLS, ['get_credentials'])
def test_ctor_explicit(self):
CREDENTIALS = object()
HTTP = object()
client_obj = self._makeOne(credentials=CREDENTIALS, http=HTTP)
self.assertTrue(isinstance(client_obj.connection, _MockConnection))
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertTrue(client_obj.connection.http is HTTP)
def test_from_service_account_json(self):
from gcloud._testing import _Monkey
from gcloud import client
KLASS = self._getTargetClass()
CREDENTIALS = object()
_CALLED = []
def mock_creds(arg1):
_CALLED.append((arg1,))
return CREDENTIALS
BOGUS_ARG = object()
with _Monkey(client, get_for_service_account_json=mock_creds):
client_obj = KLASS.from_service_account_json(BOGUS_ARG)
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertEqual(_CALLED, [(BOGUS_ARG,)])
def test_from_service_account_json_fail(self):
KLASS = self._getTargetClass()
CREDENTIALS = object()
self.assertRaises(TypeError, KLASS.from_service_account_json, None,
credentials=CREDENTIALS)
def test_from_service_account_p12(self):
from gcloud._testing import _Monkey
from gcloud import client
KLASS = self._getTargetClass()
CREDENTIALS = object()
_CALLED = []
def mock_creds(arg1, arg2):
_CALLED.append((arg1, arg2))
return CREDENTIALS
BOGUS_ARG1 = object()
BOGUS_ARG2 = object()
with _Monkey(client, get_for_service_account_p12=mock_creds):
client_obj = KLASS.from_service_account_p12(BOGUS_ARG1, BOGUS_ARG2)
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertEqual(_CALLED, [(BOGUS_ARG1, BOGUS_ARG2)])
def test_from_service_account_p12_fail(self):
KLASS = self._getTargetClass()
CREDENTIALS = object()
self.assertRaises(TypeError, KLASS.from_service_account_p12, None,
None, credentials=CREDENTIALS)
class TestJSONClient(unittest2.TestCase):
def setUp(self):
KLASS = self._getTargetClass()
self.original_cnxn_class = KLASS._connection_class
KLASS._connection_class = _MockConnection
def tearDown(self):
KLASS = self._getTargetClass()
KLASS._connection_class = self.original_cnxn_class
def _getTargetClass(self):
from gcloud.client import JSONClient
return JSONClient
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults(self):
from gcloud._testing import _Monkey
from gcloud import client
PROJECT = object()
CREDENTIALS = object()
FUNC_CALLS = []
def mock_get_proj():
FUNC_CALLS.append('_get_production_project')
return PROJECT
def mock_get_credentials():
FUNC_CALLS.append('get_credentials')
return CREDENTIALS
with _Monkey(client, get_credentials=mock_get_credentials,
_get_production_project=mock_get_proj):
client_obj = self._makeOne()
self.assertTrue(client_obj.project is PROJECT)
self.assertTrue(isinstance(client_obj.connection, _MockConnection))
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertEqual(FUNC_CALLS,
['_get_production_project', 'get_credentials'])
def test_ctor_missing_project(self):
from gcloud._testing import _Monkey
from gcloud import client
FUNC_CALLS = []
def mock_get_proj():
FUNC_CALLS.append('_get_production_project')
return None
with _Monkey(client, _get_production_project=mock_get_proj):
self.assertRaises(ValueError, self._makeOne)
self.assertEqual(FUNC_CALLS, ['_get_production_project'])
def test_ctor_explicit(self):
PROJECT = object()
CREDENTIALS = object()
HTTP = object()
client_obj = self._makeOne(project=PROJECT, credentials=CREDENTIALS,
http=HTTP)
self.assertTrue(client_obj.project is PROJECT)
self.assertTrue(isinstance(client_obj.connection, _MockConnection))
self.assertTrue(client_obj.connection.credentials is CREDENTIALS)
self.assertTrue(client_obj.connection.http is HTTP)
class _MockConnection(object):
def __init__(self, credentials=None, http=None):
self.credentials = credentials
self.http = http
|
{
"content_hash": "721764bb2d26367284e8d043c3196af1",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 79,
"avg_line_length": 32.837078651685395,
"alnum_prop": 0.6295979469632165,
"repo_name": "ikool/metact06-djan",
"id": "77214ccbff3349646e63ff8ccca3c9b1a5b977d0",
"size": "6442",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/gcloud/test_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4935"
},
{
"name": "Protocol Buffer",
"bytes": "20396"
},
{
"name": "Python",
"bytes": "4111257"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from .models import OrderItem
from .forms import OrderCreateForm
from cart.cart import Cart
from .tasks import order_created
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import get_object_or_404
from .models import Order
def order_create(request):
cart = Cart(request)
if request.method == 'POST':
form = OrderCreateForm(request.POST)
if form.is_valid():
order = form.save(commit=False)
if cart.coupon:
order.coupon = cart.coupon
order.discount = cart.coupon.discount
order.save()
for item in cart:
OrderItem.objects.create(order=order,
product=item['product'],
price=item['price'],
quantity=item['quantity'])
# clear the cart
cart.clear()
# launch asynchronous task
order_created.delay(order.id)
return render(request,
'orders/order/created.html',
{'order': order})
else:
form = OrderCreateForm()
return render(request,
'orders/order/create.html',
{'cart': cart, 'form': form})
@staff_member_required
def admin_order_detail(request, order_id):
order = get_object_or_404(Order, id=order_id)
return render(request,
'admin/orders/order/detail.html',
{'order': order})
|
{
"content_hash": "f6d18a349b9ee7567863b58f89bd1baf",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 37.27906976744186,
"alnum_prop": 0.5527136618839675,
"repo_name": "PikaDm/clave-online-shop-template",
"id": "6416497cfe58b2063c137febc3a3a9d014dfb45e",
"size": "1603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orders/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11913"
},
{
"name": "HTML",
"bytes": "16677"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "36018"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import unittest
from django.contrib.auth import get_user_model
from django import forms
from django.test import TestCase
from model_mommy import mommy
import six
from core.forms import (ChecklistForm, SampleForm, SampleSelectOrCreateForm,
SubstrateForm, TrackProjectForm, WizardBasicInfoForm)
from core.models import Project, Sample, Substrate, ProcessType
class TestSubstrateForm(unittest.TestCase):
def test_clean_empty(self):
form = SubstrateForm(data={})
errors = dict(form.errors)
self.assertIsNotNone(errors.get('__all__'))
self.assertListEqual(errors.get('__all__'),
['Cannot leave all substrate fields blank.'])
def test_clean_single_specified(self):
form = SubstrateForm(data={
'comment': 'test',
})
errors = dict(form.errors)
self.assertDictEqual(errors, {})
class TestSampleForm(TestCase):
def test_save(self):
form = SampleForm(data={
'comment': 'test',
})
self.assertDictEqual(dict(form.errors), {})
sample = form.save(commit=False)
self.assertEqual(sample.comment, 'test')
class TestSampleSelectOrCreateForm(TestCase):
def test_clean_specify_nonexistant_sample(self):
form = SampleSelectOrCreateForm(data={
'existing_or_new': 'existing-sample',
'sample_uuid': 's0001',
})
errors = dict(form.errors)
self.assertIsNotNone(errors.get('sample_uuid'))
self.assertListEqual(errors.get('sample_uuid'),
['Sample s0001 not found'])
def test_clean_incorrect_format(self):
form = SampleSelectOrCreateForm(data={
'existing_or_new': 'existing-sample',
'sample_uuid': 'testing',
})
errors = dict(form.errors)
self.assertIsNotNone(errors.get('sample_uuid'))
self.assertListEqual(errors.get('sample_uuid'),
['Sample UUID is not in the correct format'])
def test_clean_specify_existing(self):
sample = Sample.objects.create(substrate=mommy.make(Substrate))
form = SampleSelectOrCreateForm(data={
'existing_or_new': 'existing-sample',
'sample_uuid': sample.uuid,
})
errors = dict(form.errors)
self.assertDictEqual(errors, {})
def test_clean_specify_existing_and_new(self):
sample = Sample.objects.create(substrate=mommy.make(Substrate))
form = SampleSelectOrCreateForm(data={
'existing_or_new': 'existing-sample',
'sample_uuid': sample.uuid,
'sample_comment': 'sample comment',
'substrate_comment': 'substrate comment',
'substrate_serial': 'substrate serial',
'substrate_source': 'substrate source',
})
errors = dict(form.errors)
self.assertDictEqual(errors, {})
def test_clean_new_sample(self):
form = SampleSelectOrCreateForm(data={
'existing_or_new': 'new-sample',
'sample_comment': 'sample comment',
'substrate_comment': 'substrate comment',
'substrate_serial': 'substrate serial',
'substrate_source': 'substrate source',
})
errors = dict(form.errors)
self.assertDictEqual(errors, {})
def test_clean_empty(self):
form = SampleSelectOrCreateForm(data={})
errors = dict(form.errors)
self.assertIsNotNone(errors.get('existing_or_new'))
self.assertListEqual(errors.get('existing_or_new'),
['This field is required.'])
class TestTrackProjectForm(TestCase):
def test_save(self):
user = get_user_model().objects.create_user('username', password='')
project = mommy.make(Project)
form = TrackProjectForm(data={
'project': project.id,
'is_owner': True,
})
tracking = form.save(user=user)
self.assertEqual(project.id, tracking.project_id)
self.assertEqual(user.id, tracking.user_id)
self.assertTrue(tracking.is_owner)
class TestChecklistForm(unittest.TestCase):
@classmethod
def setUpClass(cls):
class TestChecklistForm(ChecklistForm):
checklist_fields = [
'first',
'second',
'third',
'fourth',
]
cls.form_class = TestChecklistForm
def test_init(self):
form = self.form_class()
for i, ((name, field), label) in enumerate(zip(six.iteritems(form.fields),
self.form_class.checklist_fields)):
self.assertEqual('field_{}'.format(i), name)
self.assertEqual(field.label, label)
self.assertTrue(field.required)
self.assertEqual(field.__class__, forms.BooleanField)
class TestWizardBasicInfoForm(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user('username1',
password='')
self.processtype = mommy.make(ProcessType, type='test')
def test_run_date_validation_today(self):
date = datetime.date.today()
form = WizardBasicInfoForm(user=self.user, data={
'user': self.user,
'type': self.processtype,
'run_date': date,
'title': 'title',
})
valid = form.is_valid()
def test_run_date_validation_future(self):
date = datetime.date.today() + datetime.timedelta(days=2)
form = WizardBasicInfoForm(user=self.user, data={
'user': self.user,
'type': self.processtype,
'run_date': date,
'title': 'title',
})
valid = form.is_valid()
def test_run_date_validation_past(self):
date = datetime.date.today() - datetime.timedelta(days=2)
form = WizardBasicInfoForm(user=self.user, data={
'user': self.user,
'type': self.processtype,
'run_date': date,
'title': 'title',
})
valid = form.is_valid()
|
{
"content_hash": "61b49b5a7991f5d9268a4d0c54a2e8f1",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 86,
"avg_line_length": 34.35164835164835,
"alnum_prop": 0.5889315419065899,
"repo_name": "emergence-lab/emergence-lab",
"id": "ba5536c662b6fa4ec5411845ab47005ea435e00e",
"size": "6276",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "core/tests/test_forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4017"
},
{
"name": "HTML",
"bytes": "255519"
},
{
"name": "JavaScript",
"bytes": "16743"
},
{
"name": "Python",
"bytes": "539642"
}
],
"symlink_target": ""
}
|
"""
@file
@brief @see cl Database
"""
from pyquickhelper.loghelper.flog import fLOG
module_odbc = None
class DatabaseObject:
"""
Methods for database related to object, see @see cl Database.
"""
def fill_table_with_objects(
self, tablename, iterator_on, check_existence=False, skip_exception=False):
"""
Fill a table of a database with objects (from ``iterator_on``) implementing the following interface:
- a property ``schema_database`` which gives the schema
- a property ``asrow`` which puts all values in a row
- a property ``index`` which specifies the index to use, unless it returns None
- a property ``indexes`` which specifies other indexes to create (optional)
The property ``asrow`` must not include other objects, only their ids.
If the table does not exist, it creates it.
@param tablename name of a table (created if it does not exist)
@param iterator_on iterator on objects (StreamRSS objects, for example)
@param check_existence avoid adding an element if it already exists (based on the index column)
@param skip_exception skip exceptions raised while inserting an element
The function does not check for duplicates among the elements sent in ``iterator_on``,
it only checks for duplicates between the new elements and the old ones (those already in the database).
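Example (a minimal sketch; class and field names are hypothetical):
::

    class Item:
        schema_database = [("id", str), ("title", str)]
        index = "id"

        @property
        def asrow(self):
            return (self.id, self.title)

    db.fill_table_with_objects("items", items, check_existence=True)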
"""
schema = None
index = None
indexes = None
for _ in iterator_on:
schema = _.schema_database
index = _.index
try:
indexes = _.indexes
except Exception:
pass
break
if schema is None:
# nothing to do, it is empty
return
if tablename not in self.get_table_list():
fLOG("create table ", tablename)
cursor = self.create_table(tablename, schema)
if index is not None:
self.create_index(
index + "_" + tablename + "_index",
tablename,
[index],
unique=True)
if indexes is not None:
for ind in indexes:
if isinstance(ind, str):
self.create_index(
ind + "_" + tablename + "_index",
tablename,
[ind],
unique=False)
else:
self.create_index(
"_".join(ind) + "_" + tablename + "_index",
tablename,
ind,
unique=False)
ite = map(lambda m: m.asrow, iterator_on)
self.append_values(
ite,
tablename,
schema,
cursor=cursor,
skip_exception=skip_exception)
else:
if check_existence:
if index is None:
raise ValueError(
"unable to check existence because index property is not set up")
def enumerate_nodup(iterator):
for it in iterator:
val = it.__dict__[index]
view = self.execute_view(
"SELECT * FROM %s WHERE %s=\"%s\";" %
(tablename, index, val))
if len(view) == 0:
yield it
ite = map(lambda m: m.asrow, enumerate_nodup(iterator_on))
self.append_values(
ite,
tablename,
schema,
cursor=None,
skip_exception=skip_exception)
else:
ite = map(lambda m: m.asrow, iterator_on)
self.append_values(
ite,
tablename,
schema,
cursor=None,
skip_exception=skip_exception)
self.commit()
def enumerate_objects(self, table, classObject):
"""
Iterates on the objects of a table, assuming each row is an object (of type classObject).
The classObject must satisfy the following interface:
- a staticmethod ``schema_database_read`` which gives the schema
- a constructor accepting keyword arguments named after the column names
@param table table name
@param classObject class object
@return iterator on object
Example:
::
for blog in db.enumerate_objects ("blogs", StreamRSS):
#...
"""
schema = classObject.schema_database_read()
for row in self.execute("SELECT * FROM %s" % table):
di = {schema[i][0]: v for i, v in enumerate(row)}
yield classObject(**di)
|
{
"content_hash": "286d26c264c7dfc5f23399c820428dc4",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 113,
"avg_line_length": 36.827338129496404,
"alnum_prop": 0.48427427231881226,
"repo_name": "sdpython/pyensae",
"id": "b8d01dd49581748c4d5283f759a92c4454d8a5d8",
"size": "5137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyensae/sql/database_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "129141"
},
{
"name": "Batchfile",
"bytes": "1818"
},
{
"name": "CSS",
"bytes": "689"
},
{
"name": "Jupyter Notebook",
"bytes": "750986"
},
{
"name": "Python",
"bytes": "3101205"
},
{
"name": "R",
"bytes": "3146"
},
{
"name": "Shell",
"bytes": "715"
}
],
"symlink_target": ""
}
|
import csv
import codecs
# Fuzzy matching is used to link first and last names as recorded
# by the secretariat with those from EDT ...
from fuzzywuzzy import process
from app.models import *
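# Illustrative fuzzy match (hypothetical names): process.extract returns
# (choice, score) pairs, e.g.
#   process.extract('MarcoCatillaz', ['Marco|Catillaz', 'Marc|Catella'], limit=1)
#   -> [('Marco|Catillaz', 96)]   # score value approximate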
def load_teachers(filename, encoding='cp1252'):
#with open(filename, 'rb', encoding='cp1252') as csvfile:
with codecs.open(filename,"rb",encoding) as csvfile:
print(csvfile)
reader = csv.DictReader(csvfile, delimiter=';', quotechar='"')
return list(reader)
def load_items():
file = open("data/items.csv").readlines()
item_list = []
for line in file:
item_list.append(line.strip().split(";"))
return item_list
def print_row(row):
for code, value in row.items():
print(code, " ==> ", value)
def insert_reservation(db, row, start_date, end_date):
'''
NUMERO ==> 1
POND. ==> 1
H.DEBUT ==> 08h10
EFFECTIF ==> 0
MAT_CODE ==> GY
SALLE ==>
MAT_LIBELLE ==> Education physique
MODALITE ==> CG
CO-ENS. ==> N
DUREE ==> 2h30
ALTERNANCE ==> H
PROF_PRENOM ==> Marco
PROF_NOM ==> Catillaz
JOUR ==> lundi
FREQUENCE ==> H
CLASSE ==> 1EC1
'''
# Load the teacher(s) linked to this row. Split on commas; if there is no comma we get a single-element list
teachers_fnames = [t.strip() for t in row['PROF_PRENOM'].split(',')]
teachers_lnames = [t.strip() for t in row['PROF_NOM'].split(',')]
#print(teachers_fnames, teachers_lnames)
teachers = []
errors = []
# All the first/last names in the database, used for the fuzzy matching
# that links the secretariat file with the EDT file, since the two are
# sometimes out of sync
choices = [t.first_name + '|' + t.last_name for t in User.query.all()]
for fn, ln in zip(teachers_fnames, teachers_lnames):
teacher = User.query.filter_by(first_name=fn, last_name=ln).first()
if teacher is None and (fn + ln).strip() != '':
# Try to find the best match ... with a fuzzy search
match = process.extract(fn + ln, choices, limit=1)
db_full_name = match[0][0]
print('Matched', fn + ln, "with", match[0][0], match[0][1])
fn, ln = db_full_name.split('|')
teacher = User.query.filter_by(first_name=fn, last_name=ln).first()
# Mismatch between the CSV file and
# profs-sigles-courriel.txt
print('Load error (teacher naming problem): ', row)
errors.append(row)
teachers.append(teacher)
with open('profs-error.log', 'w') as fd:
for e in errors:
fd.write(str(e) + '\n')
# If the 'teacher' field is empty, this is a generic hour that must not be
# inserted into the room occupancy; it is only there pro forma in the
# edt.csv file ==> another option would be to preprocess edt.csv.
# Also ignore gym hours
if teachers == [None] or row['MAT_CODE'] == 'GY' or row['SALLE'] == '': return
duration = int(row['DUREE'].split('h')[0])
# some rows in the csv file take place in several rooms...
room_names = [r.strip() for r in row['SALLE'].split(',')]
# if the room does not exist in the database yet (the query returns None),
# it is created
rooms = [Room.query.filter_by(name=room_name).first() or Room(name=room_name) for room_name in room_names]
weekday = Weekday.query.filter_by(
# .title() is needed to turn lundi ==> Lundi
name=row['JOUR'].title()
).first()
# Look up the admin user (not efficient ==> the query runs on every call)
admin_role = Role.query.filter_by(name='admin').first()
admin_user = User.query.filter_by(role=admin_role).first()
# Before 10 o'clock, times are written 08h10 in the edt.csv file but 8h10 in
# the database ==> the csv value must therefore be converted to match the
# database format (e.g. '08h10' becomes '8h10')
hours, minutes = row['H.DEBUT'].split('h')
hours = hours[1] if hours[0] == '0' else hours
start_timeslot = Timeslot.query.filter_by(start_time=hours+'h'+minutes).first()
timeslots = Timeslot.query.filter(
Timeslot.order.between(
start_timeslot.order,
start_timeslot.order + (duration-1)
)
).all()
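# e.g. DUREE '2h30' gives duration == 2, so a course starting at 8h10
# occupies the timeslots with order N and N+1 (N = start_timeslot.order)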
# An identical reservation must be created for each room
for room in rooms:
reservation = Reservation(
# start and end dates of the school year
start_date=start_date,
end_date=end_date,
reason_short=row['MAT_CODE'],
reason_details=row['MAT_LIBELLE'],
duration=duration,
student_group=row['CLASSE'],
users=teachers,
room=room,
timeslots=timeslots,
weekday=weekday,
owner=admin_user
)
db.session.add(reservation)
db.session.commit()
def load_reservations(db, start_date, end_date, filename='data/edt.csv', encoding='cp1252'):
with codecs.open(filename, "rb", encoding) as csvfile:
reader = csv.DictReader(csvfile, delimiter=';', quotechar='"')
for row in reader:
try:
insert_reservation(db, row, start_date, end_date)
except Exception as e:
print("Impossible d'insérer la réservation", row, "raison : ", str(e))
print("Erreur", e)
db.session.rollback()
if __name__ == '__main__':
db = None
load_csv(db, 'data/edt.csv')
|
{
"content_hash": "ee3afe71fa4f100c416e4b33a5ed67d4",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 143,
"avg_line_length": 34.825581395348834,
"alnum_prop": 0.5894824707846411,
"repo_name": "csud-reservation/flask-backend",
"id": "3ab6a11c6dd91c4d484fc33637539b9ddc00c8f4",
"size": "6023",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "load_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6704"
},
{
"name": "Dockerfile",
"bytes": "1752"
},
{
"name": "HTML",
"bytes": "66564"
},
{
"name": "JavaScript",
"bytes": "107280"
},
{
"name": "Makefile",
"bytes": "4853"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "76410"
},
{
"name": "Shell",
"bytes": "2910"
},
{
"name": "TSQL",
"bytes": "587"
}
],
"symlink_target": ""
}
|
from CIM14.ENTSOE.Equipment.Core.IdentifiedObject import IdentifiedObject
class BaseVoltage(IdentifiedObject):
"""Defines a nominal base voltage which is referenced in the system.For the 2010 ENTSOE IOP, the BaseVoltage association for SeriesCompensator and TransformerWinding is required.
"""
def __init__(self, nominalVoltage=0.0, ConductingEquipment=None, VoltageLevel=None, *args, **kw_args):
"""Initialises a new 'BaseVoltage' instance.
@param nominalVoltage: The PowerSystemResource's base voltage. Should be a positive value - not zero.
@param ConductingEquipment: Use association to ConductingEquipment only when there is no VoltageLevel container used.
@param VoltageLevel: The VoltageLevels having this BaseVoltage.
"""
#: The PowerSystemResource's base voltage. Should be a positive value - not zero.
self.nominalVoltage = nominalVoltage
self._ConductingEquipment = []
self.ConductingEquipment = [] if ConductingEquipment is None else ConductingEquipment
self._VoltageLevel = []
self.VoltageLevel = [] if VoltageLevel is None else VoltageLevel
super(BaseVoltage, self).__init__(*args, **kw_args)
_attrs = ["nominalVoltage"]
_attr_types = {"nominalVoltage": float}
_defaults = {"nominalVoltage": 0.0}
_enums = {}
_refs = ["ConductingEquipment", "VoltageLevel"]
_many_refs = ["ConductingEquipment", "VoltageLevel"]
def getConductingEquipment(self):
"""Use association to ConductingEquipment only when there is no VoltageLevel container used.
"""
return self._ConductingEquipment
def setConductingEquipment(self, value):
for x in self._ConductingEquipment:
x.BaseVoltage = None
for y in value:
y._BaseVoltage = self
self._ConductingEquipment = value
ConductingEquipment = property(getConductingEquipment, setConductingEquipment)
def addConductingEquipment(self, *ConductingEquipment):
for obj in ConductingEquipment:
obj.BaseVoltage = self
def removeConductingEquipment(self, *ConductingEquipment):
for obj in ConductingEquipment:
obj.BaseVoltage = None
def getVoltageLevel(self):
"""The VoltageLevels having this BaseVoltage.
"""
return self._VoltageLevel
def setVoltageLevel(self, value):
for x in self._VoltageLevel:
x.BaseVoltage = None
for y in value:
y._BaseVoltage = self
self._VoltageLevel = value
VoltageLevel = property(getVoltageLevel, setVoltageLevel)
def addVoltageLevel(self, *VoltageLevel):
for obj in VoltageLevel:
obj.BaseVoltage = self
def removeVoltageLevel(self, *VoltageLevel):
for obj in VoltageLevel:
obj.BaseVoltage = None
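# Illustrative usage sketch (hypothetical objects): the add/remove helpers
# keep both ends of the association consistent by assigning the reverse
# BaseVoltage reference; the BaseVoltage property setter on the related
# class is assumed to maintain this object's list, per the usual PyCIM
# two-way association pattern.
#
#   bv = BaseVoltage(nominalVoltage=380.0)
#   bv.addVoltageLevel(vl)       # sets vl.BaseVoltage = bv
#   bv.removeVoltageLevel(vl)    # clears the back-reference again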
|
{
"content_hash": "de3d23553aa00756b262a3b6abc59960",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 182,
"avg_line_length": 38.24,
"alnum_prop": 0.6823570432357043,
"repo_name": "rwl/PyCIM",
"id": "1a7665b2f340c5c8da0fff6b0f179d969ae58b57",
"size": "3968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM14/ENTSOE/Equipment/Core/BaseVoltage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
}
|
"""Handles all requests to the conductor service."""
from oslo.config import cfg
from nova import baserpc
from nova.conductor import manager
from nova.conductor import rpcapi
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova import utils
conductor_opts = [
cfg.BoolOpt('use_local',
default=False,
help='Perform nova-conductor operations locally'),
cfg.StrOpt('topic',
default='conductor',
help='the topic conductor nodes listen on'),
cfg.StrOpt('manager',
default='nova.conductor.manager.ConductorManager',
help='full class name for the Manager for conductor'),
cfg.IntOpt('workers',
help='Number of workers for OpenStack Conductor service')
]
conductor_group = cfg.OptGroup(name='conductor',
title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
class LocalAPI(object):
"""A local version of the conductor API that does database updates
locally instead of via RPC.
"""
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(manager.ConductorManager())
def wait_until_ready(self, context, *args, **kwargs):
# nothing to wait for in the local case.
pass
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'compute')
def instance_get(self, context, instance_id):
return self._manager.instance_get(context, instance_id)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
return self._manager.instance_get_by_uuid(context, instance_uuid,
columns_to_join)
def instance_destroy(self, context, instance):
return self._manager.instance_destroy(context, instance)
def instance_get_all_by_host(self, context, host, columns_to_join=None):
return self._manager.instance_get_all_by_host(
context, host, columns_to_join=columns_to_join)
def instance_get_all_by_host_and_node(self, context, host, node):
return self._manager.instance_get_all_by_host(context, host, node)
def instance_get_all_by_filters(self, context, filters,
sort_key='created_at',
sort_dir='desc',
columns_to_join=None):
return self._manager.instance_get_all_by_filters(context,
filters,
sort_key,
sort_dir,
columns_to_join)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self._manager.instance_get_active_by_window_joined(
context, begin, end, project_id, host)
def instance_info_cache_update(self, context, instance, values):
return self._manager.instance_info_cache_update(context,
instance,
values)
def instance_info_cache_delete(self, context, instance):
return self._manager.instance_info_cache_delete(context, instance)
def instance_type_get(self, context, instance_type_id):
return self._manager.instance_type_get(context, instance_type_id)
def instance_fault_create(self, context, values):
return self._manager.instance_fault_create(context, values)
def migration_get_in_progress_by_host_and_node(self, context, host, node):
return self._manager.migration_get_in_progress_by_host_and_node(
context, host, node)
def migration_update(self, context, migration, status):
return self._manager.migration_update(context, migration, status)
def aggregate_host_add(self, context, aggregate, host):
return self._manager.aggregate_host_add(context, aggregate, host)
def aggregate_host_delete(self, context, aggregate, host):
return self._manager.aggregate_host_delete(context, aggregate, host)
def aggregate_get(self, context, aggregate_id):
return self._manager.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(self, context, host, key=None):
return self._manager.aggregate_get_by_host(context, host, key)
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
return self._manager.aggregate_metadata_add(context, aggregate,
metadata,
set_delete)
def aggregate_metadata_delete(self, context, aggregate, key):
return self._manager.aggregate_metadata_delete(context,
aggregate,
key)
def aggregate_metadata_get_by_host(self, context, host,
key='availability_zone'):
return self._manager.aggregate_metadata_get_by_host(context,
host,
key)
def bw_usage_get(self, context, uuid, start_period, mac):
return self._manager.bw_usage_update(context, uuid, mac, start_period)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out,
last_refreshed=None, update_cells=True):
return self._manager.bw_usage_update(context, uuid, mac, start_period,
bw_in, bw_out,
last_ctr_in, last_ctr_out,
last_refreshed,
update_cells=update_cells)
def security_group_get_by_instance(self, context, instance):
return self._manager.security_group_get_by_instance(context, instance)
def security_group_rule_get_by_security_group(self, context, secgroup):
return self._manager.security_group_rule_get_by_security_group(
context, secgroup)
def provider_fw_rule_get_all(self, context):
return self._manager.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return self._manager.agent_build_get_by_triple(context, hypervisor,
os, architecture)
def block_device_mapping_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values,
create=True)
def block_device_mapping_update(self, context, bdm_id, values):
values = dict(values)
values['id'] = bdm_id
return self._manager.block_device_mapping_update_or_create(
context, values, create=False)
def block_device_mapping_update_or_create(self, context, values):
return self._manager.block_device_mapping_update_or_create(context,
values)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
return self._manager.block_device_mapping_get_all_by_instance(
context, instance, legacy)
def block_device_mapping_destroy(self, context, bdms):
return self._manager.block_device_mapping_destroy(context, bdms=bdms)
def block_device_mapping_destroy_by_instance_and_device(self, context,
instance,
device_name):
return self._manager.block_device_mapping_destroy(
context, instance=instance, device_name=device_name)
def block_device_mapping_destroy_by_instance_and_volume(self, context,
instance,
volume_id):
return self._manager.block_device_mapping_destroy(
context, instance=instance, volume_id=volume_id)
def vol_get_usage_by_time(self, context, start_time):
return self._manager.vol_get_usage_by_time(context, start_time)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
return self._manager.vol_usage_update(context, vol_id,
rd_req, rd_bytes,
wr_req, wr_bytes,
instance, last_refreshed,
update_totals)
def service_get_all(self, context):
return self._manager.service_get_all_by(context)
def service_get_all_by_topic(self, context, topic):
return self._manager.service_get_all_by(context, topic=topic)
def service_get_all_by_host(self, context, host):
return self._manager.service_get_all_by(context, host=host)
def service_get_by_host_and_topic(self, context, host, topic):
return self._manager.service_get_all_by(context, topic, host)
def service_get_by_compute_host(self, context, host):
result = self._manager.service_get_all_by(context, 'compute', host)
# FIXME(comstud): A major revision bump to 2.0 should return a
# single entry, so we should just return 'result' at that point.
return result[0]
def service_get_by_args(self, context, host, binary):
return self._manager.service_get_all_by(context, host=host,
binary=binary)
def action_event_start(self, context, values):
return self._manager.action_event_start(context, values)
def action_event_finish(self, context, values):
return self._manager.action_event_finish(context, values)
def service_create(self, context, values):
return self._manager.service_create(context, values)
def service_destroy(self, context, service_id):
return self._manager.service_destroy(context, service_id)
def compute_node_create(self, context, values):
return self._manager.compute_node_create(context, values)
def compute_node_update(self, context, node, values, prune_stats=False):
return self._manager.compute_node_update(context, node, values,
prune_stats)
def compute_node_delete(self, context, node):
return self._manager.compute_node_delete(context, node)
def service_update(self, context, service, values):
return self._manager.service_update(context, service, values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
return self._manager.task_log_get(context, task_name, begin, end,
host, state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
return self._manager.task_log_begin_task(context, task_name,
begin, end, host,
task_items, message)
def task_log_end_task(self, context, task_name, begin, end, host,
errors, message=None):
return self._manager.task_log_end_task(context, task_name,
begin, end, host,
errors, message)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
return self._manager.notify_usage_exists(
context, instance, current_period, ignore_missing_network_data,
system_metadata, extra_usage_info)
def security_groups_trigger_handler(self, context, event, *args):
return self._manager.security_groups_trigger_handler(context,
event, args)
def security_groups_trigger_members_refresh(self, context, group_ids):
return self._manager.security_groups_trigger_members_refresh(context,
group_ids)
def network_migrate_instance_start(self, context, instance, migration):
return self._manager.network_migrate_instance_start(context,
instance,
migration)
def network_migrate_instance_finish(self, context, instance, migration):
return self._manager.network_migrate_instance_finish(context,
instance,
migration)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
return self._manager.quota_commit(context, reservations,
project_id=project_id,
user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
return self._manager.quota_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
def get_ec2_ids(self, context, instance):
return self._manager.get_ec2_ids(context, instance)
def compute_unrescue(self, context, instance):
return self._manager.compute_unrescue(context, instance)
class LocalComputeTaskAPI(object):
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
self._manager = utils.ExceptionHelper(
manager.ComputeTaskManager())
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self._manager.migrate_server(
context, instance, scheduler_hint, False, False, flavor,
None, None, reservations)
def live_migrate_instance(self, context, instance, host_name,
block_migration, disk_over_commit):
scheduler_hint = {'host': host_name}
self._manager.migrate_server(
context, instance, scheduler_hint, True, False, None,
block_migration, disk_over_commit, None)
def build_instances(self, context, instances, image,
filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
legacy_bdm=True):
utils.spawn_n(self._manager.build_instances, context,
instances=instances, image=image,
filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
def unshelve_instance(self, context, instance):
utils.spawn_n(self._manager.unshelve_instance, context,
instance=instance)
class API(LocalAPI):
"""Conductor API that does updates via RPC to the ConductorManager."""
def __init__(self):
self._manager = rpcapi.ConductorAPI()
self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic)
def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
'''Wait until a conductor service is up and running.
This method calls the remote ping() method on the conductor topic until
it gets a response. It starts with a shorter timeout in the loop
(early_timeout) up to early_attempts number of tries. It then drops
back to the globally configured timeout for rpc calls for each retry.
'''
attempt = 0
timeout = early_timeout
while True:
# NOTE(danms): Try ten times with a short timeout, and then punt
# to the configured RPC timeout after that
if attempt == early_attempts:
timeout = None
attempt += 1
# NOTE(russellb): This is running during service startup. If we
# allow an exception to be raised, the service will shut down.
# This may fail the first time around if nova-conductor wasn't
# running when this service started.
try:
self.base_rpcapi.ping(context, '1.21 GigaWatts',
timeout=timeout)
break
except rpc_common.Timeout:
LOG.warning(_('Timed out waiting for nova-conductor. '
'Is it running? Or did this service start '
'before nova-conductor?'))
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid,
updates, 'conductor')
class ComputeTaskAPI(object):
"""ComputeTask API that queues up compute tasks for nova-conductor."""
def __init__(self):
self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self.conductor_compute_rpcapi.migrate_server(
context, instance, scheduler_hint, False, False, flavor,
None, None, reservations)
def live_migrate_instance(self, context, instance, host_name,
block_migration, disk_over_commit):
scheduler_hint = {'host': host_name}
self.conductor_compute_rpcapi.migrate_server(
context, instance, scheduler_hint, True, False, None,
block_migration, disk_over_commit, None)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
self.conductor_compute_rpcapi.build_instances(context,
instances=instances, image=image,
filter_properties=filter_properties,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
def unshelve_instance(self, context, instance):
self.conductor_compute_rpcapi.unshelve_instance(context,
instance=instance)
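# Hypothetical caller sketch (not part of this module): nova services
# typically choose between the in-process and RPC conductor APIs based on
# the CONF.conductor.use_local option registered above.
def _select_conductor_api():
    """Return a conductor API implementation matching the configuration."""
    if CONF.conductor.use_local:
        return LocalAPI()  # direct, in-process database access
    return API()  # RPC calls to the nova-conductor service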
|
{
"content_hash": "710a7db3dccbbe88d51b3954f124410d",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 79,
"avg_line_length": 46.890160183066364,
"alnum_prop": 0.5750817432043336,
"repo_name": "citrix-openstack-build/nova",
"id": "eea5886a470e87755751b894c4c77f3bd0e9efd3",
"size": "21096",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/conductor/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13194052"
},
{
"name": "Shell",
"bytes": "17194"
}
],
"symlink_target": ""
}
|
"""Some handy interfaces to the ADB :shell service.
The :shell service is pretty straightforward, you send 'shell:command' and
the device runs /bin/sh -c 'command'. The ADB daemon on the device sets up a
PTY, similar to what ssh would do, to provide interactive terminal features.
This makes things difficult if you're trying to pipe binary data through a
remote command; however, we provide some facilities for 'raw' commands, where
we first execute an ioctl to turn off things like character translation and
local echo, providing a more sane programmatic interface.
The ShellService also provides some handy methods for running commands
asynchronously, either by returning a handle, or using a with: context.
Note that this service differs from the FilesyncService in that streams opened
to the :shell service are closed immediately after the command completes;
streams opened to the :sync service remain open for multiple sync requests,
until they are closed explicitly. This means there's no point in keeping a
stream around in ShellService; we keep an AdbConnection around instead.
Some examples of how to use this service:
adb_cnxn = adb_protocol.AdbConnection.Connect(my_transport)
shell = shell_service.ShellService(adb_cnxn)
# Run a simple command.
output = shell.Command('echo foo')
# output == 'foo\r\n'
# Run a command that outputs binary data, like recording a minute of audio.
output = shell.RawCommand('arecord -Dhw:CARD=0,DEV=0 -c 2 -d 60')
# Run a command in the background, do some other stuff, then read the
# command's output, waiting on it to complete.
cmd = shell.AsyncCommand('echo foo; sleep 10')
bar = shell.Command('echo bar')
foo = cmd.Wait()
baz = shell.Command('echo baz')
# A version using a with context to do the same thing:
with shell.AsyncCommand('echo foo; sleep 10') as c:
bar = shell.Command('echo bar')
foo = c.Wait()
baz = shell.Command('echo baz')
# Run a command in the background while we do some other stuff, save the
# output to a StringIO buffer so we can access it later. Use a context to
# automatically wait for the asynchronous command to finish.
output = cStringIO.StringIO()
with shell.AsyncRawCommand(
'arecord -Dhw:CARD=0,DEV=0 -c 2 -d 60', stdout=output):
# Do some stuff, play some sounds on some fixture speakers, for example.
pass
# Execution won't get here until the arecord command completes, and
# output.getvalue() now contains the output of the arecord command.
"""
import cStringIO
import threading
import time
from openhtf.plugs.usb import adb_protocol
from openhtf.plugs.usb import usb_exceptions
from openhtf.util import timeouts
class AsyncCommandHandle(object):
"""This class is used for interacting with an asynchronous command.
This handle is used to close a command or to wait on it to complete. Data is
read from stdin and written to the command's stdin, and output from the
command is written to stdout. If stdin is None, no input is written to the
command. If stdout is None, the output from the command is buffered
internally, and will be returned from a call to Wait() - see the Wait() method
for details.
You can tell if a stream was closed locally by checking the
'force_closed_or_timeout' attribute. If a command completes instead of being
closed by a call to Close (or a timeout), then 'force_closed_or_timeout' will
be False, otherwise it will be True.
"""
def __init__(self, stream, stdin, stdout, timeout, is_raw): #pylint: disable=too-many-arguments
"""Create a handle to use for interfacing with an AsyncCommand.
Args:
stream: Stream to use for communicating with the running command.
stdin: File-like object to use for reading stdin for the command, can be
None, in which case no input is sent to the command.
stdout: File-like object to use for writing output of the command to, can
be None, in which case output can be obtained by calling Wait().
timeout: timeouts.PolledTimeout to use for the command.
is_raw: If True, we'll do reads from stdin, otherwise we do readlines
instead to play nicer with potential interactive uses (read doesn't
return until EOF, but interactively you want to send each line and
then see the response). stdout is treated the same in either case,
read is used - AdbStreams don't support readline.
"""
self.stream = stream
self.stdin = stdin
self.stdout = stdout or cStringIO.StringIO()
self.force_closed_or_timeout = False
self.reader_thread = threading.Thread(target=self._ReaderThread)
self.reader_thread.daemon = True
self.reader_thread.start()
if stdin:
self.writer_thread = threading.Thread(target=self._WriterThread,
args=(is_raw,))
self.writer_thread.daemon = True
self.writer_thread.start()
# Close ourselves after timeout expires, ignored if timeout won't expire.
timeouts.ExecuteAfterDelay(timeout, self.Close)
def _WriterThread(self, is_raw):
"""Write as long as the stream is not closed."""
# If we're not in raw mode, do line-buffered reads to play nicer with
# potential interactive uses, max of MAX_ADB_DATA, since anything we write
# to the stream will get packetized to that size anyway.
#
# Loop until our stream gets closed, which will cause one of these
# operations to raise. Since we're in a separate thread, it'll just get
# ignored, which is what we want.
reader = self.stdin.read if is_raw else self.stdin.readline
while not self.stream.IsClosed():
self.stream.Write(reader(adb_protocol.MAX_ADB_DATA))
def _ReaderThread(self):
"""Read until the stream is closed."""
for data in self.stream.ReadUntilClose():
if self.stdout is not None:
self.stdout.write(data)
def __enter__(self): # pylint: disable=invalid-name
return self
def __exit__(self, exc_type, exc_value, exc_tb): # pylint: disable=invalid-name
if exc_type:
return False
self.Wait()
return True
def Close(self):
"""Close this handle immediately - you may lose output."""
self.force_closed_or_timeout = True
self.stream.Close()
def IsDone(self):
"""Return True if this command has completed."""
return self.stream.IsClosed()
def Wait(self, timeout_ms=None):
"""Block until this command has completed.
Args:
timeout_ms: Timeout, in milliseconds, to wait.
Returns:
Output of the command if it completed and self.stdout is a StringIO
object or was passed in as None. Returns True if the command completed but
stdout was provided (and was not a StringIO object). Returns None if the
timeout expired before the command completed. Be careful to check the
return value explicitly for None, as the output may be ''.
"""
closed = timeouts.LoopUntilTimeoutOrTrue(
timeouts.PolledTimeout.FromMillis(timeout_ms),
self.stream.IsClosed, .1)
if closed:
if hasattr(self.stdout, 'getvalue'):
return self.stdout.getvalue()
return True
return None
class ShellService(object):
"""Class providing a high-level interface to ADB's :shell service.
This class provides synchronous and asynchronous commands, and a variety of
ways for getting input into and out of them.
"""
def __init__(self, adb_connection):
self.adb_connection = adb_connection
@staticmethod
def _ToRawCommand(command):
"""Convert the command to a raw signal."""
# Android doesn't have stty, so we manually do the ioctl (yuck). This ioctl
# is a TCSETA (0x5403) with the following flags set:
# Control bits:
# B38400 (set baud rate)
# CS8 (8-bit bytes)
# CREAD (Enable input from terminal)
# Input, Output, Local bits all cleared
#
# We also update VMIN from 0x0 to 0xff so read() waits for at least one byte
# to be ready before returning (we leave the default VTIME at 0x4). Note
# that we leave the other control characters at their defaults, but they
# should be ignored since we disable them with flags and put the terminal
# into non-canonical input mode (not newline delimited).
return ('ioctl -l 23 -a 1 /proc/self/fd/0 0x5403 ' # TCSETA (0x5403)
'0 0 0 0 0 0 0 0 0xbf 0 0 0 0 0 0 0 ' # Flags
'0 0x3 0x1c 0x7f 0x15 0x4 0xff ' # Control characters
'&>/dev/null;%s' % command)
def Command(self, command, raw=False, timeout_ms=None):
"""Run the given command and return the output."""
return ''.join(self.StreamingCommand(command, raw, timeout_ms))
def StreamingCommand(self, command, raw=False, timeout_ms=None):
"""Run the given command and yield the output as we receive it."""
if raw:
command = self._ToRawCommand(command)
return self.adb_connection.StreamingCommand('shell', command, timeout_ms)
# pylint: disable=too-many-arguments
def AsyncCommand(self, command, stdin=None, stdout=None, raw=False,
timeout_ms=None):
"""Run the given command on the device asynchronously.
Input will be read from stdin, output written to stdout. ADB doesn't
distinguish between stdout and stderr on the device, so they get interleaved
into stdout here. stdin and stdout should be file-like objects, so you
could use sys.stdin and sys.stdout to emulate the 'adb shell' commandline.
Args:
command: The command to run, will be run with /bin/sh -c 'command' on
the device.
stdin: File-like object to read from to pipe to the command's stdin. Can
be None, in which case nothing will be written to the command's stdin.
stdout: File-like object to write the command's output to. Can be None,
in which case the command's output will be buffered internally, and can
be accessed via the return value of Wait().
raw: If True, run the command as per RawCommand (see above).
timeout_ms: Timeout for the command, in milliseconds.
Returns:
An AsyncCommandHandle instance that can be used to send/receive data to
and from the command or wait on the command to finish.
Raises:
AdbStreamUnavailableError: If the remote device doesn't support the
shell: service.
"""
timeout = timeouts.PolledTimeout.FromMillis(timeout_ms)
if raw:
command = self._ToRawCommand(command)
stream = self.adb_connection.OpenStream('shell:%s' % command, timeout)
if not stream:
raise usb_exceptions.AdbStreamUnavailableError(
'%s does not support service: shell', self)
if raw and stdin is not None:
# Short delay to make sure the ioctl to set raw mode happens before we do
# any writes to the stream, if we don't do this bad things happen...
time.sleep(.1)
return AsyncCommandHandle(stream, stdin, stdout, timeout, raw)
# pylint: enable=too-many-arguments
@classmethod
def UsingConnection(cls, adb_connection):
"""Factory method to match the interface of FilesyncService."""
return cls(adb_connection)
|
{
"content_hash": "954c5089237d7dddc80abc4a8d211d1c",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 98,
"avg_line_length": 42.39923954372624,
"alnum_prop": 0.7028069231459062,
"repo_name": "amyxchen/openhtf",
"id": "903c2120bed9222a4b792ec4e8743dd4e311e461",
"size": "11746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openhtf/plugs/usb/shell_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3564"
},
{
"name": "Protocol Buffer",
"bytes": "12143"
},
{
"name": "Python",
"bytes": "388070"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':
raise NotImplementedError("Setup has not been created yet.")
|
{
"content_hash": "6b31b431d5a9465c737b3a52a78a1f03",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 64,
"avg_line_length": 46,
"alnum_prop": 0.6413043478260869,
"repo_name": "fatbeau/tribe",
"id": "82d7ffc80e6d2f298728e2f7b53073c84a795b08",
"size": "115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "608"
},
{
"name": "Python",
"bytes": "28453"
}
],
"symlink_target": ""
}
|
"""
[2016-05-23] Challenge #268 [Easy] Network and Cards: Part 1, The network
https://www.reddit.com/r/dailyprogrammer/comments/4knivr/20160523_challenge_268_easy_network_and_cards/
#Description
This week we are creating a game playable over the network. This will be a 3-parter.
The first part is to set up a connection between a server and one or more clients.
The server needs to send out a heartbeat message to the clients and the clients need to respond to it.
For those who want to be prepared, we are going to deal and play cards over the network.
#Formal Inputs & Outputs
##Input description
No input for the server, but the client needs to know where the server is.
##Output description
The client needs to echo the heartbeat from the server.
#Notes/Hints
The server needs to be able to handle multiple clients in the end, so a multithreaded approach is advised.
It is advised to think of some command-like pattern, so you can send messages to the server and back.
For the server and client, just pick some random ports that you can use.
[Here](https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers) you have a list of all "reserved" ports.
For the connection, TCP connections are the easiest way to do this in most languages, but you are not limited to
that; you can use something more high-level if your language of choice supports it.
#Bonus
- Make the server broadcast its existence on the network, so clients can detect it.
- Send messages to the server and have it broadcast them to all the clients
- Let the client identify itself (username)
- Create a way to list all connected clients
- Send messages to the server and have it relay them to a requested client
These bonuses are not required, but they will make the next part a whole lot easier.
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
pass
if __name__ == "__main__":
main()
|
{
"content_hash": "c575d5aa1777a0129a86fcd79eb6d21c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 117,
"avg_line_length": 46.707317073170735,
"alnum_prop": 0.7671018276762402,
"repo_name": "DayGitH/Python-Challenges",
"id": "2f57aa3376ed4e529dc001e89aa0da1c64377be4",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20160523A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
}
|
"""Generic JAX training loop for experiments."""
import functools
import os
from typing import (Any, Callable, Dict, Optional, Sequence, Tuple)
from absl import logging
from clu import metric_writers
import flax
from flax import jax_utils
from flax import linen as nn
from flax import struct
from flax.training import checkpoints
import gin
import jax
import jax.numpy as jnp
import metrics_summary
import optimizer_config as opt_config
import training_task
import numpy as np
import tensorflow.compat.v2 as tf
PRNGKeys = training_task.PRNGKeys
TrainState = training_task.TrainState
TrainingTask = training_task.TrainingTask
StepFunction = training_task.StepFunction
Metrics = training_task.Metrics
MetricWriter = metric_writers.MetricWriter
MetricsSummary = metrics_summary.MetricsSummary
gfile = tf.io.gfile
unfreeze = flax.core.unfreeze
flatten_dict = flax.traverse_util.flatten_dict
should_run = training_task.should_run
# TODO(cstaats): Use a Protocol to specify that it must be possible to call
# the function with parameters (step: int, mode: str). This won't be feasible
# until we start using Python 3.8 or later.
StepModeCallable = Callable[..., None]
# This variable should *only* be set from register_interstep_callbacks.
_interstep_callbacks: Optional[Tuple[StepModeCallable, ...]] = None
@gin.configurable
def register_interstep_callbacks(**kwargs: StepModeCallable) -> None:
"""Populates _interstep_callbacks from gin.
This function should be called exactly ONCE and that call should happen AFTER
flag initialization (and more specifically, after gin parsing). And the caller
should NOT specify any arguments.
In gin configurations, a callback can be specified with an arbitrary name
like so:
register_interstep_callbacks.my_callback_name = @my_callback_function
Multiple callbacks can be registered without overriding each other as long as
they all have different names. Conversely, if you *want* to override a
callback, you need to give that callback the same name.
Args:
**kwargs: Specified by gin. Each argument should be a function (callable)
that can be called as my_function(step, mode), where step is an int and
mode is a str.
Raises:
ValueError: Raised on the second (and any subsequent) function call.
"""
global _interstep_callbacks
logging.info("registering functions: %s", kwargs.keys())
if _interstep_callbacks is not None:
raise ValueError("register_interstep_callbacks may only be called once.")
_interstep_callbacks = tuple(kwargs.values())
def clear_interstep_callbacks():
"""Clear all registered callbacks, so that new ones can be registered."""
global _interstep_callbacks
_interstep_callbacks = None
def run_interstep_callbacks(mode: str, step: int, sub_step: int = 0):
"""Run the registered callbacks.
Args:
mode: mode of the task to execute callbacks for.
step: training step number.
sub_step: For tasks that execute multiple iterations within a step.
E.g. a test cycle that runs multiple testing steps.
"""
for func in _interstep_callbacks:
func(sub_step or step, mode)
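# A small usage sketch (hypothetical, bypassing gin): the registration
# function can also be called directly with keyword arguments, e.g. in a
# test, then cleared so a later test can register its own callbacks.
#
#   def my_callback(step, mode):
#     logging.info("callback: step=%d mode=%s", step, mode)
#
#   register_interstep_callbacks(my_callback_name=my_callback)
#   run_interstep_callbacks("train", step=0)
#   clear_interstep_callbacks()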
@gin.configurable
@struct.dataclass
class Trainer:
"""Implements a JAX training loop."""
# Returns a Flax module for the model.
# Takes a single argument mode, which can be "test", "train", or "generate".
model_definition: Any = gin.REQUIRED
# Iterator over training data.
get_training_dataset_iterator: Callable[[], Any] = gin.REQUIRED
# Iterator over test data.
get_test_dataset_iterator: Optional[Callable[[], Any]] = None
workdir: str = "" # Working directory for checkpoints.
load_dir: str = "" # Optional directory to load model.
num_steps: int = 100000 # Number of steps to train.
status_every_steps: int = 10 # Log step number every N steps.
log_every_steps: int = 100 # Log scalar data every N steps.
test_every_steps: int = 10 # Test model every N steps.
num_test_steps: int = 1 # Number of iterations to test.
generate_every_steps: int = 1000 # Generate examples every N steps.
print_input_every_steps: int = 1000 # Print example data every N steps.
save_checkpoints: bool = True # Save training checkpoints
checkpoint_every_steps: int = 5000 # Save checkpoints every N steps.
restore_checkpoints: bool = True # Restore from previous checkpoint.
restore_state_variables: bool = True # Restore TrainState.state from chkpt.
# Record metrics for "train", "test", etc. in separate directories.
# Otherwise they will be saved with separate prefixes.
use_separate_metric_directories: bool = True
# Optimizer options.
optimizer_factory: opt_config.OptimizerConfig = gin.REQUIRED
learning_rate_schedule: Callable[[jnp.ndarray, int], jnp.ndarray] = (
opt_config.lr_cosine_decay)
# Maximum steps for the LR schedule. Zero means use num_steps.
max_scheduled_steps: int = 0
warmup_steps: int = 1000 # Number of warmup steps.
learning_rate_multiplier: float = 1.0 # Used to scale the learning rate.
random_seed: int = 42 # Initial random seed.
# Names of random number generators used by the model.
rng_key_names: Optional[Sequence[str]] = ("dropout",)
# Debug options.
replicate_mode: bool = True # pmap over multiple replicas.
trace_debug_mode: bool = False # Run in eager mode to trace results.
print_variables: bool = False # Dump parameters/variables to stdout.
# Function to compute additional summary information.
# Takes a MetricsSummary object and a mode string (e.g. "test") as arguments,
# returns a MetricsSummary object.
process_summaries_function: Optional[Callable[[Any, str], Any]] = None
# Function to pretty print the input for each training step.
pretty_print_input_function: Optional[Callable[[Any], Any]] = None
# Classes to use for summarizing metrics.
metrics_summary_factory: Any = metrics_summary.MetricsSummary
extra_summaries_fn: training_task.ExtraSummariesFunction = (
lambda mode, step: dict())
post_save_checkpoint_fn: Callable[[str, int], None] = lambda mode, step: None
post_load_checkpoint_fn: Callable[[str, int], None] = lambda mode, step: None
def learning_rate_schedule_fn(self, step):
"""Returns the learning rate for the given step."""
# There are four components to the learning rate.
#
# The base_lrate is defined by the optimizer, and different optimizers have
# different relative rates, e.g. Adafactor requires a higher LR than Adam.
# By default, the base_lrate is 1.0 for Adafactor.
#
# The base_lrate is then multiplied by the learning rate decay schedule,
# which typically starts at a maximum value and decays over time.
# Each schedule can be individually configured, e.g. from 0.01 to 0.001.
# The max_scheduled_steps parameter controls the decay rate of the schedule.
#
# Finally, the LR is scaled by the learning_rate_multiplier, which provides
# an easy way to scale the LR for hyperparameter tuning in a way that is
# independent of the choice of schedule or optimizer. The default is 1.0.
#
# During the warmup period, the learning rate ramps up linearly from zero.
step = jnp.asarray(step, dtype=jnp.float32)
if self.max_scheduled_steps == 0:
max_steps = self.num_steps
else:
max_steps = self.max_scheduled_steps
base_lrate = float(self.optimizer_factory.learning_rate)
lr_multiplier = float(self.learning_rate_multiplier)
# Linear increase in learning rate up to warmup_steps.
warmup_steps = float(self.warmup_steps)
lr_warmup_ramp = jnp.minimum(step, warmup_steps) / warmup_steps
# Hold step at a constant value during the warmup period.
# Required for some schedules, like rsqrt_decay.
step = jnp.maximum(step, warmup_steps)
# Get the scheduled learning rate.
lrate = self.learning_rate_schedule(step, max_steps)
# Multiply lrate by the base, warmup and multiplier factors.
lrate = lrate * base_lrate * lr_warmup_ramp * lr_multiplier
return jnp.asarray(lrate, dtype=jnp.float32)
def _init_rngs(self, rngs: PRNGKeys, step: int) -> PRNGKeys:
# Get a new random number generator for each step
rngs = jax.random.fold_in(rngs, step)
rngs = jax.random.split(rngs, len(self.rng_key_names))
rngs = {key: rngs[i] for i, key in enumerate(self.rng_key_names)}
return rngs
def train_step(self, model: nn.Module, tstate: TrainState, x: Any,
rngs: PRNGKeys) -> Tuple[TrainState, Metrics]:
"""Perform a training step, pmapped over multiple devices.
Args:
model: The model to use for the step function.
tstate: Values for state variables, and the optimizer.
x: A batch of inputs to train on.
rngs: PRNGKey (possibly replicated).
Returns:
Tuple of (new_tstate, metrics: dictionary of scalar values)
"""
mutable_keys = [k for (k, _) in tstate.state.items()]
step = tstate.optimizer.state.step
rngs = self._init_rngs(rngs, step)
# Refactor the model as a loss function from trainable params to loss, so
# that we can differentiate with jax and get {d}loss/{d}params.
# Inputs and non-trainable params are bound within the closure.
# model:: x, { state_params } -> (loss, metrics), { new_state_params }
# loss_fn:: params -> (loss, (metrics, new_state))
def loss_fn(params):
"""Loss function."""
(loss, mets), nstate = model.apply({"params": params, **tstate.state},
x,
rngs=rngs,
mutable=mutable_keys)
return loss, (mets, nstate)
# grad_fn:: params -> ((loss, (aux, nstate)), param_gradients)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
# Run forward and backward pass.
(loss, (metrics, new_state)), param_grads = grad_fn(tstate.optimizer.target)
del loss # loss is only recorded if it is part of the metrics
if self.replicate_mode:
param_grads = jax.lax.pmean(param_grads, axis_name="batch")
lrate = self.learning_rate_schedule_fn(step)
new_optimizer = tstate.optimizer.apply_gradient(
param_grads, learning_rate=lrate)
# Metrics are summary values that will be logged.
if self.replicate_mode:
# Merge metrics (take mean/sum etc.) over replicas on-device.
summary_class = self.metrics_summary_factory
metrics = summary_class.merge_replicated_metrics(
metrics, model.metrics_summary_operations(aggregate_over="devices"))
metrics["learning_rate"] = lrate
return (TrainState(new_optimizer, new_state), metrics)
def other_step(self, model: nn.Module, tstate: TrainState, x: Any,
rngs: PRNGKeys) -> Tuple[TrainState, Metrics]:
"""Perform a test or generate step, pmapped over multiple devices.
Args:
model: The model to use for the step function.
tstate: Values for state variables, and the optimizer.
x: A batch of inputs to train on.
rngs: PRNGKey (possibly replicated).
Returns:
Tuple of (new_tstate, metrics: dictionary of scalar values)
"""
mutable_keys = [k for (k, _) in tstate.state.items()]
step = tstate.optimizer.state.step
rngs = self._init_rngs(rngs, step)
params = tstate.optimizer.target
(loss, metrics), new_state = model.apply({"params": params, **tstate.state},
x,
rngs=rngs,
mutable=mutable_keys)
del loss # loss is only recorded if it is part of the metrics
# Metrics are summary values that will be logged.
if self.replicate_mode:
# Merge metrics (take mean/sum etc.) over replicas on-device.
summary_class = self.metrics_summary_factory
metrics = summary_class.merge_replicated_metrics(
metrics, model.metrics_summary_operations(aggregate_over="devices"))
return (TrainState(tstate.optimizer, new_state), metrics)
def initialize_model(self) -> Tuple[TrainState, int, nn.Module, PRNGKeys]:
"""Initialize the model and/or load it from a checkpoint.
Returns:
(tstate: TrainState, -- The parameters and state for the model.
start_step: int, -- The step number, when restoring from checkpoint.
imodel: nn.Module, -- A model object (created with mode "init").
rngs: PRNGkeys) -- Initial random numbers.
"""
# Set up random number generators.
# ---------------------------------
logging.info("==== Training loop: initializing model ====")
logging.info("Process %d of %d", jax.process_index(), jax.process_count())
logging.info("Local device count = %d", jax.local_device_count())
logging.info("Number of replicas = %d",
jax.process_count() * jax.local_device_count())
logging.info("Using random number seed %d", self.random_seed)
prng = jax.random.PRNGKey(self.random_seed)
prng, init_rng = jax.random.split(prng)
# Grab rngs, which provide different random numbers for each replica.
if self.replicate_mode:
prngs = jax.random.split(prng, jax.local_device_count())
else:
prngs = prng
del prng
# Create a dictionary of prng keys for initialization.
rng_key_names_init = list(self.rng_key_names) + ["params"]
init_rngs = jax.random.split(init_rng, len(rng_key_names_init))
init_rngs = {key: init_rngs[i] for i, key in enumerate(rng_key_names_init)}
del init_rng
# Build Model
# -------------------------------------------------------------------------
logging.info("Initializing the model.")
# Create a model, which will be used to initialize trainable parameters.
imodel = self.model_definition(mode="init")
# The init function will lazily initialize the model, given a fake input.
# It returns initialized variables, without doing a fwd pass.
model_init_fn = jax.jit(imodel.init)
variables = model_init_fn(init_rngs, imodel.get_fake_input())
# Split variables into trainable and non-trainable sets.
mstate, params = variables.pop("params")
del variables # Delete to avoid wasting resources.
# Create an optimizer for params.
optimizer_def = self.optimizer_factory.create_optimizer_def()
optimizer = optimizer_def.create(params)
# tstate holds the full training state of the model.
tstate = TrainState(optimizer, mstate)
if self.print_variables:
logging.info("params = %s", tstate.optimizer.target)
logging.info("state = %s", tstate.state)
# Load a pre-trained model or restore it from checkpoint.
if self.workdir or self.load_dir:
restore_checkpoints = self.restore_checkpoints
else:
restore_checkpoints = False
start_step = 0
if restore_checkpoints:
tstate = self.restore_checkpoint(tstate)
start_step = int(tstate.optimizer.state.step)
# Log info on trainable parameters (before replicating them).
self._write_parameter_info(tstate)
# raise ValueError("That's all folks!")
# Replicate the training state across local devices.
if self.replicate_mode:
tstate = jax_utils.replicate(tstate)
return (tstate, start_step, imodel, prngs)
def restore_checkpoint(self, train_state: TrainState) -> TrainState:
"""Load a pre-trained model or restore it from a checkpoint."""
# Figure out if we have an existing checkpoint.
if not self.workdir:
logging.info("No working directory specified.")
existing_checkpoint = False
elif not gfile.exists(self.workdir):
logging.info("No existing checkpoint directory %s", self.workdir)
existing_checkpoint = False
elif not gfile.isdir(self.workdir):
raise ValueError(f"workdir {self.workdir} must be a directory.")
else:
ckpath = checkpoints.latest_checkpoint(self.workdir, "checkpoint_")
if ckpath:
logging.info("Found existing checkpoint in %s", self.workdir)
existing_checkpoint = True
else:
logging.info("No existing checkpoint in %s", self.workdir)
existing_checkpoint = False
# If any checkpoints exist in workdir, then use those first.
# This will ensure that the task will restore properly if it's preempted.
if existing_checkpoint:
logging.info("Restoring model from last checkpoint %s:", self.workdir)
load_dir = self.workdir
elif self.load_dir:
logging.info("Loading pre-trained model from %s:", self.load_dir)
load_dir = self.load_dir
else:
logging.warning("Unable to load model.")
return train_state
loaded_train_state = checkpoints.restore_checkpoint(load_dir, train_state)
step = int(loaded_train_state.optimizer.state.step)
self.post_load_checkpoint_fn(load_dir, step)
if self.restore_state_variables:
# Restore complete state.
logging.info("Restoring all variables and state.")
train_state = loaded_train_state
del loaded_train_state
else:
# Restore trainable variables, but not other state.
logging.info("Only restoring trainable parameters.")
train_state = TrainState(loaded_train_state.optimizer, train_state.state)
del loaded_train_state
return train_state
def save_checkpoint(self, tstate: TrainState, step: int,
param_summary: Optional[MetricsSummary]):
"""Save a checkpoint with the model state.
Args:
tstate: The training state.
step: The current step number.
param_summary: Optional metrics summary to write parameter statistics.
"""
logging.info("Saving checkpoint in directory %s", self.workdir)
if self.replicate_mode:
save_state = jax_utils.unreplicate(tstate)
else:
save_state = tstate
checkpoints.save_checkpoint(self.workdir, save_state, step)
# While we're at it, record distributions of trainable parameters.
if param_summary is not None:
logging.info("Recording parameter distributions.")
params_dict = jax.device_get(
_flatten_dict_string_keys(save_state.optimizer.target))
param_distribs = self._compute_parameter_distributions(params_dict)
param_summary.add(param_distribs)
def create_training_task(self, mode: str, imodel: nn.Module, prngs: PRNGKeys,
writers: Dict[str, MetricWriter]) -> TrainingTask:
"""Create a new TrainingTask for the given mode.
Args:
mode: The mode for the task, e.g. "train", "test", "generate".
imodel: The model object from initialize_model.
prngs: The PRNGKeys from initialize_model.
writers: A dictionary of summary writers.
Returns:
A TrainingTask object.
"""
logging.info("Training loop: creating task for mode %s", mode)
if self.use_separate_metric_directories:
prefix = ""
else:
prefix = mode
if mode == "train":
ds = self.get_training_dataset_iterator
elif mode == "test":
ds = self.get_test_dataset_iterator
else:
ds = None
# We summarize metrics over multiple training steps.
# These types control how the summary is computed.
metric_summary_ops = {
"step_time": "mean",
"learning_rate": "last",
**imodel.metrics_summary_operations(aggregate_over="steps")
}
summary = self.metrics_summary_factory(metric_summary_ops)
extra_summary = self.metrics_summary_factory({})
summary_writer = self._get_summary_writer(mode, writers)
return TrainingTask(
mode=mode,
dataset=ds,
step_function=self._compile_step_function(mode),
prng_keys=prngs,
summary=summary,
extra_summary=extra_summary,
summary_writer=summary_writer,
summary_prefix=prefix,
# --- options ---
replicate_mode=self.replicate_mode,
print_input_every_steps=self.print_input_every_steps,
pretty_print_input_function=self.pretty_print_input_function,
process_summaries_function=self.process_summaries_function,
extra_summaries_function=self.extra_summaries_fn)
def train(self):
"""Runs the training and evaluation loop."""
# The master process saves checkpoints and summaries to disk.
is_master_process = jax.process_index() == 0
if self.workdir:
save_checkpoints = self.save_checkpoints
else:
save_checkpoints = False
# --- Create and initialize the model. ---
(tstate, start_step, imodel, prngs) = self.initialize_model()
# Log experiment hyper-parameters.
writers = {}
train_writer = self._get_summary_writer("train", writers)
if start_step == 0:
self._write_config(train_writer)
# Additional summary objects.
param_summary = self.metrics_summary_factory({}) # Parameter statistics.
# --- Create task objects for test, train, and generate. ---
tasks = {}
train_task = self.create_training_task("train", imodel, prngs, writers)
tasks["train"] = train_task
if (self.get_test_dataset_iterator is not None and
self.test_every_steps != 0):
test_task = self.create_training_task("test", imodel, prngs, writers)
tasks["test"] = test_task
if self.generate_every_steps != 0:
gen_task = self.create_training_task("generate", imodel, prngs,
writers)
tasks["generate"] = gen_task
# Register any additional actions.
register_interstep_callbacks()
# Main Training Loop
# --------------------------------------------------------------------------
logging.info("==== Training loop: starting main loop ====")
with metric_writers.ensure_flushes(*writers.values()):
for step in range(start_step, self.num_steps):
# Log status every so often to monitor progress.
if should_run(step, self.status_every_steps):
logging.info("Step: %d", step)
# Train.
train_x = train_task.get_next_input()
(tstate, _) = train_task.run_step(tstate, train_x, step)
run_interstep_callbacks("train", step)
del train_x
# Test.
if should_run(step, self.test_every_steps):
if self.num_test_steps > 1:
logging.info("Test cycle: %d iterations.", self.num_test_steps)
for sub_step in range(0, self.num_test_steps):
test_x = test_task.get_next_input()
# TODO(delesley): This is an ugly hack to run generate steps.
# Run a generate step using test data.
# Generate is run just *before* the last test iteration.
if ((sub_step == self.num_test_steps - 1) and
should_run(step, self.generate_every_steps)):
logging.info("Generate cycle.")
(tstate, _) = gen_task.run_step(tstate, test_x, step)
run_interstep_callbacks("generate", step)
(tstate, _) = test_task.run_step(tstate, test_x, step,
sub_step=sub_step)
run_interstep_callbacks("test", step, sub_step)
del test_x
# --- Save checkpoints on the master host. ---
is_last_step = (step == self.num_steps - 1)
checkpoint_current_step = (
save_checkpoints and
(should_run(step, self.checkpoint_every_steps) or is_last_step))
if checkpoint_current_step:
if is_master_process:
self.save_checkpoint(tstate, step, param_summary)
self.post_save_checkpoint_fn(self.workdir, step)
# --- Flush summaries to disk. ---
if should_run(step, self.log_every_steps):
for tsk in tasks.values():
tsk.flush(step)
param_summary.write(train_writer, step, prefix="params")
logging.info("Training Finished.")
if self.replicate_mode:
tstate = jax_utils.unreplicate(tstate)
if self.print_variables:
logging.info("params = %s", tstate.optimizer.target)
logging.info("state = %s", tstate.state)
def _compile_step_function(self, mode: str) -> StepFunction:
"""Compile a step function (training or test)."""
# Create a model object, and a step function that is a closure over the
# object. Flax modules are supposed to be "stateless", in that all state
is contained in the TrainState object that is passed as an input parameter.
# However, creating the model object may involve allocating expensive
# data structures, or launching processes, and should only be done once.
model = self.model_definition(mode=mode)
if mode == "train":
step_fn = functools.partial(self.train_step, model)
else:
step_fn = functools.partial(self.other_step, model)
if self.replicate_mode:
assert not self.trace_debug_mode
logging.info("Compiling mode %s with pmap.", mode)
p_fn = jax.pmap(step_fn, donate_argnums=(0,), axis_name="batch")
elif self.trace_debug_mode:
logging.info("Compiling mode %s with trace_debug.", mode)
p_fn = step_fn
else:
logging.info("Compiling mode %s with jit.", mode)
p_fn = jax.jit(step_fn, donate_argnums=(0,))
return p_fn
def _get_summary_writer(self, mode: str,
writers: Dict[str, MetricWriter]) -> MetricWriter:
"""Create a summary writer for the given mode.
Args:
mode: the mode for the summaries, e.g. "test", "train"
writers: a dictionary which caches previously-created writers.
Returns:
A writer for the given mode.
"""
if self.use_separate_metric_directories:
# Create a separate writer & directory for each mode.
w_mode = mode
summary_dir = os.path.join(self.workdir, mode)
else:
# Create a single default writer for all modes.
w_mode = "train"
summary_dir = self.workdir
if w_mode in writers:
# Return previously created and cached writer.
logging.info("Returning cached summary writer (%s) for mode %s",
w_mode, mode)
return writers[w_mode]
if not self.workdir:
# No working directory, so log only.
logging.info("Creating logging writer (%s) for mode %s", w_mode, mode)
writer = metric_writers.LoggingWriter()
else:
# Create a new writer for workdir.
# Only the master will actually write summaries to workdir.
logging.info("Creating summary writer (%s) for mode %s in directory %s",
w_mode, mode, summary_dir)
is_master = jax.process_index() == 0
gfile.makedirs(summary_dir)
writer = metric_writers.create_default_writer(summary_dir,
just_logging=not is_master)
writers[w_mode] = writer
return writer
def _write_config(self, writer):
"""Write the configuration file to the working directory."""
is_master = jax.process_index() == 0
config_str = gin.operative_config_str()
logging.info("Gin config: \n%s", config_str)
# Write configuration to workdir.
if is_master and self.workdir:
config_file_name = os.path.join(self.workdir, "config.gin")
with gfile.GFile(config_file_name, "w") as f:
f.write(config_str)
# Write config string text to tensorboard.
writer.write_texts(0, {"config": gin.markdown(config_str)})
def _write_parameter_info(self, tstate: TrainState):
"""Write information on state and trainable parameters to the log."""
# Write information on parameters to log file.
params_dict = _flatten_dict_string_keys(tstate.optimizer.target)
total_nparams = 0
for (k, v) in params_dict.items():
nparams = np.prod(v.shape)
total_nparams += nparams
logging.info("parameter: %s, shape %s, size %d", k, v.shape, nparams)
logging.info("Total parameters: %d", total_nparams)
# Write information on state variables to log file.
state_dict = _flatten_dict_string_keys(tstate.state)
state_size = 0
total_state = 0
for (k, v) in state_dict.items():
if hasattr(v, "shape"):
state_size = np.prod(v.shape)
total_state += state_size
logging.info("state: %s, shape %s, size %d", k, v.shape, state_size)
else:
# Some other stuff may be stored in the state.
logging.info("state: %s [unknown]", k)
logging.info("Total state size: %d", total_state)
def _compute_parameter_distributions(self, params_dict):
"""Compute info on distributions of parameters."""
scalar_params_dict = {}
for (k, v) in params_dict.items():
# Convert from bfloat16, which crashes when serializing a NaN.
v = np.asarray(v, dtype=jnp.float32)
scalar_params_dict[k + "_mean"] = np.mean(v)
scalar_params_dict[k + "_stddev"] = np.std(v)
# scalar_params_dict[k + "_min"] = np.min(v)
# scalar_params_dict[k + "_max"] = np.max(v)
return scalar_params_dict
def _flatten_dict_string_keys(params):
"""Flattens a nested dictionary to have string keys and '/' separators."""
return {"/".join(k): v for k, v in flatten_dict(unfreeze(params)).items()}
|
{
"content_hash": "e692abff96eb8cb81c8411aef7135803",
"timestamp": "",
"source": "github",
"line_count": 743,
"max_line_length": 80,
"avg_line_length": 39.398384925975776,
"alnum_prop": 0.6583541147132169,
"repo_name": "google-research/meliad",
"id": "6293890c7ea427066b3dc5fe48435726f12630b8",
"size": "29845",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "training_loop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "400462"
}
],
"symlink_target": ""
}
|
from climb.exceptions import UnknownCommand
def command(function):
function.command = True
return function
def completers(*compl):
def wrapper(function):
function.completers = compl
return function
return wrapper
class Commands(object):
def __init__(self, cli):
self._cli = cli
self._commands = {}
def execute(self, name, *args, **kwargs):
if hasattr(self, name):
method = getattr(self, name)
if getattr(method, 'command', None):
return method(*args, **kwargs)
raise UnknownCommand("There is no action for command {}".format(command))
def get_completer(self, name, position):
if hasattr(self, name):
method = getattr(self, name)
compl = getattr(method, 'completers', [])
if compl:
pos = position-1
if len(compl) > pos:
return compl[pos]
else:
return compl[0]
raise UnknownCommand("No completer for command {}".format(command))
@command
def help(self, parser, all_commands, subject):
if subject:
subparsers = [cmd for cmd in all_commands
if cmd.name == subject]
if subparsers:
parser = subparsers[0].parser
return parser.print_help()
@command
def exit(self):
self._cli.set_running(False)
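# Editor's sketch (not part of the original file): a minimal Commands subclass
# using the decorators above; `cli` is whatever object exit() drives via
# set_running(), and 'name_completer' is an illustrative placeholder.
class ExampleCommands(Commands):
    @command
    @completers('name_completer')
    def greet(self, name):
        # Invoked via: ExampleCommands(cli).execute('greet', 'world')
        return "Hello, {}".format(name)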
|
{
"content_hash": "43f39b6bf38e2c1b2868edd971201bef",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 81,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.5525956284153005,
"repo_name": "m110/climb",
"id": "6a17b32e8bdc24aad39624654fb5370495e8807a",
"size": "1464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "climb/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14635"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('buggy', '0004_bug_fulltext'),
]
operations = [
migrations.RunSQL(
"""
CREATE INDEX bug_state_assigned_to_index ON buggy_bug (assigned_to_id) WHERE (state != 'closed');
""",
"""
DROP INDEX bug_state_assigned_to_index;
"""
)
]
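# Editor's note (sketch, assuming the Bug model fields referenced in the SQL
# above): the partial index only covers rows with state != 'closed', so it
# speeds up queries such as:
#   Bug.objects.exclude(state='closed').filter(assigned_to=user)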
|
{
"content_hash": "572fadecdf31300d1a053ed7e6d3bd02",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 109,
"avg_line_length": 22.523809523809526,
"alnum_prop": 0.5369978858350951,
"repo_name": "fusionbox/buggy",
"id": "4a16baec4d5caea2c0aad5fa922768d468550bdc",
"size": "544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buggy/migrations/0005_add_bug_index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "35980"
},
{
"name": "HTML",
"bytes": "16730"
},
{
"name": "JavaScript",
"bytes": "31881"
},
{
"name": "Makefile",
"bytes": "31"
},
{
"name": "Python",
"bytes": "74163"
}
],
"symlink_target": ""
}
|
from flask import Flask, url_for, render_template, request
from flask.ext.admin import Admin, form
from flask.ext.admin.contrib.sqla import ModelView
import datetime
import io
import json
import logging
import os
from PIL import Image  # assumed: the Image.open()/save() calls below are PIL
import utils  # assumed: local helper module providing get_user_attributes()
from models import *
from flask.ext.admin.contrib import fileadmin
from flask_wtf import Form
from wtforms import StringField
from jinja2 import Markup
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/donation")
def donation():
return render_template("donation.html")
@app.route("/donationDetails")
def donationDetails():
email = request.form.get("email")
phone = request.form.get("phone")
details = [{"name":"Rohan","amount":"1000","status":"Operated"}]
#details = json.loads(details)
#TODO get the details from db and pass to donation details.html
return render_template("donationdetails.html", details=details)
@app.route('/child', methods=['GET','POST','DELETE'])
@app.route('/child/<child_id>', methods=['GET','DELETE'])
def child(child_id=None):
if request.method == 'GET':
if not child_id:
allChildren = Child.query.all()
return json.dumps(allChildren)
else:
Children = Child.query.filter_by(id=child_id)
return json.dumps(Children)
try:
if request.method == 'POST':
# Decide create/update
args = request.form
child_exists = Child.query.filter_by(name=args.get('name')).all()
if child_exists:
assert len(child_exists) == 1, logging.warn("Too many child records found in db")
oldChild = child_exists[0]
                if request.files:
                    # Upload files: read the raw bytes and decode them via PIL.
                    bytestring = request.files.get('imgFile').read()
                    img = Image.open(io.BytesIO(bytestring))
                    file_path = base_path + "images/" + datetime.datetime.utcnow().strftime("%d-%B-%Y-%H:%M")
                    img.save(file_path)
                    # Keep at most the 5 most recent image paths.
                    pic_paths = json.loads(oldChild.pic_paths)
                    pic_paths.append(file_path)
                    oldChild.pic_paths = json.dumps(pic_paths[-5:])
for k,v in args.iteritems():
if k in utils.get_user_attributes(Child):
                        setattr(oldChild, k, v)
db.session.add(oldChild)
db.session.commit()
# update
return json.dumps({'status':'success'})
else:
# create request
newChild = Child(args.get('name'), args.get('cost'), args.get('status','new'))
db.session.add(newChild)
db.session.commit()
return json.dumps({'id':newChild.id})
if request.method == 'DELETE':
if not child_id:
logging.warn("Found multiple chiled records for one name ")
return json.dumps({'status':'fail'})
else:
children = Child.query.filter_by(id=child_id).all()
assert len(children) == 1, logging.warn("Found multiple chiled records for one name ")
for child in children:
db.session.delete(child)
db.session.commit()
return json.dumps({'status':'success'})
except Exception:
db.session.rollback()
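# Editor's sketch (not in the original file): exercising the /child endpoint
# from the command line, assuming the app runs on localhost:5000.
#   curl http://localhost:5000/child                                  # list all
#   curl -X POST -F name=Asha -F cost=1000 http://localhost:5000/child
#   curl -X DELETE http://localhost:5000/child/1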
@app.route('/surgery', methods=['GET','POST','DELETE'])
@app.route('/surgery/<surg_id>', methods=['GET','DELETE'])
def SurgeryApi(surg_id=None):
if request.method == 'GET':
if not surg_id:
allSurgeries = Surgery.query.all()
return json.dumps(allSurgeries)
else:
Surgeries = Surgery.query.filter_by(id=surg_id)
return json.dumps(Surgeries)
try:
if request.method == 'POST':
args = request.form
surgery_exists = Surgery.query.filter_by(name=args.get('name')).all()
if surgery_exists:
assert len(surgery_exists) == 1, logging.warn("Too many surgery records found in db")
oldSurgery = surgery_exists[0]
for k,v in args.iteritems():
if k in utils.get_user_attributes(Surgery):
                        setattr(oldSurgery, k, v)
db.session.add(oldSurgery)
db.session.commit()
# update
return json.dumps({'status':'success'})
else:
# create request
newSurgery = Surgery(args.get('name'), args.get('cost'), args.get('status','new'))
db.session.add(newSurgery)
db.session.commit()
return json.dumps({'id':newSurgery.id})
if request.method == 'DELETE':
if not surg_id:
logging.warn("Found multiple chiled records for one name ")
return json.dumps({'status':'fail'})
else:
Surgeries = Surgery.query.filter_by(id=surg_id).all()
assert len(Surgeries) == 1, logging.warn("Found multiple surgery records for one name ")
for surg in Surgeries:
db.session.delete(surg)
db.session.commit()
return json.dumps({'status':'success'})
except Exception:
db.session.rollback()
@app.route('/transaction', methods=['GET','POST','DELETE'])
@app.route('/transaction/<trxn_id>', methods=['GET','DELETE'])
def TransactionApi(trxn_id=None):
    if request.method == 'GET':
        if not trxn_id:
            allTrxns = Trxns.query.all()
            return json.dumps(allTrxns)
        else:
            trxns = Trxns.query.filter_by(id=trxn_id)
            return json.dumps(trxns)
try:
if request.method == 'POST':
args = request.form
trxn_exists = Trxns.query.filter_by(name=args.get('name')).all()
if trxn_exists:
assert len(trxn_exists) == 1, logging.warn("Too many trxn records found in db")
oldTransaction = trxn_exists[0]
for k,v in args.iteritems():
if k in utils.get_user_attributes(Trxns):
                        setattr(oldTransaction, k, v)
db.session.add(oldTransaction)
db.session.commit()
return json.dumps({'status':'success'})
# update
else:
# create request
                newTransaction = Trxns(args.get('name'), args.get('cost'), args.get('status','new'))
db.session.add(newTransaction)
db.session.commit()
return json.dumps({'id':newTransaction.id})
if request.method == 'DELETE':
if not trxn_id:
logging.warn("Found multiple chiled records for one name ")
return json.dumps({'status':'fail'})
else:
                trxns = Trxns.query.filter_by(id=trxn_id).all()
                assert len(trxns) == 1, logging.warn("Found multiple transaction records for one id")
                for trxn in trxns:
db.session.delete(trxn)
db.session.commit()
return json.dumps({'status':'success'})
except Exception:
db.session.rollback()
@app.route('/donor', methods=['GET','POST','DELETE'])
@app.route('/donor/<donor_id>', methods=['GET','DELETE'])
def Donors(donor_id=None):
    if request.method == 'GET':
        if not donor_id:
            allDonors = Donor.query.all()
            return json.dumps(allDonors)
        else:
            donors = Donor.query.filter_by(id=donor_id)
            return json.dumps(donors)
try:
if request.method == 'POST':
args = request.form
donor_exists = Donor.query.filter_by(name=args.get('name')).all()
if donor_exists:
assert len(donor_exists) == 1, logging.warn("Too many donor records found in db")
oldDonor = donor_exists[0]
for k,v in args.iteritems():
if k in utils.get_user_attributes(Donor):
                        setattr(oldDonor, k, v)
db.session.add(oldDonor)
db.session.commit()
else:
# create request
newDonor = Donor(args.get('name'), args.get('donated_amnt'), args.get('email',None), args.get('p_type','onetime'))
db.session.add(newDonor)
db.session.commit()
# update
if request.method == 'DELETE':
if not donor_id:
logging.warn("Found multiple chiled records for one name ")
return json.dumps({'status':'fail'})
else:
                donors = Donor.query.filter_by(id=donor_id).all()
                assert len(donors) == 1, logging.warn("Found multiple donor records for one id")
                for donor in donors:
db.session.delete(donor)
db.session.commit()
return json.dumps({'status':'success'})
    except Exception as e:
logging.error(e)
db.session.rollback()
admin = Admin(app)
admin.add_view(ModelView(Donor,db.session))
admin.add_view(ModelView(Trxns,db.session))
admin.add_view(ModelView(Surgery,db.session))
admin.add_view(fileadmin.FileAdmin(base_path+ "/images", name="images"))
class UserView(ModelView):
def _list_thumbnail(view, context, model, name):
if not model.pic_paths:
return ''
return Markup('<img src="%s">' % url_for('static',
filename="images/"+form.thumbgen_filename(model.pic_paths)))
column_formatters = {
'pic_paths': _list_thumbnail
}
# Alternative way to contribute field is to override it completely.
# In this case, Flask-Admin won't attempt to merge various parameters for the field.
form_extra_fields = {
'pic_paths': form.ImageUploadField('Image',
base_path=base_path+"/images",
thumbnail_size=(100, 100, True))
}
admin.add_view(UserView(Child,db.session))
if __name__ == '__main__':
app.config.update(
DEBUG=True,
SECRET_KEY='sdfjalskdfj',
)
# Create upload directory
try:
os.mkdir(base_path)
except OSError:
pass
# Create DB
db.create_all()
# Start app
app.run(debug=True)
|
{
"content_hash": "139c74ab83fdd4e8e6f7be0c188a25c7",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 130,
"avg_line_length": 39.06716417910448,
"alnum_prop": 0.5444126074498568,
"repo_name": "ebayohblr2014/eBay-Opportunity-Hack-Blr-2014",
"id": "faae0d626dddaf2351c2a5660ee02605615b4671",
"size": "10470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sumuka/api.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1675651"
},
{
"name": "D",
"bytes": "27288"
},
{
"name": "Groovy",
"bytes": "8822"
},
{
"name": "Java",
"bytes": "4765673"
},
{
"name": "JavaScript",
"bytes": "6757918"
},
{
"name": "PHP",
"bytes": "5516633"
},
{
"name": "Perl",
"bytes": "142"
},
{
"name": "Python",
"bytes": "76491"
},
{
"name": "Shell",
"bytes": "24443"
},
{
"name": "Visual Basic",
"bytes": "155"
}
],
"symlink_target": ""
}
|
from django.contrib.postgres.fields.jsonb import JsonAdapter
from django.db.models import Lookup, Field
@Field.register_lookup
class DataLookup(Lookup):
"""
Lookup for values inside the data JSON of questionnaires. This is based on a
text search inside the JSON of the questiongroup. It is therefore not
extremely fast and also quite fuzzy!
A dictionary with the following lookup parameters is required:
"lookup_by": string, required. Either "string" or "key_value". Either
look for a string (case insensitive) or look for a specific key/value
pair (case sensitive).
"questiongroup": string, required. The keyword of the questiongroup
which is looked up.
"value": string, required. The value which is looked for.
"key": string, required when using "lookup_by": "key_value". Can
optionally be used for "lookup_by": "string" to narrow the search.
"lookup_in_list": boolean, defaults to False. By default, only the first
element of a data list is searched. If set to True, the entire data
list of the questiongroup is searched.
Use as:
Questionnaire.objects.filter(data__qs_data=lookup_params)
where lookup_params is a dict containing the lookup parameters.
"""
lookup_name = 'qs_data'
def as_postgresql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
# This fix was needed after upgrading Django (to 1.11) to access the
# dict of the rhs_param.
if len(rhs_params) == 1 and isinstance(rhs_params[0], JsonAdapter):
rhs_params = [rhs_params[0].adapted]
# Check general format of params
if len(rhs_params) != 1 or not isinstance(rhs_params[0], dict):
raise NotImplementedError('RHS params must be exactly 1 dict.')
lookup_params = rhs_params[0]
# Check required params
value = lookup_params.get('value')
if value is None:
raise Exception('Value must be provided.')
questiongroup = lookup_params.get('questiongroup')
if questiongroup is None:
raise Exception('Questiongroup must be provided.')
# Additional params
key = lookup_params.get('key')
lookup_in_list = lookup_params.get('lookup_in_list', False)
lookup_by = lookup_params.get('lookup_by')
if lookup_by == 'string':
# Lookup for simple string search.
if key is not None:
params = '%"{}":%{}%'.format(key, value)
else:
params = '%{}%'.format(value)
query_operator = 'ILIKE'
elif lookup_by == 'key_value':
# Lookup for exact key/value matches.
if key is None:
raise Exception(
'Key must be provided when using lookup_by "key_value".')
params = '%"{}": "{}"%'.format(key, value)
query_operator = 'LIKE'
else:
raise NotImplementedError(
'Unknown lookup_by "{}".'.format(lookup_by))
if lookup_in_list is True:
element_accessor = " ->> '{}'".format(questiongroup)
else:
element_accessor = " -> '{}' ->> 0".format(questiongroup)
return " ".join([lhs, element_accessor, query_operator, rhs]), [params]
|
{
"content_hash": "df1aa177c51dcd03c4fe56859554dcf0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 38.853932584269664,
"alnum_prop": 0.6093117408906883,
"repo_name": "CDE-UNIBE/qcat",
"id": "44215d7dccf2073d5a2bcf0ff2ea1f2ee3ff9372",
"size": "3458",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/questionnaire/lookups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1098"
},
{
"name": "HTML",
"bytes": "823938"
},
{
"name": "Handlebars",
"bytes": "224139"
},
{
"name": "JavaScript",
"bytes": "153067"
},
{
"name": "Python",
"bytes": "3515948"
},
{
"name": "SCSS",
"bytes": "165400"
},
{
"name": "Shell",
"bytes": "1943"
}
],
"symlink_target": ""
}
|
import sys
from setuptools import setup, find_packages
from cement.utils import version
VERSION = version.get_version()
LONG = """
Cement is an advanced CLI Application Framework for Python. Its goal is to
introduce a standard, and feature-full platform for both simple and complex
command line applications as well as support rapid development needs without
sacrificing quality.
For more information please visit the official site at:
* http://builtoncement.com/
"""
DEPS = [
### Required to build documentation
# "Sphinx >= 1.0",
### Required for testing
# "nose",
# "coverage",
]
# Python < 2.7/3.2 require argparse
if (sys.version_info[0] < 3 and sys.version_info < (2, 7)) or \
(sys.version_info[0] >= 3 and sys.version_info < (3, 2)):
DEPS.append('argparse')
setup(name='cement',
version=VERSION,
description="CLI Application Framework for Python",
long_description=LONG,
classifiers=[],
keywords='cli framework',
author='Data Folk Labs, LLC',
author_email='team@datafolklabs.com',
url='http://builtoncement.org',
license='BSD',
packages=find_packages(exclude=['ez_setup', 'tests*']),
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=DEPS,
setup_requires=[],
entry_points="""
""",
namespace_packages=[],
)
|
{
"content_hash": "a655b5e855bd4b7756262495357a66e7",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 26.901960784313726,
"alnum_prop": 0.673469387755102,
"repo_name": "rjdp/cement",
"id": "d1318e7d1b13ec5e2dda1eebcff9d8f531134bb0",
"size": "1373",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "19"
},
{
"name": "Python",
"bytes": "354081"
},
{
"name": "Shell",
"bytes": "1345"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
__version__ = "6.2.0"
|
{
"content_hash": "cfaa779524afebea890dd415ad63de24",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 39,
"avg_line_length": 31,
"alnum_prop": 0.6612903225806451,
"repo_name": "pombredanne/frappe",
"id": "2a69af1984f0914bd7f2a49d978621d9300240b3",
"size": "62",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "frappe/__version__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246475"
},
{
"name": "HTML",
"bytes": "140783"
},
{
"name": "JavaScript",
"bytes": "1043937"
},
{
"name": "Python",
"bytes": "1138246"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
import sys
from django.db import transaction, models, router, connection
from django.utils import timezone
from reversion.models import Revision, Version
from reversion.management.commands.deleterevisions import Command as RevisionCommand
class Command(RevisionCommand):
help = "Deletes revisions (by chunks) for a given app [and model]"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--chunks",
default=1000,
type=int,
help="Delete only revisions by batch of `chunks` records.",
)
parser.add_argument(
"--vacuum",
action='store_true',
default=False,
help="Run `VACUUM` on tables after deletion.",
)
parser.add_argument(
"--vacuum-full",
action='store_true',
default=False,
help="Run `VACUUM FULL` instead of `VACUUM`.",
)
def handle(self, *app_labels, **options):
verbosity = options["verbosity"]
using = options["using"]
model_db = options["model_db"]
days = options["days"]
keep = options["keep"]
chunks = options["chunks"]
vacuum_full = options["vacuum_full"]
vacuum = options["vacuum"]
# Delete revisions.
using = using or router.db_for_write(Revision)
revisions_to_delete_count = 0
revision_query = models.Q()
keep_revision_ids = set()
# By default, delete nothing.
can_delete = False
# Get all revisions for the given revision manager and model.
for model in self.get_models(options):
if verbosity >= 1:
self.stdout.write("Finding stale revisions for {name}".format(
name=model._meta.verbose_name,
))
# Find all matching revision IDs.
model_query = Version.objects.using(using).get_for_model(
model,
model_db=model_db,
)
if keep:
overflow_object_ids = list(Version.objects.using(using).get_for_model(
model,
model_db=model_db,
).order_by().values_list("object_id").annotate(
count=models.Count("object_id"),
).filter(
count__gt=keep,
).values_list("object_id", flat=True).iterator())
# Only delete overflow revisions.
model_query = model_query.filter(object_id__in=overflow_object_ids)
for object_id in overflow_object_ids:
if verbosity >= 2:
self.stdout.write("- Finding stale revisions for {name} #{object_id}".format(
name=model._meta.verbose_name,
object_id=object_id,
))
# But keep the underflow revisions.
keep_revision_ids.update(Version.objects.using(using).get_for_object_reference(
model,
object_id,
model_db=model_db,
).values_list("revision_id", flat=True)[:keep].iterator())
# Add to revision query.
revision_query |= models.Q(
pk__in=model_query.order_by().values_list("revision_id", flat=True)
)
# If we have at least one model, then we can delete.
can_delete = True
if can_delete:
revisions_to_delete = Revision.objects.using(using).filter(
revision_query,
date_created__lt=timezone.now() - timedelta(days=days),
).exclude(
pk__in=keep_revision_ids
).order_by()
else:
revisions_to_delete = Revision.objects.using(using).none()
        # Count the revisions to delete; this is used for progress output and
        # to flush the final (partial) chunk below, so compute it unconditionally.
        revisions_to_delete_count = revisions_to_delete.count()
chunked_delete_ids = []
chunks_counter = 1
for revision_id in revisions_to_delete.values_list("id", flat=True).iterator():
chunked_delete_ids.append(revision_id)
if (chunks_counter % chunks) == 0 or chunks_counter == revisions_to_delete_count:
# Wrap into a transaction because of CASCADE, post_delete signals. (e.g. `revision_revision`)
with transaction.atomic(using=using):
chunked_revisions_to_delete = Revision.objects.filter(id__in=chunked_delete_ids)
if verbosity >= 1:
progress = "\rDeleting {chunk}/{total} revisions...".format(
chunk=chunks_counter,
total=revisions_to_delete_count
)
sys.stdout.write(progress)
sys.stdout.flush()
chunked_revisions_to_delete.delete()
chunked_delete_ids = []
chunks_counter += 1
# Carriage return
print("")
if vacuum is True or vacuum_full is True:
self._do_vacuum(vacuum_full)
print("Done!")
def _do_vacuum(self, full=False):
cursor = connection.cursor()
if full:
print("Vacuuming (full) table {}...".format(Revision._meta.db_table))
cursor.execute("VACUUM FULL {}".format(Revision._meta.db_table))
print("Vacuuming (full) table {}...".format(Version._meta.db_table))
cursor.execute("VACUUM FULL {}".format(Version._meta.db_table))
else:
print("Vacuuming table {}...".format(Revision._meta.db_table))
cursor.execute("VACUUM {}".format(Revision._meta.db_table))
print("Vacuuming table {}...".format(Version._meta.db_table))
cursor.execute("VACUUM {}".format(Version._meta.db_table))
connection.commit()
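# Editor's sketch (not in the original file): a typical invocation, with the
# command name taken from this module's filename; --days and --keep come from
# the base deleterevisions command, the rest are defined in add_arguments above.
#   python manage.py delete_revisions myapp --days=30 --keep=5 --chunks=500 --vacuum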
|
{
"content_hash": "ed6e3d33fac554106fc974485e4d937b",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 109,
"avg_line_length": 39.82236842105263,
"alnum_prop": 0.5372542540888815,
"repo_name": "kobotoolbox/kobocat",
"id": "29b1267a4703d26f66c8723a2ed0423f9077a000",
"size": "6069",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onadata/apps/logger/management/commands/delete_revisions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "146326"
},
{
"name": "Dockerfile",
"bytes": "3965"
},
{
"name": "HTML",
"bytes": "136962"
},
{
"name": "JavaScript",
"bytes": "734122"
},
{
"name": "Less",
"bytes": "19821"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "1264157"
},
{
"name": "Shell",
"bytes": "9858"
}
],
"symlink_target": ""
}
|
import threading
import subprocess
import datetime
import traceback
import shutil
import psutil
import sys
import os
import numpy as np
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
import logging
import time
import colorlog
from tqdm import tqdm
import re
from operator import itemgetter
PDB_PATTERN = '[0-9][0-9a-z]{3}'  # PDB ids: the first character must be a numeral
ECOD_PATTERN = 'e[0-9][0-9a-z]{3}[0-9A-Za-z\.]{1,2}[0-9]' # ecod domain pattern
PIFACE_PATTERN = '[0-9][0-9A-Z]{3}[A-Z][A-Z]' # piface interface pattern
def split_iter(string):
return (x.group(0) for x in re.finditer(r"[A-Za-z']+", string))
class TqdmHandler(logging.StreamHandler):
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
msg = self.format(record)
tqdm.write(msg)
def get_logger(name):
logger = colorlog.getLogger(name)
logger.setLevel(logging.DEBUG)
handler = TqdmHandler()
handler.setFormatter(colorlog.ColoredFormatter(
'%(log_color)s%(name)s | %(asctime)s | %(levelname)s | %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
            'SUCCESS': 'white',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white'}, ))
logger.addHandler(handler)
return logger
def unforamt(raw_string, fmt):
tmpl = re.sub(r'\\{(\d+)\\}', r'(?P<_\1>.+)', re.escape(fmt))
matches = re.match(tmpl, raw_string)
return tuple(map(itemgetter(1), sorted(matches.groupdict().items())))
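# Editor's sketch (not in the original file): unforamt() inverts a simple
# str.format template, e.g.
#   unforamt('run_07.log', 'run_{0}.{1}')  ->  ('07', 'log')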
#########################################################################
def ispdbid(s):
return re.match(PDB_PATTERN,s)
def isecodid(s):
return re.match(ECOD_PATTERN,s)
def ispifaceid(s):
return re.match(PIFACE_PATTERN,s)
cache_opts = {
'cache.type': 'memory',
'cache.lock_dir': '/tmp/cache/lock'
}
CACHE_MANAGER = CacheManager(**parse_cache_config_options(cache_opts))
#TR = tracker.SummaryTracker()
_proc_status = '/proc/%d/status' % os.getpid()
_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
'KB': 1024.0, 'MB': 1024.0*1024.0}
def _VmB(VmKey):
'''Private.
'''
global _proc_status, _scale
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
    except (IOError, OSError):
return 0.0 # non-Linux?
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
if len(v) < 3:
return 0.0 # invalid format?
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory(since=0.0):
'''Return memory usage in bytes.
'''
return _VmB('VmSize:') - since
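# Editor's sketch (not in the original file): measuring memory growth across a
# block of code (Linux only, since it reads /proc/<pid>/status).
#   before = memory()
#   data = [0] * 10**6
#   print('grew by %d bytes' % memory(since=before))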
def handleError(err, data=None, braise=False, btrace=True):
print("Err: %s Data: %s" % (err, data))
if btrace: traceback.print_exc()
if braise: raise err
def popenAndCall(onExit, popenArgs):
"""
Runs the given args in a subprocess.Popen, and then calls the function
onExit when the subprocess completes.
onExit is a callable object, and popenArgs is a list/tuple of args that
would give to subprocess.Popen.
"""
def runInThread(onExit, popenArgs):
proc = subprocess.Popen(popenArgs)
proc.wait()
onExit()
return
thread = threading.Thread(target=runInThread, args=(onExit, popenArgs))
thread.start()
# returns immediately after the thread starts
return thread
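# Editor's sketch (not in the original file): run a command without blocking
# the caller and get a callback when it exits.
#   t = popenAndCall(lambda: print('done sleeping'), ['sleep', '1'])
#   t.join()  # optional; the callback fires on the worker thread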
def call(popenArgs, errlog):
try:
# return subprocess.check_call(popenArgs, stdin=None, stdout=open('/dev/null'), stderr=subprocess.STDOUT, shell=False)
return subprocess.call(popenArgs, stdin=None, stdout=open('/dev/null'), stderr=open(errlog, 'w+'), shell=False)
except OSError as err:
handleError(err, braise=True)
class ProgressBar(object):
def __init__(self, n):
self.n = n
self.indx = 0
sys.stdout.write('\n')
self.start = time.time()
def update(self, i):
start = self.start
curr = time.time()
secs = 1./(1.*i/self.n) * (curr - start)
eta = datetime.timedelta(seconds=secs)
sys.stdout.write("\r %s of %s ETA: %s" % (i, self.n, eta))
def increment(self, i=1):
self.indx += i
self.update(self.indx)
def finish(self):
sys.stdout.write('\n')
def cleanup(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def ensure_exists(dirname):
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError:
pass
def reduce(f, L, acc):
while L:
y = acc
x = L.pop()
acc = f(x, y)
return acc
if __name__ == '__main__':
print('len(list()):%s\tlen([]):%s' % (len(list()), len([])))
try:
print('A'.split(":")[1])
except IndexError as err:
print(range(-1000, 1000000)[5])
print(np.array([3., 7.]) < 4.)
P = psutil.Process(os.getpid())
print(P.memory_percent())
print(memory())
|
{
"content_hash": "60b00bdbe1a1cd390090bd200011d6f0",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 126,
"avg_line_length": 25.330049261083744,
"alnum_prop": 0.587709062621548,
"repo_name": "yotamfr/prot2vec",
"id": "bd5f9973061721562b60e72ec9d4476308ec71e6",
"size": "5142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2019539"
},
{
"name": "Python",
"bytes": "789743"
},
{
"name": "R",
"bytes": "6560"
},
{
"name": "Shell",
"bytes": "3652"
}
],
"symlink_target": ""
}
|
"""\
===========================
Generic 3D Topology Viewer
===========================
A 3D version of TopologyViewer plus hierarchy topology support, pygame based
display of graph topologies. Rendering and physics laws can be customised
for specific applications.
Example Usage
-------------
A simple console driven topology viewer::
Pipeline( ConsoleReader(),
lines_to_tokenlists(),
TopologyViewer3D(),
).run()
Then at runtime try typing these commands to change the topology in real time::
>>> DEL ALL
>>> ADD NODE 1 "1st node" (0,0,-10) teapot
>>> ADD NODE 2 "2nd node" randompos sphere
>>> ADD NODE 3 "3rd node" randompos -
>>> ADD NODE 1:1 "1st child node of the 1st node" " ( 0 , 0 , -10 ) " -
>>> ADD NODE 1:2 "2nd child node of the 1st node" randompos -
>>> ADD LINK 1 2
>>> ADD LINK 3 2
>>> DEL LINK 1 2
>>> ADD LINK 1:1 1:2
>>> DEL NODE 1
User Interface
--------------
TopologyViewer3D manifests as a pygame OpenGL display surface. As it is sent
topology information, nodes and links between them will appear.
You can click a node with the mouse to select it. Depending on the application,
this may display additional data or, if integrated into another app, have some
other effect.
Click and drag with the left mouse button to move nodes around. Note that a
simple physics model of repulsion and attraction forces is always active. This
causes nodes to move around to help make the layout visually clearer; however, you may
still need to drag nodes about to tidy it up.
For hierarchy topology, double-click a particle (or select one then press return key)
to show its child topology; right-click (or press backspace key) to show last level's
topology.
Operations supported:
* esc --- quit
* a --- viewer position moves left
* d --- viewer position moves right
* w --- viewer position moves up
* s --- viewer position moves down
* pgup --- viewer position moves forward (zoom in)
* pgdn --- viewer position moves backward (zoom out)
* left --- rotate selected particles to left around y axis (all particles if none of them is selected)
* right --- rotate selected particles to right around y axis (all particles if none of them is selected)
* up --- rotate selected particles to up around x axis (all particles if none of them is selected)
* down --- rotate selected particles to down around x axis (all particles if none of them is selected)
* < --- rotate selected particles anticlock-wise around z axis (all particles if none of them is selected)
* > --- rotate selected particles clock-wise around z axis (all particles if none of them is selected)
* return --- show next level's topology of the selected particle when only one particle is selected
* backspace --- show last level's topology
* Mouse click --- click particle to select one, click empty area to deselect all
* Mouse drag --- move particles
* Mouse double-click --- show next level's topology of the particle clicked
* Mouse right-click --- show last level's topology
* shift --- multi Select Mode; shift+click for multiple selection/ deselection
* ctrl --- rotation Mode; when ctrl is pressed, mouse motion will rotate the selected particle
(all particles if none of them is selected)
How does it work?
-----------------
TopologyViewer3D is a Kamaelia component which renders Topology on a pygame OpenGL display surface.
A 3D topology (graph) of nodes and links between them is rendered to the surface.
You can specify an initial topology by providing a list of instantiated
particles and another list of pairs of those particles to show how they are
linked.
TopologyViewer3D responds to commands arriving at its "inbox" inbox
instructing it on how to change the topology. A command is a list/tuple.
Commands recognised are:
[ "ADD", "NODE", <id>, <name>, <posSpec>, <particle type> ]
Add a node, using:
- id -- a unique ID used to refer to the particle in other topology commands. Cannot be None.
For hierarchy topology, the id is joined by its parent id with ":" to represent the
hierarchy structure.
E.g., suppose the topology has 3 levels. The id of a particle in the 1st level is 1Node;
it has a child particle whose id is 2Node; 2Node also has a child particle whose id is 3Node;
then their ids are represented as
1Node
1Node:2Node
1Node:2Node:3Node
- name -- string name label for the particle
- posSpec -- string describing initial (x,y,z) (see _generatePos); spaces are allowed
within the tuple, but quotation is needed in this case.
E.g., " ( 0 , 0 , -10 ) "
- particleType -- particle type (default provided is "-", unless custom types are provided - see below)
currently supported: "-" same as cuboid, cuboid, sphere and teapot
Note: it would be much slower than cuboid if either sphere or teapot is used.
[ "DEL", "NODE", <id> ]
Remove a node (also removes all links to and from it)
[ "ADD", "LINK", <id from>, <id to> ]
Add a link, directional from fromID to toID
[ "DEL", "LINK", <id from>, <id to> ]
Remove a link, directional from fromID to toID
[ "DEL", "ALL" ]
Clears all nodes and links
[ "GET", "ALL" ]
Outputs the current topology as a list of commands, just like
those used to build it. The list begins with a 'DEL ALL'.
[ "UPDATE_NAME", "NODE", <id>, <new name> ]
If the node does not already exist, this does NOT cause it to be created.
[ "GET_NAME", "NODE", <id> ]
Returns UPDATE_NAME NODE message for the specified node
Commands are processed immediately, in the order in which they arrive. You
therefore cannot refer to a node or linkage that has not yet been created, or
that has already been destroyed.
If a stream of commands arrives in quick succession, rendering and physics will
be temporarily stopped, so commands can be processed more quickly. This is
necessary because when there is a large number of particles, physics and
rendering starts to take a long time, and will therefore bottleneck the
handling of commands.
However, there is a 1 second timeout, so at least one update of the visual
output is guaranteed per second.
TopologyViewer3D sends any output to its "outbox" outbox in the same
list/tuple format as used for commands sent to its "inbox" inbox. The following
may be output:
[ "SELECT", "NODE", <id> ]
Notification that a given node has been selected.
[ "SELECT", "NODE", None ]
Notification that *no node* is now selected.
[ "TOPOLOGY", <topology command list> ]
List of commands needed to build the topology, as it currently stands.
The list will start with a ("DEL","ALL") command.
This is sent in response to receiving a ("GET","ALL") command.
Error and tip messages are printed directly to the console as they arise.
For hierarchy topology, the id of particles should be joined by its parent id with ":"
to represent the hierarchy structure. See "ADD NODE" command above for more information.
Termination
-----------
If a shutdownMicroprocess message is received on this component's "control"
inbox, it will pass it on out of its "signal" outbox and immediately
terminate.
NOTE: Termination is currently rather cludgy - it raises an exception which
will cause the rest of a kamaelia system to halt. Do not rely on this behaviour
as it will be changed to provide cleaner termination at some point.
Customising the 3D topology viewer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can customise:
- the 'types' of particles (nodes)
- visual appearance of particles (nodes) and the links between them;
- the physics laws used to assist with layout
Use the particleTypes argument of the initialiser to specify classes that
should be instantiated to render each type of particle (nodes). particleTypes
should be a dictionary mapping names for particle types to the respective
classes, for example::
{ "major" : BigParticle, "minor" : SmallParticle }
See below for information on how to write your own particle classes.
Layout of the nodes on the surface is assisted by a physics model, provided
by an instance of the Kamaelia.Support.Particles.ParticleSystem class. Freeze
a particle (particle.freeze()) if you want to exempt it from the physics laws.
Customise the laws used for each particle type by providing a
Kamaelia.Physics.Simple.MultipleLaws object at initialisation.
Writing your own particle class
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Your particle class should inherit from Kamaelia.PhysicsGraph3D.Particles3D.Particle3D
and implement the following method (for rendering purposes):
draw()
draw OpenGL particles and links in this method.
TODO: Reduce CPU usage, improve responsive speed
References: 1. Kamaelia.Visualisation.PhysicsGraph.TopologyViewer
2. Kamaelia.UI.OpenGL.OpenGLComponent
3. Kamaelia.UI.OpenGL.MatchedTranslationInteractor
"""
import math, random
import time
import re
import sys
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import Axon
import Kamaelia.Support.Particles
from Kamaelia.UI.OpenGL.OpenGLDisplay import OpenGLDisplay
from Kamaelia.UI.OpenGL.Vector import Vector
from Kamaelia.UI.OpenGL.Intersect import Intersect
_cat = Axon.CoordinatingAssistantTracker
from Particles3D import CuboidParticle3D, SphereParticle3D, TeapotParticle3D
from Kamaelia.Support.Particles.ParticleSystem import ParticleSystem
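# Editor's sketch (not part of the original file): a minimal custom particle
# type, per the "Writing your own particle class" section of the module
# docstring. Subclassing CuboidParticle3D (imported above) keeps the
# constructor signature that doCommand() uses; only draw() is overridden.
class PointParticle3D(CuboidParticle3D):
    def draw(self):
        # Render the particle as a single OpenGL point at its local origin.
        glBegin(GL_POINTS)
        glVertex3f(0.0, 0.0, 0.0)
        glEnd()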
class TopologyViewer3D(Axon.Component.component):
"""\
TopologyViewer3D(...) -> new TopologyViewer3D component.
A component that takes incoming topology (change) data and displays it live
using pygame OpenGL. A simple physics model assists with visual layout. Particle
types, appearance and physics interactions can be customised.
Keyword arguments (in order):
- screensize -- (width,height) of the display area (default = (800,600))
- fullscreen -- True to start up in fullscreen mode (default = False)
- caption -- Caption for the pygame window (default = "3D Topology Viewer")
- particleTypes -- dict("type" -> klass) mapping types of particle to classes used to render them (default = {"-":CuboidParticle3D})
- initialTopology -- (nodes,bonds) where bonds=list((src,dst)) starting state for the topology (default=([],[]))
- laws -- Physics laws to apply between particles (default = SimpleLaws(bondlength=2))
- simCyclesPerRedraw -- number of physics sim cycles to run between each redraw (default=1)
- border -- Minimum distance from edge of display area that new particles appear (default=0)
"""
Inboxes = { "inbox" : "Topology (change) data describing an Axon system",
"control" : "Shutdown signalling",
"callback" : "for the response after a displayrequest",
"events" : "Place where we recieve events from the outside world",
}
Outboxes = { "signal" : "Control signalling",
"outbox" : "Notification and topology output",
"display_signal" : "Requests to Pygame Display service",
}
def __init__(self, screensize = (800,600),
fullscreen = False,
caption = "3D Topology Viewer",
particleTypes = None,
initialTopology = None,
laws = None,
simCyclesPerRedraw = 1,
border = 0):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(TopologyViewer3D, self).__init__()
glutInit(sys.argv)
tracker = _cat.coordinatingassistanttracker.getcat()
try:
self.display = tracker.retrieveService("ogl_display")[0]
except KeyError:
self.display = OpenGLDisplay(width=screensize[0], height=screensize[1],fullscreen=fullscreen,
title=caption)
self.display.activate()
OpenGLDisplay.setDisplayService(self.display, tracker)
self.display = OpenGLDisplay.getDisplayService()[0]
self.link((self,"display_signal"), (self.display,"notify"))
self.link((self.display,"signal"), (self,"control"))
self.border = border
if particleTypes == None:
self.particleTypes = {"-":CuboidParticle3D, "cuboid":CuboidParticle3D, "sphere":SphereParticle3D,
"teapot":TeapotParticle3D}
else:
self.particleTypes = particleTypes
if initialTopology == None:
initialTopology = ([],[])
self.initialNodes = list(initialTopology[0])
self.initialBonds = list(initialTopology[1])
self.hitParticles = []
self.multiSelectMode = False
self.selectedParticles = []
self.grabbed = False
self.rotationMode = False
if laws==None:
self.laws = Kamaelia.Support.Particles.SimpleLaws(bondLength=2)
else:
self.laws = laws
self.physics = ParticleSystem(self.laws, [], 0)
self.biggestRadius = 0
# Do interaction
self.simCyclesPerRedraw = simCyclesPerRedraw
self.lastIdleTime = time.time()
        # Tracks whether a new node has been added; if True, its new id must be registered with the OpenGLDisplay lists
self.isNewNode = False
# For hierarchy structure
self.maxLevel = 0
self.currentLevel = 0
self.previousParentParticleID = self.currentParentParticleID = ''
self.viewerOldPos = Vector()
self.levelViewerPos = {}
# The Physics particle system of current display level for display
self.currentDisplayedPhysics = ParticleSystem(self.laws, [], 0)
# For double click
self.lastClickPos = (0,0)
self.lastClickTime = time.time()
self.dClickRes = 0.3
def initialiseComponent(self):
"""Initialises."""
self.addListenEvents( [pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION, pygame.KEYDOWN, pygame.KEYUP ])
# For key holding handling
pygame.key.set_repeat(100,100)
for node in self.initialNodes:
self.addParticle(*node)
for source,dest in self.initialBonds:
self.makeBond(source, dest)
def main(self):
"""Main loop."""
# Make display request for event listening purpose
self.size = Vector(0,0,0)
disprequest = { "OGL_DISPLAYREQUEST" : True,
"objectid" : id(self),
"callback" : (self,"callback"),
"events" : (self, "events"),
"size": self.size
}
# send display request
self.send(disprequest, "display_signal")
# Wait for response on displayrequest and get identifier of the viewer
while not self.dataReady("callback"): yield 1
self.identifier = self.recv("callback")
self.initialiseComponent()
while True:
# Process incoming messages
if self.dataReady("inbox"):
message = self.recv("inbox")
self.doCommand(message)
# Wait for response on displayrequest and get identifier of the particle
if self.isNewNode:
while not self.dataReady("callback"): yield 1
self.physics.particles[-1].identifier = self.recv("callback")
self.isNewNode = False
else:
self.lastIdleTime = 0
yield 1
if self.lastIdleTime + 1.0 < time.time():
#Freeze selected particles so that they are not subject to the physics law
for particle in self.selectedParticles:
particle.freeze()
# Do interaction between particles
self.currentDisplayedPhysics.run(self.simCyclesPerRedraw)
# Unfreeze selected particles
for particle in self.selectedParticles:
particle.unFreeze()
# Draw particles if new or updated
for particle in self.currentDisplayedPhysics.particles:
if particle.needRedraw:
self.drawParticles(particle)
self.handleEvents()
# Perform transformation
for particle in self.currentDisplayedPhysics.particles:
transform_update = particle.applyTransforms()
if transform_update is not None:
self.send(transform_update, "display_signal")
self.lastIdleTime = time.time()
else:
yield 1
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, Axon.Ipc.shutdownMicroprocess):
self.quit(msg)
def quit(self,msg=Axon.Ipc.shutdownMicroprocess()):
"""Cause termination."""
print ('Shut down...')
self.send(msg, "signal")
self.scheduler.stop()
def draw(self):
"""\
Dummy method reserved for future use
Invoke draw() and save its commands to a newly generated displaylist.
The displaylist name is then sent to the display service via a
"DISPLAYLIST_UPDATE" request.
"""
pass
def drawParticles(self, *particles):
"""\
Sends particles drawing opengl command to the display service.
"""
for particle in particles:
# Display list id
displaylist = glGenLists(1)
# Draw object to its displaylist
glNewList(displaylist, GL_COMPILE)
particle.draw()
glEndList()
# Send displaylist
dl_update = { "DISPLAYLIST_UPDATE": True,
"objectid": id(particle),
"displaylist": displaylist
}
self.send(dl_update, "display_signal")
def addListenEvents(self, events):
"""\
Sends listening request for pygame events to the display service.
The events parameter is expected to be a list of pygame event constants.
"""
for event in events:
self.send({"ADDLISTENEVENT":event, "objectid":id(self)}, "display_signal")
def removeListenEvents(self, events):
"""\
Sends stop listening request for pygame events to the display service.
The events parameter is expected to be a list of pygame event constants.
"""
for event in events:
self.send({"REMOVELISTENEVENT":event, "objectid":id(self)}, "display_signal")
def handleEvents(self):
"""Handle events."""
while self.dataReady("events"):
event = self.recv("events")
if event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.MOUSEMOTION or event.type == pygame.MOUSEBUTTONUP:
self.handleMouseEvents(event)
elif event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
self.handleKeyEvents(event)
# Scroll if self.display.viewerposition changes
if self.display.viewerposition.copy() != self.viewerOldPos:
self.scroll()
self.viewerOldPos = self.display.viewerposition.copy()
def handleMouseEvents(self, event):
"""Handle mouse events."""
        if event.type == pygame.MOUSEBUTTONDOWN or (event.type == pygame.MOUSEMOTION and self.grabbed):
if not self.rotationMode:
for particle in self.hitParticles:
p1 = Vector(*particle.pos).copy()
p1.x += 10
p2 = Vector(*particle.pos).copy()
p2.y += 10
# Get the position of mouse
z = Intersect.ray_Plane(Vector(0,0,0), event.direction, [Vector(*particle.pos)-Vector(0,0,self.display.viewerposition.z), p1-Vector(0,0,self.display.viewerposition.z), p2-Vector(0,0,self.display.viewerposition.z)])
newpoint = event.direction * z
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
# Handle double click
clickPos = event.pos
currentTime = time.time()
elapsedTime = currentTime - self.lastClickTime
# If it's a double-click
if clickPos == self.lastClickPos and elapsedTime<self.dClickRes:
self.gotoDisplayLevel(1)
else: # Single click
if not self.rotationMode: # Select particle
for particle in self.currentDisplayedPhysics.particles:
if particle.identifier in event.hitobjects:
self.grabbed = True
self.hitParticles.append(particle)
self.selectParticle(particle)
                    # If the click hit empty space and we are not in multi-select mode, deselect all
if not self.hitParticles and not self.multiSelectMode:
self.deselectAll()
self.lastClickPos = clickPos
self.lastClickTime = currentTime
elif event.button == 3: # Right-clicked
self.gotoDisplayLevel(-1)
elif event.button == 4: # Scrolled-up: zoom out
if self.selectedParticles:
particles = self.selectedParticles
else:
particles = self.currentDisplayedPhysics.particles
for particle in particles:
posVector = Vector(*particle.pos)
posVector.z -= 1
particle.pos = posVector.toTuple()
elif event.button == 5: # Scrolled-down: zoom in
if self.selectedParticles:
particles = self.selectedParticles
else:
particles = self.currentDisplayedPhysics.particles
for particle in particles:
posVector = Vector(*particle.pos)
posVector.z += 1
particle.pos = posVector.toTuple()
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
for particle in self.hitParticles:
self.grabbed = False
particle.oldpoint = None
self.hitParticles.pop(self.hitParticles.index(particle))
if event.type == pygame.MOUSEMOTION:
if not self.rotationMode and self.grabbed: # Drag particles
for particle in self.hitParticles:
try:
if particle.oldpoint is not None:
diff = newpoint-particle.oldpoint
amount = (diff.x, diff.y)
particle.pos = (Vector(*particle.pos)+Vector(*amount)).toTuple()
except NameError: pass
# Redraw the link so that the link can move with the particle
for p in particle.bondedFrom:
p.needRedraw = True
elif self.rotationMode: # Rotate particles
dAnglex = float(event.rel[1])
dAngley = -float(event.rel[0])
self.rotateParticles(self.selectedParticles, (dAnglex,dAngley,0))
try:
for particle in self.hitParticles:
particle.oldpoint = newpoint
except NameError: pass
def handleKeyEvents(self, event):
"""Handle keyboard events."""
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.quit()
elif event.key == pygame.K_BACKSPACE:
self.gotoDisplayLevel(-1)
elif event.key == pygame.K_RETURN:
self.gotoDisplayLevel(1)
elif event.key == pygame.K_LSHIFT or event.key == pygame.K_RSHIFT:
self.multiSelectMode = True
elif event.key == pygame.K_LCTRL or event.key == pygame.K_RCTRL:
self.rotationMode = True
# Change viewer position
elif event.key == pygame.K_PAGEUP:
self.display.viewerposition.z -= 0.5
elif event.key == pygame.K_PAGEDOWN:
self.display.viewerposition.z += 0.5
elif event.key == pygame.K_w:
self.display.viewerposition.y += 0.5
elif event.key == pygame.K_s:
self.display.viewerposition.y -= 0.5
elif event.key == pygame.K_a:
self.display.viewerposition.x -= 0.5
elif event.key == pygame.K_d:
self.display.viewerposition.x += 0.5
# Rotate particles
elif event.key == pygame.K_UP:
self.rotateParticles(self.selectedParticles, (-20,0,0))
elif event.key == pygame.K_DOWN:
self.rotateParticles(self.selectedParticles, (20,0,0))
elif event.key == pygame.K_LEFT:
self.rotateParticles(self.selectedParticles, (0,20,0))
elif event.key == pygame.K_RIGHT:
self.rotateParticles(self.selectedParticles, (0,-20,0))
elif event.key == pygame.K_COMMA:
self.rotateParticles(self.selectedParticles, (0,0,20))
elif event.key == pygame.K_PERIOD:
self.rotateParticles(self.selectedParticles, (0,0,-20))
# Key exit (release) handling
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LSHIFT or event.key == pygame.K_RSHIFT:
# Return to normal mode from multiSelectMode
self.multiSelectMode = False
elif event.key == pygame.K_LCTRL or event.key == pygame.K_RCTRL:
# Return to normal mode from rotationMode
self.rotationMode = False
def scroll( self ):
"""Scroll the surface by resetting gluLookAt."""
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
self.display.setProjection()
def rotateParticles( self, particles, dAngle ):
"""\
Rotate the particles around their common centre dAngle degree.
Particles is a list; dAngle is a triple tuple of degree.
If particles are given an empty list, rotate all particles instead.
"""
if particles == []:
particles = self.currentDisplayedPhysics.particles
centrePoint = Vector()
for particle in particles:
posVector = Vector(*particle.pos)
centrePoint += posVector
centrePoint /= len(particles)
if dAngle[0] != 0: # Rotate around x axis
for particle in particles:
posVector = Vector(*particle.pos)
relativePosVector = posVector - centrePoint
radius = (relativePosVector.z*relativePosVector.z+relativePosVector.y*relativePosVector.y)**0.5
newAngle = (math.atan2(relativePosVector.z,relativePosVector.y)+dAngle[0]*math.pi/180)
particle.pos = (posVector.x, radius*math.cos(newAngle)+centrePoint.y, radius*math.sin(newAngle)+centrePoint.z)
particle.drotation += Vector(dAngle[0],0,0)
if dAngle[1] != 0: # Rotate around y axis
for particle in particles:
posVector = Vector(*particle.pos)
relativePosVector = posVector - centrePoint
radius = (relativePosVector.z*relativePosVector.z+relativePosVector.x*relativePosVector.x)**0.5
newAngle = (math.atan2(relativePosVector.z,relativePosVector.x)+dAngle[1]*math.pi/180)
particle.pos = (radius*math.cos(newAngle)+centrePoint.x, posVector.y, radius*math.sin(newAngle)+centrePoint.z)
particle.drotation += Vector(0,-dAngle[1],0)
if dAngle[2] != 0: # Rotate around z axis
for particle in particles:
posVector = Vector(*particle.pos)
relativePosVector = posVector - centrePoint
radius = (relativePosVector.x*relativePosVector.x+relativePosVector.y*relativePosVector.y)**0.5
newAngle = (math.atan2(relativePosVector.y,relativePosVector.x)+dAngle[2]*math.pi/180)
particle.pos = (radius*math.cos(newAngle)+centrePoint.x, radius*math.sin(newAngle)+centrePoint.y, posVector.z)
particle.drotation += Vector(0,0,dAngle[2])
            # Angles are equivalent modulo 360 degrees, so keep them in range
particle.drotation %= 360
def gotoDisplayLevel( self, dlevel):
"""Switch to another display level."""
isValid = False
if self.currentLevel + dlevel > self.maxLevel:
print ("Warning: max hierarchy level has reached!")
elif self.currentLevel + dlevel < 0:
print ("Warning: The first hierarchy level has reached!")
else:
if dlevel < 0: # Go to the last dlevel level
self.previousParentParticleID = self.currentParentParticleID
items = self.currentParentParticleID.split(':')
for _ in xrange(-dlevel):
items.pop()
self.currentParentParticleID = ':'.join(items)
isValid = True
if dlevel == 1: # It only makes sense if dlevel == 1 when go to next dlevel level
if len(self.selectedParticles) == 1:
hasChildParticles = False
for particle in self.physics.particles:
if particle.ID.find(self.selectedParticles[0].ID) == 0 and particle.ID != self.selectedParticles[0].ID:
hasChildParticles = True
break
if hasChildParticles:
self.previousParentParticleID = self.currentParentParticleID
self.currentParentParticleID = self.selectedParticles[0].ID
isValid = True
else:
print ('Warning: The particle you double-clicked has no children!')
else:
print ("Tips: To extend a node, please double-click the node you want to extend")
# Show the specified display level if valid
if isValid:
# Save current level's viewer position
self.levelViewerPos[self.currentLevel, self.previousParentParticleID] = self.display.viewerposition.copy()
# Deselect all
self.deselectAll()
# Display next level
self.currentLevel += dlevel
# Reset viewer position to previous
try:
self.display.viewerposition = self.levelViewerPos[self.currentLevel, self.currentParentParticleID].copy()
except KeyError:
self.display.viewerposition = self.levelViewerPos[self.currentLevel, self.currentParentParticleID] = Vector()
# Remove current displayed particles
for particle in self.currentDisplayedPhysics.particles:
self.display.ogl_displaylists.pop(id(particle))
self.display.ogl_transforms.pop(id(particle))
self.currentDisplayedPhysics.removeByID(*self.currentDisplayedPhysics.particleDict.keys())
# Add current level's particles to self.currentDisplayedPhysics.particles for display
self.currentDisplayedPhysics.particles = []
if self.physics.particles != []:
for particle in self.physics.particles:
if self.currentParentParticleID == '': # If no parent, it's the top level
if ':' not in particle.ID:
self.currentDisplayedPhysics.add( particle )
particle.oldpos = particle.initialpos
# The child particles of self.currentParentParticleID
elif particle.ID.find(self.currentParentParticleID) == 0 and particle.ID.count(':') == self.currentLevel:
self.currentDisplayedPhysics.add( particle )
particle.oldpos = particle.initialpos
def doCommand(self, msg):
"""\
Processes a topology command tuple:
[ "ADD", "NODE", <id>, <name>, <positionSpec>, <particle type> ]
[ "DEL", "NODE", <id> ]
[ "ADD", "LINK", <id from>, <id to> ]
[ "DEL", "LINK", <id from>, <id to> ]
[ "DEL", "ALL" ]
[ "GET", "ALL" ]
"""
if len(msg) >= 2:
cmd = msg[0].upper(), msg[1].upper()
# Add default arguments when they are not provided
if cmd == ("ADD", "NODE"):
if len(msg) == 4:
msg += ['randompos', '-']
elif len(msg) == 5:
msg += ['-']
if cmd == ("ADD", "NODE") and len(msg) == 6:
if msg[2] in [p.ID for p in self.physics.particles]:
print ("Node exists, please use a new node ID!")
else:
if ( msg[5] in self.particleTypes ):
ptype = self.particleTypes[msg[5]]
ident = msg[2]
name = msg[3]
posSpec = msg[4]
pos = self._generatePos(posSpec)
particle = ptype(position = pos, ID=ident, name=name)
particle.originaltype = msg[5]
self.addParticle(particle)
self.isNewNode = True
elif cmd == ("DEL", "NODE") and len(msg) == 3:
ident = msg[2]
self.removeParticle(ident)
elif cmd == ("ADD", "LINK") and len(msg) == 4:
src = msg[2]
dst = msg[3]
self.makeBond(src, dst)
elif cmd == ("DEL", "LINK") and len(msg) == 4:
src = msg[2]
dst = msg[3]
self.breakBond(src, dst)
elif cmd == ("DEL", "ALL") and len(msg) == 2:
self.removeParticle(*self.physics.particleDict.keys())
self.currentLevel = 0
self.currentParentParticleID = ''
elif cmd == ("GET", "ALL") and len(msg) == 2:
topology = [("DEL","ALL")]
topology.extend(self.getTopology())
self.send( ("TOPOLOGY", topology), "outbox" )
elif cmd == ("UPDATE_NAME", "NODE") and len(msg) == 4:
node_id = msg[2]
new_name = msg[3]
self.updateParticleLabel(node_id, new_name)
self.send( ("UPDATE_NAME", "NODE", node_id, new_name), "outbox" )
elif cmd == ("GET_NAME", "NODE") and len(msg) == 3:
node_id = msg[2]
name = self.getParticleLabel(node_id)
self.send( ("GET_NAME", "NODE", node_id, name), "outbox" )
else:
print ("Command Error: please check your command format!")
else:
print ("Command Error: not enough parameters!")
def _generatePos(self, posSpec):
"""\
_generatePos(posSpec) -> (x,y,z) or raises ValueError
posSpec == "randompos" or "auto" -> random (x,y,z) within the view volume (the specified border distance in from the edge)
posSpec == "(XXX,YYY,ZZZ)" -> specified x,y,z (positive or negative integers)
Spaces are allowed within the tuple, but the argument must then be quoted,
e.g., " ( 0 , 0 , -10 ) "
"""
posSpec = posSpec.lower()
if posSpec == "randompos" or posSpec == "auto" :
zLim = self.display.nearPlaneDist, self.display.farPlaneDist
z = -1*random.randrange(int((zLim[1]-zLim[0])/20)+self.border,int((zLim[1]-zLim[0])/8)-self.border,1)
yLim = z*math.tan(self.display.perspectiveAngle*math.pi/360.0), -z*math.tan(self.display.perspectiveAngle*math.pi/360.0)
xLim = yLim[0]*self.display.aspectRatio, yLim[1]*self.display.aspectRatio
y = random.randrange(int(yLim[0])+self.border,int(yLim[1])-self.border,1)
x = random.randrange(int(xLim[0])+self.border,int(xLim[1])-self.border,1)
# Apply camera/ viewer transformation
x += self.display.viewerposition.x
y += self.display.viewerposition.y
z += self.display.viewerposition.z
return x,y,z
else: # given specified position
posSpec = posSpec.strip()
# Use triple tuple format for position
match = re.match("^\( *([+-]?\d+) *, *([+-]?\d+) *, *([+-]?\d+) *\)$", posSpec)
if match:
x = int(match.group(1))
y = int(match.group(2))
z = int(match.group(3))
return x,y,z
raise ValueError("Unrecognised position specification")
def addParticle(self, *particles):
"""Add particles to the system"""
for p in particles:
if p.radius > self.biggestRadius:
self.biggestRadius = p.radius
pLevel = p.ID.count(':')
if self.maxLevel < pLevel:
self.maxLevel = pLevel
# Make display request for every particle added
disprequest = { "OGL_DISPLAYREQUEST" : True,
"objectid" : id(p),
"callback" : (self,"callback"),
"events" : (self, "events"),
"size": p.size
}
# Send display request
self.send(disprequest, "display_signal")
self.physics.add( *particles )
# Add new particles to self.currentDisplayedPhysics
for particle in particles:
if self.currentParentParticleID == '': # If no parent, it's the top level
if ':' not in particle.ID:
self.currentDisplayedPhysics.add( particle )
particle.oldpos = particle.initialpos
# The child particles of self.currentParentParticleID
elif particle.ID.find(self.currentParentParticleID) == 0 and particle.ID.count(':') == self.currentLevel:
self.currentDisplayedPhysics.add( particle )
particle.oldpos = particle.initialpos
def removeParticle(self, *ids):
"""\
Remove particle(s) specified by their ids.
Also breaks any bonds to/from that particle.
"""
for ident in ids:
self.physics.particleDict[ident].breakAllBonds()
try:
self.display.ogl_objects.remove(id(self.physics.particleDict[ident]))
self.display.ogl_names.pop(id(self.physics.particleDict[ident]))
self.display.ogl_displaylists.pop(id(self.physics.particleDict[ident]))
self.display.ogl_transforms.pop(id(self.physics.particleDict[ident]))
except KeyError: pass
self.physics.removeByID(*ids)
for ident in ids:
try:
self.currentDisplayedPhysics.removeByID(ident)
except KeyError: pass
def selectParticle(self, particle):
"""Select the specified particle."""
if self.multiSelectMode:
if particle not in self.selectedParticles:
particle.select()
self.selectedParticles.append(particle)
self.send( "('SELECT', 'NODE', '"+particle.name+"')", "outbox" )
else:
particle.deselect()
self.selectedParticles.remove(particle)
self.send( "('DESELECT', 'NODE', '"+particle.name+"')", "outbox" )
else:
self.deselectAll()
self.selectedParticles = []
particle.select()
self.selectedParticles.append(particle)
self.send( "('SELECT', 'NODE', '"+particle.name+"')", "outbox" )
def deselectAll(self):
"""Deselect all particles."""
for particle in self.selectedParticles:
particle.deselect()
self.selectedParticles = []
def makeBond(self, source, dest):
"""Make a bond from source to destination particle, specified by IDs"""
self.physics.particleDict[source].makeBond(self.physics.particleDict, dest)
self.physics.particleDict[source].needRedraw = True
def breakBond(self, source, dest):
"""Break a bond from source to destination particle, specified by IDs"""
self.physics.particleDict[source].breakBond(self.physics.particleDict, dest)
self.physics.particleDict[source].needRedraw = True
def updateParticleLabel(self, node_id, new_name):
"""\
updateParticleLabel(node_id, new_name) -> updates the given nodes name & visual label if it exists
node_id - an id for an already existing node
new_name - a string (may include spaces) defining the new node name
"""
for p in self.physics.particles:
if p.ID == node_id:
p.set_label(new_name)
p.needRedraw = True
return
def getParticleLabel(self, node_id):
"""\
getParticleLabel(node_id) -> particle's name
Returns the name/label of the specified particle.
"""
for p in self.physics.particles:
if p.ID == node_id:
return p.name
def getTopology(self):
"""getTopology() -> list of command tuples that would build the current topology"""
topology = []
# first, enumerate the particles
for particle in self.physics.particles:
topology.append( ( "ADD","NODE",
particle.ID,
particle.name,
"random",
particle.originaltype
) )
# now enumerate the linkages
for particle in self.physics.particles:
for dst in particle.getBondedTo():
topology.append( ( "ADD","LINK", particle.ID, dst.ID ) )
return topology
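# Illustration only (not part of Kamaelia): the axis-rotation math used in
# the rotation handling above, extracted as a standalone helper for a single
# point. The point is expressed as a radius and angle in the x-z plane around
# a centre, the angle is advanced by d_angle_degrees, and the point is
# converted back to Cartesian coordinates. Relies on the math import above.
def _rotate_point_about_y(point, centre, d_angle_degrees):
    rel_x = point[0] - centre[0]
    rel_z = point[2] - centre[2]
    radius = (rel_x * rel_x + rel_z * rel_z) ** 0.5
    angle = math.atan2(rel_z, rel_x) + d_angle_degrees * math.pi / 180
    return (radius * math.cos(angle) + centre[0],
            point[1],
            radius * math.sin(angle) + centre[2])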
__kamaelia_components__ = ( TopologyViewer3D, )
if __name__ == "__main__":
from Kamaelia.Util.DataSource import DataSource
from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists
from Kamaelia.Util.Console import ConsoleEchoer,ConsoleReader
from Kamaelia.Chassis.Graphline import Graphline
# Data can be from both DataSource and console inputs
print ("Please type the command you want to draw")
Graphline(
CONSOLEREADER = ConsoleReader(">>> "),
# DATASOURCE = DataSource(['ADD NODE 1Node 1Node randompos -', 'ADD NODE 2Node 2Node randompos -',
# 'ADD NODE 3Node 3Node randompos -', 'ADD NODE 4Node 4Node randompos -',
# 'ADD LINK 1Node 2Node','ADD LINK 2Node 3Node', 'ADD LINK 3Node 4Node',
# 'ADD LINK 4Node 1Node']),
DATASOURCE = DataSource(['ADD NODE 1Node 1Node randompos teapot',
'ADD NODE 2Node 2Node randompos -',
'ADD NODE 3Node 3Node randompos sphere', 'ADD NODE 4Node 4Node randompos -',
'ADD NODE 5Node 5Node randompos sphere', 'ADD NODE 6Node 6Node randompos -',
'ADD NODE 7Node 7Node randompos sphere',
'ADD LINK 1Node 2Node',
'ADD LINK 1Node 3Node', 'ADD LINK 1Node 4Node',
'ADD LINK 1Node 5Node','ADD LINK 1Node 6Node', 'ADD LINK 1Node 7Node',
'ADD NODE 1Node:1Node 1Node:1Node randompos -', 'ADD NODE 1Node:2Node 1Node:2Node randompos -',
'ADD NODE 1Node:3Node 1Node:3Node randompos -', 'ADD NODE 1Node:4Node 1Node:4Node randompos -',
'ADD LINK 1Node:1Node 1Node:2Node', 'ADD LINK 1Node:2Node 1Node:3Node',
'ADD LINK 1Node:3Node 1Node:4Node', 'ADD LINK 1Node:4Node 1Node:1Node',
'ADD NODE 1Node:1Node:1Node 1Node:1Node:1Node randompos -',
'ADD NODE 1Node:1Node:2Node 1Node:1Node:2Node randompos -',
'ADD LINK 1Node:1Node:1Node 1Node:1Node:2Node',
'ADD NODE 5Node:1Node 5Node:1Node randompos sphere',
'ADD NODE 5Node:2Node 5Node:2Node randompos sphere',
'ADD LINK 5Node:1Node 5Node:2Node'
]),
TOKENS = lines_to_tokenlists(),
VIEWER = TopologyViewer3D(),
CONSOLEECHOER = ConsoleEchoer(),
linkages = {
("CONSOLEREADER","outbox") : ("TOKENS","inbox"),
("DATASOURCE","outbox") : ("TOKENS","inbox"),
("TOKENS","outbox") : ("VIEWER","inbox"),
("VIEWER","outbox") : ("CONSOLEECHOER","inbox"),
}
).run()
# Licensed to the BBC under a Contributor Agreement: CL
|
{
"content_hash": "bedb1863a63aee6024a9c606f7ac0f2a",
"timestamp": "",
"source": "github",
"line_count": 1063,
"max_line_length": 234,
"avg_line_length": 44.636876763875826,
"alnum_prop": 0.5652173913043478,
"repo_name": "sparkslabs/kamaelia_",
"id": "ccc98c51da020abd80cdd2c8d0124d02e9e0ab4d",
"size": "48355",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Code/Python/Kamaelia/Kamaelia/Visualisation/PhysicsGraph3D/TopologyViewer3D.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896248"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707430"
}
],
"symlink_target": ""
}
|
# Solution to ProjectEuler problem 006 from ProjectEuler.net.
# Calculates the difference between the square of the sum and
# the sum of the squares of the first 100 natural numbers.
# Created By: Edward Eisenhart
# Created On: Apr-08-2015
# Contact: EdwardEisenhart@EdwardEisenhart.com
# Calculates the sum of the squares of natural numbers up to n
def calculateSumOfSquares(n):
return (2*n*n*n+3*n*n+n)//6 # See Faulhaber's formula; floor division keeps the exact integer result
# Calculates the square of the sum of natural numbers up to n
def calculateSquareOfSum(n):
s = (n*n+n)//2 # See Faulhaber's formula; 's' avoids shadowing the sum builtin
return s*s
# Main
print(calculateSquareOfSum(100) - calculateSumOfSquares(100))
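# A quick sanity check (illustration only, not part of the original solution):
# the closed forms above agree with direct summation.
def check_closed_forms(n):
    assert calculateSumOfSquares(n) == sum(i*i for i in range(1, n+1))
    assert calculateSquareOfSum(n) == sum(range(1, n+1))**2
check_closed_forms(100)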
|
{
"content_hash": "ee0b28485bbd91292a1850a86e64b41e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 71,
"avg_line_length": 37.05,
"alnum_prop": 0.7678812415654521,
"repo_name": "EdwardEisenhart/Project_Euler",
"id": "e7087c6ea6c84bf4b31146e9e6fd86e220b7295d",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Problem006/Python/Problem006.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4043"
}
],
"symlink_target": ""
}
|
"""
Summary
-------
Tools for basic data manipulation.
"""
from __future__ import division
import numpy as np
from .utils import check_random_state
def atleast_3d(x):
x = np.asarray(x)
if x.ndim >= 3:
return x
elif x.ndim == 2:
return x[np.newaxis, ...]
else:
return x[np.newaxis, np.newaxis, :]
def cut_segments(x2d, tr, start, stop):
"""Cut continuous signal into segments.
Parameters
----------
x2d : array, shape (m, n)
Input data with m signals and n samples.
tr : list of int
Trigger positions.
start : int
Window start (offset relative to trigger).
stop : int
Window end (offset relative to trigger).
Returns
-------
x3d : array, shape (len(tr), m, stop-start)
Segments cut from data. Individual segments are stacked along the first
dimension.
See also
--------
cat_trials : Concatenate segments.
Examples
--------
>>> data = np.random.randn(5, 1000) # 5 channels, 1000 samples
>>> tr = [750, 500, 250] # three segments
>>> x3d = cut_segments(data, tr, 50, 100) # each segment is 50 samples
>>> x3d.shape
(3, 5, 50)
"""
if start != int(start):
raise ValueError("start index must be an integer")
if stop != int(stop):
raise ValueError("stop index must be an integer")
x2d = np.atleast_2d(x2d)
tr = np.asarray(tr, dtype=int).ravel()
win = np.arange(start, stop, dtype=int)
return np.concatenate([x2d[np.newaxis, :, t + win] for t in tr])
def cat_trials(x3d):
"""Concatenate trials along time axis.
Parameters
----------
x3d : array, shape (t, m, n)
Segmented input data with t trials, m signals, and n samples.
Returns
-------
x2d : array, shape (m, t * n)
Trials are concatenated along the second axis.
See also
--------
cut_segments : Cut segments from continuous data.
Examples
--------
>>> x = np.random.randn(6, 4, 150)
>>> y = cat_trials(x)
>>> y.shape
(4, 900)
"""
x3d = atleast_3d(x3d)
t = x3d.shape[0]
return np.concatenate(np.split(x3d, t, 0), axis=2).squeeze(0)
def dot_special(x2d, x3d):
"""Segment-wise dot product.
This function calculates the dot product of x2d with each trial of x3d.
Parameters
----------
x2d : array, shape (p, m)
Input argument.
x3d : array, shape (t, m, n)
Segmented input data with t trials, m signals, and n samples. The dot
product with x2d is calculated for each trial.
Returns
-------
out : array, shape (t, p, n)
Dot product of x2d with each trial of x3d.
Examples
--------
>>> x = np.random.randn(6, 40, 150)
>>> a = np.ones((7, 40))
>>> y = dot_special(a, x)
>>> y.shape
(6, 7, 150)
"""
x3d = atleast_3d(x3d)
x2d = np.atleast_2d(x2d)
return np.concatenate([x2d.dot(x3d[i, ...])[np.newaxis, ...]
for i in range(x3d.shape[0])])
def randomize_phase(data, random_state=None):
"""Phase randomization.
This function randomizes the spectral phase of the input data along the
last dimension.
Parameters
----------
data : array
Input array.
Returns
-------
out : array
Array of same shape as data.
Notes
-----
The algorithm randomizes the phase component of the input's complex Fourier
transform.
Examples
--------
.. plot::
:include-source:
from pylab import *
from scot.datatools import randomize_phase
np.random.seed(1234)
s = np.sin(np.linspace(0,10*np.pi,1000))
x = np.vstack([s, np.sign(s)])
y = randomize_phase(x)
subplot(2,1,1)
title('Phase randomization of sine wave and rectangular function')
plot(x.T + [1.5, -1.5]), axis([0,1000,-3,3])
subplot(2,1,2)
plot(y.T + [1.5, -1.5]), axis([0,1000,-3,3])
plt.show()
"""
rng = check_random_state(random_state)
data = np.asarray(data)
data_freq = np.fft.rfft(data)
data_freq = np.abs(data_freq) * np.exp(1j*rng.random_sample(data_freq.shape)*2*np.pi)
return np.fft.irfft(data_freq, data.shape[-1])
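# Illustration only (not part of the scot API): phase randomization leaves the
# amplitude spectrum unchanged at the interior frequency bins. The DC and
# Nyquist bins are forced real by the inverse real FFT, so they are excluded
# from the comparison.
def _demo_randomize_phase_preserves_spectrum():
    x = np.random.randn(4, 256)
    y = randomize_phase(x, random_state=0)
    np.testing.assert_allclose(np.abs(np.fft.rfft(x))[:, 1:-1],
                               np.abs(np.fft.rfft(y))[:, 1:-1])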
def acm(x, l):
"""Compute autocovariance matrix at lag l.
This function calculates the autocovariance matrix of `x` at lag `l`.
Parameters
----------
x : array, shape (n_trials, n_channels, n_samples)
Signal data (2D or 3D for multiple trials)
l : int
Lag
Returns
-------
c : array, shape (n_channels, n_channels)
Autocovariance matrix of `x` at lag `l`.
"""
x = atleast_3d(x)
if l > x.shape[2]-1:
raise AttributeError("lag exceeds data length")
## subtract mean from each trial
#for t in range(x.shape[2]):
# x[:, :, t] -= np.mean(x[:, :, t], axis=0)
if l == 0:
a, b = x, x
else:
a = x[:, :, l:]
b = x[:, :, 0:-l]
c = np.zeros((x.shape[1], x.shape[1]))
for t in range(x.shape[0]):
c += a[t, :, :].dot(b[t, :, :].T) / a.shape[2]
c /= x.shape[0]
return c.T
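# Illustration only (not part of the scot API): for zero-mean white noise the
# lag-0 autocovariance matrix is close to the identity, while nonzero lags
# give entries close to zero.
def _demo_acm():
    rng = np.random.RandomState(42)
    x = rng.randn(10, 3, 10000)  # 10 trials, 3 channels, 10000 samples
    assert np.allclose(acm(x, 0), np.eye(3), atol=0.05)
    assert np.abs(acm(x, 5)).max() < 0.05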
|
{
"content_hash": "ea3eac9b177ad577dccf9e7e0f11787c",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 89,
"avg_line_length": 24.644549763033176,
"alnum_prop": 0.5542307692307692,
"repo_name": "cbrnr/scot",
"id": "0fe568c3a6ac46ddddcdebbe3614055ef1a124c7",
"size": "5320",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scot/datatools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "268350"
},
{
"name": "Shell",
"bytes": "3838"
}
],
"symlink_target": ""
}
|
import instruction
import parser
import simulator
def todec(n):
return str(n).zfill(3)
simulator.memory = [0 for i in range(99)]
def main():
while not simulator.halt_flag:
line = raw_input("instruction? ")
label, operator, operand, labelref = parser.parseLine(line)
instr = instruction.build(operator, operand)
print("instr:" + instruction.toString(instr))
simulator.memory[simulator.program_counter] = instr
simulator.cycle()
#TODO: when b_reg functionality is added, the b register display needs adding here too
print(" pc:" + todec(simulator.program_counter)
+ " a:" + todec(simulator.accumulator)
+ " z:" + str(simulator.z_flag)
+ " p:" + str(simulator.p_flag)
+ " halt:" + str(simulator.halt_flag))
if __name__ == "__main__":
main()
# END
|
{
"content_hash": "83e3ee08b37f65c53cacb9834252a1de",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 22.571428571428573,
"alnum_prop": 0.6632911392405063,
"repo_name": "whaleygeek/MyLittleComputer",
"id": "d16eb69fc50abf1b8807a5debe7f30093ac31742",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/interactive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50390"
},
{
"name": "Shell",
"bytes": "1751"
},
{
"name": "TypeScript",
"bytes": "39191"
}
],
"symlink_target": ""
}
|
"""
Test that sockets, inputs, and outputs are functional in python.
"""
import os, unittest
import opensim as osim
test_dir = os.path.join(os.path.dirname(os.path.abspath(osim.__file__)),
'tests')
# Silence warning messages if mesh (.vtp) files cannot be found.
osim.Model.setDebugLevel(0)
# TODO in Component:
#
# SimTK::IteratorPair<...> Component::getSockets()
# SimTK::IteratorPair<...> Component::getOutputs()
# Component::Socket can give you the concrete Socket or Input (using a
# fancy SWIG typemap). Not possible for outputs (well, you can manually
# downcast), since we do not have a registry for those types.
# Wrap common Input types: InputDouble or InputDbl or Input, InputVec3, InputVector, etc.
#
# Component::getComponentList() and Component::getComponentList(type)
#
# TODO ClonePtr<Output> in swig doc3.0: 25.3.15.2
#
# SimTK::IteratorPair<...> Component::getInputs()
class TestSockets(unittest.TestCase):
def test_accessing_sockets(self):
model = osim.Model(os.path.join(test_dir, "arm26.osim"))
ground = model.getGround()
shoulder = model.getJointSet().get("r_shoulder")
# Connect up the model.
model.initSystem()
# With an AbstractSocket, we can call AbstractSocket's methods.
assert shoulder.getSocket("parent_frame").getNumConnectees() == 1
assert (shoulder.getSocket("parent_frame").getConnecteeTypeName() ==
"PhysicalFrame")
# Check that the connectees point to the correct objects.
assert (shoulder.getConnectee("child_frame").this ==
shoulder.getComponent("r_humerus_offset").this)
assert (
type(shoulder.getSocket("child_frame").getConnecteeAsObject())
== osim.OpenSimObject)
# In Python, we are able to get the concrete type from this method.
# by using a SWIG typemap(out).
assert type(shoulder.getConnectee("child_frame")) == osim.PhysicalOffsetFrame
def test_iterate_sockets(self):
model = osim.Model(os.path.join(test_dir, "arm26.osim"))
shoulder = model.getJointSet().get("r_shoulder")
# Connect up the model.
model.initSystem()
names = ["child_frame", "parent_frame"]
# By name.
count_by_name = 0
for name in shoulder.getSocketNames():
assert shoulder.getSocket(name).getName() == names[count_by_name]
count_by_name += 1
assert count_by_name == 2
# By iterator.
# TODO doesn't exist yet.
# TODO count_by_iter = 0
# TODO for socket in shoulder.getSockets():
# TODO count_by_iter += 1
# TODO assert socket.getName() == names[count_by_iter]
# TODO assert count_by_iter == 2
def test_connecting(self):
# We'll create a model from scratch and set up its joints with
# the socket interface.
model = osim.Model()
b1 = osim.Body("b1", 1, osim.Vec3(1), osim.Inertia(1))
b2 = osim.Body("b2", 2, osim.Vec3(1), osim.Inertia(1))
j1 = osim.PinJoint()
j1.setName("j1")
j1.updSocket("parent_frame").connect(model.getGround())
j1.connectSocket_child_frame(b1)
j2 = osim.PinJoint()
j2.setName("j2")
j2.connectSocket_parent_frame(b1)
j2.updSocket("child_frame").connect(b2)
model.addBody(b1)
model.addBody(b2)
model.addJoint(j1)
model.addJoint(j2)
state = model.initSystem()
# Check that the connectees point to the correct object.
assert j1.getConnectee("parent_frame").this == model.getGround().this
assert j1.getConnectee("child_frame").this == b1.this
assert j2.getConnectee("parent_frame").this == b1.this
assert j2.getConnectee("child_frame").this == b2.this
# Make sure we can call methods of the concrete connectee type
# (that the downcast succeeded).
assert j1.getConnectee("child_frame").getMass() == 1
assert j2.getConnectee("child_frame").getMass() == 2
class TestInputsOutputs(unittest.TestCase):
def test_output_values(self):
model = osim.Model(os.path.join(test_dir, "arm26.osim"))
s = model.initSystem()
out = model.getOutput("com_position")
self.assertEqual(out.getTypeName(), "Vec3")
print(out.getValueAsString(s))
# Users should just call the method connected to this output, but
# it may be nice for users to still be able to call this method.
print(osim.OutputVec3.safeDownCast(out).getValue(s))
# TODO print(out.getOutputValue(s, "com_position"))
model.realizeDynamics(s)
for musc in model.getMuscles():
exc = osim.OutputDouble.safeDownCast(musc.getOutput("excitation"))
assert exc.getValue(s) == 0
act = osim.OutputDouble.safeDownCast(musc.getOutput("activation"))
assert act.getValue(s) == 0.05
# AbstractChannel.
coord = model.getCoordinateSet().get(0)
self.assertEquals(coord.getOutput('speed').getChannel('').getPathName(),
'/jointset/r_shoulder/r_shoulder_elev|speed')
# Access the value of a concrete Channel.
# TODO Concrete channels are not wrapped yet.
# TODO OutputChannelDouble.safeDownCast(comp.getOutput(name).getChannel()).getValue(s)
# TODO deal with overloaded template and non-template methods like
# getInput() and getOutput().
# TODO no components have inputs yet.
# When they exist, test connecting inputs and outputs.
def test_iterate_outputs(self):
model = osim.Model(os.path.join(test_dir, "arm26.osim"))
s = model.initSystem()
musc = model.getMuscles().get(0)
num_muscle_outputs = 32
# By name.
count_by_name = 0
for name in musc.getOutputNames():
count_by_name += 1
assert len(musc.getOutput(name).getName()) > 0
# We may add more outputs to Muscle in the future, but it is unlikely
# that we will reduce the number.
assert count_by_name >= num_muscle_outputs
# By iterator.
# TODO doesn't exist yet.
# TODO count_by_iter = 0
# TODO for out in musc.getOutputs():
# TODO count_by_iter += 1
# TODO assert len(out.getName()) > 0
# TODO assert count_by_iter == 32
def test_connecting_and_iterate_inputs(self):
m = osim.Model()
b = osim.Body('b1', 2.0, osim.Vec3(1, 0, 0), osim.Inertia(1))
j = osim.PinJoint('pin', m.getGround(), b)
# Source.
source = osim.TableSource()
source.setName("source")
table = osim.TimeSeriesTable()
table.setColumnLabels(('col1', 'col2', 'col3', 'col4'))
row = osim.RowVector([1, 2, 3, 4])
table.appendRow(0.0, row)
row = osim.RowVector([2, 3, 4, 5])
table.appendRow(1.0, row)
source.setTable(table)
# Reporter.
rep = osim.ConsoleReporter()
rep.setName("rep")
m.addBody(b)
m.addJoint(j)
m.addComponent(source)
m.addComponent(rep)
# Connect.
# There are multiple ways to perform the connection, especially
# for reporters.
coord = j.get_coordinates(0)
rep.updInput('inputs').connect(coord.getOutput('value'))
rep.connectInput_inputs(coord.getOutput('speed'), 'spd')
rep.connectInput_inputs(
source.getOutput('column').getChannel('col1'))
rep.addToReport(
source.getOutput('column').getChannel('col2'), 'second_col')
s = m.initSystem()
# Access and iterate through AbstractInputs, using names.
expectedLabels = ['/jointset/pin/pin_coord_0|value', 'spd',
'/source|column:col1', 'second_col']
i = 0
for name in rep.getInputNames():
# Actually, there is only one input, which we connected to 4
# channels.
assert rep.getInput(name).getNumConnectees() == 4
for idx in range(4): # idx, not j: j already names the PinJoint above
assert (rep.getInput(name).getLabel(idx) == expectedLabels[idx])
i += 1
# Access concrete Input.
# Input value is column 2 at time 0.
assert (osim.InputDouble.safeDownCast(
rep.getInput('inputs')).getValue(s, 3) == 2.0)
def test_input_alias(self):
model_filename = 'test_input_alias.osim'
# This function creates and prints the model to a .osim file. We invoke
# this function below.
def print_model():
model = osim.Model()
model.setName('model')
# Create a body with name 'body', mass of 1 kg, center of mass at
# the origin of the body, and unit inertia
# (Ixx = Iyy = Izz = 1 kg-m^2).
body = osim.Body('body', 1.0, osim.Vec3(0), osim.Inertia(1))
# Create a free joint (all 6 degrees of freedom) with Ground as
# the parent body and 'body' as the child body.
joint = osim.FreeJoint('joint', model.getGround(), body)
# Add the body and joint to the model.
model.addComponent(body)
model.addComponent(joint)
# Create a TableReporter to save quantities to a file after
# simulating.
reporter = osim.TableReporterVec3()
reporter.setName('reporter')
reporter.set_report_time_interval(0.1)
reporter.addToReport(model.getOutput('com_position'))
model.addComponent(reporter)
model.finalizeConnections()
reporter.getInput('inputs').setAlias(0, 'com_pos')
# Display what input-output connections look like in XML
# (in .osim files).
print("Reporter input-output connections in XML:\n" + \
reporter.dump())
model.printToXML(model_filename)
# Create and print the model file.
print_model()
# Load the model file.
deserialized_model = osim.Model(model_filename)
state = deserialized_model.initSystem()
# We can fetch the TableReporter from within the deserialized model.
reporter = osim.TableReporterVec3.safeDownCast(
deserialized_model.getComponent('reporter'))
assert reporter.getInput('inputs').getAlias(0) == 'com_pos'
|
{
"content_hash": "5a8cad58478370b67d3415da226be1c9",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 94,
"avg_line_length": 37.014134275618375,
"alnum_prop": 0.6051551312649165,
"repo_name": "opensim-org/opensim-core",
"id": "feccd8d45044098c1b529c1a98404946481a3df6",
"size": "10475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bindings/Python/tests/test_sockets_inputs_outputs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2463647"
},
{
"name": "C++",
"bytes": "14727896"
},
{
"name": "CMake",
"bytes": "284589"
},
{
"name": "HTML",
"bytes": "230"
},
{
"name": "Java",
"bytes": "81560"
},
{
"name": "MATLAB",
"bytes": "576488"
},
{
"name": "Python",
"bytes": "320084"
},
{
"name": "SWIG",
"bytes": "155144"
},
{
"name": "Shell",
"bytes": "862"
},
{
"name": "Yacc",
"bytes": "19078"
}
],
"symlink_target": ""
}
|
"""
Canada-specific Form helpers
"""
from django.newforms import ValidationError
from django.newforms.fields import Field, RegexField, Select, EMPTY_VALUES
from django.newforms.util import smart_unicode
from django.utils.translation import gettext, ugettext
import re
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
sin_re = re.compile(r"^(\d{3})-(\d{3})-(\d{3})$")
class CAPostalCodeField(RegexField):
"""Canadian postal code field."""
default_error_messages = {
'invalid': gettext(u'Enter a postal code in the format XXX XXX.'),
}
def __init__(self, *args, **kwargs):
super(CAPostalCodeField, self).__init__(r'^[ABCEGHJKLMNPRSTVXYZ]\d[A-Z] \d[A-Z]\d$',
max_length=None, min_length=None, *args, **kwargs)
class CAPhoneNumberField(Field):
"""Canadian phone number field."""
default_error_messages = {
'invalid': u'Phone numbers must be in XXX-XXX-XXXX format.',
}
def clean(self, value):
"""Validate a phone number.
"""
super(CAPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub(r'(\(|\)|\s+)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
raise ValidationError(self.error_messages['invalid'])
class CAProvinceField(Field):
"""
A form field that validates its input is a Canadian province name or abbreviation.
It normalizes the input to the standard two-letter postal service
abbreviation for the given province.
"""
default_error_messages = {
'invalid': u'Enter a Canadian province or territory.',
}
def clean(self, value):
from ca_provinces import PROVINCES_NORMALIZED
super(CAProvinceField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
try:
return PROVINCES_NORMALIZED[value].decode('ascii') # value is already stripped and lowercased above
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class CAProvinceSelect(Select):
"""
A Select widget that uses a list of Canadian provinces and
territories as its choices.
"""
def __init__(self, attrs=None):
from ca_provinces import PROVINCE_CHOICES # relative import
super(CAProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class CASocialInsuranceNumberField(Field):
"""
A Canadian Social Insurance Number (SIN).
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XXX-XXX format.
* Passes the check digit process "Luhn Algorithm"
See: http://en.wikipedia.org/wiki/Social_Insurance_Number
"""
default_error_messages = {
'invalid': ugettext('Enter a valid Canadian Social Insurance number in XXX-XXX-XXX format.'),
}
def clean(self, value):
super(CASocialInsuranceNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = re.match(sin_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
number = u'%s-%s-%s' % (match.group(1), match.group(2), match.group(3))
check_number = u'%s%s%s' % (match.group(1), match.group(2), match.group(3))
if not self.luhn_checksum_is_valid(check_number):
raise ValidationError(self.error_messages['invalid'])
return number
def luhn_checksum_is_valid(self, number):
"""
Checks to make sure that the SIN passes a luhn mod-10 checksum
See: http://en.wikipedia.org/wiki/Luhn_algorithm
"""
sum = 0
num_digits = len(number)
oddeven = num_digits & 1
for count in range(0, num_digits):
digit = int(number[count])
if not (( count & 1 ) ^ oddeven ):
digit = digit * 2
if digit > 9:
digit = digit - 9
sum = sum + digit
return ( (sum % 10) == 0 )
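# Illustration only (not part of the form field): the same Luhn mod-10 check
# as a standalone function. 046454286 passes the checksum; it is the example
# number used in the Wikipedia article referenced above.
def _luhn_is_valid(digits):
    total = 0
    for i, ch in enumerate(reversed(digits)):
        d = int(ch)
        if i % 2 == 1:  # double every second digit, counting from the right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0
# e.g. _luhn_is_valid('046454286') -> True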
|
{
"content_hash": "254a818751c4fdec09e0cabec09444cb",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 101,
"avg_line_length": 33.73015873015873,
"alnum_prop": 0.6035294117647059,
"repo_name": "diofeher/django-nfa",
"id": "b40dba833577bbf673466015aacf508455e0443d",
"size": "4250",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib/django/contrib/localflavor/ca/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "66105"
},
{
"name": "Python",
"bytes": "5174003"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name='heimdall',
packages=['heimdall'],
version='0.0.6',
description='A library for taking website screenshots, whilst emulating various devices and resolutions.',
author='Distilled R&D',
author_email='tom.anthony@distilled.net',
url='https://github.com/DistilledLtd/heimdall',
download_url='https://github.com/DistilledLtd/heimdall/tarball/0.0.6',
package_data={'': ['take_screenshot.js', 'optparse.js']},
keywords=['screenshot', 'phantomjs'],
classifiers=[],
install_requires=[
'Pillow',
'wheel'
]
)
|
{
"content_hash": "508bf7c5b846e901d57ab0f2575bc18b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 110,
"avg_line_length": 32.1578947368421,
"alnum_prop": 0.6595744680851063,
"repo_name": "DistilledLtd/heimdall",
"id": "cb07ebf81663389ef6f22a7a8f5f3a3e877ef10b",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "15549"
},
{
"name": "Python",
"bytes": "9142"
}
],
"symlink_target": ""
}
|
"""
Support for the OpenWeatherMap (OWM) service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.openweathermap/
"""
import logging
from datetime import timedelta
from homeassistant.const import CONF_API_KEY, TEMP_CELCIUS, TEMP_FAHRENHEIT
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['pyowm==2.3.0']
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
'weather': ['Condition', None],
'temperature': ['Temperature', None],
'wind_speed': ['Wind speed', 'm/s'],
'humidity': ['Humidity', '%'],
'pressure': ['Pressure', 'mbar'],
'clouds': ['Cloud coverage', '%'],
'rain': ['Rain', 'mm'],
'snow': ['Snow', 'mm']
}
# Return cached results if last scan was less than this time ago.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the OpenWeatherMap sensor."""
if None in (hass.config.latitude, hass.config.longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return False
from pyowm import OWM
SENSOR_TYPES['temperature'][1] = hass.config.temperature_unit
unit = hass.config.temperature_unit
forecast = config.get('forecast', 0)
owm = OWM(config.get(CONF_API_KEY, None))
if not owm:
_LOGGER.error(
"Connection error "
"Please check your settings for OpenWeatherMap.")
return False
data = WeatherData(owm, forecast, hass.config.latitude,
hass.config.longitude)
dev = []
try:
for variable in config['monitored_conditions']:
if variable not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', variable)
else:
dev.append(OpenWeatherMapSensor(data, variable, unit))
except KeyError:
pass
if forecast == 1:
SENSOR_TYPES['forecast'] = ['Forecast', None]
dev.append(OpenWeatherMapSensor(data, 'forecast', unit))
add_devices(dev)
# pylint: disable=too-few-public-methods
class OpenWeatherMapSensor(Entity):
"""Implementation of an OpenWeatherMap sensor."""
def __init__(self, weather_data, sensor_type, temp_unit):
"""Initialize the sensor."""
self.client_name = 'Weather'
self._name = SENSOR_TYPES[sensor_type][0]
self.owa_client = weather_data
self.temp_unit = temp_unit
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
# pylint: disable=too-many-branches
def update(self):
"""Get the latest data from OWM and updates the states."""
self.owa_client.update()
data = self.owa_client.data
fc_data = self.owa_client.fc_data
if self.type == 'weather':
self._state = data.get_detailed_status()
elif self.type == 'temperature':
if self.temp_unit == TEMP_CELCIUS:
self._state = round(data.get_temperature('celsius')['temp'],
1)
elif self.temp_unit == TEMP_FAHRENHEIT:
self._state = round(data.get_temperature('fahrenheit')['temp'],
1)
else:
self._state = round(data.get_temperature()['temp'], 1)
elif self.type == 'wind_speed':
self._state = data.get_wind()['speed']
elif self.type == 'humidity':
self._state = data.get_humidity()
elif self.type == 'pressure':
self._state = round(data.get_pressure()['press'], 0)
elif self.type == 'clouds':
self._state = data.get_clouds()
elif self.type == 'rain':
if data.get_rain():
self._state = round(data.get_rain()['3h'], 0)
self._unit_of_measurement = 'mm'
else:
self._state = 'not raining'
self._unit_of_measurement = ''
elif self.type == 'snow':
if data.get_snow():
self._state = round(data.get_snow(), 0)
self._unit_of_measurement = 'mm'
else:
self._state = 'not snowing'
self._unit_of_measurement = ''
elif self.type == 'forecast':
self._state = fc_data.get_weathers()[0].get_status()
class WeatherData(object):
"""Get the latest data from OpenWeatherMap."""
def __init__(self, owm, forecast, latitude, longitude):
"""Initialize the data object."""
self.owm = owm
self.forecast = forecast
self.latitude = latitude
self.longitude = longitude
self.data = None
self.fc_data = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from OpenWeatherMap."""
obs = self.owm.weather_at_coords(self.latitude, self.longitude)
if obs is None:
_LOGGER.warning('Failed to fetch data from OWM')
return
self.data = obs.get_weather()
if self.forecast == 1:
obs = self.owm.three_hours_forecast_at_coords(self.latitude,
self.longitude)
self.fc_data = obs.get_forecast()
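# Illustration only (not homeassistant.util.Throttle): a minimal sketch of how
# a throttling decorator skips calls that arrive within the configured
# interval, which is how MIN_TIME_BETWEEN_UPDATES above keeps update() from
# querying the OWM API on every state refresh.
import time
def _simple_throttle(min_interval_seconds):
    def decorator(func):
        last_call = [None]  # mutable cell so the wrapper can update it
        def wrapper(*args, **kwargs):
            now = time.monotonic()
            if last_call[0] is not None and now - last_call[0] < min_interval_seconds:
                return None  # too soon: skip; the caller keeps its cached state
            last_call[0] = now
            return func(*args, **kwargs)
        return wrapper
    return decorator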
|
{
"content_hash": "510458736242ba179cd9ef584cfc6248",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 34.68452380952381,
"alnum_prop": 0.5793718894800068,
"repo_name": "aoakeson/home-assistant",
"id": "c42519733bab81328420f5f9d800c4d57fdf6baf",
"size": "5827",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/openweathermap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1510170"
},
{
"name": "Python",
"bytes": "1994353"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
}
|
import ingredients # import definitions from the ingredients.py file
#whatarewegoingtoeat.py
#def randomhoofdgerecht():
# jsonfile = "hoofdgerecht.json"
###
# return random.choice(data)
d = ingredients.ingredients # links to the string from the my.data file
#def main():
# random_hoofd = randomhoofdgerecht()
#while True:
print "menu 1"
print "voorgerecht: %s" % (d ["voorgerecht"])
print "hoofdgerecht: %s" % (d ["hoofdgerecht"])
print "nagerecht: %s" % (d ["nagerecht"])
print "drinken: %s" % (d ["drinken"])
print "menu 2"
print "voorgerecht: %s" % (d ["voorgerecht"])
print "hoofdgerecht: %s" % (d ["hoofdgerecht"])
print "nagerecht: %s" % (d ["nagerecht"])
print "drinken: %s" % (d ["drinken"])
print "menu 3"
print "voorgerecht: %s" % (d ["voorgerecht"])
print "hoofdgerecht: %s" % (d ["hoofdgerecht"])
print "nagerecht: %s" % (d ["nagerecht"])
print "drinken: %s" % (d ["drinken"])
#if __name__== '__main__':
# main()
|
{
"content_hash": "56c3f1d2b213d9339c869342ed59e6ee",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 65,
"avg_line_length": 27.818181818181817,
"alnum_prop": 0.6492374727668845,
"repo_name": "ArtezGDA/text-IO",
"id": "59cddf3e5c8c4547bb4288b227f8897d26c7448a",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Max/IO_Tool/recipe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "312828"
},
{
"name": "JavaScript",
"bytes": "6634"
},
{
"name": "Python",
"bytes": "2718648"
},
{
"name": "Ruby",
"bytes": "846066"
},
{
"name": "Shell",
"bytes": "613"
}
],
"symlink_target": ""
}
|
import inspect
import os
import sys
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
currentDir = os.environ["LIBMOTS_TEST_DIR"]
sys.path.insert(0, currentDir)
import helpers
calledScript = inspect.getfile(inspect.currentframe())
device = helpers.getDevice()
helpers.createSharedElement(device, True)
# One Back button press to hide the keyboard and one to leave the application
helpers.pressBack(device)
helpers.pressBack(device)
result = device.takeSnapshot().getSubImage(helpers.portraitRect)
helpers.checkResult(result, currentDir, calledScript)
|
{
"content_hash": "640f3cb994645ecfb6e4b2206d463490",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 26,
"alnum_prop": 0.8129370629370629,
"repo_name": "wlawski/libmots",
"id": "a9c407ccea1f4b631255e8ba27f44cf89fc50eb2",
"size": "1878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/monkeyrunner-tests/t0006-shared-elem-back.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "148534"
},
{
"name": "Python",
"bytes": "85672"
},
{
"name": "Shell",
"bytes": "5590"
}
],
"symlink_target": ""
}
|
"""
Created by 捡龙眼
3/11/2016
"""
from __future__ import unicode_literals, print_function, absolute_import
import signal
import tornado.ioloop
import public.tcp_server
import public.global_manager
import public.simple_log
import public.tcp_client
import public.connect_data
import config
import root_logic.handle_server_process
class RootServerConnect(public.tcp_server.ServerConnect):
def handle_process(self, data):
root_logic.handle_server_process.handle_process(self, data)
def on_close_callback(self, data):
print(self.get_address_flag(), "on_connect_close_process", len(data))
class RootTcpServer(public.tcp_server.SimpleTcpServer):
def create_server(self, stream, address):
connect = RootServerConnect(stream, address)
return connect
def handle_terminal(signum=0, e=0):
print("handle_terminal", signum, e)
tornado.ioloop.IOLoop.instance().stop()
public.global_manager.clear_thread()
print("handle_terminal end")
def main():
print(config.VERSION, config.SERVER_NAME, config.ROOT_SERVER_PORT)
public.simple_log.initialize(config.LOG_PATH, config.SERVER_NAME)
signal.signal(signal.SIGINT, handle_terminal)
signal.signal(signal.SIGTERM, handle_terminal)
io_loop = tornado.ioloop.IOLoop.instance()
public.tcp_server.initialize(io_loop)
public.connect_data.initialize(config.CLIENT_TYPE_LIST)
tcp_server = RootTcpServer()
tcp_server.listen(config.ROOT_SERVER_PORT)
io_loop.start()
if __name__ == "__main__":
main()
|
{
"content_hash": "93c62db8ba428a2680c9aaabe8988b51",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 29.0188679245283,
"alnum_prop": 0.7249674902470741,
"repo_name": "yanjianlong/server_cluster",
"id": "ecf14ea662e41e0eed8ca35d97e4ca67fbd458bc",
"size": "1559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "root_server/root_server.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "242"
},
{
"name": "HTML",
"bytes": "38418"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "90398"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
}
|
import networkx as nx
from collections import deque
from itertools import chain, islice, combinations
from ClusterUtility import ClusterUtility
class KCliquePercolation(object):
"""This is a class for graph clustering based on k-clique percolation [Farkas2007]_.
The procedure finds all k-cliques. K-cliques that percolate (i.e., overlap in at least k-1 nodes) are merged into a cluster.
Unnecessary edges are then removed. The use of this method for event log clustering
was presented in [Studiawan2016b]_.
References
----------
.. [Farkas2007] I. J. Farkas, D. Abel, G. Palla, and T. Vicsek, Weighted network modules,
New Journal of Physics, 9(6), p. 180, 2007.
.. [Studiawan2016b] H. Studiawan, B. A. Pratomo, and R. Anggoro, Clustering of SSH brute-force attack logs using
k-clique percolation, in Proceedings of the 10th International Conference on Information
Communication Technology and Systems, pp. 33-36, 2016.
"""
def __init__(self, graph, edges_weight, nodes_id):
"""This is a constructor for class KCliquePercolation.
Parameters
----------
graph : graph
Graph to be clustered.
edges_weight : list[tuple]
List of tuple containing (node1, node2, cosine similarity between these two).
nodes_id : list
List of all node identifier.
Notes
-----
removed_edges : list[tuple]
List of tuple containing edge from (node1, node2).
percolated_nodes : list
List of all percolated nodes.
clique_percolation : dict[frozenset]
Dictionary of nodes in each cluster in frozenset.
"""
self.graph = graph
self.edges_weight = edges_weight
self.nodes_id = nodes_id
self.g = None
self.percolated_nodes = []
self.removed_edges = []
self.clique_percolation = {}
self.cliques = []
def init_kclique_percolation(self, k):
self._build_temp_graph()
kcliques = self._find_kcliques(k)
self.cliques = kcliques
def get_kclique_percolation(self, k):
"""This is the main method to call all k-clique percolation clustering.
Parameters
----------
k : int
Number of percolation or intersection between an individual clique.
Returns
-------
clusters : dict[list]
List of list containing nodes identifier for each cluster.
"""
self._get_percolation_graph(self.cliques, k)
self._remove_outcluster()
clusters = self._get_clusters()
return clusters
def _find_kcliques(self, k):
"""Find all k-cliques in a graph.
Returns
-------
kcliques : list[frozenset]
List of the k-cliques found, restricted to the user-specified k. Each frozenset contains node identifiers.
"""
k_cliques = list(self._enumerate_all_cliques())
kcliques = [frozenset(clique) for clique in k_cliques if len(clique) == k]
return kcliques
def _build_temp_graph(self):
"""Build a temporary graph to get a percolation between individual k-clique.
"""
self.g = nx.Graph()
self.g.add_nodes_from(self.nodes_id)
self.g.add_weighted_edges_from(self.edges_weight)
def _enumerate_all_cliques(self):
"""Returns all cliques in an undirected graph.
This method returns cliques of size (cardinality)
k = 1, 2, 3, ..., maxDegree - 1, where maxDegree is the maximal
degree of any node in the graph.
Returns
-------
generator of lists: generator of list for each clique.
Notes
-----
Based on the algorithm published by Zhang et al. (2005) [Zhang2005]_
and adapted to output all cliques discovered.
This algorithm is not applicable on directed graphs.
This algorithm ignores self-loops and parallel edges as
clique is not conventionally defined with such edges.
There are often many cliques in graphs.
This algorithm however, hopefully, does not run out of memory
since it only keeps candidate sublists in memory and
continuously removes exhausted sublists.
The original source code is taken from NetworkX development branch [Schult2016]_.
References
----------
.. [Zhang2005] Yun Zhang, Abu-Khzam, F.N., Baldwin, N.E., Chesler, E.J.,
Langston, M.A., Samatova, N.F.,
Genome-Scale Computational Approaches to Memory-Intensive
Applications in Systems Biology.
Supercomputing, 2005. Proceedings of the ACM/IEEE SC 2005
Conference, pp. 12, 12-18 Nov. 2005.
doi: 10.1109/SC.2005.29.
http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129
.. [Schult2016] Dan Schult, Source code for networkx.algorithms.clique.
https://networkx.github.io/documentation/development/_modules/networkx/algorithms/clique.html
"""
index = {}
nbrs = {}
for u in self.g:
index[u] = len(index)
# Neighbors of u that appear after u in the iteration order of G.
nbrs[u] = {v for v in self.g[u] if v not in index}
queue = deque(([u], sorted(nbrs[u], key=index.__getitem__)) for u in self.g)
# Loop invariants:
# 1. len(base) is nondecreasing.
# 2. (base + cnbrs) is sorted with respect to the iteration order of G.
# 3. cnbrs is a set of common neighbors of nodes in base.
while queue:
base, cnbrs = map(list, queue.popleft())
yield base
for i, u in enumerate(cnbrs):
# Use generators to reduce memory consumption.
queue.append((chain(base, [u]),
filter(nbrs[u].__contains__,
islice(cnbrs, i + 1, None))))
def _get_percolation_graph(self, kcliques, k):
"""Get percolation graph.
This temporary graph is known as the percolation graph in the literature. A node represents a k-clique
and an edge is drawn if two k-cliques share at least k-1 nodes.
Parameters
----------
kcliques : list[frozenset]
List of all k-cliques found with user-specified k.
k : int
Number of percolation.
"""
percolation_graph = nx.Graph()
percolation_graph.add_nodes_from(kcliques)
# Add an edge in the percolation graph for each pair of cliques that percolate
for clique1, clique2 in combinations(kcliques, 2):
percolation = clique1.intersection(clique2)
self.percolated_nodes.append(percolation)
if len(percolation) >= (k - 1):
percolation_graph.add_edge(clique1, clique2)
# Get all connected component in percolation graph
cluster_id = 0
for component in nx.connected_components(percolation_graph):
for c in component:
self.clique_percolation[cluster_id] = c
cluster_id += 1
# set cluster id
ClusterUtility.set_cluster_id(self.graph, self.clique_percolation)
def _remove_outcluster(self):
"""Remove edges that connect to other clusters.
This method inspects the edges of every cluster member. If an edge connects to a node that does not belong to
the current cluster, the edge is removed.
"""
# remove edge outside cluster
for node in self.g.nodes_iter(data=True):
neighbors = self.g.neighbors(node[0])
for neighbor in neighbors:
# if cluster id of current node is not the same of the connecting node
if self.graph.node[node[0]]['cluster'] != self.graph.node[neighbor]['cluster']:
try:
self.g.remove_edge(node[0], neighbor)
except nx.exception.NetworkXError:
pass
self.removed_edges.append((node[0], neighbor))
def _get_clusters(self):
"""Get final result of the k-clique percolation clustering.
Returns
-------
clusters : dict[list]
Dictionary of list containing nodes identifier for each cluster.
"""
clusters = {}
cluster_id = 0
for components in nx.connected_components(self.g):
clusters[cluster_id] = components
cluster_id += 1
# refine cluster id
ClusterUtility.set_cluster_id(self.graph, clusters)
return clusters
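# Illustration only (not part of pygraphc): the same percolation idea via
# NetworkX's built-in community function (networkx >= 2.x import path shown).
# Two triangles sharing an edge overlap in k - 1 = 2 nodes, so they percolate
# into a single community; the isolated edge (5, 6) yields none.
def _demo_kclique_percolation():
    from networkx.algorithms.community import k_clique_communities
    g = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4), (5, 6)])
    communities = list(k_clique_communities(g, 3))
    assert frozenset([1, 2, 3, 4]) in communities and len(communities) == 1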
class KCliquePercolationWeighted(KCliquePercolation):
"""This a class derived from KCliquePercolation for the case of weighted graph.
"""
def __init__(self, graph, edges_weight, nodes_id):
"""This is the constructor for class KCliquePercolationWeighted.
Parameters
----------
graph : graph
A graph to be processed for its cluster.
edges_weight : list[tuple]
List of tuple containing (node1, node2, cosine similarity between these two).
nodes_id : list
List of all node identifier.
"""
super(KCliquePercolationWeighted, self).__init__(graph, edges_weight, nodes_id)
def get_kclique_percolation_weighted(self, k, threshold):
"""Main method of weighted k-clique percolation. The weight of k-clique is calculated based on
the geometric mean of its weights.
Parameters
----------
k : int
Number of percolation.
threshold : float
Threshold for the geometric mean.
Notes
-------
weighted_kcliques : list[frozenset]
List of frozenset containing nodes identifier for each weighted k-clique found.
"""
weighted_kcliques = ClusterUtility.get_weighted_cliques(self.graph, self.cliques, threshold)
self._get_percolation_graph(weighted_kcliques, k)
self._remove_outcluster()
clusters = self._get_clusters()
return clusters
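# Illustration only (an assumption about what ClusterUtility.get_weighted_cliques
# does, not its actual implementation): keep a clique when the geometric mean of
# its internal edge weights exceeds the threshold. Assumes strictly positive
# 'weight' attributes on the graph edges; combinations is imported at the top
# of this module, and every pair within a clique is an edge by definition.
def _geometric_mean_filter(graph, cliques, threshold):
    import math
    kept = []
    for clique in cliques:
        weights = [graph[u][v]['weight'] for u, v in combinations(clique, 2)]
        gmean = math.exp(sum(math.log(w) for w in weights) / len(weights))
        if gmean > threshold:
            kept.append(clique)
    return kept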
|
{
"content_hash": "e80f0bf9ef86c03e9646b3fadbc1d6fe",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 117,
"avg_line_length": 39.68181818181818,
"alnum_prop": 0.5927835051546392,
"repo_name": "studiawan/pygraphc",
"id": "969947d6f6e186e2ceb1305616a3f0b8aac6b0da",
"size": "10476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygraphc/clustering/KCliquePercolation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "469965"
}
],
"symlink_target": ""
}
|
from ._version import version as __version__
|
{
"content_hash": "52220eb4aa5d6294fbad6b795e2f884d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 23,
"alnum_prop": 0.717391304347826,
"repo_name": "flit/cmdis",
"id": "55fcb61e68fbf4535eed62ce379fcd83b3f93d63",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmdis/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "198963"
}
],
"symlink_target": ""
}
|
"""The testing directory contains a small set of imaging files to be
used for doctests only. More thorough tests and example data will be
stored in nipy data packages that you can download separately.
.. note::
We use the ``nose`` testing framework for tests.
Nose is a dependency for the tests, but should not be a dependency
for running the algorithms in the NIPY library. This file should
import without nose being present on the python path.
"""
import os
# Discover directory path
filepath = os.path.abspath(__file__)
basedir = os.path.dirname(filepath)
funcfile = os.path.join(basedir, 'data', 'functional.nii')
anatfile = os.path.join(basedir, 'data', 'structural.nii')
template = funcfile
transfm = funcfile
from nose.tools import *
from numpy.testing import *
from . import decorators as dec
from .utils import skip_if_no_package, package_check
skipif = dec.skipif
def example_data(infile='functional.nii'):
"""returns path to empty example data files for doc tests
it will raise an exception if filename is not in the directory"""
filepath = os.path.abspath(__file__)
basedir = os.path.dirname(filepath)
outfile = os.path.join(basedir, 'data', infile)
if not os.path.exists(outfile):
raise IOError('%s empty data file does NOT exist' % outfile)
return outfile
|
{
"content_hash": "849bfa38977e2776f738c0bc94e7f0e6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 69,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.7273408239700374,
"repo_name": "iglpdc/nipype",
"id": "9a4848fb50d6e9538054b47a75b1282eb2656723",
"size": "1449",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nipype/testing/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2106"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "4458175"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import glob
import os
import shutil
from unittest.mock import patch
import pytest
import settings
settings_dir = 'fortest/server1'
default_config = {'config': 'default'}
dev_config = {'config': 'dev'}
prod_config = {'config': 'prod'}
site_config = {'config': 'site'}
repo_dir = '/tmp/settings-repo'
git_settings_subdir = repo_dir + '/myapp1'
def setup_module():
cmds = ['mkdir -p %s' % git_settings_subdir,
'git init %s' % repo_dir,
'echo "PROD = True" > %s/prod_settings.py' % git_settings_subdir,
'echo "PROD = False" > %s/dev_settings.py' % git_settings_subdir
]
for cmd in cmds:
ret = os.system(cmd)
if ret != 0:
raise Exception('failed: %s' % cmd)
def create_config_lines(config):
lines = []
for kv in config.items():
lines.append('%s = "%s"\n' % kv) # newline so multiple keys land on separate lines
return lines
def create_config_file(path, config):
open(path, 'w').writelines(create_config_lines(config))
def test_no_settings_dir():
assert settings.get('config') is None, settings.get('config')
create_config_file('default_settings.py', default_config)
settings.reload()
assert settings.get('config') == 'default', settings.get('config')
@patch.dict(os.environ, {'SETTINGS_DIR': settings_dir, 'APP_MODE': 'dev'}, clear=True)
def test_rc():
os.makedirs(settings_dir)
open(os.path.join(settings_dir, '__init__.py'), 'w').close()
open(os.path.join(settings_dir, '../', '__init__.py'), 'w').close()
config_path = os.path.join(settings_dir, 'default_settings.py')
create_config_file(config_path, default_config)
settings.reload()
assert settings.config == 'default'
config_path = os.path.join(settings_dir, 'dev_settings.py')
create_config_file(config_path, dev_config)
settings.reload()
assert settings.config == 'dev'
config_path = os.path.join(settings_dir, 'prod_settings.py')
create_config_file(config_path, prod_config)
settings.reload()
assert settings.config == 'dev'
config_path = os.path.join(settings_dir, 'site_settings.py')
create_config_file(config_path, site_config)
settings.reload()
assert settings.config == 'site'
def test_backward_compatibility():
from converge import settings
def test_env_vars():
config = {'SETTINGS_DIR': 'settings'}
os.environ['SETTINGS_DIR'] = 'settings/site1'
settings.parse_osenv(config)
assert config['SETTINGS_DIR'] == os.environ['SETTINGS_DIR']
os.environ['SETTINGS_DIR'] = 'settings/site2'
settings.parse_osenv(config)
assert config['SETTINGS_DIR'] == os.environ['SETTINGS_DIR']
@patch.dict(os.environ,
{'SETTINGS_DIR': settings_dir,
'APP_MODE': 'prod',
'GIT_SETTINGS_REPO': repo_dir,
'GIT_SETTINGS_SUBDIR': git_settings_subdir,
'PATH': os.environ['PATH']},
clear=True)
def test_git_settings():
settings.reload()
assert settings.PROD is True
def teardown_module():
py_path = 'default_settings.py'
pyc_path = py_path + 'c'
for path in (py_path, pyc_path):
if os.path.exists(path):
os.remove(path)
if glob.glob(os.path.join(settings_dir, '__init__.py')): # playing safe
shutil.rmtree(settings_dir)
if repo_dir.startswith('/tmp'): # playing safe
shutil.rmtree(repo_dir)
def test_rc_file_deprecated():
convergerc = '.convergerc'
    open(convergerc, 'w').close()
try:
with pytest.raises(Exception):
settings.reload()
finally:
os.remove(convergerc)
|
{
"content_hash": "2036fe7b95be7dfe344ce617b48eccaa",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 86,
"avg_line_length": 28.317460317460316,
"alnum_prop": 0.6303251121076233,
"repo_name": "shon/converge",
"id": "6a066019d3f380c14f861fc8649683fd87210ba2",
"size": "3568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7229"
}
],
"symlink_target": ""
}
|
import datetime
import hashlib
import random
import re
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.timezone import now as datetime_now
except ImportError:
datetime_now = datetime.datetime.now
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
This requires the ``User`` model to have username, email,
and password fields. If you are using a custom ``User`` model
in Django 1.5, create a ``User`` manually in your registration view,
being sure to set ``is_active = False``, then use the ``create_profile``
method instead of ``create_inactive_user``. If you wish to send an
activation email, you can call ``send_activation_email`` on the
``RegistrationProfile`` object returned by ``create_profile``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user = get_user_model().objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.commit_on_success(create_inactive_user)
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
username = user.get_full_name()
if isinstance(username, unicode):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt+username).hexdigest()
return self.create(user=user,
activation_key=activation_key)
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
try:
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
profile.delete()
except get_user_model().DoesNotExist:
profile.delete()
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(settings.AUTH_USER_MODEL, unique=True,
verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __unicode__(self):
return u"Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= datetime_now())
activation_key_expired.boolean = True
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('registration/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
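# Illustrative end-to-end flow (not part of this module); assumes ``site`` is
# a Site or RequestSite instance and ``key`` arrived via the emailed link:
#
#     new_user = RegistrationProfile.objects.create_inactive_user(
#         'alice', 'alice@example.com', 's3cret', site)
#     user_or_false = RegistrationProfile.objects.activate_user(key)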
|
{
"content_hash": "04f5c1bc9bf79beb7077c8d02e5775b5",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 83,
"avg_line_length": 40.2043795620438,
"alnum_prop": 0.6216412490922295,
"repo_name": "ildarsamit/django-registration",
"id": "54d9e1086501da2b3ed86e342c2d52353d9c6153",
"size": "11016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registration/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "75909"
},
{
"name": "Shell",
"bytes": "2985"
}
],
"symlink_target": ""
}
|
"""Test the dumpwallet RPC."""
import datetime
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
def read_dump(file_name, addrs, script_addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_comments = []
found_legacy_addr = 0
found_p2sh_segwit_addr = 0
found_bech32_addr = 0
found_script_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
line = line.strip()
if not line:
continue
if line[0] == '#':
found_comments.append(line)
else:
# split out some data
key_date_label, comment = line.split("#")
key_date_label = key_date_label.split(" ")
# key = key_date_label[0]
date = key_date_label[1]
keytype = key_date_label[2]
imported_key = date == '1970-01-01T00:00:01Z'
if imported_key:
# Imported keys have multiple addresses, no label (keypath) and timestamp
# Skip them
continue
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdseed=1":
# ensure the old master is still available
assert hd_master_addr_old == addr
elif keytype == "hdseed=1":
# ensure we have generated a new hd master key
assert hd_master_addr_old != addr
hd_master_addr_ret = addr
elif keytype == "script=1":
# scripts don't have keypaths
keypath = None
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr.split(",")[0] and addrObj['hdkeypath'] == keypath and keytype == "label=":
if addr.startswith('q') and not addr.startswith('qcrt'):
# P2PKH address
found_legacy_addr += 1
elif addr.startswith('m'):
# P2SH-segwit address
found_p2sh_segwit_addr += 1
elif addr.startswith('qcrt'):
found_bech32_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
# count scripts
for script_addr in script_addrs:
if script_addr == addr.rstrip() and keytype == "script=1":
found_script_addr += 1
break
return found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-keypool=90", "-addresstype=legacy"]]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
def run_test(self):
self.nodes[0].createwallet("dump")
wallet_unenc_dump = os.path.join(self.nodes[0].datadir, "wallet.unencrypted.dump")
wallet_enc_dump = os.path.join(self.nodes[0].datadir, "wallet.encrypted.dump")
# generate 30 addresses to compare against the dump
# - 10 legacy P2PKH
# - 10 P2SH-segwit
# - 10 bech32
test_addr_count = 10
addrs = []
for address_type in ['legacy', 'p2sh-segwit', 'bech32']:
for _ in range(test_addr_count):
addr = self.nodes[0].getnewaddress(address_type=address_type)
vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath
addrs.append(vaddr)
# Test scripts dump by adding a 1-of-1 multisig address
multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"]
# Refill the keypool. getnewaddress() refills the keypool *before* taking a key from
# the keypool, so the final call to getnewaddress leaves the keypool with one key below
# its capacity
self.nodes[0].keypoolrefill()
self.log.info('Mine a block one second before the wallet is dumped')
dump_time = int(time.time())
self.nodes[0].setmocktime(dump_time - 1)
self.nodes[0].generate(1)
self.nodes[0].setmocktime(dump_time)
dump_time_str = '# * Created on {}Z'.format(
datetime.datetime.fromtimestamp(
dump_time,
tz=datetime.timezone.utc,
).replace(tzinfo=None).isoformat())
dump_best_block_1 = '# * Best block at time of backup was {} ({}),'.format(
self.nodes[0].getblockcount(),
self.nodes[0].getbestblockhash(),
)
dump_best_block_2 = '# mined on {}Z'.format(
datetime.datetime.fromtimestamp(
dump_time - 1,
tz=datetime.timezone.utc,
).replace(tzinfo=None).isoformat())
self.log.info('Dump unencrypted wallet')
result = self.nodes[0].dumpwallet(wallet_unenc_dump)
assert_equal(result['filename'], wallet_unenc_dump)
found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(wallet_unenc_dump, addrs, [multisig_addr], None)
assert '# End of dump' in found_comments # Check that file is not corrupt
assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on')))
assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block')))
assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on')))
assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_script_addr, 1) # all scripts must be in the dump
        assert_equal(found_addr_chg, 0)  # 0 blocks were mined
assert_equal(found_addr_rsv, 90 * 2) # 90 keys plus 100% internal keys
# encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
self.nodes[0].walletpassphrase('test', 100)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(wallet_enc_dump)
found_comments, found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \
read_dump(wallet_enc_dump, addrs, [multisig_addr], hd_master_addr_unenc)
assert '# End of dump' in found_comments # Check that file is not corrupt
assert_equal(dump_time_str, next(c for c in found_comments if c.startswith('# * Created on')))
assert_equal(dump_best_block_1, next(c for c in found_comments if c.startswith('# * Best block')))
assert_equal(dump_best_block_2, next(c for c in found_comments if c.startswith('# mined on')))
assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_script_addr, 1)
assert_equal(found_addr_chg, 90 * 2) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 90 * 2)
# Overwriting should fail
assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))
# Restart node with new wallet, and test importwallet
self.restart_node(0)
self.nodes[0].createwallet("w2")
# Make sure the address is not IsMine before import
result = self.nodes[0].getaddressinfo(multisig_addr)
assert not result['ismine']
self.nodes[0].importwallet(wallet_unenc_dump)
# Now check IsMine is true
result = self.nodes[0].getaddressinfo(multisig_addr)
assert result['ismine']
self.log.info('Check that wallet is flushed')
with self.nodes[0].assert_debug_log(['Flushing wallet.dat'], timeout=20):
self.nodes[0].getnewaddress()
# Overwriting should fail
assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))
# Make sure that dumpwallet doesn't have a lock order issue when there is an unconfirmed tx and it is reloaded
# See https://github.com/bitcoin/bitcoin/issues/22489
self.nodes[0].createwallet("w3")
w3 = self.nodes[0].get_wallet_rpc("w3")
w3.importprivkey(privkey=self.nodes[0].get_deterministic_priv_key().key, label="coinbase_import")
w3.sendtoaddress(w3.getnewaddress(), 10)
w3.unloadwallet()
self.nodes[0].loadwallet("w3")
w3.dumpwallet(os.path.join(self.nodes[0].datadir, "w3.dump"))
if __name__ == '__main__':
WalletDumpTest().main()
|
{
"content_hash": "b60639afe92fbee0ee3c3296ac837d67",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 162,
"avg_line_length": 45.648648648648646,
"alnum_prop": 0.5808170515097691,
"repo_name": "qtumproject/qtum",
"id": "0e173d8a6200b36757ca98f2c60c5f929bb4532c",
"size": "10348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/wallet_dump.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "138147"
},
{
"name": "Batchfile",
"bytes": "4488"
},
{
"name": "C",
"bytes": "1292347"
},
{
"name": "C++",
"bytes": "15628054"
},
{
"name": "CMake",
"bytes": "85710"
},
{
"name": "CSS",
"bytes": "111757"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "483"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "695"
},
{
"name": "M4",
"bytes": "229308"
},
{
"name": "Makefile",
"bytes": "165639"
},
{
"name": "Objective-C++",
"bytes": "5478"
},
{
"name": "Perl",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "3201664"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Scheme",
"bytes": "25953"
},
{
"name": "Shell",
"bytes": "710214"
}
],
"symlink_target": ""
}
|
from keras.layers import Input, Activation, SpatialDropout2D
from keras.layers.convolutional import Conv2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import plot_model
def init_CNN_block(input_tensor,
filters,
kernel_size=3,
scale_factor=2,
dropout_rate=0.2,
activation="relu"):
output_tensor = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=scale_factor,
padding="same")(input_tensor)
output_tensor = BatchNormalization()(output_tensor)
output_tensor = Activation(activation)(output_tensor)
output_tensor = SpatialDropout2D(dropout_rate)(output_tensor)
return output_tensor
def init_deCNN_block(input_tensor,
filters,
kernel_size=3,
scale_factor=2,
dropout_rate=0.2,
activation="relu"):
output_tensor = UpSampling2D(scale_factor)(input_tensor)
output_tensor = Conv2D(filters=filters,
kernel_size=kernel_size,
padding="same")(output_tensor)
if activation != "sigmoid":
output_tensor = BatchNormalization()(output_tensor)
output_tensor = Activation(activation)(output_tensor)
if activation != "sigmoid":
output_tensor = SpatialDropout2D(dropout_rate)(output_tensor)
return output_tensor
def init_model(image_size, filters_list, learning_rate,
model_structure_file_path):
# Retrieve input and output image shape
input_image_row_num, input_image_column_num = image_size
input_image_channel_num, output_image_channel_num = 3, 3
# Define the vanilla input tensor
vanilla_input_tensor = Input(shape=(input_image_row_num,
input_image_column_num,
input_image_channel_num))
# Get the output tensor from the encoder
current_output_tensor = vanilla_input_tensor
for layer_index, filters in enumerate(filters_list):
current_output_tensor = init_CNN_block(current_output_tensor, filters)
# Get the output tensor from the decoder
for layer_index, filters in enumerate(filters_list[:-1][::-1] +
[output_image_channel_num]):
if layer_index != len(filters_list) - 1:
current_output_tensor = init_deCNN_block(current_output_tensor,
filters)
else:
current_output_tensor = init_deCNN_block(current_output_tensor,
filters,
activation="sigmoid")
# Build up model
model = Model(vanilla_input_tensor, current_output_tensor)
model.compile(loss="mse", optimizer=Adam(lr=learning_rate))
model.summary()
plot_model(model,
to_file=model_structure_file_path,
show_shapes=True,
show_layer_names=True)
return model
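# Illustrative call (argument values are assumptions, not defaults):
#     model = init_model(image_size=(128, 128),
#                        filters_list=[32, 64, 128],
#                        learning_rate=1e-4,
#                        model_structure_file_path="model.png")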
|
{
"content_hash": "0798512988b30621a45284f9677866bf",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 78,
"avg_line_length": 41.30379746835443,
"alnum_prop": 0.5813668403309837,
"repo_name": "nixingyang/Miscellaneous-Projects",
"id": "a2a77e95ef6c29c722a59af668c322ac0d800db2",
"size": "3263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Deep Image Prior/load_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53957"
}
],
"symlink_target": ""
}
|
from sys import stdout
import pprint
import psycopg2
# Make a new Grouvie table to store all the plans
CREATE_GROUVIE = """
CREATE TABLE GROUVIE(
PHONE_NUMBER CHAR(11) NOT NULL,
LEADER CHAR(11) NOT NULL,
CREATION_DATETIME CHAR(19) NOT NULL,
DATE CHAR(10),
SHOWTIME CHAR(5),
FILM TEXT,
CINEMA TEXT,
ACCEPTED BOOLEAN,
PRIMARY KEY (PHONE_NUMBER, LEADER, CREATION_DATETIME)
)
"""
# Make a new User table to store all user data
CREATE_USERS = """
CREATE TABLE USERS(
PHONE_NUMBER CHAR(11) NOT NULL,
NAME TEXT NOT NULL,
POSTCODE TEXT NOT NULL,
LATITUDE NUMERIC(8, 6) NOT NULL,
LONGITUDE NUMERIC(9, 6) NOT NULL,
PRIMARY KEY (PHONE_NUMBER)
)
"""
# Delete the GROUVIE table
DROP_GROUVIE_TABLE = """
DROP TABLE GROUVIE
"""
# Delete the USERS table
DROP_USERS_TABLE = """
DROP TABLE USERS
"""
# Insert a new entry into the GROUVIE table
INSERT_GROUVIE = """
INSERT INTO GROUVIE
VALUES
(%s, %s, %s, %s, %s, %s, %s, %s)
"""
# Insert a new entry into the USERS table
INSERT_USERS = """
INSERT INTO USERS
VALUES
(%s, %s, %s, %s, %s)
"""
ACCEPT_PLAN = """
UPDATE GROUVIE
SET ACCEPTED = true
WHERE
PHONE_NUMBER = %s AND LEADER = %s AND CREATION_DATETIME = %s
"""
CONFIRM_PLAN = """
UPDATE GROUVIE
SET ACCEPTED = true
WHERE
LEADER = %s AND CREATION_DATETIME = %s AND PHONE_NUMBER = LEADER
"""
IS_PLAN_CONFIRMED = """
SELECT ACCEPTED FROM GROUVIE
WHERE
LEADER = %s AND CREATION_DATETIME = %s AND PHONE_NUMBER = LEADER
"""
# Update an already existing entry in the Grouvie table
UPDATE_GROUVIE = """
UPDATE GROUVIE
SET DATE = %s, SHOWTIME = %s, FILM = %s, CINEMA = %s
WHERE
PHONE_NUMBER = %s AND LEADER = %s AND CREATION_DATETIME = %s
"""
# Update an already existing entry in the USER table
UPDATE_USERS = """
UPDATE USERS
SET NAME = %s, POSTCODE = %s, LATITUDE = %s, LONGITUDE = %s
WHERE
PHONE_NUMBER = %s
"""
RESET_USER_PREFS = """
UPDATE GROUVIE
SET DATE = NULL, SHOWTIME = NULL, FILM = NULL, CINEMA = NULL, ACCEPTED = FALSE
WHERE LEADER = %s AND CREATION_DATETIME = %s AND (PHONE_NUMBER != LEADER)
"""
# Delete entry from a table given a phone_number, leader and showtime
DELETE_SINGLE = """
DELETE FROM GROUVIE
WHERE PHONE_NUMBER = %s and LEADER = %s and CREATION_DATETIME = %s
"""
# Delete entries from a table given a leader and showtime
DELETE_PLAN = """
DELETE FROM GROUVIE
WHERE LEADER = %s and CREATION_DATETIME = %s
"""
# Get group replies
GROUP_REPLIES = """
SELECT * FROM GROUVIE
WHERE
LEADER = %s AND CREATION_DATETIME = %s
"""
# Display everything in the Grouvie table
SELECT_ALL_GROUVIE = """
SELECT * FROM GROUVIE
"""
# Display everything in the USERS table
SELECT_ALL_USERS = """
SELECT * FROM USERS
"""
# Select a single entry from the Grouvie table based on phone_number
SELECT_GROUVIE = """
SELECT * FROM GROUVIE
WHERE
PHONE_NUMBER = %s
"""
# Select a single entry from the USERS table based on phone_number
SELECT_USERS = """
SELECT * FROM USERS
WHERE
PHONE_NUMBER = %s
"""
SELECT_VALID_USERS = """
SELECT PHONE_NUMBER, NAME FROM USERS
WHERE
PHONE_NUMBER IN {}
"""
GROUVIE = "GROUVIE"
USER = "USER"
class DBManager:
# Establish a new connection with the PostgreSQL database.
# We return the cursor so we can execute on the database, we return the
# connection so we can close it when we're done.
def establish_connection(self):
conn_str = "dbname='g1627137_u' user='g1627137_u'" \
"host='db.doc.ic.ac.uk' password='Vk426n3Kjx'"
try:
cnxn = psycopg2.connect(conn_str)
cnxn.autocommit = True
cursor = cnxn.cursor()
return cnxn, cursor
except Exception as e:
message = e.message + "\nFailed to establish connection. " \
"Check connection string."
exit(message)
# Close a connection to the database, kills the cursor and the connection.
def close_connection(self, cnxn, cursor):
try:
cursor.close()
cnxn.close()
except Exception as e:
message = e.message + "\nFailed to close connection."
exit(message)
# Make a new Grouvie table.
def make_grouvie_table(self):
cnxn, cursor = self.establish_connection()
cursor.execute(CREATE_GROUVIE)
self.close_connection(cnxn, cursor)
# Make a new Users table.
def make_user_table(self):
cnxn, cursor = self.establish_connection()
cursor.execute(CREATE_USERS)
self.close_connection(cnxn, cursor)
# Delete a pre-existing table.
def drop_grouvie_table(self):
cnxn, cursor = self.establish_connection()
cursor.execute(DROP_GROUVIE_TABLE)
self.close_connection(cnxn, cursor)
# Delete a pre-existing table.
def drop_user_table(self):
cnxn, cursor = self.establish_connection()
cursor.execute(DROP_USERS_TABLE)
self.close_connection(cnxn, cursor)
# Insert a new entry into the Grouvie table.
def insert_grouvie(self, phone_number, leader, creation_datetime,
date, showtime, film, cinema, accepted):
cnxn, cursor = self.establish_connection()
cursor.execute(INSERT_GROUVIE, (phone_number, leader, creation_datetime,
date, showtime, film, cinema, accepted))
self.close_connection(cnxn, cursor)
def insert_user(self, phone_number, name, postcode, latitude, longitude):
cnxn, cursor = self.establish_connection()
cursor.execute(INSERT_USERS, (phone_number, name, postcode, latitude,
longitude))
self.close_connection(cnxn, cursor)
def accept_plan(self, phone_number, leader, creation_datetime):
cnxn, cursor = self.establish_connection()
cursor.execute(ACCEPT_PLAN, (phone_number, leader, creation_datetime))
self.close_connection(cnxn, cursor)
# Update an entry in the Grouvie table if it exists.
def update_grouvie(self, phone_number, leader, creation_datetime, date,
showtime, film, cinema):
cnxn, cursor = self.establish_connection()
cursor.execute(UPDATE_GROUVIE, (date, showtime, film, cinema,
phone_number, leader,
creation_datetime))
self.close_connection(cnxn, cursor)
# Update an entry in the USERS table if it exists.
def update_users(self, phone_number, name, postcode, latitude, longitude):
cnxn, cursor = self.establish_connection()
cursor.execute(UPDATE_USERS, (name, postcode, latitude, longitude,
phone_number))
self.close_connection(cnxn, cursor)
def confirm_plan(self, leader, creation_datetime):
cnxn, cursor = self.establish_connection()
cursor.execute(CONFIRM_PLAN, (leader, creation_datetime))
self.close_connection(cnxn, cursor)
def is_plan_confirmed(self, leader, creation_datetime):
cnxn, cursor = self.establish_connection()
cursor.execute(IS_PLAN_CONFIRMED, (leader, creation_datetime))
results = cursor.fetchall()
self.close_connection(cnxn, cursor)
return results[0][0]
# Get group replies for a plan
def group_replies(self, leader, creation_datetime):
cnxn, cursor = self.establish_connection()
cursor.execute(GROUP_REPLIES, (leader, creation_datetime))
results = cursor.fetchall()
self.close_connection(cnxn, cursor)
all_changes = {}
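        # Result shape (illustrative):
        #   {phone_number: {'accepted': bool, 'date': ..., 'showtime': ...,
        #                   'film': ..., 'cinema': ...}, ...}
        # Tuple indices below follow the GROUVIE column order.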
for i in range(len(results)):
user = results[i]
changes_made = {"accepted": user[7]}
if user[3] is not None:
changes_made["date"] = user[3]
if user[4] is not None:
changes_made["showtime"] = user[4]
if user[5] is not None:
changes_made["film"] = user[5]
if user[6] is not None:
changes_made["cinema"] = user[6]
all_changes[user[0]] = changes_made
print all_changes
stdout.flush()
return all_changes
# Reset all user preferences
def reset_user_prefs(self, leader, creation_datetime):
cnxn, cursor = self.establish_connection()
cursor.execute(RESET_USER_PREFS, (leader, creation_datetime))
self.close_connection(cnxn, cursor)
# Delete an entry from the table correlating with a user
def delete_single_grouvie(self, phone_number, leader, creation_datetime):
cnxn, cursor = self.establish_connection()
cursor.execute(DELETE_SINGLE, (phone_number, leader, creation_datetime))
self.close_connection(cnxn, cursor)
# Delete entries from the table correlating with a plan
def delete_plan_grouvie(self, leader, creation_datetime):
cnxn, cursor = self.establish_connection()
cursor.execute(DELETE_PLAN, (leader, creation_datetime))
self.close_connection(cnxn, cursor)
# Select an entry in a table based on phone_number.
def select_grouvie(self, phone_number):
cnxn, cursor = self.establish_connection()
        cursor.execute(SELECT_GROUVIE, (phone_number,))
result = cursor.fetchall()
self.close_connection(cnxn, cursor)
return result
# Select an entry in a table based on phone_number.
def select_users(self, phone_number):
cnxn, cursor = self.establish_connection()
        cursor.execute(SELECT_USERS, (phone_number,))
result = cursor.fetchall()
self.close_connection(cnxn, cursor)
# There should only be 1 result so we just return that tuple.
return result[0] if result else []
# Select users that actually have a Grouvie account.
def select_valid_users(self, friends):
# Build the placeholders which we require when it comes to searching.
fields = "(" + ','.join(["%s"]*len(friends)) + ")"
cnxn, cursor = self.establish_connection()
# friends_tuple = "(" + ",".join(friends) + ")"
# print friends_tuple
cursor.execute(SELECT_VALID_USERS.format(fields), tuple(friends))
print tuple(friends)
results = cursor.fetchall()
self.close_connection(cnxn, cursor)
return results
# Display everything in the Grouvie table.
def select_all_grouvie(self):
cnxn, cursor = self.establish_connection()
cursor.execute(SELECT_ALL_GROUVIE)
result = cursor.fetchall()
self.close_connection(cnxn, cursor)
return result
    # Display everything in the USERS table.
def select_all_users(self):
cnxn, cursor = self.establish_connection()
cursor.execute(SELECT_ALL_USERS)
results = cursor.fetchall()
self.close_connection(cnxn, cursor)
return results
if __name__ == '__main__':
data = {'PHONE_NUMBER': "1",
'LEADER': 0,
'SHOWTIME': "s",
'FILM': "GOTG3",
'CINEMA': "MEMES",
'ACCEPTED': False}
query = {'PHONE_NUMBER': "1",
'LEADER': 0,
'SHOWTIME': "s"}
db = DBManager()
# db.drop_user_table()
# db.make_user_table()
# db.insert_user("07587247113", "Erkin", "EN12LZ", 51.636495, -0.069549)
# db.insert_user("07964006128", "Tarun", "RM65DU", 51.579983, 0.124262)
# db.insert_user("07942948248", "Jay", "SW100NJ", 51.482079, -0.182265)
# # print db.select_valid_users(("1", "2", "5", "6"))
# db.drop_grouvie_table()
# db.make_grouvie_table()
pprint.PrettyPrinter(indent=4).pprint(db.select_all_grouvie())
pprint.PrettyPrinter(indent=4).pprint(db.select_all_users())
# print db.select_all_users()
# db.select_valid_users(users)
|
{
"content_hash": "f51476e5bff8d92662d58a53759ffaed",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 80,
"avg_line_length": 32.82142857142857,
"alnum_prop": 0.6203230936636812,
"repo_name": "Team-JETT/Grouvie",
"id": "f7eb8928913cb257844804293df7bd496201487c",
"size": "11947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Back-end/DBManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "152082"
},
{
"name": "Python",
"bytes": "30781"
},
{
"name": "Shell",
"bytes": "966"
}
],
"symlink_target": ""
}
|
"""Tests for tf.contrib.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import layers as _layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class AvgPool2DTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 3, 6
images = np.random.uniform(size=(5, height, width, 3))
with self.assertRaisesRegexp(ValueError,
'data_format has to be either NCHW or NHWC.'):
_layers.avg_pool2d(images, [3, 3], data_format='CHWN')
def testCreateAvgPool(self):
height, width = 3, 6
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.avg_pool2d(images, [3, 3])
self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])
def testCreateAvgPoolNCHW(self):
height, width = 3, 6
images = np.random.uniform(size=(5, 2, height, width))
output = _layers.avg_pool2d(images, [3, 3], data_format='NCHW')
    self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 1, 2])
def testCollectOutputs(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, [3, 3], outputs_collections='outputs')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['AvgPool2D'])
self.assertEqual(output_collected, output)
def testCreateSquareAvgPool(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, 3)
self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])
def testCreateAvgPoolWithScope(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, [3, 3], scope='pool1')
self.assertEqual(output.op.name, 'pool1/AvgPool')
def testCreateAvgPoolWithSamePadding(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3])
def testCreateAvgPoolWithSamePaddingNCHW(self):
height, width = 3, 6
images = random_ops.random_uniform((5, 3, height, width), seed=1)
output = _layers.avg_pool2d(
images, [3, 3], padding='SAME', data_format='NCHW')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3])
def testCreateAvgPoolStrideWithSamePadding(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.avg_pool2d(images, [3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalAvgPool(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
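    # Using the full spatial extent as the kernel (with stride 1) collapses
    # height and width to 1, i.e. global average pooling.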
output = _layers.avg_pool2d(images, images.get_shape()[1:3], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class PoolTest(test.TestCase):
def testCreatePool(self):
height, width = 3, 3
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.pool(images, [3, 3], pooling_type='AVG')
self.assertEqual(output.op.name, 'avg_pool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreatePoolNCHW(self):
height, width = 3, 3
images = np.random.uniform(size=(5, 3, height, width))
output = _layers.pool(
images, [3, 3], pooling_type='AVG', data_format='NCHW')
self.assertEqual(output.op.name, 'avg_pool')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 1, 1])
def testCollectOutputs(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(
images, [3, 3], pooling_type='AVG', outputs_collections='outputs')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['avg_pool'])
self.assertEqual(output_collected, output)
def testCreateSquareAvgPool(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(images, 3, pooling_type='AVG')
self.assertEqual(output.op.name, 'avg_pool')
self.assertEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testCreateMaxPoolWithScope(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(images, [3, 3], pooling_type='MAX', scope='pool1')
self.assertEqual(output.op.name, 'pool1')
def testCreateMaxPoolWithSamePadding(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(images, [3, 3], pooling_type='MAX', padding='SAME')
self.assertEqual(output.get_shape().as_list(), [5, 3, 3, 3])
def testCreateAvgPoolStrideWithSamePadding(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(
images, [3, 3], stride=1, padding='SAME', pooling_type='AVG')
self.assertEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalAvgPool(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(
images, images.get_shape()[1:3], stride=1, pooling_type='AVG')
self.assertEqual(output.get_shape().as_list(), [5, 1, 1, 3])
def testAvgPoolWithStride(self):
height, width = 5, 8
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(images, [2, 3], stride=[1, 2], pooling_type='AVG')
self.assertEqual(output.get_shape().as_list(), [5, 4, 3, 3])
def testAvgPoolWithDilation(self):
height, width = 5, 8
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.pool(
images, [2, 3], dilation_rate=[1, 2], pooling_type='AVG')
self.assertEqual(output.get_shape().as_list(), [5, 4, 4, 3])
def testAvgPoolWithDilationNCHW(self):
height, width = 5, 8
images = random_ops.random_uniform((5, 3, height, width), seed=1)
output = _layers.pool(
images, [2, 3],
dilation_rate=[1, 2],
pooling_type='AVG',
data_format='NCHW')
self.assertEqual(output.get_shape().as_list(), [5, 3, 4, 4])
class BiasAddTest(test.TestCase):
def testCreate(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.bias_add(images)
self.assertEqual(output.op.name, 'BiasAdd/BiasAdd')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateWithActivation(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.bias_add(images, activation_fn=nn_ops.relu)
self.assertEqual(output.op.name, 'BiasAdd/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateDimensions(self):
dims = (2, 3, 4)
shape = [5, 2, 3, 4]
with self.test_session():
for d in dims:
input_shape = shape[:d]
inputs = random_ops.random_uniform(input_shape, seed=1)
output = _layers.bias_add(inputs)
self.assertListEqual(output.get_shape().as_list(), input_shape)
biases = variables.get_variables_by_name('biases')[-1]
self.assertListEqual(biases.get_shape().as_list(), [input_shape[-1]])
class ConvolutionTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(ValueError, 'data_format'):
layers_lib.convolution2d(images, 32, 3, data_format='CHWN')
def testCreateConv(self):
height, width = 7, 9
with self.test_session():
images = np.random.uniform(size=(5, height, width, 4))
output = layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateConvNCHW(self):
height, width = 7, 9
with self.test_session():
images = np.random.uniform(size=(5, 4, height, width))
output = layers_lib.convolution2d(images, 32, [3, 3], data_format='NCHW')
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateSquareConv(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, 3)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, images.get_shape()[1:3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateFullyConv(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
output = layers_lib.convolution2d(
images, 64, images.get_shape()[1:3], padding='VALID')
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [64])
def testFullyConvWithCustomGetter(self):
height, width = 7, 9
with self.test_session():
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope('test', custom_getter=custom_getter):
images = random_ops.random_uniform((5, height, width, 32), seed=1)
layers_lib.convolution2d(images, 64, images.get_shape()[1:3])
self.assertEqual(called[0], 2) # Custom getter called twice.
def testCreateVerticalConv(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 4), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 1])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateHorizontalConv(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 4), seed=1)
output = layers_lib.convolution2d(images, 32, [1, 3])
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])
def testCreateConvWithStride(self):
height, width = 6, 8
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], stride=2)
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(),
[5, height / 2, width / 2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('conv1/weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertTrue(variables.get_variables('conv1/weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(output.op.name, 'conv1/Relu')
def testCreateConvWithCollection(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with ops.name_scope('fe'):
conv = layers_lib.convolution2d(
images, 32, [3, 3], outputs_collections='outputs', scope='Conv')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['fe/Conv'])
self.assertEqual(output_collected, conv)
def testCreateConvWithoutActivation(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], activation_fn=None)
self.assertEqual(output.op.name, 'Conv/BiasAdd')
def testCreateConvValid(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.convolution2d(images, 32, [3, 3], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32])
def testCreateConvWithWD(self):
height, width = 7, 9
weight_decay = 0.01
with self.test_session() as sess:
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(weight_decay)
layers_lib.convolution2d(
images, 32, [3, 3], weights_regularizer=regularizer)
l2_loss = nn_ops.l2_loss(variables.get_variables_by_name('weights')[0])
wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(wd.op.name, 'Conv/kernel/Regularizer/l2_regularizer')
sess.run(variables_lib.global_variables_initializer())
self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval())
def testCreateConvNoRegularizers(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseVars(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(variables.get_variables()), 2)
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEqual(len(variables.get_variables()), 2)
def testNonReuseVars(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(len(variables.get_variables()), 2)
layers_lib.convolution2d(images, 32, [3, 3])
self.assertEqual(len(variables.get_variables()), 4)
def testReuseConvWithWD(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
with arg_scope(
[layers_lib.convolution2d], weights_regularizer=weight_decay):
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
self.assertEqual(len(variables.get_variables()), 2)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
layers_lib.convolution2d(images, 32, [3, 3], scope='conv1', reuse=True)
self.assertEqual(len(variables.get_variables()), 2)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testConvWithBatchNorm(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
with arg_scope(
[layers_lib.convolution2d],
normalizer_fn=_layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = layers_lib.convolution2d(images, 32, [3, 3])
net = layers_lib.convolution2d(net, 32, [3, 3])
self.assertEqual(len(variables.get_variables()), 8)
self.assertEqual(len(variables.get_variables('Conv/BatchNorm')), 3)
self.assertEqual(len(variables.get_variables('Conv_1/BatchNorm')), 3)
def testReuseConvWithBatchNorm(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 32), seed=1)
with arg_scope(
[layers_lib.convolution2d],
normalizer_fn=_layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = layers_lib.convolution2d(images, 32, [3, 3], scope='Conv')
net = layers_lib.convolution2d(
net, 32, [3, 3], scope='Conv', reuse=True)
self.assertEqual(len(variables.get_variables()), 4)
self.assertEqual(len(variables.get_variables('Conv/BatchNorm')), 3)
self.assertEqual(len(variables.get_variables('Conv_1/BatchNorm')), 0)
def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self):
height, width = 7, 9
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('conv1/weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.convolution2d(images, 32, [3, 3], rate=2, scope='conv1')
self.assertTrue(variables.get_variables('conv1/weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testOutputSizeWithRateTwoSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 8, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithRateTwoThreeValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 6, 6, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=[2, 3], padding='VALID')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
      self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testDynamicOutputSizeWithRateOneValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 7, 9, num_filters]
with self.test_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=1, padding='VALID')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
num_filters = 32
input_size = [5, 3, 9, 11]
expected_size = [None, num_filters, None, None]
expected_size_dynamic = [5, num_filters, 7, 9]
with self.test_session(use_gpu=True):
images = array_ops.placeholder(np.float32,
[None, input_size[1], None, None])
output = layers_lib.convolution2d(
images,
num_filters, [3, 3],
rate=1,
padding='VALID',
data_format='NCHW')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 5, 7, num_filters]
with self.test_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='VALID')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images, num_filters, [3, 3], rate=2, padding='VALID', scope='conv7')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 5, 7, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.convolution2d(
images,
num_filters, [3, 3],
rate=2,
padding='VALID',
activation_fn=None,
scope='conv7')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'conv7/BiasAdd')
self.assertListEqual(list(output.eval().shape), expected_size)
class Convolution2dTransposeTests(test.TestCase):
def testTrainableFlagIsPassedOn(self):
for trainable in [True, False]:
with ops.Graph().as_default():
num_filters = 32
input_size = [5, 10, 12, 3]
images = random_ops.random_uniform(input_size, seed=1)
layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, trainable=trainable)
model_variables = variables.get_model_variables()
trainable_variables = variables_lib.trainable_variables()
for model_variable in model_variables:
self.assertEqual(trainable, model_variable in trainable_variables)
def testInvalidDataFormat(self):
height, width = 7, 9
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCHW or NHWC.'):
_layers.convolution2d_transpose(images, 32, 3, data_format='CHWN')
def testOutputSizeWithStrideOneSamePaddingNCHW(self):
# `NCHW` data format is only supported on `GPU` devices.
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 10, 12]
expected_size = [5, num_filters, 10, 12]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=1,
padding='SAME',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 10, 12]
expected_size = [5, num_filters, 12, 14]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=1,
padding='VALID',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 32
input_size = [5, 3, 9, 11]
expected_size = [5, num_filters, 19, 23]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoSamePaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, num_filters, 2, 2]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='SAME',
data_format='NCHW')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, num_filters, 2, 2]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoSamePaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 2, 2]
expected_size = [1, num_filters, 4, 4]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='SAME',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoValidPaddingNCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 2, 2]
expected_size = [1, num_filters, 4, 4]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 2],
stride=[2, 2],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x1NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 5]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 1],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x4NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 8]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 4],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x5NCHW(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True) as sess:
num_filters = 1
input_size = [1, 1, 3, 2]
expected_size = [1, num_filters, 6, 10]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [2, 4],
stride=[2, 5],
padding='VALID',
data_format='NCHW')
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneSamePadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 10, 12, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, padding='SAME')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideOneValidPadding(self):
num_filters = 32
input_size = [5, 10, 12, 3]
expected_size = [5, 12, 14, num_filters]
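# conv2d_transpose with 'VALID' padding and stride 1 grows each dim by kernel - 1:
# 10 -> 10 + 3 - 1 = 12 and 12 -> 12 + 3 - 1 = 14.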
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=1, padding='VALID')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStrideTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
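# For 'VALID' padding conv2d_transpose yields in * stride + max(kernel - stride, 0):
# 9 * 2 + 1 = 19 and 11 * 2 + 1 = 23.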
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoSamePadding(self):
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, 2, 2, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
self.assertListEqual(list(output.get_shape().as_list()), expected_size)
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith1x1StrideTwoValidPadding(self):
num_filters = 1
input_size = [1, 1, 1, 1]
expected_size = [1, 2, 2, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoSamePadding(self):
num_filters = 1
input_size = [1, 2, 2, 1]
expected_size = [1, 4, 4, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='SAME')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWith2x2StrideTwoValidPadding(self):
num_filters = 1
input_size = [1, 2, 2, 1]
expected_size = [1, 4, 4, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 2], stride=[2, 2], padding='VALID')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x1(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 5, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 1], padding='VALID')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x4(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 8, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 4], padding='VALID')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeWithStride2x5(self):
num_filters = 1
input_size = [1, 3, 2, 1]
expected_size = [1, 6, 10, num_filters]
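# When the stride exceeds the kernel (5 > 4) the max(kernel - stride, 0) term vanishes,
# so the width is simply 2 * 5 = 10; the height is 3 * 2 + max(2 - 2, 0) = 6.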
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [2, 4], stride=[2, 5], padding='VALID')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(list(output.eval().shape), expected_size)
def testOutputSizeRandomSizesAndStridesValidPadding(self):
np.random.seed(0)
max_image_size = 10
for _ in range(10):
num_filters = 1
input_size = [
1, np.random.randint(1, max_image_size),
np.random.randint(1, max_image_size), 1
]
filter_size = [
np.random.randint(1, input_size[1] + 1),
np.random.randint(1, input_size[2] + 1)
]
stride = [np.random.randint(1, 3), np.random.randint(1, 3)]
ops.reset_default_graph()
graph = ops.Graph()
with graph.as_default():
images = random_ops.random_uniform(input_size, seed=1)
transpose = layers_lib.conv2d_transpose(
images, num_filters, filter_size, stride=stride, padding='VALID')
conv = layers_lib.conv2d(
transpose, num_filters, filter_size, stride=stride, padding='VALID')
with self.test_session(graph=graph) as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(conv.eval().shape), input_size)
def testDynamicOutputSizeWithStrideTwoValidPadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 19, 23, num_filters]
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='VALID')
self.assertListEqual(output.get_shape().as_list(), expected_size)
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithStrideTwoSamePadding(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [None, None, None, num_filters]
expected_size_dynamic = [5, 18, 22, num_filters]
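# With 'SAME' padding conv2d_transpose simply scales each dim by the stride:
# 9 * 2 = 18 and 11 * 2 = 22.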
with self.test_session():
images = array_ops.placeholder(np.float32,
[None, None, None, input_size[3]])
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=[2, 2], padding='SAME')
variables_lib.global_variables_initializer().run()
self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')
self.assertListEqual(output.get_shape().as_list(), expected_size)
eval_output = output.eval({images: np.zeros(input_size, np.float32)})
self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScope(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images, num_filters, [3, 3], stride=2, padding='VALID', scope='conv7')
self.assertEqual(output.op.name, 'conv7/Relu')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testWithScopeWithoutActivation(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
images = random_ops.random_uniform(input_size, seed=1)
output = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=2,
padding='VALID',
activation_fn=None,
scope='conv7')
self.assertEqual(output.op.name, 'conv7/BiasAdd')
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertListEqual(list(output.eval().shape), expected_size)
def testDeconvWithoutBiasesProducesConv2dTranspose(self):
num_filters = 32
input_size = [5, 9, 11, 3]
expected_size = [5, 19, 23, num_filters]
stride = 2
padding = 'VALID'
with self.test_session() as sess:
images = random_ops.random_uniform(input_size, seed=1)
output_deconv = layers_lib.conv2d_transpose(
images,
num_filters, [3, 3],
stride=stride,
padding=padding,
activation_fn=None,
scope='conv7')
weights = variables.get_variables_by_name('conv7/weights')[0]
output_conv2d_transpose = nn_ops.conv2d_transpose(
images,
weights,
expected_size, [1, stride, stride, 1],
padding=padding)
sess.run(variables_lib.global_variables_initializer())
output_deconv, output_conv2d_transpose = sess.run(
[output_deconv, output_conv2d_transpose])
self.assertTrue(
np.isclose(output_deconv, output_conv2d_transpose, 1e-5, 1e-5).all())
class ConvolutionInPlaneTest(test.TestCase):
def testHorzConvWithBlankImage(self):
image = array_ops.ones((1, 10, 10, 1))
horz_gradients = layers_lib.conv2d_in_plane(
image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
expected = np.zeros((1, 10, 9, 1))
self.assertAllEqual(result, expected)
def testHorzConvWithBlankImageAndPlaceholder(self):
image = array_ops.placeholder(dtypes.float32, shape=(None, None, None, 1))
horz_gradients = layers_lib.conv2d_in_plane(
image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients,
feed_dict={image: np.ones((1, 10, 10, 1))})
expected = np.zeros((1, 10, 9, 1))
self.assertAllEqual(result, expected)
def testHorzConvWithRandomImageMultiBatch(self):
np.random.seed(1)
image = np.random.rand(5, 10, 10, 1)
expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
tf_image = constant_op.constant(image, dtype=dtypes.float32)
horz_gradients = layers_lib.conv2d_in_plane(
tf_image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testHorzConvWithRandomImageMultiBatchMultiChannel(self):
np.random.seed(1)
image = np.random.rand(5, 10, 10, 7)
expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]
tf_image = constant_op.constant(image, dtype=dtypes.float32)
horz_gradients = layers_lib.conv2d_in_plane(
tf_image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testHorzConvWithVaryingImage(self):
image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))
expected = np.asmatrix(('-1.0 -1.0;' '-0.9 -2.0;' '-4.3 -8.9'))
expected = np.reshape(np.asarray(expected), (1, 3, 2, 1))
tf_image = constant_op.constant(
image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
horz_gradients = layers_lib.conv2d_in_plane(
tf_image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[1, 2],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(horz_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testVertConvWithBlankImage(self):
image = array_ops.ones((1, 10, 10, 1))
vert_gradients = layers_lib.conv2d_in_plane(
image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[2, 1],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(vert_gradients)
expected = np.zeros((1, 9, 10, 1))
self.assertAllEqual(result, expected)
def testVertConvWithVaryingImage(self):
image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))
expected = np.asmatrix(('-0.1 0.0 -1.0;' ' 5.4 2.0 -4.9'))
expected = np.reshape(np.asarray(expected), (1, 2, 3, 1))
tf_image = constant_op.constant(
image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
vert_gradients = layers_lib.conv2d_in_plane(
tf_image,
weights_initializer=init_ops.constant_initializer([1, -1]),
kernel_size=[2, 1],
padding='VALID',
activation_fn=None)
init_op = variables_lib.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
result = sess.run(vert_gradients)
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
class DropoutTest(test.TestCase):
def testCreateDropout(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.dropout(images)
self.assertEqual(output.op.name, 'Dropout/dropout/mul')
output.get_shape().assert_is_compatible_with(
ops.convert_to_tensor(images).get_shape())
def testCreateDropoutWithConstantTrue(self):
height, width = 3, 3
with self.test_session():
is_training = constant_op.constant(True)
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutWithConstantFalse(self):
height, width = 3, 3
with self.test_session():
is_training = constant_op.constant(False)
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCreateDropoutWithPlaceholder(self):
height, width = 3, 3
with self.test_session():
is_training = array_ops.placeholder(dtype=dtypes.bool, shape=[])
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, is_training=is_training)
self.assertEqual(output.op.name, 'Dropout/cond/Merge')
output.get_shape().assert_is_compatible_with(images.get_shape())
def testCollectOutputs(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.dropout(images, outputs_collections='outputs')
c_output = ops.get_collection('outputs')[0]
self.assertEqual(c_output.aliases, ['Dropout'])
self.assertEqual(c_output, output)
def testDropout(self):
height, width = 10, 10
with self.test_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
num_elem_initial = math_ops.reduce_mean(math_ops.to_float(images > 0))
output = _layers.dropout(images)
num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0))
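# dropout defaults to keep_prob=0.5, so roughly half of the non-zero activations
# should survive; the assertions below allow +/- 0.1 of slack around that ratio.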
sess.run(variables_lib.global_variables_initializer())
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
self.assertGreater(num_elem, num_elem_initial / 2 - 0.1)
def testCreateDropoutNoTraining(self):
height, width = 3, 3
with self.test_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
num_elem_initial = math_ops.reduce_mean(math_ops.to_float(images > 0))
output = _layers.dropout(images, is_training=False)
num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0))
sess.run(variables_lib.global_variables_initializer())
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertEqual(num_elem, num_elem_initial)
outputs, inputs = sess.run([output, images])
self.assertAllClose(outputs, inputs)
def testCreateFCFollowByDropout(self):
height, width = 3, 3
with self.test_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.fully_connected(images, 50)
num_elem_initial = math_ops.reduce_mean(math_ops.to_float(output > 0))
output = _layers.dropout(output)
num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0))
sess.run(variables_lib.global_variables_initializer())
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
self.assertGreater(num_elem, num_elem_initial / 2 - 0.1)
def testCreateFCWithDropout(self):
height, width = 3, 3
with self.test_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.fully_connected(
images, 50, normalizer_fn=_layers.dropout)
num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0))
sess.run(variables_lib.global_variables_initializer())
num_elem = sess.run(num_elem)
self.assertLess(num_elem, 0.5)
self.assertGreater(num_elem, 0.1)
class FlattenTest(test.TestCase):
def testInvalidRank(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5,)))
with self.assertRaisesRegexp(ValueError,
'must have a least 2 dimensions'):
_layers.flatten(inputs)
def testUnknownLastDim(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5, None)))
with self.assertRaisesRegexp(ValueError, '2nd dimension must be defined'):
_layers.flatten(inputs)
def testCollectOutputs(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.flatten(images, outputs_collections='outputs')
c_output = ops.get_collection('outputs')[0]
self.assertEqual(c_output.aliases, ['Flatten'])
self.assertEqual(c_output, output)
def testFlatten4D(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.flatten(images)
self.assertEqual(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlatten3D(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height, width), seed=1, name='images')
output = _layers.flatten(images)
self.assertEqual(output.get_shape().num_elements(),
images.get_shape().num_elements())
self.assertEqual(output.get_shape()[0], images.get_shape()[0])
def testFlattenBatchSize(self):
height, width = 3, 3
with self.test_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
inputs = array_ops.placeholder(dtypes.int32, (None, height, width, 3))
output = _layers.flatten(inputs)
self.assertEqual(output.get_shape().as_list(), [None, height * width * 3])
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.size, images.get_shape().num_elements())
self.assertEqual(output.shape[0], images.get_shape()[0])
def _sparsify(array, threshold=0.5):
array[array < threshold] = 0
non_zero = np.where(array)
indices = np.vstack(non_zero).T
values = array[non_zero]
shape = array.shape
return indices, values, shape
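# For example, _sparsify(np.array([[0.2, 0.9], [0.7, 0.1]])) zeroes the entries below
# 0.5 and returns indices [[0, 1], [1, 0]], values [0.9, 0.7] and shape (2, 2).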
class PartialFlattenTest(test.TestCase):
def testDensePartialFlatten(self):
"""Test `_inner_flatten` on `Tensor`s."""
shape = [2, 3, 4, 5, 6]
np.random.seed(5446)
inputs = np.random.randint(0, 100, size=shape)
for new_rank in [1, 2, 3, 4, 5]:
expected_new_shape = (
shape[:new_rank - 1] + [np.prod(shape[new_rank - 1:])])
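# E.g. for new_rank=3 the trailing dims fold into one:
# [2, 3, 4, 5, 6] -> [2, 3, 4 * 5 * 6] = [2, 3, 120].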
expected_flattened = np.reshape(inputs, expected_new_shape)
flattened_t = _layers._inner_flatten(inputs, new_rank)
static_shape = flattened_t.get_shape().as_list()
self.assertEqual(static_shape, expected_new_shape)
with self.test_session() as sess:
flattened = sess.run(flattened_t)
np.testing.assert_array_equal(expected_flattened, flattened)
def testSparsePartialFlatten(self):
"""Test `_inner_flatten` on `SparseTensor`s."""
shape = [4, 3, 11, 6, 1, 3]
np.random.seed(10301)
random_ = np.random.rand(*shape)
indices, values, _ = _sparsify(random_)
for new_rank in [1, 2, 3, 4, 5]:
expected_shape = (shape[:new_rank - 1] + [np.prod(shape[new_rank - 1:])])
reshaped_random_ = np.reshape(random_, expected_shape)
expected_indices, expected_values, _ = _sparsify(reshaped_random_)
inputs_t = sparse_tensor.SparseTensor(indices, values, shape)
flattened_t = _layers._inner_flatten(inputs_t, new_rank)
with self.test_session() as sess:
flattened = sess.run(flattened_t)
np.testing.assert_array_equal(expected_indices, flattened.indices)
np.testing.assert_array_equal(expected_values, flattened.values)
np.testing.assert_array_equal(expected_shape, flattened.dense_shape)
def testIncompleteShape(self):
"""Test `_inner_flatten` shape inference for incomplete shapes."""
shape = [2, None, 4, None, 5, 6]
inputs = array_ops.placeholder(dtypes.int32)
inputs.set_shape(shape)
flattened1 = _layers._inner_flatten(inputs, 1)
self.assertEqual([None], flattened1.get_shape().as_list())
flattened2 = _layers._inner_flatten(inputs, 2)
self.assertEqual([2, None], flattened2.get_shape().as_list())
flattened3 = _layers._inner_flatten(inputs, 3)
self.assertEqual([2, None, None], flattened3.get_shape().as_list())
flattened4 = _layers._inner_flatten(inputs, 4)
self.assertEqual([2, None, 4, None], flattened4.get_shape().as_list())
flattened5 = _layers._inner_flatten(inputs, 5)
self.assertEqual([2, None, 4, None, 30], flattened5.get_shape().as_list())
class FCTest(test.TestCase):
def testCreateFC(self):
height, width = 3, 3
for layer_fn in (_layers.fully_connected, layers_lib.relu):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = np.random.uniform(size=(5, height * width * 3))
output = layer_fn(inputs, 32)
self.assertEqual(output.op.name, 'fully_connected/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 32])
weights = variables.get_variables_by_name('weights')[0]
self.assertListEqual(weights.get_shape().as_list(), [3 * 3 * 3, 32])
biases = variables.get_variables_by_name('biases')[0]
self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateFCWithScope(self):
height, width = 3, 3
with self.test_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
output = _layers.fully_connected(inputs, 32, scope='fc1')
self.assertEqual(output.op.name, 'fc1/Relu')
def testCreateFCWithCollection(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
with ops.name_scope('fe'):
fc = _layers.fully_connected(
inputs, 7, outputs_collections='outputs', scope='fc')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['fe/fc'])
self.assertEqual(output_collected, fc)
def testCreateFcCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('fc1/weights'))
self.assertFalse(variables.get_variables('fc1/biases'))
_layers.fully_connected(inputs, 32, scope='fc1')
self.assertTrue(variables.get_variables('fc1/weights'))
self.assertTrue(variables.get_variables('fc1/biases'))
def testReuseVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
_layers.fully_connected(inputs, 32, scope='fc1')
self.assertEqual(len(variables.get_variables('fc1')), 2)
_layers.fully_connected(inputs, 32, scope='fc1', reuse=True)
self.assertEqual(len(variables.get_variables('fc1')), 2)
def testNonReuseVars(self):
height, width = 3, 3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
with self.test_session():
_layers.fully_connected(inputs, 32)
self.assertEqual(len(variables.get_variables('fully_connected')), 2)
_layers.fully_connected(inputs, 32)
self.assertEqual(len(variables.get_variables('fully_connected')), 4)
def testReuseWithRegularizer(self):
height, width = 3, 3
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
_layers.fully_connected(
inputs, 32, scope='fc1', weights_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
self.assertEqual(len(loss_ops.get_regularization_losses()), 1)
_layers.fully_connected(
inputs, 32, scope='fc1', weights_regularizer=regularizer, reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
self.assertEqual(len(loss_ops.get_regularization_losses()), 1)
with variable_scope.variable_scope('outer', reuse=False):
_layers.fully_connected(inputs, 32, weights_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
self.assertEqual(len(loss_ops.get_regularization_losses()), 2)
with variable_scope.variable_scope('outer', reuse=True):
_layers.fully_connected(inputs, 32, weights_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
self.assertEqual(len(loss_ops.get_regularization_losses()), 2)
def testCreateFCWithoutActivation(self):
height, width = 3, 3
with self.test_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
output = _layers.fully_connected(inputs, 32, activation_fn=None)
self.assertEqual(output.op.name, 'fully_connected/BiasAdd')
def testCreateFCWithWD(self):
height, width = 3, 3
with self.test_session() as sess:
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(inputs, 32, weights_regularizer=weight_decay)
wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(wd.op.name,
'fully_connected/weights/Regularizer/l2_regularizer')
sess.run(variables_lib.global_variables_initializer())
self.assertLess(sess.run(wd), 0.4)
def testCreateFCWithBD(self):
height, width = 3, 3
with self.test_session() as sess:
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
bias_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(inputs, 32, biases_regularizer=bias_decay)
wd = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(wd.op.name,
'fully_connected/bias/Regularizer/l2_regularizer')
sess.run(variables_lib.global_variables_initializer())
self.assertLess(sess.run(wd), 0.4)
def testCreateNoRegularizers(self):
height, width = 3, 3
with self.test_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
_layers.fully_connected(inputs, 32)
self.assertEqual(
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES), [])
def testReuseFCWithWD(self):
height, width = 3, 3
with self.test_session():
inputs = random_ops.random_uniform((5, height * width * 3), seed=1)
weight_decay = regularizers.l2_regularizer(0.01)
_layers.fully_connected(
inputs, 32, weights_regularizer=weight_decay, scope='FC')
self.assertEqual(len(variables.get_variables()), 2)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
_layers.fully_connected(
inputs, 32, weights_regularizer=weight_decay, scope='FC', reuse=True)
self.assertEqual(len(variables.get_variables()), 2)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testFCWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height * width * 3), seed=1)
with arg_scope(
[_layers.fully_connected],
normalizer_fn=_layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = _layers.fully_connected(images, 27)
net = _layers.fully_connected(net, 27)
self.assertEqual(len(variables.get_variables()), 8)
self.assertEqual(
len(variables.get_variables('fully_connected/BatchNorm')), 3)
self.assertEqual(
len(variables.get_variables('fully_connected_1/BatchNorm')), 3)
def testReuseFCWithBatchNorm(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height * width * 3), seed=1)
with arg_scope(
[_layers.fully_connected],
normalizer_fn=_layers.batch_norm,
normalizer_params={'decay': 0.9}):
net = _layers.fully_connected(images, 27, scope='fc1')
net = _layers.fully_connected(net, 27, scope='fc1', reuse=True)
self.assertEqual(len(variables.get_variables()), 4)
self.assertEqual(len(variables.get_variables('fc1/BatchNorm')), 3)
class BatchNormTest(test.TestCase):
def _addBesselsCorrection(self, sample_size, expected_var):
correction_factor = sample_size / (sample_size - 1)
expected_var *= correction_factor
return expected_var, correction_factor
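# np.var computes the biased (population) variance; the fused batch norm kernel tracks
# the sample variance, so tests scale the expectation by n / (n - 1) to compare the two.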
def testUnknownShape(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
_layers.batch_norm(inputs)
def testInvalidDataFormat(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(
ValueError, 'data_format has to be either NCHW or NHWC.'):
_layers.batch_norm(inputs, data_format='CHWN')
def testUnknownChannelsDimNHWC(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5, 3, 3, None)))
with self.assertRaisesRegexp(ValueError, 'undefined'):
_layers.batch_norm(inputs, data_format='NHWC')
def testUnknownChannelsDimNCHW(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5, None, 3, 3)))
with self.assertRaisesRegexp(ValueError, 'undefined'):
_layers.batch_norm(inputs, data_format='NCHW')
def testWeightedMomentsFused(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=(5, 3, 3, 7))
batch_weights = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'Weighted mean and variance'):
_layers.batch_norm(inputs, batch_weights=batch_weights, fused=True)
def _testCreateOp(self, fused):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3)).astype('f')
output = _layers.batch_norm(images, fused=fused)
expected_name = ('BatchNorm/FusedBatchNorm' if fused else
'BatchNorm/batchnorm')
self.assertTrue(output.op.name.startswith(expected_name))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateOpDefault(self):
self._testCreateOp(False)
def testCreateOpFused(self):
self._testCreateOp(True)
def testCreateVariables(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEqual(beta.op.name, 'BatchNorm/beta')
self.assertEqual(gamma.op.name, 'BatchNorm/gamma')
moving_mean = variables.get_variables_by_name('moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariables(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True)
self.assertEqual(len(variables.get_model_variables()), 4)
moving_mean = variables.get_variables_by_name('moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariablesZeroDebias(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True, zero_debias_moving_mean=True)
self.assertEqual(len(variables.get_model_variables()), 6)
moving_mean = variables.get_variables_by_name('moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
biased = variables.get_variables_by_name('biased')[0]
local_step = variables.get_variables_by_name('local_step')[0]
self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')
self.assertEqual(biased.op.name, 'BatchNorm/BatchNorm/moving_mean/biased')
self.assertEqual(local_step.op.name,
'BatchNorm/BatchNorm/moving_mean/local_step')
def testUpdatesCollection(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, updates_collections='my_update_ops')
update_layers = ops.get_collection('my_update_ops')
update_moving_mean = update_layers[0]
update_moving_variance = update_layers[1]
self.assertEqual(update_moving_mean.op.name, 'BatchNorm/AssignMovingAvg')
self.assertEqual(update_moving_variance.op.name,
'BatchNorm/AssignMovingAvg_1')
def testReuseVariables(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.batch_norm(images, scale=True, scope='bn')
_layers.batch_norm(images, scale=True, scope='bn', reuse=True)
beta = variables.get_variables_by_name('beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEqual(len(beta), 1)
self.assertEqual(len(gamma), 1)
moving_mean = variables.get_variables_by_name('moving_mean')
moving_variance = variables.get_variables_by_name('moving_variance')
moving_vars = moving_mean + moving_variance
self.assertEqual(len(moving_vars), 2)
def testReuseUpdateOps(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with arg_scope([_layers.batch_norm], updates_collections='update_ops'):
_layers.batch_norm(images, scope='bn')
self.assertEqual(len(ops.get_collection('update_ops')), 2)
_layers.batch_norm(images, scope='bn', reuse=True)
self.assertEqual(len(ops.get_collection('update_ops')), 4)
def testCreateMovingVars(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_ = _layers.batch_norm(images)
moving_mean = variables.get_variables('BatchNorm/moving_mean')
self.assertEqual(len(moving_mean), 1)
self.assertEqual(moving_mean[0].op.name, 'BatchNorm/moving_mean')
moving_variance = variables.get_variables('BatchNorm/moving_variance')
self.assertEqual(len(moving_variance), 1)
self.assertEqual(moving_variance[0].op.name, 'BatchNorm/moving_variance')
def testZeroDebiasMovingMean(self):
height, width = 3, 3
batch_size = 10
channels = 3
np.random.seed(1)
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images,
decay=0.1,
updates_collections=None,
zero_debias_moving_mean=True)
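# zero_debias keeps a raw EMA in 'biased' plus a 'local_step' counter and (assuming the
# standard zero-debias rule) reads the mean as biased / (1 - decay**local_step), which
# is why moving_mean below already equals expected_mean after every single update.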
moving_mean = variables.get_variables_by_name('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables_by_name('moving_variance')[0]
biased = variables.get_variables_by_name('biased')[0]
local_step = variables.get_variables_by_name('local_step')[0]
with self.test_session() as sess:
sess.run(variables_lib.global_variables_initializer())
self.assertAllClose(local_step.eval(), 0)
self.assertAllClose(moving_mean.eval(), [0] * channels)
self.assertAllClose(biased.eval(), [0] * channels)
self.assertAllClose(moving_variance.eval(), [1] * channels)
for i in range(10):
self.assertAllClose(local_step.eval(), i)
sess.run([output])
# In this case moving_mean == expected_mean after each update
self.assertAllClose(moving_mean.eval(), expected_mean)
# After 10 updates with decay 0.1 moving_mean == expected_mean,
# biased == expected_mean and moving_variance == expected_var.
self.assertAllClose(moving_mean.eval(), expected_mean)
self.assertAllClose(moving_variance.eval(), expected_var)
self.assertAllClose(biased.eval(), expected_mean)
def _testNoneUpdatesCollections(self,
fused,
data_format='NHWC',
zero_debias_moving_mean=False):
height, width = 2, 2
batch_size = 10
channels = 3
np.random.seed(1)
use_gpu = fused
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, _ = self._addBesselsCorrection(
    batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images,
decay=0.1,
updates_collections=None,
fused=fused,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean)
# The update ops are not added to the UPDATE_OPS collection.
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
for _ in range(10):
sess.run([output])
if zero_debias_moving_mean:
# In this case moving_mean == expected_mean after update
self.assertAllClose(moving_mean.eval(), expected_mean)
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testNoneUpdatesCollectionsNHWC(self):
self._testNoneUpdatesCollections(False, data_format='NHWC')
def testNoneUpdatesCollectionsNCHW(self):
self._testNoneUpdatesCollections(False, data_format='NCHW')
def testNoneUpdatesCollectionsNHWCZeroDebias(self):
self._testNoneUpdatesCollections(
False, data_format='NHWC', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsNCHWZeroDebias(self):
self._testNoneUpdatesCollections(
False, data_format='NCHW', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testNoneUpdatesCollections(True, data_format='NCHW')
def testNoneUpdatesCollectionsFusedNHWC(self):
self._testNoneUpdatesCollections(True, data_format='NHWC')
def testNoneUpdatesCollectionsFusedNCHWZeroDebias(self):
if test.is_gpu_available(cuda_only=True):
self._testNoneUpdatesCollections(
True, data_format='NCHW', zero_debias_moving_mean=True)
def testNoneUpdatesCollectionsFusedNHWCZeroDebias(self):
self._testNoneUpdatesCollections(
True, data_format='NHWC', zero_debias_moving_mean=True)
def _testDelayedUpdateMovingVars(self,
fused,
data_format='NHWC',
zero_debias_moving_mean=False):
height, width = 2, 2
batch_size = 10
channels = 3
np.random.seed(1)
use_gpu = fused
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, correction_factor = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images,
decay=0.1,
fused=fused,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean)
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
# The update ops are added to the UPDATE_OPS collection.
self.assertEqual(len(update_ops), 2)
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='barrier')
output = control_flow_ops.with_dependencies([barrier], output)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
for _ in range(10):
sess.run([output])
if zero_debias_moving_mean:
# In this case moving_mean == expected_mean after update
self.assertAllClose(moving_mean.eval(), expected_mean)
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
if fused:
# Add Bessel's correction
moving_variance_corrected = moving_variance / correction_factor
correct_moving_variance = state_ops.assign(moving_variance,
moving_variance_corrected)
sess.run(correct_moving_variance)
self.assertAllClose(variance, expected_var)
def testDelayedUpdateMovingVarsNHWC(self):
self._testDelayedUpdateMovingVars(False, data_format='NHWC')
def testDelayedUpdateMovingVarsNCHW(self):
self._testDelayedUpdateMovingVars(False, data_format='NCHW')
def testDelayedUpdateMovingVarsFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testDelayedUpdateMovingVars(True, data_format='NCHW')
def testDelayedUpdateMovingVarsFusedNHWC(self):
self._testDelayedUpdateMovingVars(True, data_format='NHWC')
def testDelayedUpdateMovingVars(self):
self._testDelayedUpdateMovingVars(False)
def _testEvalMovingVars(self, zero_debias_moving_mean=False):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(images, decay=0.1, is_training=False)
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# Simulate assignment from a saver restore.
init_assigns = [
state_ops.assign(moving_mean, expected_mean),
state_ops.assign(moving_variance, expected_var)
]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEvalMovingVars(self):
self._testEvalMovingVars()
def testEvalMovingVarsZeroDebias(self):
self._testEvalMovingVars(True)
def testEvalMovingVarsWithPartitioner(self):
# This test makes sure that the moving-mean and moving-variance logic works
# when `batch_norm` is called within a variable-scope that has a variable
# partitioner.
partitioner = partitioned_variables.fixed_size_partitioner(2, axis=0)
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), partitioner=partitioner):
self.testEvalMovingVars()
def _testReuseVars(self, fused, zero_debias_moving_mean=False):
height, width = 3, 3
batch_size = 10
channels = 3
with self.test_session() as sess:
image_shape = (batch_size, height, width, channels)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
if fused:
# Add Bessel's correction
expected_var, correction_factor = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output_train = _layers.batch_norm(
images,
decay=0.1,
is_training=True,
scope='BN',
fused=fused,
zero_debias_moving_mean=zero_debias_moving_mean)
output_eval = _layers.batch_norm(
images,
decay=0.1,
is_training=False,
scope='BN',
reuse=True,
fused=fused,
zero_debias_moving_mean=zero_debias_moving_mean)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BN/moving_mean')[0]
moving_variance = variables.get_variables('BN/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='barrier')
train_op = control_flow_ops.with_dependencies([barrier], output_train)
# Before updates the outputs are different for train and eval.
self.assertFalse(
np.allclose(sess.run([output_train]), sess.run([output_eval])))
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
if fused:
# Add Bessel's correction
moving_variance_corrected = moving_variance / correction_factor
correct_moving_variance = state_ops.assign(moving_variance,
moving_variance_corrected)
sess.run(correct_moving_variance)
self.assertAllClose(variance, expected_var)
# After convergence output_train and output_eval should be the same.
self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))
def testReuseVarsDefault(self):
self._testReuseVars(False)
def testReuseVarsFused(self):
self._testReuseVars(True)
def testReuseVarsDefaultZeroDebias(self):
self._testReuseVars(False, True)
def testReuseVarsFusedZeroDebias(self):
self._testReuseVars(True, True)
def _testIsTrainingVariable(self,
fused,
data_format='NHWC',
zero_debias_moving_mean=False):
height, width = 2, 2
batch_size = 10
channels = 3
np.random.seed(1)
use_gpu = fused
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, correction_factor = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
is_training = variables_lib.Variable(True)
output = _layers.batch_norm(
images,
decay=0.1,
is_training=is_training,
fused=fused,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
# Before the updates the outputs differ depending on is_training.
output_true = sess.run([output], {is_training: True})
output_false = sess.run([output], {is_training: False})
self.assertFalse(np.allclose(output_true, output_false))
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='barrier')
train_op = control_flow_ops.with_dependencies([barrier], output)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
# After the moving averages converge the outputs no longer depend on is_training.
output_true = sess.run([output], {is_training: True})
if fused:
# Add Bessel's correction
moving_variance_corrected = moving_variance / correction_factor
correct_moving_variance = state_ops.assign(moving_variance,
moving_variance_corrected)
sess.run(correct_moving_variance)
output_false = sess.run([output], {is_training: False})
self.assertAllClose(output_true, output_false)
def testIsTrainingVariableNHWC(self):
self._testIsTrainingVariable(False, data_format='NHWC')
def testIsTrainingVariableNCHW(self):
self._testIsTrainingVariable(False, data_format='NCHW')
def testIsTrainingVariableNHWCZeroDebias(self):
self._testIsTrainingVariable(
False, data_format='NHWC', zero_debias_moving_mean=True)
def testIsTrainingVariableNCHWZeroDebias(self):
self._testIsTrainingVariable(
False, data_format='NCHW', zero_debias_moving_mean=True)
def testIsTrainingVariableFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testIsTrainingVariable(True, data_format='NCHW')
def testIsTrainingVariableFusedNHWC(self):
self._testIsTrainingVariable(True, data_format='NHWC')
def testIsTrainingVariableFusedNCHWZeroDebias(self):
if test.is_gpu_available(cuda_only=True):
self._testIsTrainingVariable(
True, data_format='NCHW', zero_debias_moving_mean=True)
def testIsTrainingVariableFusedNHWCZeroDebias(self):
self._testIsTrainingVariable(
True, data_format='NHWC', zero_debias_moving_mean=True)
def testNoUpdatesWhenIsTrainingFalse(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(images, decay=0.1, is_training=False)
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
      # No update ops are added to the UPDATE_OPS collection.
self.assertEqual(len(update_ops), 0)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output])
self.assertAllClose(moving_mean.eval(), [0] * 3)
self.assertAllClose(moving_variance.eval(), [1] * 3)
def testNoneUpdatesCollectionNoTraining(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images, decay=0.1, updates_collections=None, is_training=False)
      # No update ops are added to the UPDATE_OPS collection.
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output])
self.assertAllClose(moving_mean.eval(), [0] * 3)
self.assertAllClose(moving_variance.eval(), [1] * 3)
def _testNoneUpdatesCollectionIsTrainingVariable(self,
fused,
data_format='NHWC'):
height, width = 2, 2
batch_size = 10
channels = 3
np.random.seed(1)
use_gpu = fused
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, correction_factor = self._addBesselsCorrection(
batch_size * height * width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
is_training = variables_lib.Variable(True)
output = _layers.batch_norm(
images,
decay=0.1,
updates_collections=None,
is_training=is_training,
fused=fused,
data_format=data_format)
      # No update ops are added to the UPDATE_OPS collection.
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
# When is_training is False batch_norm doesn't update moving_vars.
for _ in range(10):
sess.run([output], {is_training: False})
self.assertAllClose(moving_mean.eval(), [0] * channels)
self.assertAllClose(moving_variance.eval(), [1] * channels)
      # Before updates the outputs are different depending on is_training.
output_true = sess.run([output], {is_training: True})
output_false = sess.run([output], {is_training: False})
self.assertFalse(np.allclose(output_true, output_false))
# When is_training is True update moving_vars.
for _ in range(10):
sess.run([output], {is_training: True})
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(moving_mean.eval(), expected_mean)
self.assertAllClose(moving_variance.eval(), expected_var)
      # After updating until convergence, the outputs don't depend on
      # is_training.
output_true = sess.run([output], {is_training: True})
if fused:
# Add Bessel's correction
moving_variance_corrected = moving_variance / correction_factor
correct_moving_variance = state_ops.assign(moving_variance,
moving_variance_corrected)
sess.run(correct_moving_variance)
output_false = sess.run([output], {is_training: False})
self.assertTrue(np.allclose(output_true, output_false))
def testNoneUpdatesCollectionIsTrainingVariableNHWC(self):
self._testNoneUpdatesCollectionIsTrainingVariable(False, data_format='NHWC')
def testNoneUpdatesCollectionIsTrainingVariableNCHW(self):
self._testNoneUpdatesCollectionIsTrainingVariable(False, data_format='NCHW')
def testNoneUpdatesCollectionIsTrainingVariableFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testNoneUpdatesCollectionIsTrainingVariable(
True, data_format='NCHW')
def testNoneUpdatesCollectionIsTrainingVariableFusedNHWC(self):
self._testNoneUpdatesCollectionIsTrainingVariable(True, data_format='NHWC')
def _testTrainMovingVars(self, fused, data_format='NHWC'):
    # Test that the gradients are stable while the moving_mean is updated.
    # Since the moving_mean is used as a shift to compute tf.moments, the
    # gradients could diverge; this test checks that the gradients remain
    # stable while the moving_mean is updated.
height, width = 7, 7
batch_size = 10
channels = 32
np.random.seed(1)
use_gpu = fused
with self.test_session(use_gpu=use_gpu) as sess:
if data_format == 'NHWC':
image_shape = (batch_size, height, width, channels)
axis = (0, 1, 2)
else:
image_shape = (batch_size, channels, height, width)
axis = (0, 2, 3)
image_values = np.random.rand(*image_shape) + 256
expected_mean = np.mean(image_values, axis=axis)
expected_var = np.var(image_values, axis=axis)
if fused:
# Add Bessel's correction
expected_var, _ = self._addBesselsCorrection(batch_size * height *
width, expected_var)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output = _layers.batch_norm(
images,
decay=0.2,
updates_collections=None,
is_training=True,
fused=fused,
data_format=data_format)
self.assertEqual(ops.get_collection(ops.GraphKeys.UPDATE_OPS), [])
objective = math_ops.reduce_sum(output)
[images_gradients] = gradients_impl.gradients(objective, images)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * channels)
self.assertAllClose(variance, [1] * channels)
# Initial input gradients.
images_gradients_value = sess.run(images_gradients)
for _ in range(10):
np_output, new_images_gradients = sess.run([output, images_gradients])
# The outputs should be close to 0.0 mean and 1.0 variance
self.assertAllClose(
np.mean(
np_output, axis=axis), [0] * channels, rtol=0.001, atol=0.001)
self.assertAllClose(
np.var(np_output, axis=axis), [1] * channels, rtol=0.01, atol=0.01)
# The gradients should change slowly while updating moving_mean.
max_diff = np.max(np.abs(images_gradients_value - new_images_gradients))
self.assertGreaterEqual(max_diff, 0.0)
self.assertLess(max_diff, 5e-5)
self.assertAllClose(moving_mean.eval(), expected_mean)
self.assertAllClose(moving_variance.eval(), expected_var)
def testTrainMovingVarsNHWC(self):
self._testTrainMovingVars(False, data_format='NHWC')
def testTrainMovingVarsNCHW(self):
self._testTrainMovingVars(False, data_format='NCHW')
def testTrainMovingVarsFusedNCHW(self):
if test.is_gpu_available(cuda_only=True):
self._testTrainMovingVars(True, data_format='NCHW')
def testTrainMovingVarsFusedNHWC(self):
self._testTrainMovingVars(True, data_format='NHWC')
def testCustomInitializer(self):
height, width = 3, 3
channels = 3
with self.test_session() as sess:
images = (np.ones((5, height, width, channels)) * 9.0).astype('f')
beta = init_ops.constant_initializer((np.ones(channels) * 5.0).astype(
'f'))
gamma = init_ops.constant_initializer((np.ones(channels) * 2.0).astype(
'f'))
mean = init_ops.constant_initializer((np.ones(channels) * 5.0).astype(
'f'))
variance = init_ops.constant_initializer((np.ones(channels) * 4.0).astype(
'f'))
output = _layers.batch_norm(
images,
is_training=False,
scale=True,
epsilon=0.0,
param_initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
})
sess.run(variables_lib.global_variables_initializer())
outs = sess.run(output)
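      # With moving_mean=5, moving_variance=4, epsilon=0, gamma=2 and beta=5,
      # each value is normalized as (9 - 5) / sqrt(4) * 2 + 5 = 9, so the
      # output equals the input.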
self.assertAllClose(outs, images)
def _runBatchNormalizationWithFormat(self, shape, data_format, is_training):
channels = shape[-1]
with self.test_session() as sess:
images = np.arange(np.product(shape), dtype=np.float32).reshape(shape)
beta = init_ops.constant_initializer(
np.arange(
2, channels + 2, dtype=np.float32))
gamma = init_ops.constant_initializer(
np.arange(
10, channels + 10, dtype=np.float32) * 2.0)
mean = init_ops.constant_initializer(
np.arange(
3, channels + 3, dtype=np.float32) * 5.0)
variance = init_ops.constant_initializer(
np.arange(
1, channels + 1, dtype=np.float32) * 4.0)
if data_format == 'NCHW':
# Reshape inputs from NHWC to NCHW format.
images = array_ops.transpose(
images, [0, len(shape) - 1] + list(range(1, len(shape) - 1)))
output = _layers.batch_norm(
images,
is_training=is_training,
scale=True,
epsilon=0.5,
param_initializers={
'beta': beta,
'gamma': gamma,
'moving_mean': mean,
'moving_variance': variance,
},
data_format=data_format)
if data_format == 'NCHW':
# Reshape outputs from NCHW back to NHWC format.
output = array_ops.transpose(output,
[0] + list(range(2, len(shape))) + [1])
sess.run(variables_lib.global_variables_initializer())
return sess.run(output)
def testNHWCAndNCHWInferenceProduceSameOutput(self):
for shape in [[7, 3, 5], [5, 2, 3, 4], [11, 3, 2, 4, 5]]:
nhwc = self._runBatchNormalizationWithFormat(
data_format='NHWC', shape=shape, is_training=False)
nchw = self._runBatchNormalizationWithFormat(
data_format='NCHW', shape=shape, is_training=False)
self.assertAllClose(nhwc, nchw, atol=1e-4, rtol=1e-4)
def testNHWCAndNCHWTrainingProduceSameOutput(self):
for shape in [[7, 3, 5], [5, 2, 3, 4], [11, 3, 2, 4, 5]]:
nhwc = self._runBatchNormalizationWithFormat(
data_format='NHWC', shape=shape, is_training=True)
nchw = self._runBatchNormalizationWithFormat(
data_format='NCHW', shape=shape, is_training=True)
self.assertAllClose(nhwc, nchw, atol=1e-4, rtol=1e-4)
class LayerNormTest(test.TestCase):
def testUnknownShape(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'undefined rank'):
_layers.layer_norm(inputs)
def testUnknownLastDim(self):
with ops.Graph().as_default() as g, self.test_session(g):
inputs = array_ops.placeholder(dtype=dtypes.float32)
inputs.set_shape(tensor_shape.TensorShape((5, 3, 3, None)))
with self.assertRaisesRegexp(ValueError, 'undefined last dimension'):
_layers.layer_norm(inputs)
def testCreateOp(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.layer_norm(images)
self.assertTrue(output.op.name.startswith('LayerNorm/batchnorm'))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateVariables(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.layer_norm(images)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEqual(beta.op.name, 'LayerNorm/beta')
self.assertEqual(gamma.op.name, 'LayerNorm/gamma')
def testReuseVariables(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
_layers.layer_norm(images, scope='ln')
_layers.layer_norm(images, scope='ln', reuse=True)
beta = variables.get_variables_by_name('beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEqual(len(beta), 1)
self.assertEqual(len(gamma), 1)
def testReuseVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
images = constant_op.constant(
image_values, shape=image_shape, dtype=dtypes.float32)
output_train = _layers.layer_norm(images, scope='LN')
output_eval = _layers.layer_norm(images, scope='LN', reuse=True)
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
# output_train and output_eval should be the same.
self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))
def doOutputTest(self, input_shape, tol=1e-3):
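    # Sweep the input offset (mu) and scale (sigma) to check that the
    # layer_norm output is close to zero mean / unit variance regardless of
    # the input statistics.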
for mu in [0.0, 1e2]:
for sigma in [1.0, 0.1]:
input_values = np.random.rand(*input_shape) * sigma + mu
expected_mean = np.zeros(input_shape[0])
expected_var = np.ones(input_shape[0])
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
inputs = constant_op.constant(input_values, shape=input_shape,
dtype=dtypes.float32)
output_op = _layers.layer_norm(inputs, scope='LN')
# Initialize all variables
sess.run(variables_lib.global_variables_initializer())
# The mean and variance of the output should be close to 0 and 1
# respectively.
moments_axis = tuple([i for i in range(1, len(input_shape))])
outputs = sess.run(output_op)
# Make sure that there are no NaNs
self.assertFalse(np.isnan(outputs).any())
mean = np.mean(outputs, axis=moments_axis)
var = np.var(outputs, axis=moments_axis)
self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
self.assertAllClose(var, expected_var, rtol=tol, atol=tol)
def testOutput2DInput(self):
self.doOutputTest((10, 300))
def testOutput4DInput(self):
self.doOutputTest((100, 10, 10, 3))
def testOutputSmallInput(self):
self.doOutputTest((10, 10, 10, 30))
def testOutputBigInput(self):
self.doOutputTest((1, 100, 100, 1))
class MaxPool2DTest(test.TestCase):
def testInvalidDataFormat(self):
height, width = 3, 6
images = np.random.uniform(size=(5, height, width, 3))
with self.assertRaisesRegexp(ValueError,
'data_format has to be either NCHW or NHWC.'):
_layers.max_pool2d(images, [3, 3], data_format='CHWN')
def testCreateMaxPool(self):
height, width = 3, 6
images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)
output = _layers.max_pool2d(images, [3, 3])
self.assertEqual(output.op.name, 'MaxPool2D/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])
def testCreateMaxPoolNCHW(self):
height, width = 3, 6
images = np.random.uniform(size=(5, 3, height, width)).astype(np.float32)
output = _layers.max_pool2d(images, [3, 3], data_format='NCHW')
    self.assertEqual(output.op.name, 'MaxPool2D/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 1, 2])
def testCollectOutputs(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, [3, 3], outputs_collections='outputs')
output_collected = ops.get_collection('outputs')[0]
self.assertEqual(output_collected.aliases, ['MaxPool2D'])
self.assertEqual(output_collected, output)
def testCreateSquareMaxPool(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, 3)
self.assertEqual(output.op.name, 'MaxPool2D/MaxPool')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])
def testCreateMaxPoolWithScope(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, [3, 3], scope='pool1')
self.assertEqual(output.op.name, 'pool1/MaxPool')
def testCreateMaxPoolWithSamePadding(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, [3, 3], padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3])
def testCreateMaxPoolWithSamePaddingNCHW(self):
height, width = 3, 6
images = random_ops.random_uniform((5, 3, height, width), seed=1)
output = _layers.max_pool2d(
images, [3, 3], padding='SAME', data_format='NCHW')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3])
def testCreateMaxPoolStrideWithSamePadding(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, [3, 3], stride=1, padding='SAME')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testGlobalMaxPool(self):
height, width = 3, 6
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = _layers.max_pool2d(images, images.get_shape()[1:3], stride=1)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class OneHotEncodingTest(test.TestCase):
def testOneHotEncodingCreate(self):
with self.test_session():
labels = np.array([0, 1, 2])
output = _layers.one_hot_encoding(labels, num_classes=3)
self.assertEqual(output.op.name, 'OneHotEncoding/one_hot')
self.assertListEqual(output.get_shape().as_list(), [3, 3])
def testCollectOutputs(self):
with self.test_session():
labels = constant_op.constant([0, 1, 2])
output = _layers.one_hot_encoding(
labels, num_classes=3, outputs_collections='outputs')
c_output = ops.get_collection('outputs')[0]
self.assertEqual(c_output.aliases, ['OneHotEncoding'])
self.assertEqual(c_output, output)
def testOneHotEncoding(self):
with self.test_session():
labels = constant_op.constant([0, 1, 2])
one_hot_labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
output = _layers.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
def testOneHotEncodingInt32(self):
with self.test_session():
labels = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
one_hot_labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
output = _layers.one_hot_encoding(labels, num_classes=3)
self.assertAllClose(output.eval(), one_hot_labels.eval())
class RepeatTests(test.TestCase):
def testRepeat(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height, width, 3))
output = _layers.repeat(images, 3, layers_lib.conv2d, 32, [3, 3])
self.assertEqual(output.op.name, 'Repeat/convolution_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
def testRepeatWithScope(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.repeat(
images, 3, layers_lib.conv2d, 32, [3, 3], scope='conv1')
self.assertEqual(output.op.name, 'conv1/conv1_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])
class SeparableConv2dTest(test.TestCase):
def testCreateConvInt32(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.int32, maxval=12345)
with self.assertRaisesRegexp(TypeError, 'non-floating point type'):
layers_lib.separable_conv2d(images, 32, [3, 3], 2)
def testCreateConvFloat32(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.float32)
output = layers_lib.separable_conv2d(images, 32, [3, 3], 2)
self.assertEqual(output.op.name, 'SeparableConv2d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvFloat64(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, dtype=dtypes.float64)
output = layers_lib.separable_conv2d(images, 32, [3, 3], 2)
self.assertEqual(output.op.name, 'SeparableConv2d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateDepthwiseConv(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(images, None, [3, 3], 2)
self.assertEqual(output.op.name, 'SeparableConv2d/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 6])
def testCreateConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.separable_conv2d(images, 32, [3, 3], 4, scope='conv1')
self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
self.assertTrue(variables.get_variables('conv1/pointwise_weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateAtrousConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.separable_conv2d(images, 32, [3, 3], 4, rate=2, scope='conv1')
self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
self.assertTrue(variables.get_variables('conv1/pointwise_weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateDepthwiseConvCreatesWeightsAndBiasesVars(self):
height, width = 3, 3
images = random_ops.random_uniform((5, height, width, 3), seed=1)
with self.test_session():
self.assertFalse(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertFalse(variables.get_variables('conv1/biases'))
layers_lib.separable_conv2d(images, None, [3, 3], 4, scope='conv1')
self.assertTrue(variables.get_variables('conv1/depthwise_weights'))
self.assertFalse(variables.get_variables('conv1/pointwise_weights'))
self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateConvWithScope(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(images, 32, [3, 3], 6, scope='conv1')
self.assertEqual(output.op.name, 'conv1/Relu')
def testCreateConvWithoutActivation(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 8, activation_fn=None)
self.assertEqual(output.op.name, 'SeparableConv2d/BiasAdd')
def testCreateConvValid(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 2, padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
def testCreateAtrousConvValid(self):
height, width = 5, 5
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, 32, [3, 3], 2, padding='VALID', rate=2)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
def testCreateDepthwiseConvValid(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, None, [3, 3], 2, padding='VALID')
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 6])
def testCreateAtrousDepthwiseConvValid(self):
height, width = 5, 5
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
output = layers_lib.separable_conv2d(
images, None, [3, 3], 2, padding='VALID', rate=2)
self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 6])
def testCreateConvWithWeightDecay(self):
random_seed.set_random_seed(0)
height, width = 3, 3
with self.test_session() as sess:
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(0.01)
layers_lib.separable_conv2d(
images, 32, [3, 3], 2, weights_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertEqual(
weight_decay.op.name,
'SeparableConv2d/depthwise_kernel/Regularizer/l2_regularizer')
sess.run(variables_lib.global_variables_initializer())
self.assertLessEqual(sess.run(weight_decay), 0.05)
weight_decay = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[1]
self.assertEqual(
weight_decay.op.name,
'SeparableConv2d/pointwise_kernel/Regularizer/l2_regularizer')
self.assertLessEqual(sess.run(weight_decay), 0.05)
def testReuseConvWithWeightDecay(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform((5, height, width, 3), seed=1)
regularizer = regularizers.l2_regularizer(0.01)
layers_lib.separable_conv2d(
images, 32, [3, 3], 2, weights_regularizer=regularizer, scope='conv1')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
layers_lib.separable_conv2d(
images,
32, [3, 3],
2,
weights_regularizer=regularizer,
scope='conv1',
reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 2)
def testConvWithBatchNorm(self):
height, width = 3, 3
batch_norm_collection = 'moving_vars'
normalizer_params = {
'variables_collections': {
'beta': [batch_norm_collection],
'gamma': [batch_norm_collection],
'moving_mean': [batch_norm_collection],
'moving_variance': [batch_norm_collection],
}
}
images = random_ops.random_uniform((5, height, width, 3), seed=1)
net = layers_lib.separable_conv2d(
images,
8, [3, 3],
2,
normalizer_fn=_layers.batch_norm,
normalizer_params=normalizer_params,
scope='conv1')
net = layers_lib.separable_conv2d(
net,
32, [3, 3],
2,
normalizer_fn=_layers.batch_norm,
normalizer_params=normalizer_params,
scope='conv2')
self.assertEqual(len(ops.get_collection(batch_norm_collection)), 6)
self.assertEqual(len(variables.get_variables('conv1/BatchNorm')), 3)
self.assertEqual(len(variables.get_variables('conv2/BatchNorm')), 3)
def testConvWithInputsViaPlaceHolder(self):
height, width = 3, 3
images_placeholder = array_ops.placeholder(
dtypes.float32, shape=(None, None, None, 3))
net = layers_lib.separable_conv2d(
images_placeholder,
8, [3, 3],
2,
normalizer_fn=_layers.batch_norm,
normalizer_params={},
scope='conv1')
init_op = variables_lib.global_variables_initializer()
with self.test_session() as sess:
images = np.random.rand(5, height, width, 3)
sess.run(init_op)
sess.run(net, feed_dict={images_placeholder: images})
class SoftmaxTests(test.TestCase):
def setUp(self):
self.low = 1 / (1 + math.e)
self.high = math.e / (1 + math.e)
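    # softmax([0., 1.]) == [1 / (1 + e), e / (1 + e)]; these two values are
    # reused as the expected predictions in the tests below.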
def testSoftmax2D(self):
logits = constant_op.constant([[0.0, 1], [1, 1], [1, 0]])
prediction = _layers.softmax(logits)
exp_prediction = np.array([[self.low, self.high], [0.5, 0.5],
[self.high, self.low]])
with self.test_session() as sess:
prediction = sess.run(prediction)
self.assertAllClose(exp_prediction, prediction)
def testSoftmax3D(self):
logits = np.ones((2, 3, 2))
logits[0, 0, 0] = 0
logits[1, 1, 1] = 0
logits = constant_op.constant(logits)
exp_prediction = 0.5 * np.ones((2, 3, 2))
exp_prediction[0, 0, 0] = self.low
exp_prediction[0, 0, 1] = self.high
exp_prediction[1, 1, 0] = self.high
exp_prediction[1, 1, 1] = self.low
prediction = _layers.softmax(logits)
with self.test_session() as sess:
prediction = sess.run(prediction)
self.assertAllClose(exp_prediction, prediction)
def testSoftmax3DUnknownSize(self):
logits = np.ones((2, 3, 2))
logits[0, 0, 0] = 0
logits[1, 1, 1] = 0
logit_placeholder = array_ops.placeholder(
dtypes.float32, shape=(None, None, 2))
feed_dict = {logit_placeholder: logits}
exp_prediction = 0.5 * np.ones((2, 3, 2))
exp_prediction[0, 0, 0] = self.low
exp_prediction[0, 0, 1] = self.high
exp_prediction[1, 1, 0] = self.high
exp_prediction[1, 1, 1] = self.low
prediction = _layers.softmax(logit_placeholder)
with self.test_session() as sess:
prediction = sess.run(prediction, feed_dict=feed_dict)
self.assertAllClose(exp_prediction, prediction)
def testSoftmaxUndefinedNthDimension(self):
logits = array_ops.placeholder(dtypes.float32)
with self.assertRaises(ValueError):
_layers.softmax(logits)
class StackTests(test.TestCase):
def testStackFullyConnected(self):
height, width = 3, 3
with self.test_session():
images = np.random.uniform(size=(5, height * width * 3))
output = _layers.stack(images, _layers.fully_connected, [10, 20, 30])
self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 30])
def testStackRelu(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height * width * 3), seed=1, name='images')
output = _layers.stack(images, layers_lib.relu, [10, 20, 30])
self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 30])
def testStackConvolution2d(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.stack(
images,
layers_lib.convolution2d, [10, 20, 30],
kernel_size=[3, 3],
padding='SAME')
self.assertEqual(output.op.name, 'Stack/convolution_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
def testStackWithScope(self):
height, width = 3, 3
with self.test_session():
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.stack(
images,
layers_lib.convolution2d, [10, 20, 30],
kernel_size=[3, 3],
padding='SAME',
scope='conv1')
self.assertEqual(output.op.name, 'conv1/conv1_3/Relu')
self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])
class UnitNormTests(test.TestCase):
def testUnitNormWithRandomMatrix(self):
height, width = 2, 3
for dim in range(3):
random_seed.set_random_seed(0)
image = random_ops.random_uniform((height, width, 3))
output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
norms = math_ops.sqrt(
math_ops.reduce_sum(
math_ops.square(output), reduction_indices=dim))
shape = [height, width, 3]
del shape[dim]
expected = np.ones(shape)
with self.test_session():
actual = norms.eval()
self.assertAllClose(expected, actual, 1e-4, 1e-4)
def testDimEqualToRankRaisesError(self):
height, width = 2, 3
random_seed.set_random_seed(0)
image = random_ops.random_uniform((height, width, 3))
with self.assertRaises(ValueError):
_layers.unit_norm(image, dim=3, epsilon=1e-6)
def testUnknownRankRaisesError(self):
image = array_ops.placeholder(dtypes.float32)
with self.assertRaises(ValueError):
_layers.unit_norm(image, dim=2)
def testKnownRankUnknownDimsSucceeds(self):
height, width = 2, 3
for dim in range(3):
placeholder_value = np.ones((height, width, 3))
shape = [height, width, 3]
del shape[dim]
expected = np.ones(shape)
image = array_ops.placeholder(dtypes.float32, (None, None, 3))
output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
norms = math_ops.sqrt(
math_ops.reduce_sum(
math_ops.square(output), reduction_indices=dim))
with self.test_session():
actual = norms.eval({image: placeholder_value})
self.assertAllClose(expected, actual, 1e-4, 1e-4)
# TODO(b/28426988): Add separate tests for non-legacy versions.
class LegacyFullyConnectedTest(test.TestCase):
def setUp(self):
test.TestCase.setUp(self)
random_seed.set_random_seed(1234)
self.input = constant_op.constant([[1., 2., 3.], [-4., 15., -6.]])
self.input_3_dim_arr = [[[1., 1.1, 1.2],
[2., 2.1, 2.2],
[3., 3.1, 3.2],
[4., 4.1, 4.2]],
[[5., 5.1, 5.2],
[6., 6.1, 6.2],
[7., 7.1, 7.2],
[8., 8.1, 8.2]]]
self.input_3_dim = constant_op.constant(self.input_3_dim_arr)
assert not ops.get_collection(ops.GraphKeys.SUMMARIES)
def _fully_connected_basic_use(self, x, num_output_units, expected_shape):
output = _layers.legacy_fully_connected(
x, num_output_units, activation_fn=nn_ops.relu)
with session.Session() as sess:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(output)
variables_lib.global_variables_initializer().run()
out_value, shape_value = sess.run([output, array_ops.shape(output)])
self.assertAllClose(shape_value, expected_shape)
self.assertEqual(output.get_shape().as_list(), expected_shape)
self.assertTrue(np.all(out_value >= 0), 'Relu should have all values >= 0.')
self.assertEqual(2,
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)))
self.assertEqual(
0, len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)))
def test_fully_connected_basic_use(self):
self._fully_connected_basic_use(self.input, 8, [2, 8])
def test_fully_connected_basic_use_multi_dim(self):
for last_dim in [1, 3]:
self.setUp()
self._fully_connected_basic_use(self.input_3_dim, last_dim,
[2, 4, last_dim])
def test_relu_layer_basic_use(self):
output = layers_lib.legacy_relu(self.input, 8)
with session.Session() as sess:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(output)
variables_lib.global_variables_initializer().run()
out_value = sess.run(output)
self.assertEqual(output.get_shape().as_list(), [2, 8])
self.assertTrue(np.all(out_value >= 0), 'Relu should have all values >= 0.')
self.assertEqual(2,
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)))
self.assertEqual(
0, len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)))
def test_variable_reuse_with_scope(self):
with variable_scope.variable_scope('test') as vs:
output1 = layers_lib.legacy_relu(self.input, 8)
output2 = layers_lib.legacy_relu(self.input, 8)
with variable_scope.variable_scope(vs, reuse=True):
output3 = layers_lib.legacy_relu(self.input, 8)
with session.Session() as sess:
variables_lib.global_variables_initializer().run()
out_value1, out_value2, out_value3 = sess.run([output1, output2, output3])
self.assertFalse(np.allclose(out_value1, out_value2))
self.assertAllClose(out_value1, out_value3)
def test_variable_reuse_with_template(self):
tmpl1 = template.make_template(
'test', _layers.legacy_fully_connected, num_output_units=8)
output1 = tmpl1(self.input)
output2 = tmpl1(self.input)
with session.Session() as sess:
variables_lib.global_variables_initializer().run()
out_value1, out_value2 = sess.run([output1, output2])
self.assertAllClose(out_value1, out_value2)
def _custom_initializers(self, x, num_output_units, expected_outputs):
output = layers_lib.legacy_relu(
x,
num_output_units,
weight_init=init_ops.constant_initializer(2.0),
bias_init=init_ops.constant_initializer(1.0))
with session.Session() as sess:
variables_lib.global_variables_initializer().run()
out_value = sess.run(output)
self.assertAllClose(np.array(expected_outputs), out_value)
def test_custom_initializers(self):
self._custom_initializers(self.input, 2, [[13.0, 13.0], [11.0, 11.0]])
def test_custom_initializers_multi_dim(self):
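    # With all weights 2.0 and all biases 1.0, each output unit is
    # 2.0 * sum(row) + 1.0; e.g. the first row: 2.0 * (1.0 + 1.1 + 1.2) + 1.0 = 7.6.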
self._custom_initializers(self.input_3_dim, 2,
[[[7.6, 7.6],
[13.6, 13.6],
[19.6, 19.6],
[25.6, 25.6]],
[[31.6, 31.6],
[37.6, 37.6],
[43.6, 43.6],
[49.6, 49.6]]])
def test_custom_collections(self):
layers_lib.legacy_relu(
self.input,
2,
weight_collections=['unbiased'],
bias_collections=['biased'],
output_collections=['output'])
self.assertEqual(1, len(ops.get_collection('unbiased')))
self.assertEqual(1, len(ops.get_collection('biased')))
self.assertEqual(1, len(ops.get_collection('output')))
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
def test_all_custom_collections(self):
layers_lib.legacy_relu(
self.input,
2,
weight_collections=['unbiased', 'all'],
bias_collections=['biased', 'all'])
self.assertEqual(1, len(ops.get_collection('unbiased')))
self.assertEqual(1, len(ops.get_collection('biased')))
self.assertEqual(
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
ops.get_collection('all'))
def test_no_bias(self):
layers_lib.legacy_relu(self.input, 2, bias_init=None)
self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
def test_no_activation(self):
y = _layers.legacy_fully_connected(self.input, 2)
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertEqual('BiasAdd', y.op.type)
def test_no_activation_no_bias(self):
y = _layers.legacy_fully_connected(self.input, 2, bias_init=None)
self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertEqual('MatMul', y.op.type)
def test_regularizer(self):
cnt = [0]
tensor = constant_op.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
self.assertEqual([tensor],
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(1, cnt[0])
def test_regularizer_with_multiple_variables(self):
cnt = [0]
tensor = constant_op.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
self.assertEqual([tensor, tensor],
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(2, cnt[0])
def test_regularizer_with_variable_reuse(self):
cnt = [0]
tensor = constant_op.constant(5.0)
def test_fn(_):
cnt[0] += 1
return tensor
with variable_scope.variable_scope('test') as vs:
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
with variable_scope.variable_scope(vs, reuse=True):
_layers.legacy_fully_connected(self.input, 2, weight_regularizer=test_fn)
self.assertEqual([tensor],
ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES))
self.assertEqual(1, cnt[0])
def test_empty_x_results_in_empty_output(self):
# Empty x is common if someone masks their input with tf.boolean_mask in
# order to drop missing entries, and in a particular batch all entries are
# missing.
with self.test_session():
x = np.array([]).reshape(0, 3)
self.assertEqual(0, array_ops.size(x).eval())
y = _layers.legacy_fully_connected(x, 2, activation_fn=nn_ops.softmax)
variables_lib.global_variables_initializer().run()
expected_y = np.array([]).reshape(0, 2)
np.testing.assert_array_equal(expected_y, y.eval())
def test_shapes_variable_first_dim(self):
# first dimension is not known statically.
x = array_ops.placeholder(dtypes.float32, shape=[None, 4, 3])
y = _layers.legacy_fully_connected(x, 1)
# in the output we still only know the 2nd and 3rd dimensions statically.
self.assertEqual(y.get_shape().as_list(), [None, 4, 1])
with self.test_session() as sess:
variables_lib.global_variables_initializer().run()
# we can feed in input with first dimension 2
shape_value = sess.run(array_ops.shape(y),
feed_dict={x: self.input_3_dim_arr})
self.assertAllClose(shape_value, [2, 4, 1])
# we can feed in input with first dimension 1
shape_value = sess.run(array_ops.shape(y),
feed_dict={x: [self.input_3_dim_arr[0]]})
self.assertAllClose(shape_value, [1, 4, 1])
# we cannot feed in input with inconsistent dimensions
with self.assertRaises(ValueError):
sess.run(array_ops.shape(y), feed_dict={x: [[[]]]})
def _unknown_dim_invalid_input(self, last_dim):
x = array_ops.placeholder(dtypes.float32, shape=[3, last_dim])
_layers.legacy_fully_connected(x, 2, activation_fn=None)
def test_known_dim_valid_input(self):
self._unknown_dim_invalid_input(last_dim=3)
def test_unknown_dim_invalid_input(self):
with self.assertRaisesRegexp(
ValueError, 'last dimension of x must be known but is None'):
self._unknown_dim_invalid_input(last_dim=None)
def test_1d_invalid_input(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError,
'rank of x must be at least 2 not: 1'):
x = constant_op.constant([[]], shape=[0])
_layers.legacy_fully_connected(x, 2, activation_fn=nn_ops.softmax)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "24528159948b76616287b410b33b4176",
"timestamp": "",
"source": "github",
"line_count": 3333,
"max_line_length": 80,
"avg_line_length": 41.80858085808581,
"alnum_prop": 0.650106208915808,
"repo_name": "elingg/tensorflow",
"id": "d1b35e33c26df1a706613469355fc5825d10bb6d",
"size": "140037",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/layers/python/layers/layers_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "126495"
},
{
"name": "C++",
"bytes": "20090320"
},
{
"name": "CMake",
"bytes": "111800"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "96872"
},
{
"name": "HTML",
"bytes": "538462"
},
{
"name": "Java",
"bytes": "215285"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "4068483"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "29647"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Python",
"bytes": "16219111"
},
{
"name": "Shell",
"bytes": "314152"
},
{
"name": "TypeScript",
"bytes": "761620"
}
],
"symlink_target": ""
}
|
from neutron_lib.callbacks import events as local_events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources as local_resources
from oslo_log import log as logging
import oslo_messaging
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.handlers import resources_rpc
from neutron.services.trunk import constants as t_const
from neutron.services.trunk.drivers.linuxbridge.agent import trunk_plumber
from neutron.services.trunk.rpc import agent as trunk_rpc
LOG = logging.getLogger(__name__)
def init_handler(resource, event, trigger, payload=None):
"""Handler for agent init event."""
LinuxBridgeTrunkDriver()
@registry.has_registry_receivers
class LinuxBridgeTrunkDriver(trunk_rpc.TrunkSkeleton):
"""Driver responsible for handling trunk/subport/port events.
Receives data model events from the server and VIF events
from the agent and uses these to drive a Plumber instance
to wire up VLAN subinterfaces for any trunks.
"""
def __init__(self, plumber=None, trunk_api=None):
self._plumber = plumber or trunk_plumber.Plumber()
self._tapi = trunk_api or _TrunkAPI(trunk_rpc.TrunkStub())
super(LinuxBridgeTrunkDriver, self).__init__()
def handle_trunks(self, context, resource_type, trunks, event_type):
"""Trunk data model change from the server."""
for trunk in trunks:
if event_type in (events.UPDATED, events.CREATED):
self._tapi.put_trunk(trunk.port_id, trunk)
self.wire_trunk(context, trunk)
elif event_type == events.DELETED:
self._tapi.put_trunk(trunk.port_id, None)
self._plumber.delete_trunk_subports(trunk)
def handle_subports(self, context, resource_type, subports, event_type):
"""Subport data model change from the server."""
affected_trunks = set()
if event_type == events.DELETED:
method = self._tapi.delete_trunk_subport
else:
method = self._tapi.put_trunk_subport
for s in subports:
affected_trunks.add(s['trunk_id'])
method(s['trunk_id'], s)
for trunk_id in affected_trunks:
trunk = self._tapi.get_trunk_by_id(context, trunk_id)
if not trunk:
continue
self.wire_trunk(context, trunk)
@registry.receives(local_resources.PORT_DEVICE,
[local_events.AFTER_DELETE])
def agent_port_delete(self, resource, event, trigger, context, port_id,
**kwargs):
"""Agent informed us a VIF was removed."""
# NOTE(kevinbenton): we don't need to do anything to cleanup VLAN
# interfaces if a trunk was removed because the kernel will do that
# for us. We also don't update the trunk status to DOWN because we
# don't want to race with another agent that the trunk may have been
# moved to.
@registry.receives(local_resources.PORT_DEVICE,
[local_events.AFTER_UPDATE])
def agent_port_change(self, resource, event, trigger, context,
device_details, **kwargs):
"""The agent hath informed us thusly of a port update or create."""
trunk = self._tapi.get_trunk(context, device_details['port_id'])
if trunk:
            # the port is a trunk parent, so wire up its subports
self.wire_trunk(context, trunk)
return
        # clear any VLANs in case this was a trunk that changed status while
        # the agent was offline.
self._plumber.delete_subports_by_port_id(device_details['port_id'])
def wire_trunk(self, context, trunk):
"""Wire up subports while keeping the server trunk status apprised."""
if not self._plumber.trunk_on_host(trunk):
LOG.debug("Trunk %s not present on this host", trunk.port_id)
return
self._tapi.bind_subports_to_host(context, trunk)
try:
self._plumber.ensure_trunk_subports(trunk)
self._tapi.set_trunk_status(context, trunk, t_const.ACTIVE_STATUS)
except Exception:
if not self._plumber.trunk_on_host(trunk):
LOG.debug("Trunk %s removed during wiring", trunk.port_id)
return
# something broke
LOG.exception("Failure setting up subports for %s", trunk.port_id)
self._tapi.set_trunk_status(context, trunk,
t_const.DEGRADED_STATUS)
class _TrunkAPI(object):
"""Our secret stash of trunks stored by port ID. Tell no one."""
def __init__(self, trunk_stub):
self.server_api = trunk_stub
self._trunk_by_port_id = {}
self._trunk_by_id = {}
self._sub_port_id_to_trunk_port_id = {}
def _fetch_trunk(self, context, port_id):
try:
t = self.server_api.get_trunk_details(context, port_id)
LOG.debug("Found trunk %(t)s for port %(p)s", dict(p=port_id, t=t))
return t
except resources_rpc.ResourceNotFound:
return None
except oslo_messaging.RemoteError as e:
if e.exc_type != 'CallbackNotFound':
raise
LOG.debug("Trunk plugin disabled on server. Assuming port %s is "
"not a trunk.", port_id)
return None
def set_trunk_status(self, context, trunk, status):
self.server_api.update_trunk_status(context, trunk.id, status)
def bind_subports_to_host(self, context, trunk):
self.server_api.update_subport_bindings(context, trunk.sub_ports)
def put_trunk_subport(self, trunk_id, subport):
LOG.debug("Adding subport %(sub)s to trunk %(trunk)s",
dict(sub=subport, trunk=trunk_id))
if trunk_id not in self._trunk_by_id:
# not on this agent
return
trunk = self._trunk_by_id[trunk_id]
trunk.sub_ports = [s for s in trunk.sub_ports
if s.port_id != subport.port_id] + [subport]
def delete_trunk_subport(self, trunk_id, subport):
LOG.debug("Removing subport %(sub)s from trunk %(trunk)s",
dict(sub=subport, trunk=trunk_id))
if trunk_id not in self._trunk_by_id:
# not on this agent
return
trunk = self._trunk_by_id[trunk_id]
trunk.sub_ports = [s for s in trunk.sub_ports
if s.port_id != subport.port_id]
def put_trunk(self, port_id, trunk):
if port_id in self._trunk_by_port_id:
# already existed. expunge sub_port cross ref
self._sub_port_id_to_trunk_port_id = {
s: p for s, p in self._sub_port_id_to_trunk_port_id.items()
if p != port_id}
self._trunk_by_port_id[port_id] = trunk
if not trunk:
return
self._trunk_by_id[trunk.id] = trunk
for sub in trunk.sub_ports:
self._sub_port_id_to_trunk_port_id[sub.port_id] = trunk.port_id
def get_trunk_by_id(self, context, trunk_id):
"""Gets trunk object based on trunk_id. None if not in cache."""
return self._trunk_by_id.get(trunk_id)
def get_trunk(self, context, port_id):
"""Gets trunk object for port_id. None if not trunk."""
if port_id not in self._trunk_by_port_id:
# TODO(kevinbenton): ask the server for *all* trunk port IDs on
# start and eliminate asking the server if every port is a trunk
# TODO(kevinbenton): clear this on AMQP reconnect
LOG.debug("Cache miss for port %s, fetching from server", port_id)
self.put_trunk(port_id, self._fetch_trunk(context, port_id))
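            # recurse once: put_trunk() has just populated the cache, so the
            # recursive call hits the cached branch and returns immediately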
return self.get_trunk(context, port_id)
return self._trunk_by_port_id[port_id]
def get_trunk_for_subport(self, context, port_id):
"""Returns trunk if port_id is a subport, else None."""
trunk_port = self._sub_port_id_to_trunk_port_id.get(port_id)
if trunk_port:
return self.get_trunk(context, trunk_port)
|
{
"content_hash": "0a3d8fd10f757d4eec8d7e4c27cec7d8",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 79,
"avg_line_length": 43.48663101604278,
"alnum_prop": 0.6115346778160354,
"repo_name": "noironetworks/neutron",
"id": "0be8e2e3d9b8869afa4467c63fd455118b28725d",
"size": "8707",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/services/trunk/drivers/linuxbridge/agent/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11420614"
},
{
"name": "Shell",
"bytes": "38791"
}
],
"symlink_target": ""
}
|
import os, sys, random
import logging as log
from optparse import OptionParser
import numpy as np
import text.util, unsupervised.nmf, unsupervised.rankings, unsupervised.util
# --------------------------------------------------------------
def main():
parser = OptionParser(usage="usage: %prog [options] corpus_file")
parser.add_option("--seed", action="store", type="int", dest="seed", help="initial random seed", default=1000)
parser.add_option("--kmin", action="store", type="int", dest="kmin", help="minimum number of topics", default=5)
parser.add_option("--kmax", action="store", type="int", dest="kmax", help="maximum number of topics", default=5)
parser.add_option("-r","--runs", action="store", type="int", dest="runs", help="number of runs", default=1)
parser.add_option("--maxiters", action="store", type="int", dest="maxiter", help="maximum number of iterations", default=10)
parser.add_option("-s", "--sample", action="store", type="float", dest="sample_ratio", help="sampling ratio of documents to include in each run (range is 0 to 1)", default=0.8)
parser.add_option("-o","--outdir", action="store", type="string", dest="dir_out", help="base output directory (default is current directory)", default=None)
parser.add_option("-w","--writefactors", action="store_true", dest="write_factors", help="write complete factorization results")
parser.add_option('-d','--debug',type="int",help="Level of log output; 0 is less, 5 is all", default=3)
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error( "Must specify at least one corpus file" )
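	# Map --debug 0..5 onto logging levels: 0 -> CRITICAL (50), 1 -> ERROR (40),
	# ..., 4 and above -> DEBUG (10).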
log.basicConfig(level=max(50 - (options.debug * 10), 10), format='%(asctime)-18s %(levelname)-10s %(message)s', datefmt='%d/%m/%Y %H:%M',)
	# Toggle: use nimfa instead of scikit-learn for the NMF implementation.
use_nimfa = True
if options.dir_out is None:
dir_out_base = os.getcwd()
else:
dir_out_base = options.dir_out
# Load the cached corpus
corpus_path = args[0]
(X,terms,doc_ids,classes) = text.util.load_corpus( corpus_path )
# Choose implementation
if use_nimfa:
impl = unsupervised.nmf.NimfaNMF( max_iters = options.maxiter, init_strategy = "random", update = "euclidean" )
else:
impl = unsupervised.nmf.SklNMF( max_iters = options.maxiter, init_strategy = "random" )
n_documents = X.shape[0]
n_sample = int( options.sample_ratio * n_documents )
indices = np.arange(n_documents)
# Generate all NMF topic models for the specified numbers of topics
log.info( "Testing models in range k=[%d,%d]" % ( options.kmin, options.kmax ) )
log.info( "Sampling ratio = %.2f - %d/%d documents per run" % ( options.sample_ratio, n_sample, n_documents ) )
for k in range(options.kmin, options.kmax+1):
# Set random state
np.random.seed( options.seed )
random.seed( options.seed )
log.info( "Applying NMF (k=%d, runs=%d, seed=%s - %s) ..." % ( k, options.runs, options.seed, impl.__class__.__name__ ) )
dir_out_k = os.path.join( dir_out_base, "nmf_k%02d" % k )
if not os.path.exists(dir_out_k):
os.makedirs(dir_out_k)
log.debug( "Results will be written to %s" % dir_out_k )
# Run NMF
for r in range(options.runs):
log.info( "NMF run %d/%d (k=%d, max_iters=%d)" % (r+1, options.runs, k, options.maxiter ) )
file_suffix = "%s_%03d" % ( options.seed, r+1 )
# sub-sample data
np.random.shuffle(indices)
sample_indices = indices[0:n_sample]
S = X[sample_indices,:]
sample_doc_ids = []
for doc_index in sample_indices:
sample_doc_ids.append( doc_ids[doc_index] )
# apply NMF
impl.apply( S, k )
# Get term rankings for each topic
term_rankings = []
for topic_index in range(k):
ranked_term_indices = impl.rank_terms( topic_index )
term_ranking = [terms[i] for i in ranked_term_indices]
term_rankings.append(term_ranking)
log.debug( "Generated ranking set with %d topics covering up to %d terms" % ( len(term_rankings), unsupervised.rankings.term_rankings_size( term_rankings ) ) )
# Write term rankings
ranks_out_path = os.path.join( dir_out_k, "ranks_%s.pkl" % file_suffix )
log.debug( "Writing term ranking set to %s" % ranks_out_path )
unsupervised.util.save_term_rankings( ranks_out_path, term_rankings )
# Write document partition
partition = impl.generate_partition()
partition_out_path = os.path.join( dir_out_k, "partition_%s.pkl" % file_suffix )
log.debug( "Writing document partition to %s" % partition_out_path )
unsupervised.util.save_partition( partition_out_path, partition, sample_doc_ids )
# Write the complete factorization?
if options.write_factors:
factor_out_path = os.path.join( dir_out_k, "factors_%s.pkl" % file_suffix )
# NB: need to make a copy of the factors
log.debug( "Writing factorization to %s" % factor_out_path )
unsupervised.util.save_nmf_factors( factor_out_path, np.array( impl.W ), np.array( impl.H ), sample_doc_ids )
# --------------------------------------------------------------
if __name__ == "__main__":
main()
|
{
"content_hash": "4660e1ffe2c0f2b74806e700d08140e0",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 177,
"avg_line_length": 50.62244897959184,
"alnum_prop": 0.6609554525297319,
"repo_name": "akiratu/topic-stability",
"id": "cb77d73b5473ba1916d13f4c909812a4c859eeaf",
"size": "4983",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "generate-nmf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93182"
}
],
"symlink_target": ""
}
|
import logging
import json
import urllib.request
import itertools
import websocket
import time
import threading
import subprocess
import signal
import tempfile
import os
import socket
import random
from umbra.behaviors import Behavior
class BrowserPool:
logger = logging.getLogger(__module__ + "." + __qualname__)
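    # each pooled Browser gets its own chrome debug port, counting up from here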
BASE_PORT = 9200
def __init__(self, size=3, chrome_exe='chromium-browser'):
self._available = set()
self._in_use = set()
for i in range(0, size):
browser = Browser(BrowserPool.BASE_PORT + i, chrome_exe)
self._available.add(browser)
self._lock = threading.Lock()
self.logger.info("browser ports: {}".format([browser.chrome_port for browser in self._available]))
def acquire(self):
"""Returns browser from pool if available, raises KeyError otherwise."""
with self._lock:
browser = self._available.pop()
self._in_use.add(browser)
return browser
def release(self, browser):
with self._lock:
self._available.add(browser)
self._in_use.remove(browser)
def shutdown_now(self):
for browser in self._in_use:
browser.abort_browse_page()
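# A minimal usage sketch for BrowserPool (illustrative only; the URL is a
# placeholder):
#
#   pool = BrowserPool(size=2)
#   browser = pool.acquire()   # raises KeyError if the pool is exhausted
#   try:
#       with browser:          # start()/stop() via the context manager
#           browser.browse_page("http://example.com/")
#   finally:
#       pool.release(browser)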
class BrowsingException(Exception):
pass
class Browser:
"""Runs chrome/chromium to synchronously browse one page at a time using
worker.browse_page(). Currently the implementation starts up a new instance
of chrome for each page browsed, always on the same debug port. (In the
future, it may keep the browser running indefinitely.)"""
logger = logging.getLogger(__module__ + "." + __qualname__)
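    # upper bound on how long browse_page() will spend on a single page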
HARD_TIMEOUT_SECONDS = 20 * 60
def __init__(self, chrome_port=9222, chrome_exe='chromium-browser'):
self.command_id = itertools.count(1)
self.chrome_port = chrome_port
self.chrome_exe = chrome_exe
self._behavior = None
self._websock = None
self._abort_browse_page = False
self._chrome_instance = None
def __repr__(self):
return "{}.{}:{}".format(Browser.__module__, Browser.__qualname__, self.chrome_port)
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.stop()
def start(self):
# these can raise exceptions
self._work_dir = tempfile.TemporaryDirectory()
self._chrome_instance = Chrome(self.chrome_port, self.chrome_exe,
self._work_dir.name, os.sep.join([self._work_dir.name, "chrome-user-data"]))
self._websocket_url = self._chrome_instance.start()
def stop(self):
self._chrome_instance.stop()
try:
self._work_dir.cleanup()
except:
self.logger.error("exception deleting %s", self._work_dir,
exc_info=True)
def abort_browse_page(self):
self._abort_browse_page = True
def browse_page(self, url, on_request=None):
"""Synchronously browses a page and runs behaviors.
Raises BrowsingException if browsing the page fails in a non-critical
way.
"""
self.url = url
self.on_request = on_request
self._websock = websocket.WebSocketApp(self._websocket_url,
on_open=self._visit_page, on_message=self._handle_message)
import random
threadName = "WebsockThread{}-{}".format(self.chrome_port,
''.join((random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for _ in range(6))))
websock_thread = threading.Thread(target=self._websock.run_forever, name=threadName, kwargs={'ping_timeout':0.5})
websock_thread.start()
self._start = time.time()
aborted = False
try:
while True:
time.sleep(0.5)
if not self._websock or not self._websock.sock or not self._websock.sock.connected:
raise BrowsingException("websocket closed, did chrome die? {}".format(self._websocket_url))
elif time.time() - self._start > Browser.HARD_TIMEOUT_SECONDS:
self.logger.info("finished browsing page, reached hard timeout of {} seconds url={}".format(Browser.HARD_TIMEOUT_SECONDS, self.url))
return
                elif self._behavior is not None and self._behavior.is_finished():
self.logger.info("finished browsing page according to behavior url={}".format(self.url))
return
elif self._abort_browse_page:
raise BrowsingException("browsing page aborted")
finally:
if self._websock and self._websock.sock and self._websock.sock.connected:
try:
self._websock.close()
except BaseException as e:
self.logger.error("exception closing websocket {} - {}".format(self._websock, e))
websock_thread.join(timeout=30)
if websock_thread.is_alive():
self.logger.error("{} still alive 30 seconds after closing {}, will forcefully nudge it again".format(websock_thread, self._websock))
self._websock.keep_running = False
websock_thread.join(timeout=30)
if websock_thread.is_alive():
self.logger.critical("{} still alive 60 seconds after closing {}".format(websock_thread, self._websock))
self._behavior = None
def send_to_chrome(self, suppress_logging=False, **kwargs):
msg_id = next(self.command_id)
kwargs['id'] = msg_id
msg = json.dumps(kwargs)
if not suppress_logging:
self.logger.debug('sending message to {}: {}'.format(self._websock, msg))
self._websock.send(msg)
return msg_id
def _visit_page(self, websock):
self.send_to_chrome(method="Network.enable")
self.send_to_chrome(method="Page.enable")
self.send_to_chrome(method="Console.enable")
self.send_to_chrome(method="Debugger.enable")
self.send_to_chrome(method="Runtime.enable")
# disable google analytics, see _handle_message() where breakpoint is caught "Debugger.paused"
self.send_to_chrome(method="Debugger.setBreakpointByUrl", params={"lineNumber": 1, "urlRegex":"https?://www.google-analytics.com/analytics.js"})
# navigate to the page!
self.send_to_chrome(method="Page.navigate", params={"url": self.url})
def _handle_message(self, websock, message):
# self.logger.debug("message from {} - {}".format(websock.url, message[:95]))
# self.logger.debug("message from {} - {}".format(websock.url, message))
message = json.loads(message)
if "method" in message and message["method"] == "Network.requestWillBeSent":
if self._behavior:
self._behavior.notify_of_activity()
if message["params"]["request"]["url"].lower().startswith("data:"):
self.logger.debug("ignoring data url {}".format(message["params"]["request"]["url"][:80]))
elif self.on_request:
self.on_request(message)
elif "method" in message and message["method"] == "Page.loadEventFired":
if self._behavior is None:
self.logger.info("Page.loadEventFired, starting behaviors url={} message={}".format(self.url, message))
self._behavior = Behavior(self.url, self)
self._behavior.start()
else:
self.logger.warn("Page.loadEventFired again, perhaps original url had a meta refresh, or behaviors accidentally navigated to another page? starting behaviors again url={} message={}".format(self.url, message))
self._behavior = Behavior(self.url, self)
self._behavior.start()
elif "method" in message and message["method"] == "Console.messageAdded":
self.logger.debug("{} console.{} {}".format(websock.url,
message["params"]["message"]["level"],
message["params"]["message"]["text"]))
elif "method" in message and message["method"] == "Debugger.paused":
# We hit the breakpoint set in visit_page. Get rid of google
# analytics script!
self.logger.debug("debugger paused! message={}".format(message))
scriptId = message['params']['callFrames'][0]['location']['scriptId']
# replace script
self.send_to_chrome(method="Debugger.setScriptSource", params={"scriptId": scriptId, "scriptSource":"console.log('google analytics is no more!');"})
# resume execution
self.send_to_chrome(method="Debugger.resume")
elif "result" in message:
if self._behavior and self._behavior.is_waiting_on_result(message['id']):
self._behavior.notify_of_result(message)
# elif "method" in message and message["method"] in ("Network.dataReceived", "Network.responseReceived", "Network.loadingFinished"):
# pass
# elif "method" in message:
# self.logger.debug("{} {}".format(message["method"], message))
# else:
# self.logger.debug("[no-method] {}".format(message))
class Chrome:
logger = logging.getLogger(__module__ + "." + __qualname__)
def __init__(self, port, executable, user_home_dir, user_data_dir):
self.port = port
self.executable = executable
self.user_home_dir = user_home_dir
self.user_data_dir = user_data_dir
# returns websocket url to chrome window with about:blank loaded
def __enter__(self):
return self.start()
def __exit__(self, *args):
self.stop()
# returns websocket url to chrome window with about:blank loaded
def start(self):
timeout_sec = 600
new_env = os.environ.copy()
new_env["HOME"] = self.user_home_dir
chrome_args = [self.executable,
"--use-mock-keychain", # mac thing
"--user-data-dir={}".format(self.user_data_dir),
"--remote-debugging-port={}".format(self.port),
"--disable-web-sockets", "--disable-cache",
"--window-size=1100,900", "--no-default-browser-check",
"--disable-first-run-ui", "--no-first-run",
"--homepage=about:blank", "--disable-direct-npapi-requests",
"--disable-web-security",
"about:blank"]
self.logger.info("running {}".format(chrome_args))
self.chrome_process = subprocess.Popen(chrome_args, env=new_env, start_new_session=True)
self.logger.info("chrome running, pid {}".format(self.chrome_process.pid))
self._start = time.time() # member variable just so that kill -QUIT reports it
json_url = "http://localhost:%s/json" % self.port
while True:
try:
raw_json = urllib.request.urlopen(json_url).read()
all_debug_info = json.loads(raw_json.decode('utf-8'))
debug_info = [x for x in all_debug_info if x['url'] == 'about:blank']
if debug_info and 'webSocketDebuggerUrl' in debug_info[0]:
self.logger.debug("{} returned {}".format(json_url, raw_json))
url = debug_info[0]['webSocketDebuggerUrl']
self.logger.info('got chrome window websocket debug url {} from {}'.format(url, json_url))
return url
            except Exception:
                pass
finally:
if time.time() - self._start > timeout_sec:
raise Exception("failed to retrieve {} after {} seconds".format(json_url, time.time() - self._start))
else:
time.sleep(0.5)
def stop(self):
timeout_sec = 300
self.logger.info("terminating chrome pid {}".format(self.chrome_process.pid))
self.chrome_process.terminate()
first_sigterm = last_sigterm = time.time()
while time.time() - first_sigterm < timeout_sec:
time.sleep(0.5)
status = self.chrome_process.poll()
if status is not None:
if status == 0:
self.logger.info("chrome pid {} exited normally".format(self.chrome_process.pid, status))
else:
self.logger.warn("chrome pid {} exited with nonzero status {}".format(self.chrome_process.pid, status))
return
# sometimes a hung chrome process will terminate on repeated sigterms
if time.time() - last_sigterm > 10:
self.chrome_process.terminate()
last_sigterm = time.time()
self.logger.warn("chrome pid {} still alive {} seconds after sending SIGTERM, sending SIGKILL".format(self.chrome_process.pid, timeout_sec))
self.chrome_process.kill()
status = self.chrome_process.wait()
self.logger.warn("chrome pid {} reaped (status={}) after killing with SIGKILL".format(self.chrome_process.pid, status))
|
{
"content_hash": "5321be8dca17b2ae7a498531505644e5",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 225,
"avg_line_length": 43.663333333333334,
"alnum_prop": 0.5943201771127568,
"repo_name": "vonrosen/umbra",
"id": "3176a970ce21b30b91820ef5054bcb6b459a88b9",
"size": "13142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "umbra/browser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "26746"
},
{
"name": "Python",
"bytes": "41437"
}
],
"symlink_target": ""
}
|
from wtforms import SubmitField
from app.utils.form import model_form, BaseForm
from .models import School
class SchoolBase(BaseForm):
""" Form for a comment """
field_order = ('*', 'submit')
SchoolForm = model_form( School,
base_class=SchoolBase)
submit_add = SubmitField('Add School')
SchoolForm.submit = submit_add
|
{
"content_hash": "f1c4b41895808cb932f9f0a652251bad",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 47,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.7484472049689441,
"repo_name": "codeforanchorage/collective-development",
"id": "142f85b22dae45430746dfb70fd66d29033d0f48",
"size": "322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/mod_school/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1758"
},
{
"name": "HTML",
"bytes": "116251"
},
{
"name": "JavaScript",
"bytes": "3468"
},
{
"name": "Python",
"bytes": "93593"
},
{
"name": "Shell",
"bytes": "723"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from .. import *
from bfg9000.path import Path
from bfg9000.shell import windows
from bfg9000.safe_str import jbos, literal, shell_literal
from bfg9000.shell.list import shell_list
class TestSplit(TestCase):
def test_single(self):
self.assertEqual(windows.split('foo'), ['foo'])
self.assertEqual(windows.split(' foo'), ['foo'])
self.assertEqual(windows.split('foo '), ['foo'])
self.assertEqual(windows.split(' foo '), ['foo'])
def test_multiple(self):
self.assertEqual(windows.split('foo bar baz'), ['foo', 'bar', 'baz'])
def test_backslash(self):
self.assertEqual(windows.split(r'C:\path\to\file'),
[r'C:\path\to\file'])
def test_quote(self):
self.assertEqual(windows.split('foo "bar baz"'), ['foo', 'bar baz'])
self.assertEqual(windows.split('foo"bar baz"'), ['foobar baz'])
self.assertEqual(windows.split(r'foo "c:\path\\"'),
['foo', 'c:\\path\\'])
self.assertEqual(windows.split('foo "it\'s \\"good\\""'),
['foo', 'it\'s "good"'])
def test_type(self):
self.assertEqual(windows.split('foo bar baz', type=tuple),
('foo', 'bar', 'baz'))
def test_invalid(self):
self.assertRaises(TypeError, windows.split, 1)
class TestJoin(TestCase):
def test_empty(self):
self.assertEqual(windows.join([]), '')
def test_single(self):
self.assertEqual(windows.join(['foo']), 'foo')
self.assertEqual(windows.join(['foo bar']), '"foo bar"')
def test_multiple(self):
self.assertEqual(windows.join(['foo bar', 'baz']), '"foo bar" baz')
def test_literal(self):
self.assertEqual(windows.join(['foo bar', shell_literal('>'), 'baz']),
'"foo bar" > baz')
self.assertEqual(windows.join(['foo bar' + shell_literal('>'), 'baz']),
'"foo bar"> baz')
class TestListify(TestCase):
def test_string(self):
self.assertEqual(windows.listify('foo bar baz'), ['foo', 'bar', 'baz'])
def test_list(self):
self.assertEqual(windows.listify(['foo bar', 'baz']),
['foo bar', 'baz'])
def test_type(self):
self.assertEqual(windows.listify('foo bar baz', type=tuple),
('foo', 'bar', 'baz'))
self.assertEqual(windows.listify(['foo bar', 'baz'], type=tuple),
('foo bar', 'baz'))
class TestQuote(TestCase):
def assertQuote(self, original, needs_quote, inner_quoted, quoted,
**kwargs):
self.assertEqual(windows.inner_quote(original, **kwargs), inner_quoted)
self.assertEqual(windows.inner_quote_info(original, **kwargs),
(inner_quoted, needs_quote))
self.assertEqual(windows.quote(original, **kwargs), quoted)
self.assertEqual(windows.quote_info(original, **kwargs),
(quoted, needs_quote))
self.assertEqual(windows.force_quote(original, **kwargs),
windows.wrap_quotes(inner_quoted))
def test_empty(self):
self.assertQuote('', True, '', '""')
def test_simple(self):
self.assertQuote('foo', False, 'foo', 'foo')
def test_space(self):
self.assertQuote('foo bar', True, 'foo bar', '"foo bar"')
def test_quote(self):
self.assertQuote('"foo"', True, r'\"foo\"', r'"\"foo\""')
self.assertQuote('"foo"z', True, r'\"foo\"z', r'"\"foo\"z"')
self.assertQuote('a"foo"', True, r'a\"foo\"', r'"a\"foo\""')
self.assertQuote('a"foo"z', True, r'a\"foo\"z', r'"a\"foo\"z"')
def test_escaped_quote(self):
self.assertQuote(r'\"foobar', True, r'\\\"foobar', r'"\\\"foobar"')
self.assertQuote(r'foo\"bar', True, r'foo\\\"bar', r'"foo\\\"bar"')
self.assertQuote(r'foobar\"', True, r'foobar\\\"', r'"foobar\\\""')
def test_backslash(self):
self.assertQuote(r'foo\bar', False, r'foo\bar', r'foo\bar')
self.assertQuote('foo\\bar\\', True, r'foo\bar\\', r'"foo\bar\\"')
def test_escape_percent(self):
self.assertQuote(r'100%', False, r'100%', r'100%')
self.assertQuote(r'100%', False, r'100%%', r'100%%',
escape_percent=True)
self.assertQuote(r'"100%"', True, r'\"100%\"', r'"\"100%\""')
self.assertQuote(r'"100%"', True, r'\"100%%\"', r'"\"100%%\""',
escape_percent=True)
def test_shell_chars(self):
self.assertQuote('&&', True, '&&', '"&&"')
self.assertQuote('>', True, '>', '">"')
self.assertQuote('|', True, '|', '"|"')
def test_literal(self):
self.assertQuote(shell_literal('>'), False, '>', '>')
self.assertQuote(shell_literal(''), False, '', '')
s = shell_literal('>') + 'foo bar'
self.assertEqual(windows.quote(s), '>"foo bar"')
self.assertEqual(windows.quote_info(s), ('>"foo bar"', True))
def test_invalid(self):
for fn in (windows.quote, windows.quote_info, windows.inner_quote,
windows.inner_quote_info):
with self.assertRaises(TypeError):
fn(1)
class TestWrapQuotes(TestCase):
def test_simple(self):
self.assertEqual(windows.wrap_quotes(''), '""')
self.assertEqual(windows.wrap_quotes('f'), '"f"')
self.assertEqual(windows.wrap_quotes('fo'), '"fo"')
self.assertEqual(windows.wrap_quotes('foo'), '"foo"')
def test_escaped_quote(self):
self.assertEqual(windows.wrap_quotes(r'\"'), r'"\""')
self.assertEqual(windows.wrap_quotes(r'\"foobar'), r'"\"foobar"')
self.assertEqual(windows.wrap_quotes(r'foo\"bar'), r'"foo\"bar"')
self.assertEqual(windows.wrap_quotes(r'foobar\"'), r'"foobar\""')
class TestEscapeLine(TestCase):
def test_string(self):
self.assertEqual(windows.escape_line('foo bar'),
shell_list([shell_literal('foo bar')]))
def test_jbos(self):
self.assertEqual(
windows.escape_line(jbos('foo', literal('bar'))),
shell_list([ jbos(shell_literal('foo'), literal('bar')) ])
)
def test_path(self):
self.assertEqual(windows.escape_line(Path('foo')),
shell_list([Path('foo')]))
def test_iterable(self):
self.assertEqual(windows.escape_line(['foo', 'bar']), ['foo', 'bar'])
gen = (i for i in ['foo', 'bar'])
self.assertEqual(windows.escape_line(gen), gen)
self.assertEqual(windows.escape_line(gen, listify=True),
['foo', 'bar'])
class TestJoinLines(TestCase):
def test_empty(self):
self.assertEqual(windows.join_lines([]), [])
def test_single(self):
self.assertEqual(windows.join_lines(['foo']), shell_list([
shell_literal('foo')
]))
self.assertEqual(windows.join_lines([['foo']]), ['foo'])
self.assertEqual(windows.join_lines([['foo', 'bar']]), ['foo', 'bar'])
def test_multiple(self):
self.assertEqual(windows.join_lines(['foo', 'bar']), shell_list([
shell_literal('foo'),
shell_literal('&&'),
shell_literal('bar'),
]))
self.assertEqual(
windows.join_lines([['foo', 'bar'], 'baz']),
shell_list([
'foo', 'bar',
shell_literal('&&'),
shell_literal('baz'),
])
)
class TestGlobalEnv(TestCase):
def test_empty(self):
self.assertEqual(windows.global_env({}), [])
self.assertEqual(windows.global_env({}, ['cmd']), shell_list([
shell_literal('cmd')
]))
self.assertEqual(windows.global_env({}, [['cmd']]), ['cmd'])
def test_single(self):
env = {'NAME': 'VALUE'}
self.assertEqual(windows.global_env(env), shell_list([
'set', 'NAME=VALUE'
]))
self.assertEqual(windows.global_env(env, ['cmd']), shell_list([
'set', 'NAME=VALUE',
shell_literal('&&'),
shell_literal('cmd')
]))
self.assertEqual(windows.global_env(env, [['cmd']]), shell_list([
'set', 'NAME=VALUE',
shell_literal('&&'),
'cmd'
]))
def test_multiple(self):
env = OrderedDict((('FOO', 'oof'), ('BAR', 'rab')))
self.assertEqual(windows.global_env(env), shell_list([
'set', 'FOO=oof',
shell_literal('&&'),
'set', 'BAR=rab'
]))
self.assertEqual(windows.global_env(env, ['cmd']), shell_list([
'set', 'FOO=oof',
shell_literal('&&'),
'set', 'BAR=rab',
shell_literal('&&'),
shell_literal('cmd')
]))
self.assertEqual(windows.global_env(env, [['cmd']]), shell_list([
'set', 'FOO=oof',
shell_literal('&&'),
'set', 'BAR=rab',
shell_literal('&&'),
'cmd'
]))
|
{
"content_hash": "ebe8b74d022774ceb5dfca640c7312f5",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 79,
"avg_line_length": 36.342629482071715,
"alnum_prop": 0.5340934005700504,
"repo_name": "jimporter/bfg9000",
"id": "1fa8db6d6e95ccacc456c8229ddb57440c2d6f67",
"size": "9122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/shell/test_windows.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "783"
},
{
"name": "C++",
"bytes": "14009"
},
{
"name": "Fortran",
"bytes": "229"
},
{
"name": "Java",
"bytes": "621"
},
{
"name": "Lex",
"bytes": "579"
},
{
"name": "Objective-C",
"bytes": "148"
},
{
"name": "Objective-C++",
"bytes": "167"
},
{
"name": "Python",
"bytes": "1262127"
},
{
"name": "Roff",
"bytes": "155"
},
{
"name": "Scala",
"bytes": "62"
},
{
"name": "Yacc",
"bytes": "792"
}
],
"symlink_target": ""
}
|
"""
unmature
"""
def initial_parse_given_csv():
import pandas as pd
train = pd.read_csv('sampleSubmission.csv')
    ids = train.Id.values
    assert len(ids) == NUM_TEST
    dump(ids, 'id')
train = pd.read_csv('train.csv')
xs = train.drop('Id', axis=1)
xs = xs.drop('Cover_Type', axis=1).values
ys = train.Cover_Type.values
assert len(xs) == NUM_TRAIN
assert len(ys) == NUM_TRAIN
assert len(set(ys)) == NUM_CLASSES
dump(xs, 'xs')
dump(ys, 'ys')
test = pd.read_csv('test.csv')
xs_sub = test.drop('Id', axis=1).values
assert len(xs_sub) == NUM_TEST
dump(xs_sub, 'xs_sub')
|
{
"content_hash": "8dc20f3aee9d690f2634f787afe0f100",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 47,
"avg_line_length": 25.2,
"alnum_prop": 0.5873015873015873,
"repo_name": "nishio/kagura",
"id": "d59300c5a367822cf5e07ce64a7d8a98dd734060",
"size": "630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kagura/from_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53326"
}
],
"symlink_target": ""
}
|
import json
import re
BUG_PATTERN = r'([Rr]elated|[Pp]artial|[Cc]loses)-[Bb]ug[:]?[\s]?[#]?([0-9]+)'
BUG_RE = re.compile(BUG_PATTERN, re.MULTILINE)
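# Illustrative matches for BUG_RE (examples, not part of the original script):
#     "Closes-Bug: #1234567" -> ('Closes', '1234567')
#     "partial-bug 42"       -> ('partial', '42')
# BUG_RE.findall() therefore yields (impact, bug_number) string pairs.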
def debug(d):
print(json.dumps(d, indent=4, sort_keys=True))
EVENTS = []
with open('bugsmash/events') as f:
for line in f.readlines():
EVENTS.append(json.loads(line))
USERS = []
with open('bugsmash/validated-ids') as f:
for line in f.readlines():
USERS.append(line.strip())
STATS = {
'participants': set(),
'new-patches': 0,
'new-patches-total': 0,
'approved-patches': set(),
'code-reviews': 0,
'revised-patches': 0,
'revised-patches-total': 0,
'patches-merged-total': 0,
'impacted-bugs': set(),
'fixed-bugs': set(),
'contributors-registered': set(USERS),
}
for event in EVENTS:
# We don't care about these events for summary purposes.
if event['type'] in (
'change-abandoned',
'change-restored',
'ref-replicated',
'ref-replication-done',
'ref-updated',
'reviewer-added',
'topic-changed'):
continue
bugs = BUG_RE.findall(event['change']['commitMessage'])
if event['type'] == 'patchset-created':
if event['patchSet']['kind'] in (
'NO_CHANGE',
'NO_CODE_CHANGE',
'TRIVIAL_REBASE'):
continue
if event['patchSet']['kind'] == 'REWORK':
if event['patchSet']['uploader']['username'] in USERS:
STATS['participants'].add(
event['patchSet']['uploader']['username'])
for impact, bug_number in bugs:
STATS['impacted-bugs'].add(int(bug_number))
if int(event['patchSet']['number']) == 1:
STATS['new-patches-total'] += 1
if event['patchSet']['uploader']['username'] in USERS:
STATS['new-patches'] += 1
else:
STATS['revised-patches-total'] += 1
if event['patchSet']['uploader']['username'] in USERS:
STATS['revised-patches'] += 1
else:
debug(event)
raise SystemExit()
elif event['type'] == 'comment-added':
if event['author'].get('username') in USERS:
STATS['code-reviews'] += 1
STATS['participants'].add(event['author']['username'])
for impact, bug_number in bugs:
STATS['impacted-bugs'].add(int(bug_number))
for approval in event.get('approvals', []):
if approval['type'] == 'Workflow' and approval['value'] == '1':
STATS['approved-patches'].add(event['change']['number'])
break
elif event['type'] == 'change-merged':
STATS['patches-merged-total'] += 1
        if (event['patchSet']['author']['username'] in USERS or
                event['patchSet']['uploader']['username'] in USERS or
                # owner is an account dict in gerrit stream events
                event['change']['owner'].get('username') in USERS):
            for impact, bug_number in bugs:
                STATS['impacted-bugs'].add(int(bug_number))
                if impact.lower() in ('closes', 'partial'):
                    STATS['fixed-bugs'].add(int(bug_number))
else:
debug(event)
raise Exception(event)
STATS['impacted-bugs'] = len(STATS['impacted-bugs'])
STATS['approved-patches'] = len(STATS['approved-patches'])
STATS['fixed-bugs'] = len(STATS['fixed-bugs'])
STATS['participants'] = len(STATS['participants'])
STATS['contributors-registered'] = len(STATS['contributors-registered'])
debug(STATS)
print(
'Of %d contributors that were tracked, %d participated in gerrit '
'(%.0f%%). Those %d participants did %d code reviews, authored patches to '
'fix %d bugs, and impacted %d bugs in total.' % (
STATS['contributors-registered'],
STATS['participants'],
100.0 * STATS['participants'] / STATS['contributors-registered'],
STATS['participants'],
STATS['code-reviews'],
STATS['fixed-bugs'],
STATS['impacted-bugs'],
)
)
|
{
"content_hash": "abb6e32216282bccf9d9938ddc45772a",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 35.20338983050848,
"alnum_prop": 0.5469427058257101,
"repo_name": "dolph/gerrit-growler",
"id": "adeae84dcda0219acbd54cc02189efde7ace57da",
"size": "4700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summarize_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20183"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('bike', '0004_auto_20170129_2220'),
]
operations = [
migrations.AlterField(
model_name='bike',
name='donated_at',
field=models.DateField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='bike',
name='donated_by',
field=models.TextField(blank=True, null=True),
),
]
|
{
"content_hash": "60d5b1238e84dfa801bde90a44949ade",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 70,
"avg_line_length": 24.541666666666668,
"alnum_prop": 0.5891341256366723,
"repo_name": "BridgeCityBicycleCoop/workstand",
"id": "b3f25d7619468f69cd081c1937b7376ad5bf96d3",
"size": "662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bike/migrations/0005_auto_20170202_0345.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "49623"
},
{
"name": "JavaScript",
"bytes": "59638"
},
{
"name": "Python",
"bytes": "106312"
},
{
"name": "SCSS",
"bytes": "9234"
},
{
"name": "Shell",
"bytes": "24"
}
],
"symlink_target": ""
}
|
import sys, os, shlex, distutils.spawn
def main(argv, environ, env_prefix, main_class):
prefix = environ.get(
env_prefix + 'PREFIX',
os.path.abspath(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)))))
lib = environ.get(
env_prefix + 'LIB',
os.path.join(prefix, 'lib'))
etc = environ.get(
env_prefix + 'ETC',
os.path.join(prefix, 'etc'))
java_classpath = os.path.join(lib, "*")
java_args = ['-classpath', java_classpath]
log_config = environ.get(env_prefix + 'LOG_CONFIG')
if not log_config:
for candidate in 'log4j2-test.xml', \
'log4j2.xml':
f = os.path.join(etc, candidate)
if os.path.isfile(f):
log_config = f
break
if log_config:
java_args.append("-Dlog4j.configurationFile=%s" % log_config)
config_file = environ.get(env_prefix + 'CONFIG_FILE')
if not config_file:
for candidate in 'application.conf', \
'application.json', \
'application.properties':
f = os.path.join(etc, candidate)
if os.path.isfile(f):
config_file = f
break
if config_file:
java_args.append("-Dconfig.file=%s" % config_file)
if 'JAVA_ARGS' in environ:
java_args.extend(shlex.split(environ['JAVA_ARGS']))
java = os.path.join(environ['JAVA_HOME'], 'bin', 'java') \
if 'JAVA_HOME' in environ \
else distutils.spawn.find_executable('java')
main = environ.get(
env_prefix + 'MAIN',
main_class)
args = [java] + java_args + [main] + argv[1:]
os.execv(java, args)
if __name__ == "__main__":
main(sys.argv, os.environ, '${main.prefix}', '${main.class}')
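# Hypothetical invocation sketch: the ${main.prefix} and ${main.class} tokens
# are substituted at build time, and environment variables named
# <prefix>PREFIX, <prefix>LIB, <prefix>ETC, <prefix>LOG_CONFIG,
# <prefix>CONFIG_FILE and <prefix>MAIN override the defaults derived from
# this script's location, e.g. (assuming a prefix of ZK_):
#
#     JAVA_ARGS='-Xmx512m' ZK_PREFIX=/opt/zk python main.py --help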
|
{
"content_hash": "ccc9a004ed623802d511db0763733b29",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 69,
"avg_line_length": 34.32142857142857,
"alnum_prop": 0.5156087408949012,
"repo_name": "lisaglendenning/zookeeper-lite",
"id": "8233cc9ef7cb18977f76a083332c225acd996fdf",
"size": "2232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zkcore/src/main/bin/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1184245"
},
{
"name": "Python",
"bytes": "2232"
}
],
"symlink_target": ""
}
|
import time
def retry_exc_handler(tries_remaining, exception, delay):
"""
Exception handler for the retry decorator, logs exceptions to the database
:param tries_remaining: The number of tries remaining
:param exception: The exception instance which was raised
:param delay: We will sleep this many seconds
"""
print 'Caught \'{0}\', {1} tries remaining, sleeping for {2} seconds'.format(exception, tries_remaining, delay)
def retries(max_tries, delay=1, backoff=2, exceptions=(Exception,), hook=None):
"""
Function decorator implementing retrying logic
Based on: https://gist.github.com/n1ywb/2570004
The decorator will call the function up to max_tries times if it raises
an exception.
By default it catches instances of the Exception class and subclasses.
This will recover after all but the most fatal errors. You may specify
a custom tuple of exception classes with the `exceptions` argument; the
function will only be retried if it raises one of the specified exceptions.
Additionally, you may specify a hook function which will be called prior
to retrying with the number of remaining tries and the exception instance.
This is primarily intended to give the opportunity to log the failure.
Hook is not called after failure if no retries remain.
:param max_tries: The decorator will call the function up to max_tries time
:param delay: Sleep this many seconds * backoff * try number after failure
:param backoff: Multiple delay by this factor after each failure
:param exceptions: A tuple of exception classes; default (Exception,)
:param hook: A function with the signature myhook(tries_remaining, exception, delay);
default None
"""
def dec(func):
def f2(*args, **kwargs):
my_delay = delay
tries = range(max_tries)
tries.reverse()
for tries_remaining in tries:
try:
return func(*args, **kwargs)
except exceptions as e:
if tries_remaining > 0:
if hook is not None:
hook(tries_remaining, e, my_delay)
time.sleep(my_delay)
my_delay *= backoff
else:
raise
return f2
return dec
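# A minimal usage sketch, assuming a flaky fetch_data() exists: retry up to
# five times, sleeping 1s, 2s, 4s, ... between attempts and reporting each
# failure through retry_exc_handler.
#
#     @retries(5, delay=1, backoff=2, hook=retry_exc_handler)
#     def fetch_data():
#         ...  # may raise, e.g., IOError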
|
{
"content_hash": "8398b4cb38b100330be1818421810ec0",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 115,
"avg_line_length": 40.278688524590166,
"alnum_prop": 0.6328856328856329,
"repo_name": "dn0z/Steam-Headers-Downloader",
"id": "f640179050a1da946531e5254ea247900659d57f",
"size": "3763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "retries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7986"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals
import weblab.experiment.exc as ExperimentErrors
import weblab.experiment.level as ExperimentApiLevel
import weblab.core.coordinator.coordinator as Coordinator
import json
class Experiment(object):
def __init__(self, *args, **kwargs):
super(Experiment, self).__init__(*args, **kwargs)
def do_start_experiment(self, client_initial_data, server_initial_data):
""" do_start_experiment(client_initial_data, server_initial_data) -> initial_configuration
This method indicates that a student has been assigned to use this
laboratory. client_initial_data will provide the data (typically a
JSON-serialized string) that the experiment client submitted (if any),
and server_initial_data is a JSON-serialized string with the data passed
by the core server. This includes the time slot available for the
current user, the priority, etc.
This method must return a JSON-serialized string which can be an empty
object ("{}"), but it can state that it is a batch experiment (and
therefore the scheduler will mark it as free once the start method has
finished), and it can provide information that the client will receive
(such as "the URL for the camera in this copy of the laboratory is this
one").
"""
# Default implementation: empty
return "{}"
def do_get_api(self):
"""
do_get_api() -> api_version
Reports the api version that the experiment uses. The default api level is the
current one. Experiments may override this method to return a different one.
        TODO: Providing such a default might lead to errors: if a new api were
        released, old experiments that didn't override get_api would silently
        be using the wrong api. It might be safer to enforce overriding of
        get_api(), or at least to issue some kind of warning when an
        experiment doesn't.
"""
return ExperimentApiLevel.current
def do_send_file_to_device(self, file_content, file_info):
"""do_send_file_to_device(file_content, file_info)
raises (FeatureNotImplemented, SendingFileFailureError)
"""
raise ExperimentErrors.FeatureNotImplementedError(
"send_file_to_device has not been implemented in this experiment"
)
def do_send_command_to_device(self, command):
"""do_send_command_to_device(command)
raises (FeatureNotImplemented, SendingCommandFailureError)
"""
raise ExperimentErrors.FeatureNotImplementedError(
"send_command_to_device has not been implemented in this experiment"
)
def do_should_finish(self):
"""
        Should the experiment finish? If the experiment server should be able to
        say "I've finished", it will be asked periodically; if the experiment
        is completely interactive (so it's up to the user and the permissions of
        the user to decide when the session should finish), it will never be asked.
Therefore, this method will return a numeric result, being:
- result > 0: it hasn't finished but ask within result seconds.
- result == 0: completely interactive, don't ask again
- result < 0: it has finished.
"""
return 0
def do_dispose(self):
"""
Experiment should clean the resources now, and optionally return data. Default implementation: yes, I have finished.
"""
return json.dumps({ Coordinator.FINISH_FINISHED_MESSAGE : True, Coordinator.FINISH_DATA_MESSAGE : ""})
def do_is_up_and_running(self):
"""
Is the experiment up and running?
The scheduling system will ensure that the experiment will not be
assigned to other student while this method is called. The result
is an array of integer + String, where the first argument is:
        - result > 0: "the experiment is OK; please check again
              within $result seconds"
- result == 0: the experiment is OK and I can't perform a proper
estimation
- result == -1: "the experiment is broken"
And the second (String) argument is the message detailing while
it failed
"""
return (600, '') # Default value: check every 10 minutes
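# A minimal subclass sketch (illustrative only, not part of weblab): a purely
# interactive experiment typically just overrides the command handler and
# inherits the defaults above.
#
#     class EchoExperiment(Experiment):
#         def do_send_command_to_device(self, command):
#             return "echo: %s" % command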
|
{
"content_hash": "095bcc6e2a29c79a6449c66d78ba80da",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 124,
"avg_line_length": 43.23300970873787,
"alnum_prop": 0.660229059061307,
"repo_name": "weblabdeusto/weblabdeusto",
"id": "1e81a676c89b310ac84d3d2d12f23a85874c7e50",
"size": "4831",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/src/weblab/experiment/experiment.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP.NET",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "202991"
},
{
"name": "CoffeeScript",
"bytes": "39146"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "620835"
},
{
"name": "Java",
"bytes": "856300"
},
{
"name": "JavaScript",
"bytes": "1606001"
},
{
"name": "Less",
"bytes": "13422"
},
{
"name": "Makefile",
"bytes": "24995"
},
{
"name": "Mako",
"bytes": "1236"
},
{
"name": "PHP",
"bytes": "159985"
},
{
"name": "Python",
"bytes": "3739523"
},
{
"name": "Shell",
"bytes": "7880"
},
{
"name": "Smarty",
"bytes": "42585"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
}
|
import os
import sys
import hashlib
import re
import redis
import MySQLdb
import chardet
import ConfigParser
import subprocess
import shlex
import binascii
import xlrd
#import datetime
#from dateutil.parser import parse
from PyQt4 import QtGui, QtCore
from hashlib import sha256
from hmac import HMAC
import time
import threading
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
def encrypt_password(password, salt=None):
"""Hash password on the fly."""
if salt is None:
salt = os.urandom(8) # 64 bits.
assert 8 == len(salt)
assert isinstance(salt, str)
if isinstance(password, unicode):
password = password.encode('UTF-8')
assert isinstance(password, str)
result = password
for _ in xrange(10):
result = HMAC(result, salt, sha256).digest()
return salt + result
def validate_password(hashed, input_password):
return hashed == encrypt_password(input_password, salt=hashed[:8])
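# A minimal usage sketch: encrypt_password() stores the random 8-byte salt as
# the first 8 bytes of the result, which validate_password() slices back out.
#
#     hashed = encrypt_password(u'secret')
#     assert validate_password(hashed, u'secret')
#     assert not validate_password(hashed, u'wrong')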
def execCLI(cmd_line, shell=True):
    # NB: passing a pre-split argument list together with shell=True only
    # behaves as intended on Windows (hence posix=False below); on POSIX,
    # only the first element would be run.
    cmd_args = shlex.split(cmd_line, posix=False)
    cmd_exec = subprocess.Popen(cmd_args, bufsize=0,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=shell)
    output, stderr_out = cmd_exec.communicate()
    cmd_exec.wait()
    return (cmd_exec.returncode, output, stderr_out)
class Ui_Register(QtGui.QDialog):
def __init__(self, db, parent=None):
self.db = db
QtGui.QDialog.__init__(self, parent)
self.resize(429, 253)
self.label = QtGui.QLabel(self)
self.label.setGeometry(QtCore.QRect(60, 60, 261, 16))
self.label.setObjectName(_fromUtf8("label"))
self.lineEdit = QtGui.QLineEdit(self)
self.lineEdit.setGeometry(QtCore.QRect(60, 90, 231, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.label_2 = QtGui.QLabel(self)
self.label_2.setGeometry(QtCore.QRect(60, 120, 231, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.lineEdit_2 = QtGui.QLineEdit(self)
self.lineEdit_2.setGeometry(QtCore.QRect(60, 150, 231, 20))
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.pushButton = QtGui.QPushButton(self)
self.pushButton.setGeometry(QtCore.QRect(140, 200, 75, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_2 = QtGui.QPushButton(self)
self.pushButton_2.setGeometry(QtCore.QRect(220, 200, 75, 23))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton.clicked.connect(self.login)
self.pushButton_2.clicked.connect(self.reject)
self.retranslateUi()
# QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self):
self.setWindowTitle(_translate("Dialog", u"注册管理员密码", None))
self.label.setText(_translate("Dialog", u"初次使用,请注册管理员(admin)密码:", None))
self.label_2.setText(_translate("Dialog", u"请再次输入:", None))
self.pushButton.setText(_translate("Dialog", u"确定", None))
self.pushButton_2.setText(_translate("Dialog", u"取消", None))
def login(self):
password_1 = str(self.lineEdit.text())
password_2 = str(self.lineEdit_2.text())
if not password_1 and not password_2:
QtGui.QMessageBox.critical(self, 'Error', u'密码不能为空!')
return
if password_1 != password_2:
QtGui.QMessageBox.critical(self, 'Error', u'两次输入密码不一致, 请重新输入!')
self.lineEdit.clear()
self.lineEdit_2.clear()
else:
print 'write into db'
# encr_pwd = encrypt_password(password_1)
self.db.add_user('admin', password_1)
self.accept()
class Ui_Login(QtGui.QDialog):
def __init__(self, db, parent=None):
self.db = db
QtGui.QDialog.__init__(self, parent)
self.resize(432, 257)
self.label = QtGui.QLabel(self)
self.label.setGeometry(QtCore.QRect(60, 80, 61, 21))
self.label.setObjectName(_fromUtf8("label"))
self.groupBox = QtGui.QGroupBox(self)
self.groupBox.setGeometry(QtCore.QRect(30, 40, 361, 181))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.pushButton = QtGui.QPushButton(self.groupBox)
self.pushButton.setGeometry(QtCore.QRect(130, 130, 75, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_2 = QtGui.QPushButton(self.groupBox)
self.pushButton_2.setGeometry(QtCore.QRect(210, 130, 75, 23))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton.clicked.connect(self.login)
self.pushButton_2.clicked.connect(self.reject)
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(90, 40, 191, 21))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.lineEdit_2 = QtGui.QLineEdit(self.groupBox)
self.lineEdit_2.setGeometry(QtCore.QRect(90, 80, 191, 21))
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.lineEdit_2.setEchoMode(QtGui.QLineEdit.Password)
self.lineEdit.setPlaceholderText('username')
self.lineEdit_2.setPlaceholderText('password')
self.label_3 = QtGui.QLabel(self)
self.label_3.setGeometry(QtCore.QRect(60, 120, 61, 21))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.retranslateUi()
# QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self):
self.setWindowTitle(_translate("Dialog", u"HDCP Key 管理工具", None))
self.label.setText(_translate("Dialog", u"用户名:", None))
self.groupBox.setTitle(_translate("Dialog", u"登录", None))
self.label_3.setText(_translate("Dialog", u"密码:", None))
self.pushButton.setText(_translate("Dialog", u"确定", None))
self.pushButton_2.setText(_translate("Dialog", u"取消", None))
def login(self):
try:
password = self.db.get_ps(self.lineEdit.text())
except redis.exceptions.ConnectionError:
QtGui.QMessageBox.critical(self, 'Error', u'Redis server 没有运行!')
return
        if password is None:
QtGui.QMessageBox.critical(self, 'Error', u'用户名: "%s" 不存在!'%self.lineEdit.text())
elif password == self.lineEdit_2.text():
self.current_user = self.lineEdit.text()
self.accept()
else:
QtGui.QMessageBox.critical(self, 'Error', u'密码不正确!')
#class WorkThread(QtCore.QThread):
# def __init__(self, parent = None, _func):
# super(WorkThread, self).__init__(parent)
# self.func = _func(
#def run(self):
# _func()
class Ui_MainWindow(QtGui.QMainWindow):
sinOut_err = QtCore.pyqtSignal(str)
sinOut_progress_bar = QtCore.pyqtSignal(int)
sinOut_info = QtCore.pyqtSignal(str, str)
sinOut_enable = QtCore.pyqtSignal(bool)
sinOut_status = QtCore.pyqtSignal()
def __init__(self, redis, db, user, parent = None):
self.redis_inst = redis
self.db = db
self.user = user
QtGui.QMainWindow.__init__(self, parent)
self.setObjectName(_fromUtf8("Dialog"))
self.resize(942, 712)
self.tabWidget = QtGui.QTabWidget(self)
self.tabWidget.setGeometry(QtCore.QRect(0, 30, 931, 661))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.mac_pre = QtGui.QLineEdit(self.tab)
self.mac_pre.setGeometry(QtCore.QRect(250, 110, 311, 23))
self.mac_pre.setObjectName(_fromUtf8("mac_pre"))
self.mac_button = QtGui.QPushButton(self.tab)
self.mac_button.setGeometry(QtCore.QRect(630, 110, 75, 23))
self.mac_button.setObjectName(_fromUtf8("mac_button"))
self.mac_button.clicked.connect(self.mac_import)
self.mac_label = QtGui.QLabel(self.tab)
self.mac_label.setGeometry(QtCore.QRect(160, 110, 85, 23))
self.mac_label.setObjectName(_fromUtf8("mac_label"))
self.file_edit = QtGui.QLineEdit(self.tab)
self.file_edit.setGeometry(QtCore.QRect(250, 150, 311, 23))
self.file_edit.setObjectName(_fromUtf8("file_edit"))
self.import_button = QtGui.QPushButton(self.tab)
self.import_button.setGeometry(QtCore.QRect(630, 150, 75, 23))
self.import_button.setObjectName(_fromUtf8("import_button"))
self.import_button.clicked.connect(self.import_)
#self.sinOut.connect(self.outText)
self.sinOut_err.connect(self.warning)
self.sinOut_progress_bar.connect(self.progress_bar)
self.sinOut_info.connect(self.info)
self.sinOut_enable.connect(self.enable)
self.sinOut_status.connect(self.display_status)
self.file_import = QtGui.QPushButton(self.tab)
self.file_import.setGeometry(QtCore.QRect(160, 150, 90, 23))
self.file_import.setObjectName(_fromUtf8("file_import"))
self.file_import.clicked.connect(lambda: self.chose_file(1))
self.key_version = QtGui.QLabel(self.tab)
self.key_version.setGeometry(QtCore.QRect(160, 200, 71, 16))
self.key_version.setObjectName(_fromUtf8("key_version"))
self.x1 = QtGui.QRadioButton(self.tab)
self.x1.setGeometry(QtCore.QRect(260, 200, 89, 16))
self.x1.setObjectName(_fromUtf8("x1"))
self.x2 = QtGui.QRadioButton(self.tab)
self.x2.setGeometry(QtCore.QRect(400, 200, 89, 16))
self.x2.setObjectName(_fromUtf8("x2"))
self.key_version_group = QtGui.QButtonGroup()
self.key_version_group.addButton(self.x1)
self.key_version_group.addButton(self.x2)
self.x1.setChecked(True)
self.title_1x_tx_total = QtGui.QLabel(self.tab)
self.title_1x_tx_total.setGeometry(QtCore.QRect(160, 260, 124, 16))
self.title_1x_tx_total.setObjectName(_fromUtf8("title_1x_tx_total"))
self.num_1x_tx_left = QtGui.QLabel(self.tab)
self.num_1x_tx_left.setGeometry(QtCore.QRect(560, 260, 54, 16))
self.num_1x_tx_left.setObjectName(_fromUtf8("num_1x_tx_left"))
self.num_1x_rx_left = QtGui.QLabel(self.tab)
self.num_1x_rx_left.setGeometry(QtCore.QRect(560, 300, 80, 16))
self.num_1x_rx_left.setObjectName(_fromUtf8("num_1x_rx_left"))
self.title_1x_tx_left = QtGui.QLabel(self.tab)
self.title_1x_tx_left.setGeometry(QtCore.QRect(500, 260, 80, 16))
self.title_1x_tx_left.setObjectName(_fromUtf8("title_1x_tx_left"))
self.num_1x_tx_total = QtGui.QLabel(self.tab)
self.num_1x_tx_total.setGeometry(QtCore.QRect(330, 260, 100, 16))
self.num_1x_tx_total.setObjectName(_fromUtf8("num_1x_tx_total"))
self.num_1x_rx_total = QtGui.QLabel(self.tab)
self.num_1x_rx_total.setGeometry(QtCore.QRect(330, 300, 54, 16))
self.num_1x_rx_total.setObjectName(_fromUtf8("num_1x_rx_total"))
self.title_1x_rx_left = QtGui.QLabel(self.tab)
self.title_1x_rx_left.setGeometry(QtCore.QRect(500, 300, 51, 16))
self.title_1x_rx_left.setObjectName(_fromUtf8("title_1x_rx_left"))
self.title_1x_rx_total = QtGui.QLabel(self.tab)
self.title_1x_rx_total.setGeometry(QtCore.QRect(160, 300, 124, 16))
self.title_1x_rx_total.setObjectName(_fromUtf8("title_1x_rx_total"))
self.title_2x_tx_total = QtGui.QLabel(self.tab)
self.title_2x_tx_total.setGeometry(QtCore.QRect(160, 340, 124, 16))
self.title_2x_tx_total.setObjectName(_fromUtf8("title_2x_tx_total"))
self.num_2x_tx_left = QtGui.QLabel(self.tab)
self.num_2x_tx_left.setGeometry(QtCore.QRect(560, 340, 80, 16))
self.num_2x_tx_left.setObjectName(_fromUtf8("num_2x_tx_left"))
self.num_2x_rx_left = QtGui.QLabel(self.tab)
self.num_2x_rx_left.setGeometry(QtCore.QRect(560, 380, 80, 16))
self.num_2x_rx_left.setObjectName(_fromUtf8("num_2x_rx_left"))
self.title_2x_tx_left = QtGui.QLabel(self.tab)
self.title_2x_tx_left.setGeometry(QtCore.QRect(500, 340, 51, 16))
self.title_2x_tx_left.setObjectName(_fromUtf8("title_2x_tx_left"))
self.num_2x_tx_total = QtGui.QLabel(self.tab)
self.num_2x_tx_total.setGeometry(QtCore.QRect(330, 340, 54, 16))
self.num_2x_tx_total.setObjectName(_fromUtf8("num_2x_tx_total"))
self.num_2x_rx_total = QtGui.QLabel(self.tab)
self.num_2x_rx_total.setGeometry(QtCore.QRect(330, 380, 54, 16))
self.num_2x_rx_total.setObjectName(_fromUtf8("num_2x_rx_total"))
self.title_2x_rx_left = QtGui.QLabel(self.tab)
self.title_2x_rx_left.setGeometry(QtCore.QRect(500, 380, 51, 16))
self.title_2x_rx_left.setObjectName(_fromUtf8("title_2x_rx_left"))
self.title_2x_rx_total = QtGui.QLabel(self.tab)
self.title_2x_rx_total.setGeometry(QtCore.QRect(160, 380, 124, 16))
self.title_2x_rx_total.setObjectName(_fromUtf8("title_2x_rx_total"))
self.title_mac_total = QtGui.QLabel(self.tab)
self.title_mac_total.setGeometry(QtCore.QRect(160, 420, 124, 16))
self.title_mac_total.setObjectName(_fromUtf8("title_mac_total"))
self.num_mac_left = QtGui.QLabel(self.tab)
self.num_mac_left.setGeometry(QtCore.QRect(560, 420, 80, 16))
self.num_mac_left.setObjectName(_fromUtf8("num_mac_left"))
self.title_mac_left = QtGui.QLabel(self.tab)
self.title_mac_left.setGeometry(QtCore.QRect(500, 420, 51, 16))
self.title_mac_left.setObjectName(_fromUtf8("title_mac_left"))
self.num_mac_total = QtGui.QLabel(self.tab)
self.num_mac_total.setGeometry(QtCore.QRect(330, 420, 54, 16))
self.num_mac_total.setObjectName(_fromUtf8("num_mac_total"))
self.tabWidget.addTab(self.tab, _fromUtf8(""))
'''tab 2'''
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.tableWidget = QtGui.QTableWidget(self.tab_2)
self.tableWidget.setGeometry(QtCore.QRect(20, 20, 870, 60))
self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.tableWidget.setRowCount(2)
self.tableWidget.setColumnCount(4)
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
# self.tableWidget.itemClicked.connect(self.get_item_text)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget.setItem(0, 3, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget.setItem(1, 2, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget.setItem(1, 3, item)
self.tableWidget.horizontalHeader().setVisible(False)
self.tableWidget.horizontalHeader().setDefaultSectionSize(180)
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.setColumnWidth(0,175)
self.tableWidget.setColumnWidth(1,250)
self.tableWidget.setColumnWidth(2,175)
self.tableWidget.setColumnWidth(3,268)
self.tableWidget.setRowHeight(1, 28)
'''tableWidget_2'''
self.tableWidget_2 = QtGui.QTableWidget(self.tab_2)
self.tableWidget_2.setGeometry(QtCore.QRect(20, 80, 870, 390))
self.tableWidget_2.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.tableWidget_2.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.tableWidget_2.setRowCount(13)
self.tableWidget_2.setColumnCount(5)
self.tableWidget_2.setObjectName(_fromUtf8("tableWidget_2"))
# self.tableWidget_2.itemClicked.connect(self.get_item_text)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(0, 0, item)
self.radio = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(0, 1, self.radio)
self.radio_1 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(0, 2, self.radio_1)
self.radio_2 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(0, 3, self.radio_2)
self.radio_1.setChecked(True)
self.buttonGroup = QtGui.QButtonGroup()
self.buttonGroup.addButton(self.radio)
self.buttonGroup.addButton(self.radio_1)
self.buttonGroup.addButton(self.radio_2)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(2, 0, item)
self.radio_3 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(1, 1, self.radio_3)
self.radio_4 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(1, 2, self.radio_4)
self.radio_3.setChecked(True)
self.buttonGroup_1 = QtGui.QButtonGroup()
self.buttonGroup_1.addButton(self.radio_3)
self.buttonGroup_1.addButton(self.radio_4)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(3, 0, item)
self.radio_5 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(3, 2, self.radio_5)
self.radio_6 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(4, 2, self.radio_6)
self.radio_20 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(5, 2, self.radio_20)
self.radio_21 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(6, 2, self.radio_21)
self.radio_5.setChecked(True)
self.buttonGroup_2 = QtGui.QButtonGroup()
self.buttonGroup_2.addButton(self.radio_5)
self.buttonGroup_2.addButton(self.radio_6)
self.buttonGroup_2.addButton(self.radio_20)
self.buttonGroup_2.addButton(self.radio_21)
self.radio_22 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(3, 4, self.radio_22)
self.radio_23 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(4, 4, self.radio_23)
self.radio_24 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(5, 4, self.radio_24)
self.radio_25 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(6, 4, self.radio_25)
self.radio_22.setChecked(True)
self.buttonGroup_6 = QtGui.QButtonGroup()
self.buttonGroup_6.addButton(self.radio_22)
self.buttonGroup_6.addButton(self.radio_23)
self.buttonGroup_6.addButton(self.radio_24)
self.buttonGroup_6.addButton(self.radio_25)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(4, 0, item)
self.radio_7 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(2, 1, self.radio_7)
self.radio_8 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(2, 2, self.radio_8)
self.radio_9 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(2, 3, self.radio_9)
self.radio_7.setChecked(True)
self.buttonGroup_3 = QtGui.QButtonGroup()
self.buttonGroup_3.addButton(self.radio_7)
self.buttonGroup_3.addButton(self.radio_8)
self.buttonGroup_3.addButton(self.radio_9)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(5, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(6, 0, item)
self.radio_12 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(7, 1, self.radio_12)
self.radio_12.setChecked(True)
self.radio_13 = QtGui.QRadioButton()
self.tableWidget_2.setCellWidget(7, 2, self.radio_13)
self.buttonGroup_5 = QtGui.QButtonGroup()
self.buttonGroup_5.addButton(self.radio_12)
self.buttonGroup_5.addButton(self.radio_13)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(7, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(8, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget_2.setItem(8, 1, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(9, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget_2.setItem(9, 1, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(10, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget_2.setItem(10, 1, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(11, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget_2.setItem(11, 1, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(12, 0, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget_2.setItem(12, 1, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(3, 1, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_2.setItem(3, 3, item)
self.comboBox_3 = QtGui.QComboBox()
# # self.comboBox.setGeometry(QtCore.QRect(130, 60, 101, 22))
self.comboBox_3.setObjectName(_fromUtf8("comboBox"))
self.comboBox_3.setEditable(True)
self.tableWidget_2.setCellWidget(8, 1, self.comboBox_3)
self.tableWidget_2.horizontalHeader().setVisible(False)
self.tableWidget_2.horizontalHeader().setDefaultSectionSize(170)
self.tableWidget_2.verticalHeader().setVisible(False)
self.tableWidget_2.setWordWrap(True)
self.tableWidget_2.setSpan(0,3,1,2)
self.tableWidget_2.setSpan(1,2,1,3)
self.tableWidget_2.setSpan(2,3,1,2)
# self.tableWidget_2.setSpan(7,2,1,2)
self.tableWidget_2.setSpan(3,3,4,1)
self.tableWidget_2.setSpan(3,1,4,1)
self.tableWidget_2.setSpan(3,0,4,1)
self.tableWidget_2.setSpan(7,0,2,1)
self.tableWidget_2.setSpan(7,2,1,3)
self.tableWidget_2.setSpan(8,2,1,3)
for i in range(9, 13):
self.tableWidget_2.setSpan(i,1,1,4)
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.tableWidget_2.setColumnWidth(0,188)
self.tableWidget_2.setRowHeight(11, 28)
self.tableWidget_3 = QtGui.QTableWidget(self.tab_2)
self.tableWidget_3.setGeometry(QtCore.QRect(20, 468, 870, 60))
self.tableWidget_3.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.tableWidget_3.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.tableWidget_3.setRowCount(2)
self.tableWidget_3.setColumnCount(4)
self.tableWidget_3.horizontalHeader().setVisible(False)
self.tableWidget_3.horizontalHeader().setDefaultSectionSize(223)
self.tableWidget_3.verticalHeader().setVisible(False)
self.tableWidget_3.setWordWrap(True)
self.tableWidget_3.setColumnWidth(0,50)
self.tableWidget_3.setColumnWidth(1,110)
self.tableWidget_3.setColumnWidth(2,110)
self.tableWidget_3.setColumnWidth(3,598)
self.tableWidget_3.setRowHeight(1,28)
for i in range(0, 4):
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.NoItemFlags)
self.tableWidget_3.setItem(0, i, item)
item = QtGui.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsEnabled)
self.tableWidget_3.setItem(1, i, item)
self.file_edit_2 = QtGui.QLineEdit(self.tab_2)
self.file_edit_2.setGeometry(QtCore.QRect(100, 555, 400, 25))
self.file_edit_2.setObjectName(_fromUtf8("file_edit_2"))
self.load_table = QtGui.QPushButton(self.tab_2)
self.load_table.setGeometry(QtCore.QRect(17, 555, 75, 23))
self.load_table.setObjectName(_fromUtf8("load_table"))
self.load_table.clicked.connect(lambda: self.chose_file(2))
self.file_edit_3 = QtGui.QLineEdit(self.tab_2)
self.file_edit_3.setGeometry(QtCore.QRect(100, 585, 400, 25))
self.file_edit_3.setObjectName(_fromUtf8("file_edit_3"))
self.export_dir = QtGui.QPushButton(self.tab_2)
self.export_dir.setGeometry(QtCore.QRect(17, 585, 75, 23))
self.export_dir.setObjectName(_fromUtf8("export_dir"))
self.export_dir.clicked.connect(lambda: self.chose_file(3))
self.start_export = QtGui.QPushButton(self.tab_2)
self.start_export.setGeometry(QtCore.QRect(780, 560, 75, 41))
self.start_export.setObjectName(_fromUtf8("start_export"))
self.start_export.clicked.connect(self.export)
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName(_fromUtf8("tab_3"))
self.tabWidget.addTab(self.tab_3, _fromUtf8(""))
self.lot_id_label = QtGui.QLabel(self.tab_3)
self.lot_id_label.setGeometry(QtCore.QRect(40, 60, 81, 21))
self.lot_id_label.setObjectName(_fromUtf8("lot_id_label"))
self.comboBox = QtGui.QComboBox(self.tab_3)
self.comboBox.setGeometry(QtCore.QRect(130, 60, 101, 22))
self.comboBox.setEditable(True)
# self.display_lot()
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.wafer_id = QtGui.QLabel(self.tab_3)
self.wafer_id.setGeometry(QtCore.QRect(40, 100, 81, 21))
self.wafer_id.setObjectName(_fromUtf8("wafer_id"))
self.comboBox_2 = QtGui.QComboBox(self.tab_3)
self.comboBox_2.setGeometry(QtCore.QRect(130, 100, 101, 22))
self.comboBox_2.setEditable(True)
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.x_cor = QtGui.QLabel(self.tab_3)
self.x_cor.setGeometry(QtCore.QRect(40, 140, 81, 21))
self.x_cor.setObjectName(_fromUtf8("x_cor"))
self.y_cor = QtGui.QLabel(self.tab_3)
self.y_cor.setGeometry(QtCore.QRect(40, 180, 81, 21))
self.y_cor.setObjectName(_fromUtf8("y_cor"))
self.lineEdit_4 = QtGui.QLineEdit(self.tab_3)
self.lineEdit_4.setGeometry(QtCore.QRect(130, 140, 51, 20))
self.lineEdit_4.setObjectName(_fromUtf8("lineEdit_4"))
self.search_result = QtGui.QLabel(self.tab_3)
self.search_result.setGeometry(QtCore.QRect(40, 235, 91, 16))
self.search_result.setObjectName(_fromUtf8("search_result"))
self.textBrowser = QtGui.QTextBrowser(self.tab_3)
self.textBrowser.setGeometry(QtCore.QRect(40, 265, 390, 320))
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.search_key = QtGui.QPushButton(self.tab_3)
self.search_key.setGeometry(QtCore.QRect(320, 180, 75, 23))
self.search_key.setObjectName(_fromUtf8("search_key"))
self.search_key.clicked.connect(self.key_search)
self.label_18 = QtGui.QLabel(self.tab_3)
self.label_18.setGeometry(QtCore.QRect(190, 140, 16, 16))
self.label_18.setObjectName(_fromUtf8("label_18"))
self.lineEdit_6 = QtGui.QLineEdit(self.tab_3)
self.lineEdit_6.setGeometry(QtCore.QRect(210, 140, 51, 20))
self.lineEdit_6.setObjectName(_fromUtf8("lineEdit_6"))
self.lineEdit_5 = QtGui.QLineEdit(self.tab_3)
self.lineEdit_5.setGeometry(QtCore.QRect(130, 180, 51, 20))
self.lineEdit_5.setObjectName(_fromUtf8("lineEdit_5"))
self.lineEdit_7 = QtGui.QLineEdit(self.tab_3)
self.lineEdit_7.setGeometry(QtCore.QRect(210, 180, 51, 20))
self.lineEdit_7.setObjectName(_fromUtf8("lineEdit_7"))
self.label_19 = QtGui.QLabel(self.tab_3)
self.label_19.setGeometry(QtCore.QRect(190, 180, 16, 16))
self.label_19.setObjectName(_fromUtf8("label_19"))
self.search_lot = QtGui.QPushButton(self.tab_3)
self.search_lot.setGeometry(QtCore.QRect(320, 60, 75, 23))
self.search_lot.setObjectName(_fromUtf8("search_lot"))
self.search_lot.clicked.connect(self.lot_search)
self.line_2 = QtGui.QFrame(self.tab_3)
self.line_2.setGeometry(QtCore.QRect(450, 0, 20, 650))
self.line_2.setFrameShape(QtGui.QFrame.VLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
        # recycle / operation-record search panel
self.op_type = QtGui.QLabel(self.tab_3)
self.op_type.setGeometry(QtCore.QRect(500, 90, 81, 20))
self.op_type.setObjectName(_fromUtf8("op_type"))
self.statis_search = QtGui.QPushButton(self.tab_3)
self.statis_search.setGeometry(QtCore.QRect(780, 230, 75, 23))
self.statis_search.setObjectName(_fromUtf8("statis_search"))
self.statis_search.clicked.connect(self.op_record_search)
self.user_ = QtGui.QLabel(self.tab_3)
self.user_.setGeometry(QtCore.QRect(500, 50, 71, 20))
self.user_.setObjectName(_fromUtf8("user_"))
self.lineEdit_2 = QtGui.QLineEdit(self.tab_3)
self.lineEdit_2.setGeometry(QtCore.QRect(600, 50, 121, 20))
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.comboBox_4 = QtGui.QComboBox(self.tab_3)
self.comboBox_4.setGeometry(QtCore.QRect(600, 90, 121, 22))
self.comboBox_4.setObjectName(_fromUtf8("comboBox_4"))
self.comboBox_5 = QtGui.QComboBox(self.tab_3)
self.comboBox_5.setGeometry(QtCore.QRect(600, 130, 121, 22))
self.comboBox_5.setObjectName(_fromUtf8("comboBox_5"))
self.comboBox_5.setEditable(True)
self.inner_type = QtGui.QLabel(self.tab_3)
self.inner_type.setGeometry(QtCore.QRect(500, 130, 91, 20))
self.inner_type.setObjectName(_fromUtf8("inner_type"))
self.date_ = QtGui.QLabel(self.tab_3)
self.date_.setGeometry(QtCore.QRect(500, 170, 91, 20))
self.date_.setObjectName(_fromUtf8("date_"))
self.dateEdit = QtGui.QDateEdit(self.tab_3)
self.dateEdit.setGeometry(QtCore.QRect(600, 170, 121, 22))
self.dateEdit.setObjectName(_fromUtf8("dateEdit"))
self.dateEdit.setDateTime(self.dateEdit.dateTimeFromText('2015/3/1'))
self.dateEdit_2 = QtGui.QDateEdit(self.tab_3)
self.dateEdit_2.setGeometry(QtCore.QRect(740, 170, 121, 22))
self.dateEdit_2.setObjectName(_fromUtf8("dateEdit_2"))
self.dateEdit_2.setDateTime(self.dateEdit.dateTimeFromText(time.strftime('%Y/%m/%d',time.localtime())))
self.label_49 = QtGui.QLabel(self.tab_3)
self.label_49.setGeometry(QtCore.QRect(725, 170, 16, 20))
self.label_49.setObjectName(_fromUtf8("label_49"))
self.textBrowser_2 = QtGui.QTextBrowser(self.tab_3)
self.textBrowser_2.setGeometry(QtCore.QRect(495, 265, 390, 320))
self.textBrowser_2.setObjectName(_fromUtf8("textBrowser_2"))
self.search_result_2 = QtGui.QLabel(self.tab_3)
self.search_result_2.setGeometry(QtCore.QRect(500, 235, 91, 16))
self.search_result_2.setObjectName(_fromUtf8("search_result_2"))
#tab_4
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName(_fromUtf8("tab_4"))
self.tabWidget.addTab(self.tab_4, _fromUtf8(""))
self.line_3 = QtGui.QFrame(self.tab_4)
self.line_3.setGeometry(QtCore.QRect(450, 0, 20, 650))
self.line_3.setFrameShape(QtGui.QFrame.VLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.op_history = QtGui.QLabel(self.tab_4)
self.op_history.setGeometry(QtCore.QRect(40, 30, 91, 16))
self.op_history.setObjectName(_fromUtf8("op_history"))
self.textBrowser_3 = QtGui.QTextBrowser(self.tab_4)
self.textBrowser_3.setGeometry(QtCore.QRect(40, 50, 390, 580))
self.textBrowser_3.setObjectName(_fromUtf8("textBrowser_3"))
self.admin_password = QtGui.QLabel(self.tab_4)
self.admin_password.setGeometry(QtCore.QRect(520, 140, 65, 16))
self.admin_password.setObjectName(_fromUtf8("admin_password"))
self.lineEdit_10 = QtGui.QLineEdit(self.tab_4)
self.lineEdit_10.setGeometry(QtCore.QRect(600, 140, 111, 20))
self.lineEdit_10.setObjectName(_fromUtf8("lineEdit_10"))
self.lineEdit_10.setEchoMode(QtGui.QLineEdit.Password)
self.lot_id_2 = QtGui.QLabel(self.tab_4)
self.lot_id_2.setGeometry(QtCore.QRect(520, 180, 61, 16))
self.lot_id_2.setObjectName(_fromUtf8("label"))
self.lineEdit = QtGui.QLineEdit(self.tab_4)
self.lineEdit.setGeometry(QtCore.QRect(600, 180, 111, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.input_again = QtGui.QLabel(self.tab_4)
self.input_again.setGeometry(QtCore.QRect(520, 220, 65, 16))
self.input_again.setObjectName(_fromUtf8("input_again"))
self.lineEdit_11 = QtGui.QLineEdit(self.tab_4)
self.lineEdit_11.setGeometry(QtCore.QRect(600, 220, 111, 20))
self.lineEdit_11.setObjectName(_fromUtf8("lineEdit_11"))
self.retrieve = QtGui.QPushButton(self.tab_4)
self.retrieve.setGeometry(QtCore.QRect(770, 220, 75, 23))
self.retrieve.setObjectName(_fromUtf8("retrieve"))
self.retrieve.clicked.connect(self.retrieve_key)
self.menuBar = QtGui.QMenuBar(self)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 887, 23))
self.menuBar.setObjectName(_fromUtf8("menuBar"))
User = self.menuBar.addMenu(u'&用户管理')
add_delete = User.addAction(u'添加/删除用户')
password = User.addAction(u'修改密码')
if self.user != 'admin':
add_delete.setDisabled(True)
self.menuBar.connect(add_delete, QtCore.SIGNAL('triggered()'), self.add_delete)
self.menuBar.connect(password, QtCore.SIGNAL('triggered()'), self.change_password)
self.setMenuBar(self.menuBar)
self.progressBar = QtGui.QProgressBar(self)
self.progressBar.setGeometry(QtCore.QRect(0, 690, 940, 23))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.retranslateUi()
self.display_status()
self.combox_init()
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(self)
def retranslateUi(self):
self.setWindowTitle(_translate("Dialog", u"HDCP Key 管理工具", None))
self.mac_label.setText(_translate("Dialog", u" 输入mac前缀:", None))
self.file_import.setText(_translate("Dialog", u"导入bin文件:", None))
self.mac_button.setText(_translate("Dialog", u"开始导入", None))
self.import_button.setText(_translate("Dialog", u"开始导入", None))
self.title_1x_tx_total.setText(_translate("Dialog", u"1.x Transmitter 总数:", None))
self.num_1x_tx_left.setText(_translate("Dialog", "0", None))
self.num_1x_rx_left.setText(_translate("Dialog", "0", None))
self.title_1x_tx_left.setText(_translate("Dialog", u"剩余:", None))
self.num_1x_tx_total.setText(_translate("Dialog", "0", None))
self.num_1x_rx_total.setText(_translate("Dialog", "0", None))
self.title_1x_rx_left.setText(_translate("Dialog", u"剩余:", None))
self.title_1x_rx_total.setText(_translate("Dialog", u"1.x Receiver 总数:", None))
self.key_version.setText(_translate("Dialog", u"key 版本:", None))
self.title_2x_tx_total.setText(_translate("Dialog", u"2.x Transmitter 总数:", None))
self.num_2x_tx_left.setText(_translate("Dialog", u"0", None))
self.num_2x_rx_left.setText(_translate("Dialog", u"0", None))
self.title_2x_tx_left.setText(_translate("Dialog", u"剩余", None))
self.num_2x_tx_total.setText(_translate("Dialog", u"0", None))
self.num_2x_rx_total.setText(_translate("Dialog", u"0", None))
self.title_2x_rx_left.setText(_translate("Dialog", u"剩余", None))
self.title_2x_rx_total.setText(_translate("Dialog", u"2.x Receiver 总数:", None))
self.title_mac_total.setText(_translate("Dialog", u"Mac 总数:", None))
self.title_mac_left.setText(_translate("Dialog", u"剩余:", None))
self.num_mac_total.setText(_translate("Dialog", u"0", None))
self.num_mac_left.setText(_translate("Dialog", u"0", None))
self.x1.setText(_translate("Dialog", u"1.X", None))
self.x2.setText(_translate("Dialog", u"2.X", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", u"导入Key", None))
__sortingEnabled = self.tableWidget.isSortingEnabled()
self.tableWidget.setSortingEnabled(False)
item = self.tableWidget.item(0, 0)
item.setText(_translate("Dialog", u"申请日期", None))
item = self.tableWidget.item(0, 2)
item.setText(_translate("Dialog", u"所属部门", None))
item = self.tableWidget.item(1, 0)
item.setText(_translate("Dialog", u"申请人员", None))
item = self.tableWidget.item(1, 2)
item.setText(_translate("Dialog", u"此处待定", None))
item = self.tableWidget_2.item(0, 0)
item.setText(_translate("Dialog", u"领用类型", None))
item = self.tableWidget_2.item(1, 0)
item.setText(_translate("Dialog", u"KEY来源", None))
item = self.tableWidget_2.item(2, 0)
item.setText(_translate("Dialog", u"KEY内容", None))
# item = self.tableWidget_2.item(4, 0)
# item.setText(_translate("Dialog", u"KEY版本", None))
item = self.tableWidget_2.item(3, 0)
item.setText(_translate("Dialog", u"KEY版本/类型", None))
item = self.tableWidget_2.item(6, 0)
item.setText(_translate("Dialog", u"", None))
item = self.tableWidget_2.item(7, 0)
item.setText(_translate("Dialog", u"有无内部型号", None))
item = self.tableWidget_2.item(9, 0)
item.setText(_translate("Dialog", u"需求片数", None))
item = self.tableWidget_2.item(10, 0)
item.setText(_translate("Dialog", u"需求key数", None))
item = self.tableWidget_2.item(11, 0)
item.setText(_translate("Dialog", u"单位", None))
item = self.tableWidget_2.item(12, 0)
item.setText(_translate("Dialog", u"测试工程师", None))
item = self.tableWidget_3.item(0, 0)
item.setText(_translate("Dialog", u"序号", None))
item = self.tableWidget_3.item(0, 1)
item.setText(_translate("Dialog", u"外包商", None))
item = self.tableWidget_3.item(0, 2)
item.setText(_translate("Dialog", u"批号", None))
item = self.tableWidget_3.item(0, 3)
item.setText(_translate("Dialog", u"片号", None))
item = self.tableWidget_2.item(3, 1)
item.setText(_translate("Dialog", u"TX", None))
item = self.tableWidget_2.item(3, 3)
item.setText(_translate("Dialog", u"RX", None))
self.load_table.setText(_translate("Dialog", u"载入表格:", None))
self.export_dir.setText(_translate("Dialog", u"导出目录:", None))
self.start_export.setText(_translate("Dialog", u"开始导出", None))
self.radio.setText(u'工程')
self.radio_1.setText(u'量产')
self.radio_2.setText(u'pilot run')
self.radio_3.setText(u'Availink')
self.radio_4.setText(u'Customer')
self.radio_5.setText(u'1.X')
self.radio_6.setText(u'2.X')
self.radio_7.setText(u'16进制')
self.radio_8.setText(u'8进制')
self.radio_9.setText(u'2进制')
self.radio_12.setText(u'有内部型号')
self.radio_13.setText(u'无内部型号')
self.radio_20.setText(u'Both')
self.radio_21.setText(u'None')
self.radio_22.setText(u'1.X')
self.radio_23.setText(u'2.X')
self.radio_24.setText(u'Both')
self.radio_25.setText(u'None')
self.tableWidget.setSortingEnabled(__sortingEnabled)
__sortingEnabled = self.tableWidget_2.isSortingEnabled()
self.tableWidget_2.setSortingEnabled(False)
self.tableWidget_2.setSortingEnabled(__sortingEnabled)
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Dialog", u"导出Key", None))
self.lot_id_label.setText(_translate("Dialog", "Lot ID:", None))
self.wafer_id.setText(_translate("Dialog", "Wafer ID:", None))
self.x_cor.setText(_translate("Dialog", u"X 坐标:", None))
self.y_cor.setText(_translate("Dialog", u"Y 坐标:", None))
self.search_result.setText(_translate("Dialog", u"查询结果:", None))
self.label_18.setText(_translate("Dialog", "-", None))
self.label_19.setText(_translate("Dialog", "-", None))
self.search_key.setText(_translate("Dialog", "查询key", None))
self.search_lot.setText(_translate("Dialog", "查询lot", None))
self.op_type.setText(_translate("Dialog", "操作类型:", None))
self.user_.setText(_translate("Dialog", "用户:", None))
self.inner_type.setText(_translate("Dialog", "内部型号:", None))
self.date_.setText(_translate("Dialog", "日期:", None))
self.statis_search.setText(_translate("Dialog", "查询", None))
self.label_49.setText(_translate("Dialog", "-", None))
self.search_result_2.setText(_translate("Dialog", "查询结果:", None))
self.lot_id_2.setText(_translate("Dialog", "LotID:", None))
self.input_again.setText(_translate("Dialog", u"再输入一次:", None))
self.retrieve.setText(_translate("Dialog", "回收", None))
self.admin_password.setText(_translate("Dialog", u"管理员密码:", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("Dialog", u"查询", None))
self.op_history.setText(_translate("Dialog", "操作历史:", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("Dialog", u"历史/回收", None))
def add_delete(self):
ui = UserDialog(self.db, self)
ui.exec_()
def change_password(self):
'''change password'''
dialog = PasswordDialog(self.db, self.user, parent = self)
if dialog.exec_():
# new_password = QtGui.QStandardItem(dialog.newPassword())
new_password = dialog.new_password()
# encr_pwd = encrypt_password(str(new_password))
self.db.update_ps(str(self.user), new_password)
dialog.destroy()
def import_(self):
self.file_path = self.file_edit.text()
if not self.file_path:
            QtGui.QMessageBox.critical(self.tab, 'Error', u'You have not chosen any file!')
return
        if not os.path.exists(self.file_path):
            QtGui.QMessageBox.critical(self.tab, 'Error', u'This file does not exist!')
            return
self.tabWidget.setEnabled(False)
thread_obj = threading.Thread(target=self.parse_key_file)
thread_obj.start()
#self.display_status()
def md5_calc(self, data):
m = hashlib.md5()
m.update(data)
return m.hexdigest()
def sha1_calc(self, data):
sha1obj = hashlib.sha1()
sha1obj.update(data)
return sha1obj.hexdigest()
def mac_import(self):
mac_pre = str(self.mac_pre.text()).strip()
        if not mac_pre:
            QtGui.QMessageBox.critical(self.tab, 'Error', u'Mac 前缀不能为空!')
            return
if self.redis_inst.val_is_exist(mac_pre, 'mac'):
QtGui.QMessageBox.critical(self.tab, 'Error', u'Mac 前缀已经被导入过!')
return
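        # 256*256*256 == 16**6: enumerate every 6-hex-digit suffix under the given
        # prefix (16,777,216 addresses); rows are inserted one at a time, so a
        # full import is slow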
mac_total = 256*256*256
for i in range(0, mac_total):
val = int((i + 1)*100/mac_total)
self.progressBar.setValue(val)
self.db.insert_mac( "0000" + mac_pre + "{0:0>6}".format(hex(i)[2:]))
if i % 100 == 0:
print i
self.db.update_status('mac', 'import', total = mac_total, left = mac_total)
        self.redis_inst.add_val(mac_pre, 'mac')
self.redis_inst.add_history('%s import %s mac address at %s' % (self.user, mac_total, time.strftime('%Y/%m/%d',time.localtime())))
QtGui.QMessageBox.information(self, u'提示', u'Mac导入已完成,共导入%s个Mac!'%mac_total)
self.progressBar.setValue(0)
    def warning(self, msg):
        QtGui.QMessageBox.critical(self.tab, 'Error', msg)
    def progress_bar(self, val):
        self.progressBar.setValue(val)
    def info(self, title, msg):
        QtGui.QMessageBox.information(self, title, msg)
    def enable(self, enabled):
        self.tabWidget.setEnabled(enabled)
def parse_key_file(self):
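        # Bin layout inferred from the size checks below (an assumption, not a
        # published spec):
        #   1.X: 4-byte header, then records of 288 key bytes + 20-byte SHA-1
        #   2.X: 40-byte header, then records of 862 key bytes (no per-key checksum)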
start_time = time.time()
bin_size = os.path.getsize(self.file_path)
with open(self.file_path, 'rb') as f:
data = f.read()
md5_val = self.md5_calc(data)
if self.redis_inst.val_is_exist(md5_val):
#QtGui.QMessageBox.critical(self.tab, 'Error', u'文件数据已经被导入过!')
self.sinOut_err.emit(u'文件数据已经被导入过!')
self.sinOut_enable.emit(True)
return
order_format = bytearray(data)
f.seek(0)
key_type = self.checked_radio(self.key_version_group).text()
if key_type == '1.X':
table_pre = '1X'
if (bin_size - 4)%308 != 0:
#QtGui.QMessageBox.critical(self, 'Error', u'bin文件大小不对,请确认是否导入了正确的文件!')
self.sinOut_err.emit(u'bin文件大小不对,请确认是否导入了正确的文件!')
self.sinOut_enable.emit(True)
return
if order_format[0] == 1:
table_suff = 'TX'
else:
table_suff = 'RX'
elif key_type == '2.X':
table_pre = '2X'
if (bin_size - 40)%862 != 0:
#QtGui.QMessageBox.critical(self, 'Error', u'bin文件大小不对,请确认是否导入了正确的文件!')
self.sinOut_err.emit(u'bin文件大小不对,请确认是否导入了正确的文件!')
self.sinOut_enable.emit(True)
return
if order_format[3] == 1:
table_suff = 'TX'
else:
table_suff = 'RX'
db_table = table_pre + '_' + table_suff
print db_table
#self.tabWidget.setEnabled(False)
#self.sinOut_enable.emit(False)
if table_pre == '1X':
with open(self.file_path, 'rb') as f:
f.read(4)
key_total = (bin_size - 4)/308
key_imported = 0
for i in range(0, key_total):
key_bin = f.read(288)
sha = f.read(20)
if self.sha1_calc(key_bin) == binascii.hexlify(sha):
key_hex = binascii.hexlify(key_bin)
if i % ((key_total/1000)+1) == 0:
print i
val = int((i + 1)*100/key_total)
self.sinOut_progress_bar.emit(val)
self.db.insert_key(db_table, key_hex)
key_imported += 1
else:
print "error occured!"
self.sinOut_err.emit(u'bin文件checksum不对,请确认是否导入了正确的文件!')
self.sinOut_enable.emit(True)
return
elif db_table == '2X_RX':
with open(self.file_path, 'rb') as f:
f.read(40)
data = f.read(862)
key_total = (bin_size - 40)/862
key_imported = 0
while data:
key_hex = binascii.hexlify(data)
if key_imported % ((key_total/1000)+1) == 0:
print key_imported
val = int((key_imported + 1)*100/key_total)
self.sinOut_progress_bar.emit(val)
self.db.insert_key(db_table, key_hex)
key_imported += 1
data = f.read(862)
elif db_table == '2X_TX':
self.sinOut_err.emit(u'暂时不支持2X_TX类型!')
self.sinOut_enable.emit(True)
return 0
self.db.update_status(db_table.lower(), 'import', total = key_imported, left = key_imported)
self.redis_inst.add_val(md5_val)
today = time.strftime('%Y-%m-%d',time.localtime())
sql = "insert into op_record(user,operate_type,%s,day) values('%s','storage',%s,'%s')" % (db_table.lower(), self.user, key_imported, today)
self.db.set_op_record(sql)
self.redis_inst.add_history('%s import %s keys: %s at %s' % (self.user, db_table, key_imported, today))
# print 'history: ', '%s import %s keys: %s at %s' % (self.user, db_table, key_imported, time.strftime('%Y/%m/%d',time.localtime()))
#QtGui.QMessageBox.information(self, u'提示', u'导入已完成,共导入%s个key!'%key_imported)
self.sinOut_info.emit(u'提示',u'导入已完成,共导入%s个key!'%key_imported)
self.sinOut_progress_bar.emit(0)
end_time = time.time()
print "spend time:%d second"%(end_time-start_time)
#self.progressBar.setValue(0)
#self.tabWidget.setEnabled(True)
self.sinOut_enable.emit(True)
self.sinOut_status.emit()
def chose_file(self, n):
if n == 1:
filename = QtGui.QFileDialog.getOpenFileName(self,'Open file','./')
self.file_edit.setText(filename)
elif n == 2:
filename = str(QtGui.QFileDialog.getOpenFileName(self,'Open file','./'))
self.load_excel_form(filename)
self.file_edit_2.setText(filename)
elif n == 3:
dir_name = QtGui.QFileDialog.getExistingDirectory(self,'Open directory','./')
self.file_edit_3.setText(dir_name)
def load_excel_form(self, file_name):
try:
data = xlrd.open_workbook(file_name)
except:
QtGui.QMessageBox.critical(self, 'Error', u'打开文件格式不对!')
return
self.get_form_detail()
table = data.sheets()[0]
apply_date = (xlrd.xldate.xldate_as_datetime(table.cell(1,2).value, 0)).strftime( '%Y-%m-%d')
print apply_date
self.apply_date_p.setText(apply_date)
department = table.cell(1,7).value
self.department_p.setText(department)
apply_person = table.cell(2,2).value
self.apply_person_p.setText(apply_person)
review_person = table.cell(2,7).value
self.review_person_p.setText(review_person)
usage = table.cell(3,4).value
self.chose_radio(self.buttonGroup, usage)
key_source = table.cell(5,4).value
self.chose_radio(self.buttonGroup_1, key_source)
key_content = table.cell(7,4).value
self.chose_radio(self.buttonGroup_3, key_content)
tx = table.cell(9,5).value
self.chose_radio(self.buttonGroup_2, tx)
rx = table.cell(9,8).value
self.chose_radio(self.buttonGroup_6, rx)
inner_model = table.cell(11,4).value
self.inner_model_p.setEditText(inner_model)
chip_num = table.cell(12,4).value
self.chip_num_p.setText(str(int(chip_num)))
test_engineer = table.cell(14,4).value
self.test_engineer_p.setText(test_engineer)
No = table.cell(16,0).value
self.No_p.setText(str(int(No)))
contractor = table.cell(16,1).value
self.contractor_p.setText(contractor)
lot_id = table.cell(16,3).value
self.lot_id_p.setText(lot_id)
wafers = table.cell(16,5).value
        if isinstance(wafers, float):
            wafers = str(int(wafers))
self.wafers_p.setText(wafers)
def get_form_detail(self):
self.dir_path = self.file_edit_3.text()
self.apply_date_p = self.tableWidget.item(0, 1)
self.apply_date = self.apply_date_p.text()
self.department_p = self.tableWidget.item(0, 3)
self.department = self.department_p.text()
self.apply_person_p = self.tableWidget.item(1, 1)
self.apply_person = self.apply_person_p.text()
self.review_person_p = self.tableWidget.item(1, 3)
self.review_person = self.review_person_p.text()
self.usage = self.checked_radio(self.buttonGroup)
self.key_source = self.checked_radio(self.buttonGroup_1)
self.tx = self.checked_radio(self.buttonGroup_2).text()
self.rx = self.checked_radio(self.buttonGroup_6).text()
self.key_content = self.checked_radio(self.buttonGroup_3)
# self.inner_model_radio = self.checked_radio(self.buttonGroup_5).text()
self.inner_model_p = self.comboBox_3
self.inner_model = self.inner_model_p.currentText()
self.chip_num_p = self.tableWidget_2.item(9, 1)
self.chip_num = self.chip_num_p.text()
self.key_num_p = self.tableWidget_2.item(10, 1)
self.key_num = self.key_num_p.text()
self.unit_p = self.tableWidget_2.item(11, 1)
self.unit = self.unit_p.text()
self.test_engineer_p = self.tableWidget_2.item(12, 1)
self.test_engineer = self.test_engineer_p.text()
self.No_p = self.tableWidget_3.item(1,0)
self.No = self.No_p.text()
self.contractor_p = self.tableWidget_3.item(1,1)
self.contractor = self.contractor_p.text()
self.lot_id_p = self.tableWidget_3.item(1, 2)
self.lot_id = self.lot_id_p.text()
self.wafers_p = self.tableWidget_3.item(1, 3)
self.wafers = self.wafers_p.text()
def chose_radio(self, group, name):
for radio in group.buttons():
if radio.text() == name:
radio.setChecked(True)
def checked_radio(self, group_button):
for radio in group_button.buttons():
if radio.isChecked():
return radio
def get_statis(self):
statis = self.db.get_statistics()
X1_TX_total, X1_TX_left = statis['1X_TX'][0], statis['1X_TX'][1]
X1_RX_total, X1_RX_left = statis['1X_RX'][0], statis['1X_RX'][1]
X2_TX_total, X2_TX_left = statis['2X_TX'][0], statis['2X_TX'][1]
X2_RX_total, X2_RX_left = statis['2X_RX'][0], statis['2X_RX'][1]
mac_total, mac_left = statis['mac'][0], statis['mac'][1]
return X1_TX_total, X1_TX_left, X1_RX_total, X1_RX_left, X2_TX_total, X2_TX_left, X2_RX_total, X2_RX_left, mac_total, mac_left
def display_status(self):
X1_TX_total, X1_TX_left, X1_RX_total, X1_RX_left, X2_TX_total, X2_TX_left, X2_RX_total, X2_RX_left, mac_total, mac_left = self.get_statis()
self.num_1x_tx_total.setText(str(X1_TX_total))
self.num_1x_rx_total.setText(str(X1_RX_total))
self.num_1x_tx_left.setText(str(X1_TX_left))
self.num_1x_rx_left.setText(str(X1_RX_left))
self.num_2x_tx_left.setText(str(X2_TX_left))
self.num_2x_rx_left.setText(str(X2_RX_left))
self.num_2x_tx_total.setText(str(X2_TX_total))
self.num_2x_rx_total.setText(str(X2_RX_total))
self.num_mac_total.setText(str(mac_total))
self.num_mac_left.setText(str(mac_left))
history = self.redis_inst.get_history()
for item in history:
self.textBrowser_3.append(item)
def get_item_text(self):
# item = self.tableWidget_2.currentItem()
test_zh = self.tableWidget.currentItem().text()
print 'out of db:'
print test_zh, type(test_zh)
self.db.add_user(test_zh, '123')
users = self.db.get_users()
print 'in db:'
print users[-1], type(users[-1])
print chardet.detect(users[-1])
# print new.decode('gbk'), type(new)
item = self.tableWidget.item(1,0)
item.setText(users[-1].decode('utf8'))
def export(self):
self.get_form_detail()
key_table = []
if self.tx == "1.X":
key_table.append("1X_TX")
elif self.tx == "2.X":
key_table.append("2X_TX")
elif self.tx == "Both":
key_table.extend(["1X_TX", "2X_TX"])
if self.rx == "1.X":
key_table.append("1X_RX")
elif self.rx == "2.X":
key_table.append("2X_RX")
elif self.rx == "Both":
key_table.extend(["1X_RX", "2X_RX"])
if not self.lot_id:
QtGui.QMessageBox.critical(self.tab, 'Error', u'批号不能为空!')
return
elif self.redis_inst.val_is_exist(str(self.lot_id), 'lot_id'):
QtGui.QMessageBox.critical(self.tab, 'Error', u'批号:%s 已经被导出过!' % str(self.lot_id))
return
if not self.inner_model:
QtGui.QMessageBox.critical(self.tab, 'Error', u'内部型号不能为空!')
return
elif str(self.chip_num).strip() != str(len(self.wafers.split(','))):
QtGui.QMessageBox.critical(self.tab, 'Error', u'wafer片号与总数不匹配!')
return
elif not str(self.dir_path).strip():
QtGui.QMessageBox.critical(self.tab, 'Error', u'你还没有选择导出目录!')
return
elif not os.path.isdir(str(self.dir_path)):
QtGui.QMessageBox.critical(self.tab, 'Error', u'输入的不是目录名!')
return
        if not self.parse_cfg():  # parse the cfg file for this inner model
            return
self.create_target_dir()
export_thread_obj = threading.Thread(target = self._export)
export_thread_obj.start()
def _export(self):
#self.tabWidget.setEnabled(False)
self.sinOut_enable.emit(False)
        # Parse the map file to get the position of every die on the wafer
keys_in_one_wafer = len(self.wafer_map)
self.lot_id_str = str(self.lot_id).replace('.', '_')
        # Create a table named after the lot id, used for lookups by lot id
self.db.create_table_by_lot(self.lot_id_str)
self.mac_start_id = self.mac_end_id = self.db.get_start_id('mac')
exported_keys = 0
if len(self.key_type) == 1:
total_keys = keys_in_one_wafer*(int(str(self.chip_num)))
for tb in self.key_type:
            # start_id is where the previous key fetch ended; end_id is where this fetch ends
key_start_id = key_end_id = self.db.get_start_id(tb)
            # self.wafers holds the wafer numbers
for wafer_id in self.wafers.split(','):
i = 0
while i < keys_in_one_wafer:
x, y = self.wafer_map[i]
print "lot_id: %s, wafer_id: %s, key_type: %s, x: %s, y: %s, id: %s"%(self.lot_id_str, wafer_id, tb, x, y, key_end_id)
                    # Look up the key value in table tb by key id
key = self.db.get_key(tb, key_end_id)
                    # Write the key out to a key file
self.gen_key_file(tb, wafer_id, x, y, key)
                    # Record it in the per-lot table so exported keys can be queried by lot id
self.db.insert_value_by_lot(self.lot_id_str, wafer_id, tb, x, y, key_end_id)
i += 1
key_end_id += 1
exported_keys += 1
val = int((exported_keys + 1)*100/total_keys)
#self.progressBar.setValue(val)
self.sinOut_progress_bar.emit(val)
#print exported_keys
wafer_dir = os.path.join(self.output_dir, str("%02d" % int(wafer_id)))
self.clean_file(wafer_dir)
today = time.strftime('%Y-%m-%d',time.localtime())
self.db.update_status(tb.lower(), 'export', left = key_end_id - key_start_id, start_id = key_end_id)
sql = "insert into op_record(user,inner_model,operate_type,%s,day) values('%s','%s','fetch',%s,'%s')" % (tb.lower(), self.user, str(self.inner_model), key_end_id - key_start_id, today)
self.db.set_op_record(sql)
elif len(self.key_type) == 2:
assert '1X_TX' == self.key_type[0] and '2X_RX' == self.key_type[1]
x1 = '1X_TX'
x2 = '2X_RX'
x1_tx_start_id = x1_tx_end_id = self.db.get_start_id(x1)
x2_rx_start_id = x2_rx_end_id = self.db.get_start_id(x2)
total_keys = keys_in_one_wafer*(int(str(self.chip_num)))
for wafer_id in self.wafers.split(','):
i = 0
while i < keys_in_one_wafer:
x, y = self.wafer_map[i]
x1_key = self.db.get_key(x1, x1_tx_end_id)
x2_key = self.db.get_key(x2, x2_rx_end_id)
self.gen_key_file(x1, wafer_id, x, y, x1_key, x2_key)
self.db.insert_value_by_lot(self.lot_id_str, wafer_id, x1, x, y, x1_tx_end_id)
self.db.insert_value_by_lot(self.lot_id_str, wafer_id, x2, x, y, x2_rx_end_id)
i += 1
x1_tx_end_id += 1
x2_rx_end_id += 1
exported_keys += 1
val = int((exported_keys + 1)*100/total_keys)
#self.progressBar.setValue(val)
self.sinOut_progress_bar.emit(val)
# print exported_keys
wafer_dir = os.path.join(self.output_dir, str("%02d" % int(wafer_id)))
self.clean_file(wafer_dir)
today = time.strftime('%Y-%m-%d',time.localtime())
self.db.update_status(x1.lower(), 'export', left = x1_tx_end_id - x1_tx_start_id, start_id = x1_tx_end_id)
self.db.update_status(x2.lower(), 'export', left = x2_rx_end_id - x2_rx_start_id, start_id = x2_rx_end_id)
sql = "insert into op_record(user,inner_model,operate_type,%s,day) values('%s','%s','fetch',%s,'%s')" % (x1, self.user, str(self.inner_model), x1_tx_end_id - x1_tx_start_id, today)
self.db.set_op_record(sql)
sql = "insert into op_record(user,inner_model,operate_type,%s,day) values('%s','%s','fetch',%s,'%s')" % (x2, self.user, str(self.inner_model), x1_tx_end_id - x1_tx_start_id, today)
self.db.set_op_record(sql)
# print "lot_id: %s, wafer_id: %s, key_type: %s, x: %s, y: %s, id: %s"%(self.lot_id_str, wafer_id, self.key_type, x, y, x1_tx_end_id)
self.db.update_status('mac', 'export', left = self.mac_end_id - self.mac_start_id, start_id = self.mac_end_id)
self.redis_inst.add_val(str(self.lot_id), 'lot_id')
for type_ in self.key_type:
self.redis_inst.add_history('%s export %s keys: %s at %s' % (self.user, type_, total_keys, time.strftime('%Y/%m/%d',time.localtime())))
#QtGui.QMessageBox.information(self, u'提示', u'导出已完成,共导出%s个key!' % total_keys)
self.sinOut_info.emit(u'提示', u'导出已完成,共导出%s个key!' % total_keys)
#self.display_status()
#self.progressBar.setValue(0)
self.sinOut_progress_bar.emit(0)
#self.tabWidget.setEnabled(True)
self.sinOut_enable.emit(True)
self.sinOut_status.emit()
def create_target_dir(self):
main_dir = os.getcwd()
self.output_dir = os.path.join(main_dir, "Output", str(self.lot_id))
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
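    # Key-file generation pipeline (as implemented below): the raw key is written
    # to <name>.key_source as a small text file (==PAR_START==, lot id, wafer id,
    # x, y, type, byte count, ==KEY_START==, then the key as one hex byte pair
    # per line); rom_gen_exe turns that into a .rom file, and the final_exe
    # tool(s) bind the MAC value and emit the final <name>.key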
def gen_key_file(self, tb, wafer_id, x, y, key_1, key_2 = None):
wafer_dir = os.path.join(self.output_dir, str("%02d" % int(wafer_id)))
print wafer_dir
if not os.path.exists(wafer_dir):
os.mkdir(wafer_dir)
work_dir = os.getcwd()
os.chdir(wafer_dir)
name_base = "%s_%s_%s_%s" % (str(self.lot_id), str("%02d" % int(wafer_id)), x, y)
print name_base
key_name = "%s.key_source" % name_base
key2_name = "%s.txt" % name_base
key_name_abs = os.path.join(wafer_dir, key_name)
key2_name_abs = os.path.join(wafer_dir, key2_name)
if "TX" in tb:
type_ = '1'
else:
type_ = '2'
if "1X" in tb:
bytes_ = '288'
with open(key_name_abs, 'a+') as f:
f.write("==PAR_START==" + "\n")
f.write(str(self.lot_id) + "\n")
f.write(str(wafer_id) + '\n')
f.write(str(x) + '\n')
f.write(str(y) + '\n')
f.write(type_ + '\n')
f.write(bytes_ + '\n')
f.write("==KEY_START==" + '\n')
length = len(zip(key_1[0::2], key_1[1::2]))
for index, item in enumerate(zip(key_1[0::2], key_1[1::2])):
if index != length - 1:
f.write(''.join(item) + '\n')
else:
f.write(''.join(item))
if self.bind_mac == 'yes':
mac_value = self.db.get_mac(self.mac_end_id)
self.mac_end_id += 1
else:
mac_value = 1234567890123456
if len(self.key_type) == 1:
cmd_str = '%s %s %d' % (self.rom_gen_exe, key_name_abs, 1)
execCLI(cmd_str)
with open("%s.rom"%key_name_abs, 'a+') as f:
f.write('\n')
cmd_final = '%s %s %s %s' % (self.final_exe[0], "%s.rom"%key_name_abs, "%s.key"%name_base, mac_value)
execCLI(cmd_final)
print 'key: %s, mac: %s' % (name_base, mac_value)
elif len(self.key_type) == 2:
with open(key2_name_abs, 'a+') as f:
                for item in zip(key_2[0::2], key_2[1::2]):
                    f.write(''.join(item) + '\n')
cmd_str = '%s %s %d' % (self.rom_gen_exe, key_name_abs, 1)
execCLI(cmd_str)
with open("%s.rom"%key_name_abs, 'a+') as f:
f.write('\n')
cmd_final_1 = '%s %s %s %s' % (self.final_exe[0], "%s.rom"%key_name_abs, "%s.out"%key_name_abs, mac_value)
execCLI(cmd_final_1)
cmd_final_2 = '%s %s %s %s' % (self.final_exe[1], "%s.out"%key_name_abs, key2_name_abs, "%s.key"%name_base)
execCLI(cmd_final_2)
print 'key: %s, mac: %s' % (name_base, mac_value)
os.chdir(work_dir)
def clean_file(self, clean_dir, reserved = 'key'):
work_dir = os.getcwd()
os.chdir(clean_dir)
for f in os.listdir('.'):
if not f.endswith(reserved):
os.remove(f)
os.chdir(work_dir)
def parse_cfg(self):
target_dir = os.path.join(os.getcwd(),'Input', str(self.inner_model))
        if not os.path.exists(target_dir):
            QtGui.QMessageBox.critical(self.tab, 'Error', u'目标目录不存在!')
            return False
        cfg_file = None
        for f in os.listdir(target_dir):
            if f.endswith("cfg"):
                cfg_file = f
                break
        if cfg_file is None:
            QtGui.QMessageBox.critical(self.tab, 'Error', u'未找到cfg文件!')
            return False
cfg_file_abs = os.path.join(target_dir, cfg_file)
config = ConfigParser.ConfigParser()
        try:
            cfg_fp = open(cfg_file_abs, "r")
            config.readfp(cfg_fp)
        except Exception, e:
            print e
            return False
self.chip_name = config.get('Chip name', 'chip_name').strip()
self.map_file = os.path.join(target_dir, config.get('Map file', 'file').strip())
self.rom_gen_exe = os.path.join(target_dir, config.get('HDCP_ROM_GEN', 'rom_gen_exe').strip())
print self.rom_gen_exe
# self.final_exe = os.path.join(target_dir, config.get('FINAL_ROM_GEN', 'final_exe').strip())
self.final_exe = [os.path.join(target_dir, config.get('FINAL_ROM_GEN', item).strip()) for item in config.options('FINAL_ROM_GEN')]
print self.final_exe
self.bind_mac = config.get('MAC', 'bind_mac').strip()
self.key_type = [config.get('Key type', item).strip() for item in config.options('Key type')]
# print self.chip_name
# print self.map_file
# print self.rom_gen_exe
# print self.final_exe
# print self.key_type
        self.wafer_map = self.parse_map(self.map_file)
        # print self.wafer_map
        return True
def parse_map(self, file_name):
with open(file_name, 'r') as f:
data = f.readlines()
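        # Assumed map-file format (inferred from the parsing below): line 8 holds
        # the start coordinates as "...=x,y"; after the header lines an ASCII
        # picture of the wafer follows, one row per line, where characters in
        # "MmSs" mark skipped dies and any other non-space character is a live
        # die; the picture is expected to end with a blank line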
start_x = int(data[7].split('=')[1].split(',')[0])
start_y = int(data[7].split('=')[1].split(',')[1])
length = len(data)
        # Find the first row of the wafer circle
for i in range(0, length):
if '=' not in data[i] and data[i].strip():
circle_start = i
break
        # Find the last row of the wafer circle; this assumes the map is
        # terminated by a blank line
        for i in range(circle_start, length):
            if not data[i].strip():
                circle_end = i - 1
                break
a = re.compile(r"\s*[MSms]*([^MmSs\s]+?).*")
for i in range(circle_start, circle_end + 1):
match = a.search(data[i])
if match:
                # x_base maps the leftmost die character of the first row back to
                # wafer coordinates; match.start(1) is the position of group(1)
                # (the part matched inside the parentheses) within the whole line
x_base = start_x - match.start(1)
break
wafer_map = []
for i in range(circle_start, circle_end + 1):
match = a.search(data[i])
if match:
first_index = match.start(1)
incr = 0
for j in data[i][first_index:].strip():
if j not in "MmSs":
x = x_base + first_index + incr
y = start_y
wafer_map.append((x, y))
incr += 1
start_y -= 1
return wafer_map
def key_search(self):
lot_id = str(self.comboBox.currentText()).replace('.', '_')
wafer_id = self.comboBox_2.currentText()
x_start = str(self.lineEdit_4.text()).strip()
y_start = str(self.lineEdit_5.text()).strip()
x_end = str(self.lineEdit_6.text()).strip()
y_end = str(self.lineEdit_7.text()).strip()
self.textBrowser.clear()
if not lot_id:
QtGui.QMessageBox.critical(self.tab_3, 'Error', u'Lot ID 不能为空!')
return
sql = "select id, x_coordinate, y_coordinate from %s where 1=1 " % lot_id
if not wafer_id:
QtGui.QMessageBox.critical(self.tab_3, 'Error', u'Wafer ID 不能为空!')
return
sql += "and waferID=%s " % wafer_id
if not x_start and not x_end:
QtGui.QMessageBox.critical(self.tab_3, 'Error', u'至少应输入一个 X坐标!')
return
elif not x_start.isdigit() and not x_end.isdigit():
QtGui.QMessageBox.critical(self.tab_3, 'Error', u'坐标应该输入数字!')
return
elif x_start and not x_end:
sql += "and x_coordinate=%s " % x_start
elif x_end and not x_start:
sql += "and x_coordinate=%s " % x_end
elif x_start and x_end:
sql += "and x_coordinate>=%s and x_coordinate<=%s " % (x_start, x_end)
if not y_start and not y_end:
QtGui.QMessageBox.critical(self.tab_3, 'Error', u'至少应输入一个 Y坐标!')
return
elif not y_start.isdigit() and not y_end.isdigit():
QtGui.QMessageBox.critical(self.tab_3, 'Error', u'坐标应该输入数字!')
return
elif y_start and not y_end:
sql += "and y_coordinate=%s " % y_start
elif y_end and not y_start:
sql += "and y_coordinate=%s " % y_end
        elif y_start and y_end:
            sql += "and y_coordinate>=%s and y_coordinate<=%s " % (y_start, y_end)
key_types = self.db.get_key_types(lot_id)
for item in key_types:
cur_sql = sql + "and key_type='%s' " % item
print cur_sql
cors = self.db.get_key_id(cur_sql)
if cors:
self.textBrowser.append("[key_type: %s]" % item)
for cor in cors:
self.textBrowser.append("X Coordinate: %s, Y Coordinate: %s\n" % (cor[1], cor[2]))
self.textBrowser.append(self.db.get_key_info(item, cor[0]) + '\n')
def lot_search(self):
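        # TODO: lot search is not implemented; the line below is placeholder output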
self.textBrowser.setText("hello sky")
def display_lot(self):
if self.redis_inst.is_exist('lot_id'):
for item in self.redis_inst.r.smembers('lot_id'):
self.comboBox.addItem(QtCore.QString(item))
def combox_init(self):
if self.redis_inst.is_exist('lot_id'):
for item in self.redis_inst.r.smembers('lot_id'):
self.comboBox.addItem(QtCore.QString(item))
for i in range(1,26):
self.comboBox_2.addItem(QtCore.QString(str(i)))
self.comboBox_4.addItem(QtCore.QString(u'storage'))
self.comboBox_4.addItem(QtCore.QString(u'fetch'))
def op_record_search(self):
sql = "select sum(1x_tx), sum(1x_rx), sum(2x_tx), sum(2x_rx) from op_record where 1=1 "
html = "<p><b>Date:</b> %s -- %s</p><p><b>Operate type:</b> %s</p>"
from_date = self.dateEdit.textFromDateTime(self.dateEdit.dateTime())
if from_date:
sql += "and day >= '%s' " % from_date
real_para = [from_date,]
end_date = self.dateEdit_2.textFromDateTime(self.dateEdit_2.dateTime())
if end_date:
sql += "and day <= '%s' " % end_date
real_para.append(end_date)
operate_type = self.comboBox_4.currentText()
if operate_type:
sql += "and operate_type='%s' " % operate_type
real_para.append(operate_type)
user = self.lineEdit_2.text()
if user:
sql += "and user='%s' " % user
html += "<p><b>User:</b> %s</p>"
real_para.append(user)
inner_model = self.comboBox_5.currentText()
if inner_model:
sql += "and inner_model='%s' " % inner_model
html += "<p><b>Inner model:</b> %s</p>"
real_para.append(inner_model)
x1_tx, x1_rx, x2_tx, x2_rx = self.db.get_op_record(sql)
html += "<p><b>1X_TX:</b> %s</p><p><b>1X_RX:</b> %s</p><p><b>2X_TX:</b> %s</p><p><b>2X_RX:</b> %s</p>"
        x1_tx, x1_rx, x2_tx, x2_rx = [v or 0 for v in (x1_tx, x1_rx, x2_tx, x2_rx)]
real_para.extend([x1_tx, x1_rx, x2_tx, x2_rx])
self.textBrowser_2.setText(html%tuple(real_para))
def retrieve_key(self):
lot_id_1 = str(self.lineEdit.text()).replace('.', '_')
lot_id_2 = str(self.lineEdit_11.text()).replace('.', '_')
        if self.lineEdit_10.text() != self.db.get_ps('admin'):
QtGui.QMessageBox.critical(self.tab, 'Error', u'输入的管理员密码错误!')
return
if lot_id_1 != lot_id_2:
QtGui.QMessageBox.critical(self.tab, 'Error', u'两次输入的lot id 不一致!')
return
if not self.redis_inst.val_is_exist(lot_id_1.replace('_', '.'),'lot_id'):
QtGui.QMessageBox.critical(self.tab, 'Error', u'输入的lot id在数据库中不存在!')
return
key_types = self.db.get_key_types(lot_id_1)
if not key_types:
QtGui.QMessageBox.critical(self.tab, 'Error', u'lot id 对应的表类型为空!')
return
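        # Recycling strategy: every key referenced by this lot gets a fresh id
        # past the current maximum of its key table, which re-queues it for
        # future exports; statistics are then updated and the per-lot table dropped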
for type_ in key_types:
            new_id = int(self.db.get_new_id(type_)) + 1
del_ids = self.db.get_del_ids(lot_id_1, type_)
id_num = len(del_ids)
#update id
for id_ in del_ids:
self.db.update_key_id(type_, id_, str(new_id))
new_id += 1
print new_id
#update statistics table
self.db.update_status(type_, 'retrieve', left = id_num)
            # TODO: also update op_record here
self.redis_inst.add_history('%s retrieve %s keys: %s at %s' % (self.user, type_, id_num, time.strftime('%Y/%m/%d',time.localtime())))
#drop table
self.db.drop_table(lot_id_1)
self.redis_inst.del_val(lot_id_1.replace('_', '.'), 'lot_id')
self.display_status()
QtGui.QMessageBox.information(self, u'提示', u'回收操作已完成!')
class UserDialog(QtGui.QDialog):
def __init__(self, db, parent=None):
QtGui.QDialog.__init__(self, parent)
self.db = db
self.resize(400, 300)
self.pushButton = QtGui.QPushButton(self)
self.pushButton.setGeometry(QtCore.QRect(300, 80, 75, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton.clicked.connect(self.delete)
self.pushButton_4 = QtGui.QPushButton(self)
self.pushButton_4.setGeometry(QtCore.QRect(300, 40, 75, 23))
self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
self.pushButton_4.clicked.connect(self.add)
self.listWidget = QtGui.QListWidget(self)
self.listWidget.setGeometry(QtCore.QRect(20, 30, 256, 201))
self.listWidget.setObjectName(_fromUtf8("listWidget"))
items = [QtGui.QListWidgetItem(item) for item in self.db.get_users()]
for i in range(len(items)):
if items[i].text() != 'admin':
self.listWidget.insertItem(i+1,items[i])
self.listWidget.itemClicked.connect(self.cur_item)
self.retranslateUi(self)
QtCore.QMetaObject.connectSlotsByName(self)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", u"添加/删除用户", None))
self.pushButton.setText(_translate("Dialog", u"删除", None))
self.pushButton_4.setText(_translate("Dialog", u"添加", None))
def cur_item(self):
self.current_item = self.listWidget.currentItem().text()
def add(self):
ui = AddDialog(self.db, self)
if ui.exec_():
self.listWidget.addItem(QtGui.QListWidgetItem(ui.addUser))
def delete(self):
if not hasattr(self, 'current_item'):
QtGui.QMessageBox.critical(self, 'Error', u'请选择要删除的用户!')
return
reply = QtGui.QMessageBox.question(self, 'Warning', u'你确定要删除用户: %s 吗?'%self.current_item, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.listWidget.setItemHidden(self.listWidget.currentItem(), True)
self.db.del_user(self.current_item)
class AddDialog(QtGui.QDialog):
def __init__(self, db, parent = None):
QtGui.QDialog.__init__(self, parent)
self.db = db
self.setObjectName(_fromUtf8("Dialog"))
self.resize(345, 208)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setGeometry(QtCore.QRect(40, 160, 291, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.label = QtGui.QLabel(self)
self.label.setGeometry(QtCore.QRect(40, 40, 141, 16))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(self)
self.label_2.setGeometry(QtCore.QRect(40, 90, 141, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.lineEdit = QtGui.QLineEdit(self)
self.lineEdit.setGeometry(QtCore.QRect(40, 60, 221, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.lineEdit_2 = QtGui.QLineEdit(self)
self.lineEdit_2.setGeometry(QtCore.QRect(40, 110, 221, 20))
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.retranslateUi()
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), self.add_user)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), self.reject)
QtCore.QMetaObject.connectSlotsByName(self)
def retranslateUi(self):
self.setWindowTitle(_translate("Dialog", u"添加用户", None))
self.label.setText(_translate("Dialog", u"请输入用户名:", None))
self.label_2.setText(_translate("Dialog", u"请输入密码:", None))
def add_user(self):
username = self.lineEdit.text()
password = str(self.lineEdit_2.text())
if not username:
QtGui.QMessageBox.warning(self, 'Warning', u'用户名不能为空!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
return
if not password:
QtGui.QMessageBox.warning(self, 'Warning', u'密码不能为空!', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
return
if username in self.db.get_users():
QtGui.QMessageBox.warning(self, 'Warning', u'用户: %s 已经存在,请更换其他的用户名!'%username, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
else:
self.db.add_user(str(username), password)
if username in self.db.get_users():
self.addUser = username
self.accept()
class PasswordDialog(QtGui.QDialog):
def __init__(self, db, user, parent=None):
self.db = db
self.user = user
QtGui.QDialog.__init__(self, parent)
self.resize(240, 200)
self.setWindowTitle(u'修改密码')
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel(u'旧密码:', parent=self), 0, 0, 1, 1)
self.oldPassword = QtGui.QLineEdit(parent=self)
grid.addWidget(self.oldPassword, 0, 1, 1, 1)
grid.addWidget(QtGui.QLabel(u'新密码:', parent=self), 1, 0, 1, 1)
self.newPassword = QtGui.QLineEdit(parent=self)
grid.addWidget(self.newPassword, 1, 1, 1, 1)
buttonBox = QtGui.QDialogButtonBox(parent=self)
buttonBox.setOrientation(QtCore.Qt.Horizontal)
buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
buttonBox.accepted.connect(self.judge)
buttonBox.rejected.connect(self.reject)
layout = QtGui.QVBoxLayout()
layout.addLayout(grid)
spacerItem = QtGui.QSpacerItem(20, 48, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
layout.addItem(spacerItem)
layout.addWidget(buttonBox)
self.setLayout(layout)
def judge(self):
if self.db.get_ps(self.user) == self.old_password():
self.accept()
else:
QtGui.QMessageBox.critical(self, 'Error', u'旧密码不正确!')
def old_password(self):
return self.oldPassword.text()
def new_password(self):
return self.newPassword.text()
class RedisOperate(object):
def __init__(self):
pool = redis.ConnectionPool(host='127.0.0.1', port = 6379, password = 'AvL_1108')
self.r = redis.StrictRedis(connection_pool=pool)
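        # Redis layout used by this class: hash 'user_management' (name -> password),
        # sets 'file_md5', 'lot_id' and 'mac' (already-imported/exported markers),
        # and list 'history' (most recent action first)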
def get_ps(self, name):
return self.r.hget('user_management', name)
def set_ps(self, name, value):
self.r.hset('user_management', name, value)
def is_hexist(self, name):
return self.r.hexists('user_management', name)
def is_exist(self, name):
return self.r.exists(name)
def add_val(self, val, set_type = 'file_md5'):
self.r.sadd(set_type, val)
def del_val(self, val, set_type):
self.r.srem(set_type, val)
def val_is_exist(self, val, set_type = 'file_md5'):
return self.r.sismember(set_type, val)
def add_history(self, action):
self.r.lpush('history', action)
def get_history(self):
return self.r.lrange('history', 0, -1)
def set(self, name, value):
self.r.set(name, value)
def get(self, name):
return self.r.get(name)
def delete_user(self, name):
self.r.hdel('user_management', name)
def dump_users(self):
return self.r.hkeys('user_management')
class MysqlOperate(object):
def __init__(self, host, user, passwd, db):
self.conn = MySQLdb.connect(host, user, passwd, db)
self.cur = self.conn.cursor()
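        # Note: queries in this class are built with %-formatting and assume
        # trusted, application-generated input; MySQLdb's parameterized form,
        # e.g. self.cur.execute("insert into user values(%s,%s)", (name, passwd)),
        # would be safer against SQL injection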
self.conn.set_character_set('utf8')
self.cur.execute('SET NAMES utf8;')
self.cur.execute('SET CHARACTER SET utf8;')
self.cur.execute('SET character_set_connection=utf8;')
if not self.table_is_exist():
self.create_tables()
def create_tables(self):
#username management table
self.cur.execute("create table user(name varchar(20), password varchar(50))")
#key table
self.cur.execute("create table 1X_TX(id int not null auto_increment, value varchar(2000), is_use enum('yes', 'no'), primary key(id))")
self.cur.execute("create table 1X_RX(id int not null auto_increment, value varchar(2000), is_use enum('yes', 'no'), primary key(id))")
self.cur.execute("create table 2X_TX(id int not null auto_increment, value varchar(2000), is_use enum('yes', 'no'), primary key(id))")
self.cur.execute("create table 2X_RX(id int not null auto_increment, value varchar(2000), is_use enum('yes', 'no'), primary key(id))")
#lot table, to be finish
self.cur.execute("create table lot(lotID varchar(20), department varchar(20), applyPerson varchar(20), \
applyType enum('project', 'batch', 'pilot'), keySource enum('availink', 'customer'), \
keyContent enum('16', '8', '2'), TX enum('1X','2X','Both','None'), RX enum('1X','2X','Both','None'), \
innerType varchar(30), sliceNumber tinyint, keyNumber int, testEngineer varchar(20), slices varchar(80))")
#key record
self.cur.execute("create table op_record(user char(20), inner_model char(30), operate_type enum('fetch','storage'), 1x_tx int, 1x_rx int, 2x_tx int, 2x_rx int, day date)")
#Statistics table
self.cur.execute("create table Statistics(type enum('1X_TX','1X_RX','2X_TX','2X_RX','mac'), total int, left_num int, start_id int)")
sql = "insert into Statistics values(%s,%s,%s,%s)"
self.cur.executemany(sql,[('1X_TX', '0', '0', '1'), ('1X_RX', '0', '0', '1'), ('2X_TX', '0', '0', '1'), ('2X_RX', '0', '0', '1'), ('mac', '0', '0', '1')])
#mac info
self.cur.execute("create table mac_addr(id int not null auto_increment,mac_value char(16),primary key(id))")
self.conn.commit()
def insert_key(self, table_name, value):
# print "insert into %s(value, is_use) values('%s', 'no')"%(table_name, value)
self.cur.execute("insert into %s(value, is_use) values('%s', 'no')"%(table_name, value))
#self.conn.commit()
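        # commit is deferred during bulk import; update_status() later commits
        # the whole batch on the same connection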
def insert_mac(self, value):
self.cur.execute("insert into mac_addr(mac_value) values('%s')" % value)
self.conn.commit()
def update_status(self, key_type, op_type, total = None, left = None, start_id = None):
if op_type == 'import':
sql = "update statistics set total=total+%s,left_num=left_num+%s where type='%s'"%(total, left, key_type)
elif op_type == 'export':
sql = "update statistics set left_num=left_num-%s,start_id=%s where type='%s'"%(left, start_id, key_type)
elif op_type == 'retrieve':
sql = "update statistics set left_num=left_num+%s where type='%s'"%(left, key_type)
self.cur.execute(sql)
self.conn.commit()
def update_start_id(self, tb, start_id):
sql = "update statistics set start_id=%s where type='%s'" %(start_id, tb)
self.cur.execute(sql)
self.conn.commit()
def table_is_exist(self):
self.cur.execute("SELECT count(*) FROM information_schema.tables WHERE table_schema = 'hdcp' AND table_name ='user'")
return self.cur.fetchone()[0] == 1
def create_table_by_lot(self, lotID):
self.cur.execute("create table %s(waferID tinyint, key_type enum('1X_RX', '1X_TX', '2X_RX', '2X_TX'), x_coordinate smallint, y_coordinate smallint, id int)"%lotID)
def insert_value_by_lot(self, lotID, waferID, key_type, x, y, id_):
self.cur.execute("insert into %s values(%s,'%s',%s,%s,%s)"%(lotID, waferID, key_type, x, y, id_))
self.conn.commit()
def get_start_id(self, tb):
sql = "select start_id from statistics where type='%s'" % tb
self.cur.execute(sql)
return self.cur.fetchone()[0]
def get_new_id(self, tb):
sql = "select id from %s order by id desc limit 1" % tb
self.cur.execute(sql)
return self.cur.fetchone()[0]
def get_key_id(self, sql):
self.cur.execute(sql)
return self.cur.fetchall()
def get_key(self, tb, id_):
self.cur.execute("select value from %s where id='%s'" % (tb, id_))
key_value = self.cur.fetchone()[0]
# self.cur.execute("update %s set is_use='yes' where id='%s'" % (tb, id))
return key_value
def get_mac(self, id_):
self.cur.execute("select mac_value from mac_addr where id='%s'" % id_)
mac_value = self.cur.fetchone()[0]
return mac_value
def get_key_info(self, target_table, id_):
self.cur.execute("select value from %s where id=%s" % (target_table, id_))
return self.cur.fetchone()[0]
def get_key_types(self, tb):
self.cur.execute("select distinct key_type from %s" % tb)
types_ = [item[0] for item in self.cur.fetchall()]
return types_
def get_del_ids(self, tb, key_type):
self.cur.execute("select id from %s where key_type='%s'" % (tb, key_type))
ids = [item[0] for item in self.cur.fetchall()]
return ids
def update_key_id(self, tb, cur_id, new_id):
self.cur.execute("update %s set id=%s where id=%s" % (tb, new_id, cur_id))
self.conn.commit()
def drop_table(self,tb):
self.cur.execute("drop table %s" % tb)
self.conn.commit()
def set_op_record(self, sql):
self.cur.execute(sql)
self.conn.commit()
def get_op_record(self, sql):
self.cur.execute(sql)
x1_tx, x1_rx, x2_tx, x2_rx = self.cur.fetchall()[0]
return x1_tx, x1_rx, x2_tx, x2_rx
def get_lot_info(self, lotID):
self.cur.execute("select * from lot where lotID='%s'" % lotID)
print self.cur.fetchall()
return self.cur.fetchall()[0]
def add_user(self, name, passwd):
self.cur.execute("insert into user values('%s','%s')"%(name, passwd))
self.conn.commit()
def get_ps(self, name):
self.cur.execute("select password from user where name='%s'"%name)
return self.cur.fetchone()[0]
def update_ps(self, name, ps):
self.cur.execute("update user set password='%s' where name='%s'"%(ps, name))
self.conn.commit()
def del_user(self, name):
self.cur.execute("delete from user where name='%s'"%name)
self.conn.commit()
def get_users(self):
self.cur.execute("select name from user")
users = []
for item in self.cur.fetchall():
users.extend(item)
return users
def get_statistics(self):
self.cur.execute("select type, total, left_num from statistics")
statis = {}
for item in self.cur.fetchall():
statis[item[0]] = (item[1], item[2])
return statis
def close(self):
self.cur.close()
self.conn.close()
if __name__ == '__main__':
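    # Startup flow: connect to the backing stores, force admin registration on
    # first run, then require a login before showing the main window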
redis_inst = RedisOperate()
mysql_inst = MysqlOperate(host='127.0.0.1', user='root', passwd='Avl1108', db ='hdcp')
app = QtGui.QApplication(sys.argv)
if 'admin' not in mysql_inst.get_users():
reg = Ui_Register(mysql_inst)
if reg.exec_():
login = Ui_Login(mysql_inst)
if login.exec_():
ui = Ui_MainWindow(redis_inst, mysql_inst, login.current_user)
ui.show()
sys.exit(app.exec_())
else:
login = Ui_Login(mysql_inst)
if login.exec_():
ui = Ui_MainWindow(redis_inst, mysql_inst, login.current_user)
ui.show()
sys.exit(app.exec_())
# ui = Ui_MainWindow(redis_inst, mysql_inst, 'admin')
# ui.show()
# sys.exit(app.exec_())
mysql_inst.close()
redis_inst.r.save()
|
{
"content_hash": "4289eaa675bddc6cd9732223d8b7cbcf",
"timestamp": "",
"source": "github",
"line_count": 2084,
"max_line_length": 200,
"avg_line_length": 46.29126679462572,
"alnum_prop": 0.592385276404308,
"repo_name": "joakimzhang/qa_study",
"id": "e0943cdc542a0b8a04a5a70feb3e0c5053473b5a",
"size": "98351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdcp_app/hdcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45820"
},
{
"name": "HTML",
"bytes": "17507"
},
{
"name": "JavaScript",
"bytes": "97109"
},
{
"name": "Python",
"bytes": "586032"
}
],
"symlink_target": ""
}
|
import os
from core import perf_benchmark
from core import platforms
from page_sets.system_health import multi_tab_stories
from telemetry import benchmark
from telemetry import story
from telemetry.timeline import chrome_trace_category_filter
from telemetry.web_perf import timeline_based_measurement
@benchmark.Info(emails=['vovoy@chromium.org'],
component='OS>Performance')
class TabSwitchingTypical25(perf_benchmark.PerfBenchmark):
"""This test records the MPArch.RWH_TabSwitchPaintDuration histogram.
The histogram is a measure of the time between when a tab was requested to be
shown, and when first paint occurred. The script opens 25 pages in different
tabs, waits for them to load, and then switches to each tab and records the
metric. The pages were chosen from Alexa top ranking sites.
"""
# TODO(rmhasan): Remove the SUPPORTED_PLATFORMS lists.
# SUPPORTED_PLATFORMS is deprecated, please put system specifier tags
# from expectations.config in SUPPORTED_PLATFORM_TAGS.
SUPPORTED_PLATFORMS = [story.expectations.ALL_DESKTOP]
SUPPORTED_PLATFORM_TAGS = [platforms.DESKTOP]
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
parser.add_option('--tabset-repeat', type='int', default=1,
help='repeat tab page set')
def CreateStorySet(self, options):
story_set = story.StorySet(
archive_data_file='../page_sets/data/system_health_desktop.json',
base_dir=os.path.dirname(os.path.abspath(__file__)),
cloud_storage_bucket=story.PARTNER_BUCKET)
story_set.AddStory(multi_tab_stories.MultiTabTypical24Story(
story_set, False, options.tabset_repeat))
return story_set
def CreateCoreTimelineBasedMeasurementOptions(self):
category_filter = chrome_trace_category_filter.ChromeTraceCategoryFilter()
category_filter.AddIncludedCategory('latency')
options = timeline_based_measurement.Options(category_filter)
options.SetTimelineBasedMetrics(['tabsMetric'])
return options
@classmethod
def Name(cls):
return 'tab_switching.typical_25'
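# Hedged usage sketch (not part of this file): assuming a standard Chromium
# checkout, a benchmark defined this way is normally invoked through the perf
# runner by the name returned from Name() above, e.g.:
#
#   tools/perf/run_benchmark tab_switching.typical_25 --browser=release
#
# The exact runner flags are an assumption; only the benchmark name comes
# from this file.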
|
{
"content_hash": "3bc5357e038cc0e4631c260722762c4b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 40.17307692307692,
"alnum_prop": 0.7520344662517952,
"repo_name": "nwjs/chromium.src",
"id": "ede18393e7aef2ce896b695754798a2775f9e5c9",
"size": "2230",
"binary": false,
"copies": "7",
"ref": "refs/heads/nw70",
"path": "tools/perf/benchmarks/tab_switching.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
""" Contains tensorflow models and functions """
import sys
import tensorflow as tf_
class SilentTF:
""" Class to supress deprecation warnings. """
def __init__(self):
modules = []
if hasattr(tf_.compat, 'v1'):
modules.append(tf_.compat.v1)
self.modules = modules
def __getattr__(self, name):
for module in self.modules:
if name in module.__dict__:
return getattr(module, name)
return getattr(tf_, name)
sys.modules['tensorflow'] = SilentTF()
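# Illustrative note (sketch, not part of the original module): after the
# sys.modules assignment above, any later `import tensorflow` in this process
# yields the SilentTF proxy, so v1-era names resolve through tf.compat.v1
# first, e.g.:
#
#   import tensorflow as tf   # actually the SilentTF instance
#   sess = tf.Session()       # resolved via tf.compat.v1 under TF 2.x
#
# Names missing from every module in `self.modules` fall through to the real
# tensorflow module via the final getattr.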
from .base import TFModel
from .utils import get_shape, get_num_dims, get_channels_axis, get_num_channels, \
get_batch_size, get_spatial_dim, get_spatial_shape
from .vgg import VGG, VGG16, VGG19, VGG7
from .linknet import LinkNet
from .unet import UNet, UNetPP
from .vnet import VNet
from .fcn import FCN, FCN32, FCN16, FCN8
from .resnet import ResNet, ResNet18, ResNet34, ResNet50, ResNet101, ResNet152, \
ResNeXt18, ResNeXt34, ResNeXt50, ResNeXt101, ResNeXt152, \
SEResNet18, SEResNet34, SEResNet50, SEResNet101, SEResNet152, \
SEResNeXt18, SEResNeXt34, SEResNeXt50, SEResNeXt101, SEResNeXt152
from .inception_v1 import Inception_v1
from .inception_v3 import Inception_v3
from .inception_v4 import Inception_v4
from .inception_resnet_v2 import InceptionResNet_v2
from .squeezenet import SqueezeNet
from .mobilenet import MobileNet, MobileNet_v2, MobileNet_v3, MobileNet_v3_small
from .densenet import DenseNet, DenseNet121, DenseNet169, DenseNet201, DenseNet264
from .faster_rcnn import FasterRCNN
from .resattention import ResNetAttention, ResNetAttention56, ResNetAttention92
from .densenet_fc import DenseNetFC, DenseNetFC56, DenseNetFC67, DenseNetFC103
from .refinenet import RefineNet
from .gcn import GlobalConvolutionNetwork as GCN
from .encoder_decoder import EncoderDecoder, AutoEncoder, VariationalAutoEncoder
from .pyramidnet import PyramidNet, PyramidNet18, PyramidNet34, PyramidNet50, PyramidNet101, PyramidNet152
from .tf_sampler import TfSampler
from .xception import Xception, XceptionS, Xception41, Xception64
from .deeplab import DeepLab, DeepLabXS, DeepLabX8, DeepLabX16
from .efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, \
EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7
from .pspnet import PSPNet, PSPNet18, PSPNet34, PSPNet50
|
{
"content_hash": "df18b6055a80403021a76487bfec9b69",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 106,
"avg_line_length": 44.163636363636364,
"alnum_prop": 0.7443392342527789,
"repo_name": "analysiscenter/dataset",
"id": "60b4dbdcc47598c13bfd7def2796a6f42cc8e582",
"size": "2429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batchflow/models/tf/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "711078"
}
],
"symlink_target": ""
}
|
"""
WSGI config for kronikka project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "kronikka.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kronikka.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
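# A minimal middleware sketch following the same pattern (illustrative only;
# `application` above is the real Django handler, the middleware name is
# hypothetical):
#
# def logging_middleware(app):
#     def wrapper(environ, start_response):
#         print(environ.get("PATH_INFO"))
#         return app(environ, start_response)
#     return wrapper
#
# application = logging_middleware(application)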
|
{
"content_hash": "aa52152a995b5891d3d0a818a38c9351",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 44.53125,
"alnum_prop": 0.7936842105263158,
"repo_name": "Hippu/pilttikronikka",
"id": "850c693ab9ab01702b5bc0ea3d3c23a7e84096d0",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kronikka/kronikka_conf/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21551"
}
],
"symlink_target": ""
}
|
"""
The homematic rollershutter platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/rollershutter.homematic/
Important: For this platform to work the homematic component has to be
properly configured.
"""
import logging
from homeassistant.const import (STATE_OPEN, STATE_CLOSED, STATE_UNKNOWN)
from homeassistant.components.rollershutter import RollershutterDevice,\
ATTR_CURRENT_POSITION
import homeassistant.components.homematic as homematic
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematic']
def setup_platform(hass, config, add_callback_devices, discovery_info=None):
"""Setup the platform."""
if discovery_info is None:
return
return homematic.setup_hmdevice_discovery_helper(HMRollershutter,
discovery_info,
add_callback_devices)
class HMRollershutter(homematic.HMDevice, RollershutterDevice):
"""Represents a Homematic Rollershutter in Home Assistant."""
@property
def current_position(self):
"""
Return current position of rollershutter.
None is unknown, 0 is closed, 100 is fully open.
"""
if self.available:
return int((1 - self._hm_get_state()) * 100)
return None
def position(self, **kwargs):
"""Move to a defined position: 0 (closed) and 100 (open)."""
if self.available:
if ATTR_CURRENT_POSITION in kwargs:
position = float(kwargs[ATTR_CURRENT_POSITION])
position = min(100, max(0, position))
level = (100 - position) / 100.0
self._hmdevice.set_level(level, self._channel)
@property
def state(self):
"""Return the state of the rollershutter."""
current = self.current_position
if current is None:
return STATE_UNKNOWN
return STATE_CLOSED if current == 100 else STATE_OPEN
def move_up(self, **kwargs):
"""Move the rollershutter up."""
if self.available:
self._hmdevice.move_up(self._channel)
def move_down(self, **kwargs):
"""Move the rollershutter down."""
if self.available:
self._hmdevice.move_down(self._channel)
def stop(self, **kwargs):
"""Stop the device if in motion."""
if self.available:
self._hmdevice.stop(self._channel)
def _check_hm_to_ha_object(self):
"""Check if possible to use the HM Object as this HA type."""
from pyhomematic.devicetypes.actors import Blind
# Check compatibility from HMDevice
if not super(HMRollershutter, self)._check_hm_to_ha_object():
return False
# Check if the homematic device is correct for this HA device
if isinstance(self._hmdevice, Blind):
return True
_LOGGER.critical("This %s can't be use as rollershutter!", self._name)
return False
def _init_data_struct(self):
"""Generate a data dict (self._data) from hm metadata."""
super(HMRollershutter, self)._init_data_struct()
# Add state to data dict
self._state = "LEVEL"
self._data.update({self._state: STATE_UNKNOWN})
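# Mapping summary (derived from current_position/position above, for
# reference): the Homematic level and the HA position are inverted linear
# scales:
#   HA position 100 (open)   <-> HM level 0.0
#   HA position   0 (closed) <-> HM level 1.0
# e.g. a requested position of 75 sets level (100 - 75) / 100.0 == 0.25.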
|
{
"content_hash": "fa287e2bcf7f85708108940b851c7917",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 32.89108910891089,
"alnum_prop": 0.6243226971703792,
"repo_name": "Julian/home-assistant",
"id": "4a6b06f925c9bf713f0e73ac45bee2ca96835655",
"size": "3322",
"binary": false,
"copies": "1",
"ref": "refs/heads/py2",
"path": "homeassistant/components/rollershutter/homematic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1354942"
},
{
"name": "Python",
"bytes": "2755966"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
}
|
"""
Generate responsive, embeddable HTML/JS code from URL of online content
"""
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
class OnlineContent(object):
"""
    Object representing a single piece of online content that can be accessed through a link.
    After initiating the `OnlineContent` object with its URL, embeddable code can be generated.
    Embed-code generation is offline by default.
    However, using the oEmbed protocol, it can also be generated by the host site through its own API. (Experimental)
    In some cases, e.g. Flickr images, the embed code must be generated online.
>>> from embedx import OnlineContent
>>> content = OnlineContent('https://www.youtube.com/watch?v=_lOT2p_FCvA')
>>> content.get_content_uid()
'_lOT2p_FCvA'
>>> content.get_embed_code()
'<iframe id="embedx-yt" type="text/html" width="640" height="390" position="center" src="https://www.youtube.com/embed/_lOT2p_FCvA" frameborder="0"></iframe>'
>>> content.check_if_alive()
True
"""
# pointer to current `OnlineContent` object
instance = None
# these templates must be defined by implementor subclasses
hostnames = []
EMBED_SCRIPT = ""
STATUS_LINK = "" # it is needed for hosts which generate HTTP:200 even if CONTENT_ID is invalid e.g. Youtube
LINK_TEMPLATE = "" # not used. yet
def __init__(self, url):
        # without a protocol scheme, `urlparse` may not parse the host correctly, so add one
if not url.startswith('http'):
url = 'https://' + url
# URL-specific dispatching to `provider` objects e.g. Youtube, Twitter etc.
for provider_obj in self.__class__.__subclasses__():
_hosts = provider_obj.hostnames
            if any(host in url for host in _hosts):
self.instance = provider_obj(url)
def get_provider(self):
return self.instance.__str__()
def get_content_uid(self):
"""Returns the unique ID extracted from the link of the content
"""
return self.instance.extract_id()
    def get_embed_code(self):
        if len(self.instance.EMBED_SCRIPT):
            return self.instance.EMBED_SCRIPT % ({'content_uid': self.get_content_uid()})
        elif len(self.instance.OEMBED_LINK):
            # Build the oEmbed request URL from the provider's OEMBED_LINK template.
            oembed_req_url = self.instance.OEMBED_LINK % ({'URL': self.instance.url})
            return oembed_req_url
        else:
            raise NotImplementedError
def get_clean_link(self):
if len(self.instance.LINK_TEMPLATE):
return self.instance.LINK_TEMPLATE % ({'content_uid': self.get_content_uid()})
else:
raise NotImplementedError
def extract_id(self):
"""Extract the unique ID from the URL link, set it to `self.content_uid`, and return the value.
This method must be implemented by sub-class.
:return: string ID of the content
"""
raise NotImplementedError
    def check_if_alive(self):
        """Check if the content is available on the host server. Returns `True` if available, else `False`.
        This check is lazy: the network request happens only when this method is called.
        :rtype: bool
        """
        try:
            from urllib2 import urlopen, URLError, HTTPError
        except ImportError:
            from urllib.request import urlopen
            from urllib.error import URLError, HTTPError
        if len(self.instance.STATUS_LINK):
            check_url = self.instance.STATUS_LINK % ({'content_uid': self.get_content_uid()})
        else:
            # fallback
            check_url = self.instance.url
        try:
            response = urlopen(check_url)
        except (HTTPError, URLError):
            return False
        except ValueError:
            raise URLError('Invalid URL: {}'.format(check_url))
        else:
            return response.code == 200
class YouTube(OnlineContent):
""" Use `OnlineContent` object to instatiate or use this class
>>> from embedx import OnlineContent
>>> content = OnlineContent('https://www.youtube.com/watch?v=_lOT2p_FCvA')
>>> content.get_content_uid()
'_lOT2p_FCvA'
"""
hostnames = ['youtube', 'youtu.be']
EMBED_SCRIPT = '''<iframe id="embedx-yt" type="text/html" width="640" height="390" position="center" src="https://www.youtube.com/embed/%(content_uid)s" frameborder="0"></iframe>'''
STATUS_LINK = '''https://www.youtube.com/oembed?url=https://www.youtube.com/watch?v=%(content_uid)s&format=json'''
LINK_TEMPLATE = '''https://www.youtube.com/watch?v=%(content_uid)s'''
def __init__(self, url):
self.url = url
def extract_id(self):
if '/channel/' in self.url:
raise NotImplementedError
if self.url.startswith(('youtu', 'www')):
self.url = 'https://' + self.url
parsed_url = urlparse(self.url)
if 'youtube' in parsed_url.hostname:
if parsed_url.path == '/watch':
return parse_qs(parsed_url.query)['v'][0]
elif parsed_url.path.startswith(('/embed/', '/v/')):
return parsed_url.path.split('/')[2]
else:
raise NotImplementedError
elif 'youtu.be' in parsed_url.hostname:
return parsed_url.path[1:]
else:
raise ValueError("Invalid URL for a Youtube video")
class Vimeo(OnlineContent):
"""Use `OnlineContent` object to instatiate or use this class
>>> from embedx import OnlineContent
>>> vimeo = OnlineContent('https://vimeo.com/92129360')
>>> vimeo.get_embed_code()
"<div class='embedx-vm'><iframe src='https://player.vimeo.com/video/92129360' frameborder='0' webkitAllowFullScreen mozallowfullscreen allowFullScreen></iframe></div>"
"""
hostnames = ['vimeo', ]
LINK_TEMPLATE = '''https://vimeo.com/%(content_uid)s'''
STATUS_LINK = '''https://vimeo.com/api/oembed.json?url=https://vimeo.com/%(content_uid)s'''
EMBED_SCRIPT = ("<div class='embedx-vm'>"
"<iframe src='https://player.vimeo.com/video/%(content_uid)s' "
"frameborder='0' webkitAllowFullScreen mozallowfullscreen allowFullScreen>"
"</iframe></div>")
def __init__(self, url):
self.url = url
def extract_id(self):
if self.url.endswith('/'):
return self.url.split('/')[-2]
else:
return self.url.split('/')[-1]
class Twitter(OnlineContent):
"""Use `OnlineContent` object to instatiate or use this class
>>> from embedx import OnlineContent
>>> twit = OnlineContent('https://twitter.com/jack/status/20')
>>> twit.get_embed_code()
"<div id='embedx-twt' align='center'></div><script async src='https://platform.twitter.com/widgets.js'></script><script> window.onload=(function(){twttr.widgets.createTweet('20', document.getElementById('embedx-twt'),{});});</script>"
"""
hostnames = ['twitter', ]
EMBED_SCRIPT = ("<div id='embedx-twt' align='center'></div>"
"<script async src='https://platform.twitter.com/widgets.js'></script>"
"<script> window.onload=(function(){twttr.widgets.createTweet('%(content_uid)s',"
" document.getElementById('embedx-twt'),{});});</script>")
def __init__(self, url):
self.url = url
def extract_id(self):
if '/status/' not in self.url:
raise NotImplementedError
else:
return urlparse(self.url).path.split('/')[3]
class Github(OnlineContent):
"""Use `OnlineContent` object to instatiate or use this class
>>> from embedx import OnlineContent
>>> gist = OnlineContent('https://gist.github.com/kmonsoor/2a1afba4ee127cce50a0')
>>> gist.get_embed_code()
"<script src='https://gist.github.com/2a1afba4ee127cce50a0.js'></script>"
"""
hostnames = ['github', ]
EMBED_SCRIPT = "<script src='https://gist.github.com/%(content_uid)s.js'></script>"
def __init__(self, url):
self.url = url
def extract_id(self):
if 'gist.' not in self.url:
raise NotImplementedError
else:
return urlparse(self.url).path.split('/')[2]
class Flickr(OnlineContent):
hostnames = ['flickr', ]
OEMBED_LINK = "https://www.flickr.com/services/oembed/?url=%(URL)s&format=json"
def extract_id(self):
raise NotImplementedError
class Facebook(OnlineContent):
hostnames = ['facebook', 'fb.com']
STATUS_LINK = ""
EMBED_SCRIPT = ""
def extract_id(self):
raise NotImplementedError
# for quick overview testing
if __name__ == '__main__':
test_urls = ['https://twitter.com/thepodcastdude/status/686258030229336064',
'youtube.com/watch?v=_lOT2p_FCvA',
'https://www.facebook.com/kmonsoor/posts/10153282994792374',
'vimeo.com/150519302'
]
for a_url in test_urls:
try:
ov = OnlineContent(a_url)
print(ov.get_content_uid())
# print(ov.check_if_alive())
print(ov.get_embed_code())
except NotImplementedError:
pass
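# Extension sketch (hypothetical provider, not shipped with this module): a
# new host only needs an OnlineContent subclass with `hostnames` and an
# `extract_id`; the dispatch in OnlineContent.__init__ discovers it through
# __subclasses__().
#
# class SoundCloud(OnlineContent):
#     hostnames = ['soundcloud', ]
#     EMBED_SCRIPT = "<iframe src='...%(content_uid)s...'></iframe>"  # placeholder template
#     def __init__(self, url):
#         self.url = url
#     def extract_id(self):
#         return urlparse(self.url).path.split('/')[-1]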
|
{
"content_hash": "09e684a1caac98ca4177df63ce3a1be6",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 238,
"avg_line_length": 36.26086956521739,
"alnum_prop": 0.6103117505995204,
"repo_name": "kmonsoor/embedX",
"id": "4fc8244bc527a28d9ec9695580e4536a391d2950",
"size": "9220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "embedx/embedx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10861"
}
],
"symlink_target": ""
}
|
import click
import subprocess
import configparser
import os
import posixpath
import json
TB_SCRIPT = """
tell application "Tunnelblick"
set connected_configs to name of every configuration whose state = "CONNECTED"
return connected_configs
end tell
"""
@click.command()
@click.option(
"--auth/--no-auth",
default=True,
is_flag=True,
help="Authenticate with vault first.",
)
@click.option("--profile", default="default", type=str, help="AWS profile to update")
@click.option("--og", is_flag=True, help="Use OG method.")
@click.option(
"--from-profile/--not-from-profile",
default=True,
is_flag=True,
help="Gather arguments from the aws profile.",
)
@click.option(
"--vault-path",
default="aws/creds",
type=str,
help="Default path in the vault for keys.",
)
@click.option(
"--vault-write/--vault-read",
default=True,
is_flag=True,
help="Should we read or write from the vault?",
)
@click.option("--role", "--vault-role", default="user_engineer_default", type=str)
@click.option(
"--config",
"config_path",
default=os.path.expanduser("~/.aws/credentials"),
type=click.Path(),
)
@click.option("--vault-username", default=os.environ["USER"])
def main(
auth,
profile,
og,
from_profile,
vault_path,
vault_write,
vault_role,
config_path,
vault_username,
):
"""Update AWS credentials from the LendUp Vault"""
if not check_vpn():
click.echo("Please connect to the prod-us-east VPN before continuing")
raise click.Abort()
if not check_vault_connection():
if auth:
authenticate_vault(vault_username)
else:
click.echo(
"Please authenticate in vault with `vault auth` or the `--auth` flag to this command."
)
raise click.Abort()
config = get_aws_config(config_path)
if from_profile and profile in config:
vault_role = config[profile].get("vault_role", vault_role)
vault_path = config[profile].get("vault_path", vault_path)
vault_write = config[profile].getboolean("vault_write", vault_write)
if og:
vault_path = "aws/sts"
vault_write = True
if not vault_role.endswith("-og"):
vault_role += "-og"
try:
response_data = get_vault_info(
path=vault_path, role=vault_role, write=vault_write
)
except subprocess.CalledProcessError as e:
click.echo("Error in vault read, returned exit code: {}".format(e.returncode))
raise click.Abort()
if profile not in config:
config.add_section(profile)
if from_profile:
config[profile]["vault_role"] = vault_role
config[profile]["vault_path"] = vault_path
config[profile]["vault_write"] = "yes" if vault_write else "no"
config[profile]["aws_access_key_id"] = response_data["access_key"]
config[profile]["aws_secret_access_key"] = response_data["secret_key"]
if response_data.get("security_token", False):
config[profile]["aws_session_token"] = response_data["security_token"]
elif "aws_session_token" in config[profile]:
config[profile].pop("aws_session_token")
with open(config_path, "w") as f:
config.write(f)
def get_vault_info(path, role, write):
"""Get and parse fixed-width format Vault information."""
fullpath = posixpath.join(path, role)
if write:
out = subprocess.check_output(
["vault", "write", "-f", "--format", "json", f"{fullpath}"]
)
else:
out = subprocess.check_output(
["vault", "read", "--format", "json", f"{fullpath}"]
)
response = json.loads(out)
return response["data"]
def check_vault_connection():
    """Return True if the current Vault token is valid (lookup-self succeeds)."""
    try:
        # check_output raises CalledProcessError on a non-zero exit, so a
        # successful call means the token is valid.
        subprocess.check_output(
            ["vault", "read", "--format", "json", "/auth/token/lookup-self"]
        )
    except subprocess.CalledProcessError:
        return False
    else:
        return True
def authenticate_vault(username):
"""Authenticate to Vault."""
args = ["vault", "auth", "-method=ldap", "username={}".format(username)]
click.echo(" ".join(args))
subprocess.call(args)
def check_vpn(connection="prod-us-east"):
"""Check VPN Connection"""
out = subprocess.check_output(["osascript", "-e", TB_SCRIPT]).decode("utf-8")
return connection in out
def get_aws_config(path="~/.aws/credentials"):
"""Get the AWS configuration"""
config = configparser.ConfigParser()
config.read(path)
return config
if __name__ == "__main__":
main()
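# Expected JSON shape from `vault ... --format json` as consumed by
# get_vault_info above (field values are illustrative placeholders):
#
# {
#   "data": {
#     "access_key": "AKIA...",
#     "secret_key": "...",
#     "security_token": "..."    # present for STS-style paths only
#   }
# }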
|
{
"content_hash": "814d8145d208f82d98f8a1927c4f1d9c",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 102,
"avg_line_length": 28.325301204819276,
"alnum_prop": 0.6144193960017014,
"repo_name": "alexrudy/dotfiles",
"id": "8af4681a88a7d8c4ab6651369b528c968c6c2e58",
"size": "4726",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bin/lu-vault.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "373"
},
{
"name": "Python",
"bytes": "79202"
},
{
"name": "Ruby",
"bytes": "25013"
},
{
"name": "Shell",
"bytes": "93650"
}
],
"symlink_target": ""
}
|
'''
Test Website
:copyright: (c) 2014-2015 by Openlabs Technologies & Consulting (P) LTD
:license: GPLv3, see LICENSE for more details
'''
import json
from trytond.tests.test_tryton import POOL, USER, DB_NAME, CONTEXT
from trytond.transaction import Transaction
from test_base import BaseTestCase
from decimal import Decimal
class TestWebsite(BaseTestCase):
"""
Test case for website.
"""
def test_0010_sitemap(self):
"""
Tests the rendering of the sitemap.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
node1, = self.Node.create([{
'name': 'Node1',
'type_': 'catalog',
'slug': 'node1',
}])
node2, = self.Node.create([{
'name': 'Node2',
'type_': 'catalog',
'slug': 'node2',
'display': 'product.template',
}])
node3, = self.Node.create([{
'name': 'Node3',
'type_': 'catalog',
'slug': 'node3',
}])
node4, = self.Node.create([{
'name': 'Node4',
'type_': 'catalog',
'slug': 'node4',
}])
node5, = self.Node.create([{
'name': 'Node5',
'type_': 'catalog',
'slug': 'node5',
}])
self.Node.write([node2], {
'parent': node1
})
self.Node.write([node3], {
'parent': node1,
})
self.Node.write([node4], {
'parent': node3,
})
self.Node.write([node5], {
'parent': node4,
})
with app.test_client() as c:
rv = c.get('/sitemap')
self.assertEqual(rv.status_code, 200)
self.assertIn('Node1', rv.data)
self.assertIn('Node2', rv.data)
self.assertIn('Node3', rv.data)
self.assertIn('Node4', rv.data)
                # Nodes deeper than two levels will not show.
self.assertNotIn('Node5', rv.data)
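                # Node tree built above, for reference:
                #   Node1
                #   +-- Node2
                #   +-- Node3
                #       +-- Node4
                #           +-- Node5   <- deeper than two levels, so excluded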
def test_0020_search_data(self):
"""
Tests that the auto-complete search URL returns JSON product data.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
self.create_test_products()
rv = c.get('/search-auto-complete?q=product')
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data)
                self.assertEqual(data['results'], [])
def test0030_menuitem(self):
'''
Test create menuitem for products
'''
self.Product = POOL.get('product.product')
self.Template = POOL.get('product.template')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
app = self.get_app()
self.setup_defaults()
uom, = self.Uom.search([], limit=1)
template, = self.Template.create([{
'name': 'TestProduct',
'type': 'goods',
'list_price': Decimal('100'),
'cost_price': Decimal('100'),
'default_uom': uom.id,
}])
product, = self.Product.create([{
'template': template.id,
'displayed_on_eshop': True,
'uri': 'test-product',
}])
with app.test_request_context('/'):
rv = product.get_menu_item(max_depth=10)
self.assertEqual(rv['title'], product.name)
|
{
"content_hash": "dac430b42a0610106472c06780a72695",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 75,
"avg_line_length": 29.007518796992482,
"alnum_prop": 0.46578538102643857,
"repo_name": "openlabs/nereid-webshop",
"id": "7fac74934bdc351d9992aa4a261f27e7023ce3b6",
"size": "3858",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_website.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "61527"
},
{
"name": "HTML",
"bytes": "212050"
},
{
"name": "JavaScript",
"bytes": "28047"
},
{
"name": "Python",
"bytes": "123792"
}
],
"symlink_target": ""
}
|
import itertools
import logging
import unittest
import urllib
import environment
import keyspace_util
import utils
from vtdb import dbexceptions
from vtdb import vtgate_cursor
from vtdb import vtgate_client
shard_0_master = None
shard_1_master = None
lookup_master = None
keyspace_env = None
create_vt_user = '''create table vt_user (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_vt_user2 = '''create table vt_user2 (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_vt_user_extra = '''create table vt_user_extra (
user_id bigint,
email varchar(64),
primary key (user_id)
) Engine=InnoDB'''
create_vt_music = '''create table vt_music (
user_id bigint,
id bigint,
song varchar(64),
primary key (user_id, id)
) Engine=InnoDB'''
create_vt_music_extra = '''create table vt_music_extra (
music_id bigint,
user_id bigint,
artist varchar(64),
primary key (music_id)
) Engine=InnoDB'''
create_join_user = '''create table join_user (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_join_user_extra = '''create table join_user_extra (
user_id bigint,
email varchar(64),
primary key (user_id)
) Engine=InnoDB'''
create_join_name_info = '''create table join_name_info (
name varchar(128),
info varchar(128),
primary key (name)
) Engine=InnoDB'''
create_twopc_user = '''create table twopc_user (
user_id bigint,
val varchar(128),
primary key (user_id)
) Engine=InnoDB'''
create_vt_user_seq = '''create table vt_user_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_user_seq = 'insert into vt_user_seq values(0, 1, 2)'
create_vt_music_seq = '''create table vt_music_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_music_seq = 'insert into vt_music_seq values(0, 1, 2)'
create_vt_main_seq = '''create table vt_main_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_main_seq = 'insert into vt_main_seq values(0, 1, 2)'
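# Sequence semantics (Vitess convention, noted for reference): each sequence
# table holds a single row with id=0; next_id is the next value to hand out
# and cache is the block size a vtgate reserves per fetch, so values(0, 1, 2)
# starts the sequence at 1 with blocks of 2 cached per vtgate.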
create_name_user2_map = '''create table name_user2_map (
name varchar(64),
user2_id bigint,
primary key (name, user2_id)
) Engine=InnoDB'''
create_music_user_map = '''create table music_user_map (
music_id bigint,
user_id bigint,
primary key (music_id)
) Engine=InnoDB'''
create_main = '''create table main (
id bigint,
val varchar(128),
primary key(id)
) Engine=InnoDB'''
create_twopc_lookup = '''create table twopc_lookup (
id bigint,
val varchar(128),
primary key (id)
) Engine=InnoDB'''
vschema = {
'user': '''{
"sharded": true,
"vindexes": {
"user_index": {
"type": "hash"
},
"unicode_hash": {
"type": "unicode_loose_md5"
},
"name_user2_map": {
"type": "lookup_hash",
"params": {
"table": "name_user2_map",
"from": "name",
"to": "user2_id"
},
"owner": "vt_user2"
},
"music_user_map": {
"type": "lookup_hash_unique",
"params": {
"table": "music_user_map",
"from": "music_id",
"to": "user_id"
},
"owner": "vt_music"
}
},
"tables": {
"vt_user": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
}
],
"auto_increment": {
"column": "id",
"sequence": "vt_user_seq"
}
},
"vt_user2": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
},
{
"column": "name",
"name": "name_user2_map"
}
]
},
"vt_user_extra": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
}
]
},
"vt_music": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
},
{
"column": "id",
"name": "music_user_map"
}
],
"auto_increment": {
"column": "id",
"sequence": "vt_music_seq"
}
},
"vt_music_extra": {
"column_vindexes": [
{
"column": "music_id",
"name": "music_user_map"
},
{
"column": "user_id",
"name": "user_index"
}
]
},
"join_user": {
"column_vindexes": [
{
"column": "id",
"name": "user_index"
}
]
},
"join_user_extra": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
}
]
},
"join_name_info": {
"column_vindexes": [
{
"column": "name",
"name": "unicode_hash"
}
]
},
"twopc_user": {
"column_vindexes": [
{
"column": "user_id",
"name": "user_index"
}
]
}
}
}''',
'lookup': '''{
"sharded": false,
"tables": {
"vt_user_seq": {
"type": "sequence"
},
"vt_music_seq": {
"type": "sequence"
},
"vt_main_seq": {
"type": "sequence"
},
"music_user_map": {},
"name_user2_map": {},
"main": {
"auto_increment": {
"column": "id",
"sequence": "vt_main_seq"
}
},
"twopc_lookup": {}
}
}''',
}
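# Routing sketch (informal reading of the vschema above): for a query like
#   select id, name from vt_user2 where name = 'name1'
# the name_user2_map lookup_hash vindex translates `name` into user2_id
# values via the lookup keyspace, hashes those ids into keyspace ids, and
# routes the query only to the shards that own them.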
def setUpModule():
global keyspace_env
global shard_0_master
global shard_1_master
global lookup_master
logging.debug('in setUpModule')
try:
environment.topo_server().setup()
logging.debug('Setting up tablets')
keyspace_env = keyspace_util.TestEnv()
keyspace_env.launch(
'user',
shards=['-80', '80-'],
ddls=[
create_vt_user,
create_vt_user2,
create_vt_user_extra,
create_vt_music,
create_vt_music_extra,
create_join_user,
create_join_user_extra,
create_join_name_info,
create_twopc_user,
],
rdonly_count=1, # to test SplitQuery
twopc_coordinator_address='localhost:15028', # enables 2pc
)
keyspace_env.launch(
'lookup',
ddls=[
create_vt_user_seq,
create_vt_music_seq,
create_vt_main_seq,
create_music_user_map,
create_name_user2_map,
create_main,
create_twopc_lookup,
],
twopc_coordinator_address='localhost:15028', # enables 2pc
)
shard_0_master = keyspace_env.tablet_map['user.-80.master']
shard_1_master = keyspace_env.tablet_map['user.80-.master']
lookup_master = keyspace_env.tablet_map['lookup.0.master']
utils.apply_vschema(vschema)
utils.VtGate().start(
tablets=[shard_0_master, shard_1_master, lookup_master],
extra_args=['-transaction_mode', 'TWOPC'])
utils.vtgate.wait_for_endpoints('user.-80.master', 1)
utils.vtgate.wait_for_endpoints('user.80-.master', 1)
utils.vtgate.wait_for_endpoints('lookup.0.master', 1)
except:
tearDownModule()
raise
def tearDownModule():
logging.debug('in tearDownModule')
utils.required_teardown()
if utils.options.skip_teardown:
return
logging.debug('Tearing down the servers and setup')
if keyspace_env:
keyspace_env.teardown()
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
def get_connection(timeout=10.0):
protocol, endpoint = utils.vtgate.rpc_endpoint(python=True)
try:
return vtgate_client.connect(protocol, endpoint, timeout)
except Exception:
logging.exception('Connection to vtgate (timeout=%s) failed.', timeout)
raise
class TestVTGateFunctions(unittest.TestCase):
int_type = 265
string_type = 6165
def setUp(self):
self.master_tablet = shard_1_master
def execute_on_master(self, vtgate_conn, sql, bind_vars):
return vtgate_conn._execute(
sql, bind_vars, tablet_type='master', keyspace_name=None)
def test_health(self):
f = urllib.urlopen('http://localhost:%d/debug/health' % utils.vtgate.port)
response = f.read()
f.close()
self.assertEqual(response, 'ok')
def test_srv_vschema(self):
"""Makes sure the SrvVSchema object is properly built."""
v = utils.run_vtctl_json(['GetSrvVSchema', 'test_nj'])
self.assertEqual(len(v['keyspaces']), 2, 'wrong vschema: %s' % str(v))
self.assertIn('user', v['keyspaces'])
self.assertIn('lookup', v['keyspaces'])
def test_user(self):
count = 4
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(
tablet_type='master', keyspace=None, writable=True)
# Initialize the sequence.
# TODO(sougou): Use DDL when ready.
cursor.begin()
cursor.execute(init_vt_user_seq, {})
cursor.commit()
# Test insert
for x in xrange(count):
i = x+1
cursor.begin()
cursor.execute(
'insert into vt_user (name) values (:name)',
{'name': 'test %s' % i})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([], 1L, i, []))
cursor.commit()
# Test select equal
for x in xrange(count):
i = x+1
cursor.execute('select id, name from vt_user where id = :id', {'id': i})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([(i, 'test %s' % i)], 1L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test case sensitivity
cursor.execute('select Id, Name from vt_user where iD = :id', {'id': 1})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([(1, 'test 1')], 1L, 0,
[('Id', self.int_type), ('Name', self.string_type)]))
# Test insert with no auto-inc
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user (id, name) values (:id, :name)',
{'id': 6, 'name': 'test 6'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
# Verify values in db
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((4L, 'test 4'), (6L, 'test 6')))
# Test MultiValueInsert with no auto-inc
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user (id, name) values (:id0, :name0), (:id1, :name1)',
{'id0': 5, 'name0': 'test 5','id1': 7, 'name1': 'test 7'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
# Verify values in db
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (5L, 'test 5')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((4L, 'test 4'), (6L, 'test 6'), (7L, 'test 7')))
# Test IN clause
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user where id in (:a, :b)', {'a': 1, 'b': 4})
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (4L, 'test 4')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user where id in (:a, :b)', {'a': 1, 'b': 2})
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (2L, 'test 2')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test scatter
result = vtgate_conn._execute(
'select id, name from vt_user',
{}, tablet_type='master', keyspace_name=None)
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(5L, 'test 5'), (6L, 'test 6'), (7L, 'test 7')], 7L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test stream over scatter
stream_cursor_1 = vtgate_conn.cursor(
tablet_type='master', keyspace=None,
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor_1.execute('select id, name from vt_user', {})
stream_cursor_2 = vtgate_conn.cursor(
tablet_type='master', keyspace=None,
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor_2.execute('select id, name from vt_user', {})
self.assertEqual(stream_cursor_1.description,
[('id', self.int_type), ('name', self.string_type)])
self.assertEqual(stream_cursor_2.description,
[('id', self.int_type), ('name', self.string_type)])
rows_1 = []
rows_2 = []
for row_1, row_2 in itertools.izip(stream_cursor_1, stream_cursor_2):
rows_1.append(row_1)
rows_2.append(row_2)
self.assertEqual(
sorted(rows_1),
[(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(5L, 'test 5'),(6L, 'test 6'),(7L, 'test 7')])
self.assertEqual(
sorted(rows_2),
[(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(5L, 'test 5'),(6L, 'test 6'),(7L, 'test 7')])
# Test updates
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user set name = :name where id = :id',
{'id': 1, 'name': 'test one'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_user set name = :name where id = :id',
{'id': 4, 'name': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(
result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3'), (5L, 'test 5')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(
result, ((4L, 'test four'), (6L, 'test 6'), (7L, 'test 7')))
# Test deletes
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user where id = :id',
{'id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user where id = :id',
{'id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3'), (5L, 'test 5')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((6L, 'test 6'), (7L, 'test 7')))
# test passing in the keyspace in the cursor
lcursor = vtgate_conn.cursor(
tablet_type='master', keyspace='lookup', writable=True)
with self.assertRaisesRegexp(
dbexceptions.DatabaseError, '.*table vt_user not found in schema.*'):
lcursor.execute('select id, name from vt_user', {})
def test_user2(self):
# user2 is for testing non-unique vindexes
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 1, 'name': 'name1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 7, 'name': 'name1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id0, :name0),(:id1, :name1)',
{'id0': 2, 'name0': 'name2','id1': 3, 'name1': 'name2'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((1L, 'name1'), (2L, 'name2'), (3L, 'name2')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((7L, 'name1'),))
result = lookup_master.mquery(
'vt_lookup', 'select name, user2_id from name_user2_map')
self.assertEqual(result, (('name1', 1L), ('name1', 7L), ('name2', 2L), ('name2', 3L)))
# Test select by id
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user2 where id = :id', {'id': 1})
self.assertEqual(
result, ([(1, 'name1')], 1L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test select by lookup
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user2 where name = :name', {'name': 'name1'})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (7, 'name1')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test IN clause using non-unique vindex
result = self.execute_on_master(
vtgate_conn,
"select id, name from vt_user2 where name in ('name1', 'name2')", {})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (2, 'name2'), (3, 'name2'), (7, 'name1')], 4L, 0,
[('id', self.int_type), ('name', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
"select id, name from vt_user2 where name in ('name1')", {})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (7, 'name1')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test delete
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 2})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((3L, 'name2'),))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((7L, 'name1'),))
result = lookup_master.mquery(
'vt_lookup', 'select name, user2_id from name_user2_map')
self.assertEqual(result, (('name1', 7L), ('name2', 3L)))
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 7})
vtgate_conn.commit()
def test_user_extra(self):
# user_extra is for testing unowned functional vindex
count = 4
vtgate_conn = get_connection()
for x in xrange(count):
i = x+1
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': i, 'email': 'test %s' % i})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
for x in xrange(count):
i = x+1
result = self.execute_on_master(
vtgate_conn,
'select user_id, email from vt_user_extra where user_id = :user_id',
{'user_id': i})
self.assertEqual(
result,
([(i, 'test %s' % i)], 1L, 0,
[('user_id', self.int_type), ('email', self.string_type)]))
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((4L, 'test 4'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra set email = :email where user_id = :user_id',
{'user_id': 1, 'email': 'test one'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra set email = :email where user_id = :user_id',
{'user_id': 4, 'email': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((4L, 'test four'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ())
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 2})
self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 3})
vtgate_conn.commit()
def test_music(self):
# music is for testing owned lookup index
vtgate_conn = get_connection()
# Initialize the sequence.
# TODO(sougou): Use DDL when ready.
vtgate_conn.begin()
self.execute_on_master(vtgate_conn, init_vt_music_seq, {})
vtgate_conn.commit()
count = 4
for x in xrange(count):
i = x+1
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music (user_id, song) values (:user_id, :song)',
{'user_id': i, 'song': 'test %s' % i})
self.assertEqual(result, ([], 1L, i, []))
vtgate_conn.commit()
for x in xrange(count):
i = x+1
result = self.execute_on_master(
vtgate_conn,
'select user_id, id, song from vt_music where id = :id', {'id': i})
self.assertEqual(
result,
([(i, i, 'test %s' % i)], 1, 0,
[('user_id', self.int_type),
('id', self.int_type),
('song', self.string_type)]))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music (user_id, id, song) '
'values (:user_id0, :id0, :song0), (:user_id1, :id1, :song1)',
{'user_id0': 5, 'id0': 6, 'song0': 'test 6','user_id1': 7, 'id1': 7, 'song1': 'test 7'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result,
((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
(5L, 6L, 'test 6')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((4L, 4L, 'test 4'), (7L, 7L, 'test 7')))
result = lookup_master.mquery(
'vt_lookup', 'select music_id, user_id from music_user_map')
self.assertEqual(
result,
((1L, 1L), (2L, 2L), (3L, 3L), (4L, 4L), (6L, 5L), (7L, 7L)))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_music set song = :song where id = :id',
{'id': 6, 'song': 'test six'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_music set song = :song where id = :id',
{'id': 4, 'song': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
(5L, 6L, 'test six')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((4L, 4L, 'test four'), (7L, 7L, 'test 7')))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music where id = :id',
{'id': 3})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music where user_id = :user_id',
{'user_id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (5L, 6L, 'test six')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(result, ((7L, 7L, 'test 7'),))
result = lookup_master.mquery(
'vt_lookup', 'select music_id, user_id from music_user_map')
self.assertEqual(result, ((1L, 1L), (2L, 2L), (6L, 5L), (7L, 7L)))
def test_music_extra(self):
    # music_extra is for testing an unowned lookup index
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music_extra (music_id, user_id, artist) '
'values (:music_id, :user_id, :artist)',
{'music_id': 1, 'user_id': 1, 'artist': 'test 1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music_extra (music_id, artist) '
'values (:music_id0, :artist0), (:music_id1, :artist1)',
{'music_id0': 6, 'artist0': 'test 6', 'music_id1': 7, 'artist1': 'test 7'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select music_id, user_id, artist '
'from vt_music_extra where music_id = :music_id',
{'music_id': 6})
self.assertEqual(
result, ([(6L, 5L, 'test 6')], 1, 0,
[('music_id', self.int_type),
('user_id', self.int_type),
('artist', self.string_type)]))
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test 6')))
result = shard_1_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((7L, 7L, 'test 7'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_music_extra set artist = :artist '
'where music_id = :music_id',
{'music_id': 6, 'artist': 'test six'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_music_extra set artist = :artist '
'where music_id = :music_id',
{'music_id': 7, 'artist': 'test seven'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test six')))
result = shard_1_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((7L, 7L, 'test seven'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music_extra where music_id = :music_id',
{'music_id': 6})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music_extra where music_id = :music_id',
{'music_id': 7})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'),))
result = shard_1_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ())
def test_main_seq(self):
    # main is for testing sequences in the unsharded keyspace
vtgate_conn = get_connection()
# Initialize the sequence.
# TODO(sougou): Use DDL when ready.
vtgate_conn.begin()
self.execute_on_master(vtgate_conn, init_vt_main_seq, {})
vtgate_conn.commit()
count = 4
for x in xrange(count):
i = x+1
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into main (val) values (:val)',
{'val': 'test %s' % i})
self.assertEqual(result, ([], 1L, i, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn, 'select id, val from main where id = 4', {})
self.assertEqual(
result,
([(4, 'test 4')], 1, 0,
[('id', self.int_type),
('val', self.string_type)]))
# Now test direct calls to sequence.
result = self.execute_on_master(
vtgate_conn, "select next 1 values from vt_main_seq", {})
self.assertEqual(
result,
([(5,)], 1, 0,
[('nextval', self.int_type)]))
def test_joins(self):
vtgate_conn = get_connection()
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'insert into join_user (id, name) values (:id, :name)',
{'id': 1, 'name': 'name1'})
self.execute_on_master(
vtgate_conn,
'insert into join_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': 1, 'email': 'email1'})
self.execute_on_master(
vtgate_conn,
'insert into join_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': 2, 'email': 'email2'})
self.execute_on_master(
vtgate_conn,
'insert into join_name_info (name, info) '
'values (:name, :info)',
{'name': 'name1', 'info': 'name test'})
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e where e.user_id = u.id',
{})
self.assertEqual(
result,
([(1L, 'name1', 1L, 'email1')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e where e.user_id = u.id+1',
{})
self.assertEqual(
result,
([(1L, 'name1', 2L, 'email2')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u left join join_user_extra e on e.user_id = u.id+1',
{})
self.assertEqual(
result,
([(1L, 'name1', 2L, 'email2')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u left join join_user_extra e on e.user_id = u.id+2',
{})
self.assertEqual(
result,
([(1L, 'name1', None, None)],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e on e.user_id = u.id+2 '
'where u.id = 2',
{})
self.assertEqual(
result,
([],
0,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, n.info '
'from join_user u join join_name_info n on u.name = n.name '
'where u.id = 1',
{})
self.assertEqual(
result,
([(1L, 'name1', 'name test')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('info', self.string_type)]))
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from join_user where id = :id',
{'id': 1})
self.execute_on_master(
vtgate_conn,
'delete from join_user_extra where user_id = :user_id',
{'user_id': 1})
self.execute_on_master(
vtgate_conn,
'delete from join_user_extra where user_id = :user_id',
{'user_id': 2})
vtgate_conn.commit()
def test_insert_value_required(self):
vtgate_conn = get_connection()
try:
vtgate_conn.begin()
with self.assertRaisesRegexp(
dbexceptions.DatabaseError, '.*value must be supplied.*'):
self.execute_on_master(
vtgate_conn,
'insert into vt_user_extra (email) values (:email)',
{'email': 'test 10'})
finally:
vtgate_conn.rollback()
def test_transaction_modes(self):
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(
tablet_type='master', keyspace=None, writable=True, single_db=True)
cursor.begin()
cursor.execute(
'insert into twopc_user (user_id, val) values(1, \'val\')', {})
with self.assertRaisesRegexp(
dbexceptions.DatabaseError, '.*multi-db transaction attempted.*'):
cursor.execute(
'insert into twopc_lookup (id, val) values(1, \'val\')', {})
cursor = vtgate_conn.cursor(
tablet_type='master', keyspace=None, writable=True, twopc=True)
cursor.begin()
cursor.execute(
'insert into twopc_user (user_id, val) values(1, \'val\')', {})
cursor.execute(
'insert into twopc_lookup (id, val) values(1, \'val\')', {})
cursor.commit()
cursor.execute('select user_id, val from twopc_user where user_id = 1', {})
self.assertEqual(cursor.fetchall(), [(1, 'val')])
cursor.execute('select id, val from twopc_lookup where id = 1', {})
self.assertEqual(cursor.fetchall(), [(1, 'val')])
cursor.begin()
cursor.execute('delete from twopc_user where user_id = 1', {})
cursor.execute('delete from twopc_lookup where id = 1', {})
cursor.commit()
cursor.execute('select user_id, val from twopc_user where user_id = 1', {})
self.assertEqual(cursor.fetchall(), [])
cursor.execute('select id, val from twopc_lookup where id = 1', {})
self.assertEqual(cursor.fetchall(), [])
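    # Context, for reference: the single_db cursor fails above because the two
    # inserts span the sharded 'user' and unsharded 'lookup' keyspaces, while
    # the twopc cursor succeeds because vtgate runs with
    # '-transaction_mode TWOPC' and both keyspaces were launched with a
    # twopc_coordinator_address (see setUpModule).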
def test_vtclient(self):
"""This test uses vtclient to send and receive various queries.
"""
# specify a good default keyspace for the connection here.
utils.vtgate.vtclient(
'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
keyspace='user',
bindvars=[10, 'test 10'])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': [[u'10', u'test 10']],
})
utils.vtgate.vtclient(
'update vt_user_extra set email=:v2 where user_id = :v1',
bindvars=[10, 'test 1000'])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], streaming=True, json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': [[u'10', u'test 1000']],
})
utils.vtgate.vtclient(
'delete from vt_user_extra where user_id = :v1', bindvars=[10])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': None,
})
# check that specifying an invalid keyspace is propagated and triggers an
# error
_, err = utils.vtgate.vtclient(
'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
keyspace='invalid',
bindvars=[10, 'test 10'],
raise_on_error=False)
self.assertIn('keyspace invalid not found in vschema', err)
def test_vtctl_vtgate_execute(self):
"""This test uses 'vtctl VtGateExecute' to send and receive various queries.
"""
utils.vtgate.execute(
'insert into vt_user_extra(user_id, email) values (:user_id, :email)',
bindvars={'user_id': 11, 'email': 'test 11'})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
logging.debug('Original row: %s', str(qr))
self.assertEqual(qr['fields'][0]['name'], 'user_id')
self.assertEqual(len(qr['rows']), 1)
v = qr['rows'][0][1]
self.assertEqual(v, 'test 11')
    # test that included_fields:TYPE_ONLY omits the field names.
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11}, execute_options='included_fields:TYPE_ONLY ')
logging.debug('Original row: %s', str(qr))
self.assertNotIn('name', qr['fields'][0])
self.assertEqual(len(qr['rows']), 1)
v = qr['rows'][0][1]
self.assertEqual(v, 'test 11')
utils.vtgate.execute(
'update vt_user_extra set email=:email where user_id = :user_id',
bindvars={'user_id': 11, 'email': 'test 1100'})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
logging.debug('Modified row: %s', str(qr))
self.assertEqual(len(qr['rows']), 1)
v = qr['rows'][0][1]
self.assertEqual(v, 'test 1100')
utils.vtgate.execute(
'delete from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
self.assertEqual(len(qr['rows'] or []), 0)
def test_split_query(self):
"""This test uses 'vtctl VtGateSplitQuery' to validate the Map-Reduce APIs.
    The split queries returned are expected to be KeyRange queries.
"""
sql = 'select id, name from vt_user'
s = utils.vtgate.split_query(sql, 'user', 2)
self.assertEqual(len(s), 2)
first_half_queries = 0
second_half_queries = 0
for q in s:
self.assertEqual(q['query']['sql'], sql)
self.assertIn('key_range_part', q)
self.assertEqual(len(q['key_range_part']['key_ranges']), 1)
kr = q['key_range_part']['key_ranges'][0]
eighty_in_base64 = 'gA=='
is_first_half = 'start' not in kr and kr['end'] == eighty_in_base64
is_second_half = 'end' not in kr and kr['start'] == eighty_in_base64
self.assertTrue(is_first_half or is_second_half,
'invalid keyrange %s' % str(kr))
if is_first_half:
first_half_queries += 1
else:
second_half_queries += 1
self.assertEqual(first_half_queries, 1, 'invalid split %s' % str(s))
self.assertEqual(second_half_queries, 1, 'invalid split %s' % str(s))
def test_vschema_vars(self):
v = utils.vtgate.get_vars()
self.assertIn('VtgateVSchemaCounts', v)
self.assertIn('Reload', v['VtgateVSchemaCounts'])
self.assertTrue(v['VtgateVSchemaCounts']['Reload'] > 0)
self.assertNotIn('Parsing', v['VtgateVSchemaCounts'])
self.assertNotIn('WatchError', v['VtgateVSchemaCounts'])
if __name__ == '__main__':
utils.main()
|
{
"content_hash": "5adfc4ba92c0279616383ead4aef1105",
"timestamp": "",
"source": "github",
"line_count": 1243,
"max_line_length": 96,
"avg_line_length": 33.30008045052293,
"alnum_prop": 0.5542375338229609,
"repo_name": "applift/vitess",
"id": "058926be51622c69bd262474fb1b4206d20538d3",
"size": "42011",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/vtgatev3_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6156"
},
{
"name": "CSS",
"bytes": "213877"
},
{
"name": "Go",
"bytes": "7302479"
},
{
"name": "HTML",
"bytes": "58963"
},
{
"name": "Java",
"bytes": "1120251"
},
{
"name": "JavaScript",
"bytes": "43452"
},
{
"name": "Liquid",
"bytes": "7287"
},
{
"name": "Makefile",
"bytes": "9479"
},
{
"name": "PHP",
"bytes": "1252556"
},
{
"name": "Protocol Buffer",
"bytes": "138909"
},
{
"name": "Python",
"bytes": "1099025"
},
{
"name": "Ruby",
"bytes": "466"
},
{
"name": "Shell",
"bytes": "61718"
},
{
"name": "Smarty",
"bytes": "24438"
},
{
"name": "TypeScript",
"bytes": "152739"
},
{
"name": "Yacc",
"bytes": "34615"
}
],
"symlink_target": ""
}
|
"""
A simple echo server.
Launch it with:
$ ./server.py
or:
$ ./server.py xml '<?xml version="1.0" encoding="UTF-8" ?><methodResponse><params>\n<param><value><struct>\n<member><name>success</name><value><i4>1</i4></value></member>\n</struct></value></param>\n</params></methodResponse>\n\n\n'
Test it with curl:
curl http://127.0.0.1:50000/api/order/add_order/ -d "id=123456&customer=509347002&reason=whatever&description=nowayyy"
"""
import socket
import sys
import time
from datetime import datetime
def main():
host = ''
port = 50000
backlog = 5
size = 1024*1024
sys.stdout.write('%s Server running on port: %s\n' % (datetime.now(), port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
s.bind((host,port))
s.listen(backlog)
# Loop forever.
while True:
# Read the request.
client, address = s.accept()
data = client.recv(size)
# Print the content of the request.
if data:
sys.stdout.write('\n\n%s RECEIVED: \n%s\n' % (datetime.now(), data))
else:
sys.stdout.write('RECEIVED: \nNo data.\n')
sys.stdout.flush()
# Send the response.
send_response(client)
client.close()
def send_response(client):
    is_xml = len(sys.argv) > 1 and 'xml' in sys.argv[1]
    xml_body = sys.argv[2].replace('\\n', '\n') if is_xml else None
    body = 'This is the body of this message\n' if not is_xml else xml_body
headers = 'Content-Type: text/%s\n' % ('xml' if is_xml else 'plain') + \
'Content-Length: %s\n' % len(body) + \
'Connection: close\n'
client.send('HTTP/1.1 200 OK\n')
client.send(headers)
client.send('\n')
client.send(body)
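# Illustrative response produced by send_response() in plain mode; note that
# this server separates lines with '\n' rather than the CRLF pairs HTTP
# formally requires, which curl tolerates:
#
#   HTTP/1.1 200 OK
#   Content-Type: text/plain
#   Content-Length: 33
#   Connection: close
#
#   This is the body of this message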
if __name__ == '__main__':
main()
|
{
"content_hash": "af8da2d91a9d38c2e64c0c497a5faef0",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 232,
"avg_line_length": 28.129032258064516,
"alnum_prop": 0.6009174311926605,
"repo_name": "nimiq/echo-server",
"id": "95dce575197538aae945b993e6473ef5272d6f33",
"size": "1767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1767"
}
],
"symlink_target": ""
}
|
from mutornadomon.collectors.web import WebCollector # noqa
|
{
"content_hash": "be75c13dbca54ab8db5224beb0742c77",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 60,
"avg_line_length": 61,
"alnum_prop": 0.8360655737704918,
"repo_name": "francoricci/sapspid",
"id": "6443013840baf758ab1215dde050dc73741b9b18",
"size": "61",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/mutornadomon/collectors/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2808"
},
{
"name": "PLpgSQL",
"bytes": "63664"
},
{
"name": "Python",
"bytes": "267311"
},
{
"name": "Shell",
"bytes": "2074"
}
],
"symlink_target": ""
}
|
import getpass
import base64
import urllib
import urllib2
from datetime import datetime
try:
import json
except ImportError:
import simplejson as json
def mstr(obj):
    if obj is None:
return ""
return unicode(obj)
def gittime(obj):
    if obj is None:
return None
return datetime.strptime(obj[0:19], "%Y-%m-%dT%H:%M:%S")
class GitPullRequest:
"""Pull Request from Git"""
def __init__(self, data, parent):
self.data = data
self.parent = parent
def html_url(self):
return self.data["html_url"]
def title(self):
return self.data["title"]
def number(self):
return self.data["number"]
#TODO def review_comments
def user(self):
return mstr(self.data["user"]["login"])
def fromBranch(self):
return mstr(self.data["head"]["ref"])
def fromRepo(self):
return mstr(self.data["head"]["repo"]["clone_url"])
def merged(self):
return self.data["merged_at"] != None
def raw(self):
return self.data
def created_at(self):
return gittime(self.data["created_at"])
def updated_at(self):
return gittime(self.data["updated_at"])
def merged_at(self):
return gittime(self.data["merged_at"])
def __str__(self):
return self.html_url()
def __repr__(self):
return self.html_url()
class GitHub:
"""Github API"""
def __init__(self, options):
self.headers = {}
if options.gituser:
gitpassword = getpass.getpass("github.com user " + options.gituser+":")
authstr = base64.encodestring('%s:%s' % (options.gituser, gitpassword)).replace('\n', '')
self.headers["Authorization"] = "Basic "+authstr
def pulls(self, user, repo, type="all"):
page=1
ret = []
while True:
url = "https://api.github.com/repos/"+user+"/"+repo+"/pulls?state="+type+"&page="+str(page)
req = urllib2.Request(url,None,self.headers)
result = urllib2.urlopen(req)
contents = result.read()
if result.getcode() != 200:
                raise Exception(str(result.getcode()) + " != 200 " + contents)
got = json.loads(contents)
for part in got:
ret.append(GitPullRequest(part, self))
if len(got) == 0:
return ret
page = page + 1
def openPulls(self, user, repo):
return self.pulls(user, repo, "open")
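# Hypothetical usage sketch (not part of the original module); `Options` here
# stands in for whatever options object the caller builds, which only needs a
# `gituser` attribute as read by GitHub.__init__:
#
#   class Options(object):
#       gituser = None  # set to a GitHub login to enable basic auth
#
#   gh = GitHub(Options())
#   for pr in gh.openPulls("apache", "storm"):
#       print("#%s %s by %s" % (pr.number(), pr.title(), pr.user()))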
|
{
"content_hash": "790c8aaed45a3435af7ca16e29fb50cd",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 94,
"avg_line_length": 22.520833333333332,
"alnum_prop": 0.6535615171137835,
"repo_name": "Aloomaio/incubator-storm",
"id": "c3e734e24a3b6be65a0fc31c1147f70d4159c37b",
"size": "2738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev-tools/github/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "52821"
},
{
"name": "C++",
"bytes": "1650"
},
{
"name": "CSS",
"bytes": "63637"
},
{
"name": "Clojure",
"bytes": "894325"
},
{
"name": "Fancy",
"bytes": "6234"
},
{
"name": "Java",
"bytes": "2976712"
},
{
"name": "JavaScript",
"bytes": "83688"
},
{
"name": "Python",
"bytes": "357373"
},
{
"name": "Ruby",
"bytes": "19946"
},
{
"name": "Shell",
"bytes": "11945"
},
{
"name": "Thrift",
"bytes": "9946"
}
],
"symlink_target": ""
}
|
from itertools import chain
from typing import Any, Dict
from ..compiler.helpers import (
INBOUND_EDGE_DIRECTION,
OUTBOUND_EDGE_DIRECTION,
FoldScopeLocation,
Location,
get_edge_direction_and_name,
)
from ..compiler.metadata import QueryMetadataTable
from ..schema.schema_info import QueryPlanningSchemaInfo
from .filter_selectivity_utils import adjust_counts_for_filters
def _is_subexpansion_optional(query_metadata, parent_location, child_location):
"""Return True if child_location is the root of an optional subexpansion."""
child_optional_depth = query_metadata.get_location_info(child_location).optional_scopes_depth
parent_optional_depth = query_metadata.get_location_info(parent_location).optional_scopes_depth
return child_optional_depth > parent_optional_depth
def _is_subexpansion_folded(location):
"""Return True if location is the root of a folded subexpansion."""
return isinstance(location, FoldScopeLocation) and len(location.fold_path) == 1
def _is_subexpansion_recursive(query_metadata, parent_location, child_location):
"""Return True if child_location is the root of a recursive subexpansion."""
edge_direction, edge_name = _get_last_edge_direction_and_name_to_location(child_location)
for recurse_info in query_metadata.get_recurse_infos(parent_location):
if recurse_info.edge_direction == edge_direction and recurse_info.edge_name == edge_name:
return True
return False
def _get_all_original_child_locations(query_metadata, start_location):
"""Get all original child Locations of a start Location and revisits to the start Location.
Args:
query_metadata: QueryMetadataTable object
start_location: Location object, where we're looking for child Locations
Returns:
        list of child Locations. Starting from start_location and all of its revisits, collect
        every child location and return the original (non-revisit) visit for each one.
"""
child_locations = set()
start_location_revisit_origin = [start_location]
start_location_revisits = list(query_metadata.get_all_revisits(start_location))
for location in chain(start_location_revisit_origin, start_location_revisits):
for child_location in query_metadata.get_child_locations(location):
child_location_revisit_origin = query_metadata.get_revisit_origin(child_location)
if child_location_revisit_origin is None:
# If child_location is not a revisit, set origin to child_location
child_location_revisit_origin = child_location
child_locations.add(child_location_revisit_origin)
return list(child_locations)
def _get_last_edge_direction_and_name_to_location(location):
"""Get the direction and name of the last edge to a non-root BaseLocation object."""
if isinstance(location, Location):
edge_direction, edge_name = get_edge_direction_and_name(location.query_path[-1])
elif isinstance(location, FoldScopeLocation):
edge_direction, edge_name = location.fold_path[-1]
else:
raise AssertionError("Unexpected location encountered: {}".format(location))
return edge_direction, edge_name
def _get_base_class_names_of_parent_and_child_from_edge(schema_graph, current_location):
"""Return the base class names of a location and its parent from last edge information."""
edge_direction, edge_name = _get_last_edge_direction_and_name_to_location(current_location)
edge_element = schema_graph.get_edge_schema_element_or_raise(edge_name)
if edge_direction == INBOUND_EDGE_DIRECTION:
parent_base_class_name = edge_element.base_out_connection
child_base_class_name = edge_element.base_in_connection
elif edge_direction == OUTBOUND_EDGE_DIRECTION:
parent_base_class_name = edge_element.base_in_connection
child_base_class_name = edge_element.base_out_connection
else:
raise AssertionError(
"Expected edge direction to be either inbound or outbound."
"Found: edge {} with direction {}".format(edge_name, edge_direction)
)
return parent_base_class_name, child_base_class_name
def _query_statistics_for_vertex_edge_vertex_count(
statistics, query_metadata, parent_location, child_location
):
"""Query statistics for the count of edges connecting parent and child_location vertices.
Given a parent location and a child location, there are three constraints on each edge directly
connecting the two:
1. The edge class must be the same as the target location's last traversed edge.
2. The parent_location vertex class must inherit from the edge endpoint the traversal began at.
3. The child_location vertex class must inherit from the edge endpoint the traversal ended at.
Using get_vertex_edge_vertex_count(), we find the number of edges satisfying these three
constraints.
Args:
statistics: Statistics object, used for querying over get_vertex_edge_vertex_count().
query_metadata: QueryMetadataTable object.
parent_location: BaseLocation, corresponding to the location the edge traversal begins from.
child_location: BaseLocation, child of parent_location corresponding to the location the
edge traversal ends at.
Returns:
- int, count of edges connecting parent and child_location vertices if the statistic exists.
- None otherwise.
"""
edge_direction, edge_name = _get_last_edge_direction_and_name_to_location(child_location)
parent_name_from_location = query_metadata.get_location_info(parent_location).type.name
child_name_from_location = query_metadata.get_location_info(child_location).type.name
# Since we need to provide the source vertex class and target vertex class in the same order
# regardless of the direction of edge traversal, we first provide the class of the outbound
    # vertex (i.e. the vertex the edge starts from), then the class of the inbound vertex
    # (i.e. the vertex the edge ends at).
if edge_direction == INBOUND_EDGE_DIRECTION:
outbound_vertex_name = child_name_from_location
inbound_vertex_name = parent_name_from_location
elif edge_direction == OUTBOUND_EDGE_DIRECTION:
outbound_vertex_name = parent_name_from_location
inbound_vertex_name = child_name_from_location
else:
raise AssertionError(
"Expected edge direction to be either inbound or outbound."
"Found: edge {} with direction {}".format(edge_name, edge_direction)
)
query_result = statistics.get_vertex_edge_vertex_count(
outbound_vertex_name, edge_name, inbound_vertex_name
)
return query_result
def _estimate_vertex_edge_vertex_count_using_class_count(
schema_info, query_metadata, parent_location, child_location
):
"""Estimate the count of edges connecting parent_location and child_location vertices.
Given a parent location of class A and a child location of class B, this function estimates the
number of AB edges using class counts. If A and B are subclasses of the edge's endpoint classes
(which we'll name C and D respectively), we only have statistics for CD edges. So estimates for
the number of AB edges will be made using the assumption that CD edges are distributed
independently of whether or not the vertex of class C is also of class A and likewise for D and
B. In the general case, we estimate the statistic as
(number of AB edges) = (number of CD edges) * (number of A vertices) / (number of C vertices) *
(number of B vertices) / (number of D vertices).
Args:
schema_info: QueryPlanningSchemaInfo
query_metadata: QueryMetadataTable object.
parent_location: BaseLocation, corresponding to the location the edge traversal begins from.
child_location: BaseLocation, child of parent_location corresponding to the location the
edge traversal ends at.
Returns:
float, estimate for number of edges connecting parent_location and child_location.
"""
_, edge_name = _get_last_edge_direction_and_name_to_location(child_location)
edge_counts = schema_info.statistics.get_class_count(edge_name)
parent_name_from_location = query_metadata.get_location_info(parent_location).type.name
child_name_from_location = query_metadata.get_location_info(child_location).type.name
(
parent_base_class_name,
child_base_class_name,
) = _get_base_class_names_of_parent_and_child_from_edge(
schema_info.schema_graph, child_location
)
# False-positive bug in pylint: https://github.com/PyCQA/pylint/issues/3039
# pylint: disable=old-division
#
# Scale edge_counts if child_location's type is a subclass of the edge's endpoint type.
if child_name_from_location != child_base_class_name:
edge_counts *= float(
schema_info.statistics.get_class_count(child_name_from_location)
) / schema_info.statistics.get_class_count(child_base_class_name)
# Scale edge_counts if parent_location's type is a subclass of the edge's endpoint type.
if parent_name_from_location != parent_base_class_name:
edge_counts *= float(
schema_info.statistics.get_class_count(parent_name_from_location)
) / schema_info.statistics.get_class_count(parent_base_class_name)
# pylint: enable=old-division
return edge_counts
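# Worked example of the scaling above (illustrative numbers only): with 1000
# CD edges, 200 A vertices out of 400 C vertices, and 50 B vertices out of
# 100 D vertices, the estimate is 1000 * (200 / 400) * (50 / 100) = 250 AB
# edges.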
def _estimate_edges_to_children_per_parent(
schema_info, query_metadata, parameters, parent_location, child_location
):
"""Estimate the count of edges per parent_location that connect to child_location vertices.
Given a parent location of type A and child location of type B, assume all AB edges are
distributed evenly over A vertices, so the expected number of child edges per parent vertex is
(number of AB edges) / (number of A vertices).
Args:
schema_info: QueryPlanningSchemaInfo
query_metadata: QueryMetadataTable object.
parameters: dict, parameters with which query will be executed.
parent_location: BaseLocation, corresponding to the location the edge traversal begins from.
child_location: BaseLocation, child of parent_location corresponding to the location the
edge traversal ends at.
Returns:
float, expected number of edges per parent_location vertex that connect to child_location
vertices.
"""
edge_counts = _query_statistics_for_vertex_edge_vertex_count(
schema_info.statistics, query_metadata, parent_location, child_location
)
if edge_counts is None:
edge_counts = _estimate_vertex_edge_vertex_count_using_class_count(
schema_info, query_metadata, parent_location, child_location
)
parent_name_from_location = query_metadata.get_location_info(parent_location).type.name
# Count the number of parents, over which we assume the edges are uniformly distributed.
parent_location_counts = schema_info.statistics.get_class_count(parent_name_from_location)
# Anticipate division by zero
if parent_location_counts == 0:
# This implies that edge_counts is also 0. However, asserting that edge_counts is 0 is
# too aggressive because we can't expect all statistics to be collected at the same time.
return 0.0
# False-positive bug in pylint: https://github.com/PyCQA/pylint/issues/3039
# pylint: disable=old-division
#
# TODO(evan): edges are not necessarily uniformly distributed, so record more statistics
child_counts_per_parent = float(edge_counts) / parent_location_counts
# pylint: enable=old-division
# TODO(evan): If edge is recursed over, we need a more detailed statistic
# Recursion always starts with depth = 0, so we should treat the parent result set itself as a
# child result set to be expanded (so add 1 to child_counts).
is_recursive = _is_subexpansion_recursive(query_metadata, parent_location, child_location)
if is_recursive:
child_counts_per_parent += 1
# Adjust the counts for filters at child_location.
child_name_from_location = query_metadata.get_location_info(child_location).type.name
child_filters = query_metadata.get_filter_infos(child_location)
child_counts_per_parent = adjust_counts_for_filters(
schema_info, child_filters, parameters, child_name_from_location, child_counts_per_parent
)
return child_counts_per_parent
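# Worked example (illustrative numbers only): 250 AB edges spread over 200 A
# vertices gives 1.25 expected children per parent before filter adjustment;
# if the edge were recursed over, the parent result set itself adds 1, for
# 2.25.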
def _estimate_subexpansion_cardinality(
schema_info, query_metadata, parameters, parent_location, child_location
):
"""Estimate the cardinality associated with the subexpansion of a child_location vertex.
Args:
schema_info: QueryPlanningSchemaInfo
query_metadata: QueryMetadataTable object
parameters: dict, parameters with which query will be executed
parent_location: BaseLocation object, location corresponding to the vertex being expanded
child_location: BaseLocation object, child of parent_location corresponding to the
subexpansion root
Returns:
float, number of expected result sets found when a vertex corresponding to parent_location
is expanded via child_location. For example, if parent_location (type A) has children (types
B and C), the subexpansion results associated with the B-location are the result sets found
when we expand an A-vertex over AB-edges and each subsequent B-vertex is fully expanded. We
estimate this recursively as:
(expected number of B-vertices) * (expected number of result sets per B-vertex).
"""
child_counts_per_parent = _estimate_edges_to_children_per_parent(
schema_info, query_metadata, parameters, parent_location, child_location
)
results_per_child = _estimate_expansion_cardinality(
schema_info, query_metadata, parameters, child_location
)
subexpansion_cardinality = child_counts_per_parent * results_per_child
# If child_location is the root of an optional or folded subexpansion, the empty result set will
# be returned if no other result sets exist, so return at least 1.
# TODO(evan): @filters on _x_count inside @folds can reduce result size.
is_optional = _is_subexpansion_optional(query_metadata, parent_location, child_location)
is_folded = _is_subexpansion_folded(child_location)
if is_optional or is_folded:
subexpansion_cardinality = max(subexpansion_cardinality, 1)
return subexpansion_cardinality
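# Worked example (illustrative numbers only): 0.4 expected children per
# parent times 2 result sets per child gives 0.8; if the child is the root
# of an @optional or @fold subexpansion, this is clamped up to 1, since the
# empty result set is still produced.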
def _estimate_expansion_cardinality(schema_info, query_metadata, parameters, current_location):
"""Estimate the cardinality of fully expanding a vertex corresponding to current_location.
Args:
schema_info: QueryPlanningSchemaInfo
query_metadata: QueryMetadataTable object
parameters: dict, parameters with which query will be executed
current_location: BaseLocation object, corresponding to the vertex we're expanding
Returns:
float, expected cardinality associated with the full expansion of one current vertex.
"""
expansion_cardinality = 1
child_locations = _get_all_original_child_locations(query_metadata, current_location)
for child_location in child_locations:
# The expected cardinality per current vertex is the product of the expected cardinality for
# each subexpansion (e.g. If we expect each current vertex to have 2 children of type A and
# 3 children of type B, we'll return 6 distinct result sets per current vertex).
subexpansion_cardinality = _estimate_subexpansion_cardinality(
schema_info, query_metadata, parameters, current_location, child_location
)
expansion_cardinality *= subexpansion_cardinality
return expansion_cardinality
def estimate_query_result_cardinality(
schema_info: QueryPlanningSchemaInfo,
query_metadata: QueryMetadataTable,
parameters: Dict[str, Any],
) -> float:
"""Estimate the cardinality of a GraphQL query's result using database statistics.
Args:
schema_info: QueryPlanningSchemaInfo
query_metadata: info on locations, inputs, outputs, and tags in the query
parameters: dict, parameters with which query will be executed.
Returns:
float, expected query result cardinality. Equal to the number of root vertices multiplied by
the expected number of result sets per full expansion of a root vertex.
"""
root_location = query_metadata.root_location
# First, count the vertices corresponding to the root location that pass relevant filters
root_name = query_metadata.get_location_info(root_location).type.name
root_counts = schema_info.statistics.get_class_count(root_name)
root_counts = adjust_counts_for_filters(
schema_info,
query_metadata.get_filter_infos(root_location),
parameters,
root_name,
root_counts,
)
# Next, find the number of expected result sets per root vertex when fully expanded
results_per_root = _estimate_expansion_cardinality(
schema_info, query_metadata, parameters, root_location
)
expected_query_result_cardinality = root_counts * results_per_root
return expected_query_result_cardinality
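# Worked example (illustrative numbers only): 1000 root vertices surviving
# the root filters, with 6 expected result sets per fully expanded root,
# yields an estimated cardinality of 6000.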
|
{
"content_hash": "9992f5f000ddc6114ca8ac291f6d6e62",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 100,
"avg_line_length": 47.997252747252745,
"alnum_prop": 0.7174746723141205,
"repo_name": "kensho-technologies/graphql-compiler",
"id": "c159e093a9d6cf6c977071a5d909417d4ebf49a8",
"size": "17522",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "graphql_compiler/cost_estimation/cardinality_estimator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8213336"
},
{
"name": "Shell",
"bytes": "12556"
}
],
"symlink_target": ""
}
|
class2rel = {
'Attribution': ['attribution', 'attribution-e', 'attribution-n', 'attribution-negative'],
'Background' : ['background', 'background-e', 'circumstance', 'circumstance-e'],
'Cause' : ['cause', 'cause-result', 'result', 'result-e', 'consequence', 'consequence-n-e', 'consequence-n', 'consequence-s-e', 'consequence-s'],
'Comparison' : ['comparison', 'comparison-e', 'preference', 'preference-e', 'analogy','analogy-e', 'proportion'],
'Condition' : ['condition', 'condition-e', 'hypothetical', 'contingency', 'otherwise'],
'Contrast' : ['contrast', 'concession', 'concession-e', 'antithesis', 'antithesis-e'],
'Elaboration' : ['elaboration-additional', 'elaboration-additional-e', 'elaboration-general-specific-e', 'elaboration-general-specific', 'elaboration-part-whole', 'elaboration-part-whole-e', 'elaboration-process-step', 'elaboration-process-step-e', 'elaboration-object-attribute-e', 'elaboration-object-attribute', 'elaboration-set-member', 'elaboration-set-member-e', 'example', 'example-e', 'definition', 'definition-e'],
'Enablement' : ['purpose', 'purpose-e', 'enablement', 'enablement-e'],
'Evaluation' : ['evaluation', 'evaluation-n', 'evaluation-s-e', 'evaluation-s', 'interpretation-n', 'interpretation-s-e', 'interpretation-s', 'interpretation', 'conclusion', 'comment', 'comment-e', 'comment-topic'],
'Explanation' : ['evidence', 'evidence-e', 'explanation-argumentative', 'explanation-argumentative-e', 'reason', 'reason-e'],
'Joint' : ['list', 'disjunction'],
'Manner-Means' : ['manner', 'manner-e', 'means', 'means-e'],
'Topic-Comment' : ['problem-solution', 'problem-solution-n', 'problem-solution-s', 'question-answer', 'question-answer-n', 'question-answer-s', 'statement-response', 'statement-response-n', 'statement-response-s', 'topic-comment', 'comment-topic', 'rhetorical-question'],
'Summary' : ['summary', 'summary-n', 'summary-s', 'restatement', 'restatement-e'],
'Temporal' : ['temporal-before', 'temporal-before-e', 'temporal-after', 'temporal-after-e', 'temporal-same-time', 'temporal-same-time-e', 'sequence', 'inverted-sequence'],
'Topic-Change' : ['topic-shift', 'topic-drift'],
'textual-organization' : ['textualorganization'],
'span' : ['span'],
'same-unit' : ['same-unit']
}
rel_status_classes = []
for rel in class2rel:
rel_status_classes.append(rel + '[N][S]')
rel_status_classes.append(rel + '[N][N]')
rel_status_classes.append(rel + '[S][N]')
rel2class = {}
for cl in class2rel:
rel2class[cl.lower()] = cl
for rel in class2rel[cl]:
rel2class[rel] = cl
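# Illustrative lookups against the tables built above:
#   rel2class['concession-e'] -> 'Contrast'
#   rel2class['list']         -> 'Joint'
#   rel2class['evidence']     -> 'Explanation'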
|
{
"content_hash": "1d2710f2ceecdbf7ec1c45c1217de813",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 423,
"avg_line_length": 75.88235294117646,
"alnum_prop": 0.6744186046511628,
"repo_name": "arne-cl/rst_discourse_parser",
"id": "829c19d037a0d793df1f877eae30177a98b30525",
"size": "2580",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/utils/RST_Classes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "154260"
}
],
"symlink_target": ""
}
|
import os
import sys
from telemetry import benchmark
from telemetry import story
from telemetry.core import util
from telemetry import decorators
from telemetry.page import page as page_module
from telemetry.page import page_test
from telemetry import record_wpr
from telemetry.testing import options_for_unittests
from telemetry.testing import tab_test_case
from telemetry.util import wpr_modes
class MockPage(page_module.Page):
def __init__(self, story_set, url):
super(MockPage, self).__init__(url=url,
page_set=story_set,
base_dir=util.GetUnittestDataDir())
self.func_calls = []
def RunNavigateSteps(self, action_runner):
self.func_calls.append('RunNavigateSteps')
super(MockPage, self).RunNavigateSteps(action_runner)
def RunPageInteractions(self, _):
self.func_calls.append('RunPageInteractions')
def RunSmoothness(self, _):
self.func_calls.append('RunSmoothness')
class MockStorySet(story.StorySet):
def __init__(self, url=''):
super(MockStorySet, self).__init__(
archive_data_file='data/archive_files/test.json')
self.AddStory(MockPage(self, url))
class MockPageTest(page_test.PageTest):
def __init__(self):
super(MockPageTest, self).__init__()
self._action_name_to_run = "RunPageInteractions"
self.func_calls = []
def CustomizeBrowserOptions(self, options):
self.func_calls.append('CustomizeBrowserOptions')
def WillNavigateToPage(self, page, tab):
self.func_calls.append('WillNavigateToPage')
def DidNavigateToPage(self, page, tab):
self.func_calls.append('DidNavigateToPage')
def ValidateAndMeasurePage(self, page, tab, results):
self.func_calls.append('ValidateAndMeasurePage')
def WillStartBrowser(self, platform):
self.func_calls.append('WillStartBrowser')
def DidStartBrowser(self, browser):
self.func_calls.append('DidStartBrowser')
class MockBenchmark(benchmark.Benchmark):
test = MockPageTest
def __init__(self):
super(MockBenchmark, self).__init__()
self.mock_story_set = None
@classmethod
def AddBenchmarkCommandLineArgs(cls, group):
group.add_option('', '--mock-benchmark-url', action='store', type='string')
def CreateStorySet(self, options):
kwargs = {}
if options.mock_benchmark_url:
kwargs['url'] = options.mock_benchmark_url
self.mock_story_set = MockStorySet(**kwargs)
return self.mock_story_set
class MockTimelineBasedMeasurementBenchmark(benchmark.Benchmark):
def __init__(self):
super(MockTimelineBasedMeasurementBenchmark, self).__init__()
self.mock_story_set = None
@classmethod
def AddBenchmarkCommandLineArgs(cls, group):
group.add_option('', '--mock-benchmark-url', action='store', type='string')
def CreateStorySet(self, options):
kwargs = {}
if options.mock_benchmark_url:
kwargs['url'] = options.mock_benchmark_url
self.mock_story_set = MockStorySet(**kwargs)
return self.mock_story_set
class RecordWprUnitTests(tab_test_case.TabTestCase):
_base_dir = util.GetUnittestDataDir()
_test_data_dir = os.path.join(util.GetUnittestDataDir(), 'page_tests')
@classmethod
def setUpClass(cls):
sys.path.extend([cls._base_dir, cls._test_data_dir])
super(RecordWprUnitTests, cls).setUpClass()
cls._url = cls.UrlOfUnittestFile('blank.html')
cls._test_options = options_for_unittests.GetCopy()
# When the RecorderPageTest is created from a PageSet, we do not have a
# PageTest to use. In this case, we will record every available action.
def testRunPage_AllActions(self):
record_page_test = record_wpr.RecorderPageTest()
page = MockPage(story_set=MockStorySet(url=self._url), url=self._url)
record_page_test.RunNavigateSteps(page, self._tab)
self.assertTrue('RunNavigateSteps' in page.func_calls)
# When the RecorderPageTest is created from a Benchmark, the benchmark will
# have a PageTest, specified by its test attribute.
def testRunPage_OnlyRunBenchmarkAction(self):
record_page_test = record_wpr.RecorderPageTest()
record_page_test.page_test = MockBenchmark().test()
page = MockPage(story_set=MockStorySet(url=self._url), url=self._url)
record_page_test.ValidateAndMeasurePage(page, self._tab, results=None)
def testRunPage_CallBenchmarksPageTestsFunctions(self):
record_page_test = record_wpr.RecorderPageTest()
record_page_test.page_test = MockBenchmark().test()
page = MockPage(story_set=MockStorySet(url=self._url), url=self._url)
record_page_test.ValidateAndMeasurePage(page, self._tab, results=None)
self.assertEqual(1, len(record_page_test.page_test.func_calls))
self.assertEqual('ValidateAndMeasurePage',
record_page_test.page_test.func_calls[0])
def GetBrowserDeviceFlags(self):
flags = ['--browser', self._browser.browser_type,
'--remote', self._test_options.cros_remote,
'--device', self._device]
if self._test_options.chrome_root:
flags += ['--chrome-root', self._test_options.chrome_root]
return flags
@decorators.Disabled('chromeos') # crbug.com/404868.
def testWprRecorderWithPageSet(self):
flags = self.GetBrowserDeviceFlags()
mock_story_set = MockStorySet(url=self._url)
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir,
mock_story_set, flags)
results = wpr_recorder.CreateResults()
wpr_recorder.Record(results)
self.assertEqual(set(mock_story_set.stories), results.pages_that_succeeded)
def testWprRecorderWithBenchmark(self):
flags = self.GetBrowserDeviceFlags()
flags.extend(['--mock-benchmark-url', self._url])
mock_benchmark = MockBenchmark()
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
flags)
results = wpr_recorder.CreateResults()
wpr_recorder.Record(results)
self.assertEqual(set(mock_benchmark.mock_story_set.stories),
results.pages_that_succeeded)
def testWprRecorderWithTimelineBasedMeasurementBenchmark(self):
flags = self.GetBrowserDeviceFlags()
flags.extend(['--mock-benchmark-url', self._url])
mock_benchmark = MockTimelineBasedMeasurementBenchmark()
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
flags)
results = wpr_recorder.CreateResults()
wpr_recorder.Record(results)
self.assertEqual(set(mock_benchmark.mock_story_set.stories),
results.pages_that_succeeded)
def testPageSetBaseDirFlag(self):
flags = self.GetBrowserDeviceFlags()
flags.extend(['--page-set-base-dir', self._test_data_dir,
'--mock-benchmark-url', self._url])
mock_benchmark = MockBenchmark()
wpr_recorder = record_wpr.WprRecorder(
'non-existent-dummy-dir', mock_benchmark, flags)
results = wpr_recorder.CreateResults()
wpr_recorder.Record(results)
self.assertEqual(set(mock_benchmark.mock_story_set.stories),
results.pages_that_succeeded)
def testCommandLineFlags(self):
flags = [
'--page-repeat', '2',
'--mock-benchmark-url', self._url,
'--upload',
]
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
flags)
# page_runner command-line args
self.assertEquals(2, wpr_recorder.options.page_repeat)
# benchmark command-line args
self.assertEquals(self._url, wpr_recorder.options.mock_benchmark_url)
# record_wpr command-line arg to upload to cloud-storage.
self.assertTrue(wpr_recorder.options.upload)
# invalid command-line args
self.assertFalse(hasattr(wpr_recorder.options, 'not_a_real_option'))
def testRecordingEnabled(self):
flags = ['--mock-benchmark-url', self._url]
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
flags)
self.assertEqual(wpr_modes.WPR_RECORD,
wpr_recorder.options.browser_options.wpr_mode)
# When the RecorderPageTest CustomizeBrowserOptions/WillStartBrowser/
# DidStartBrowser function is called, it forwards the call to the PageTest
def testRecorderPageTest_BrowserMethods(self):
flags = ['--mock-benchmark-url', self._url]
record_page_test = record_wpr.RecorderPageTest()
record_page_test.page_test = MockBenchmark().test()
wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
flags)
record_page_test.CustomizeBrowserOptions(wpr_recorder.options)
record_page_test.WillStartBrowser(self._tab.browser.platform)
record_page_test.DidStartBrowser(self._tab.browser)
self.assertTrue(
'CustomizeBrowserOptions' in record_page_test.page_test.func_calls)
self.assertTrue('WillStartBrowser' in record_page_test.page_test.func_calls)
self.assertTrue('DidStartBrowser' in record_page_test.page_test.func_calls)
def testUseLiveSitesUnsupported(self):
flags = ['--use-live-sites']
with self.assertRaises(SystemExit):
record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(), flags)
|
{
"content_hash": "303b262fe9bc6f3793ead7d83fa6af22",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 80,
"avg_line_length": 39.58974358974359,
"alnum_prop": 0.6925734024179621,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "9760cb07e602259373cafe776470662bd2d97a38",
"size": "9427",
"binary": false,
"copies": "8",
"ref": "refs/heads/test",
"path": "telemetry/telemetry/record_wpr_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
}
|
import os
import sys
from django.core.management import execute_from_command_line
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"%s.settings" % __package__)
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
{
"content_hash": "61b0383dbffe4a93ba02da830872bb2b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 20.285714285714285,
"alnum_prop": 0.6126760563380281,
"repo_name": "novapost/django-generic-filters",
"id": "b3cd09a497b5965d70e79c11e91b0abcb0e5675f",
"size": "306",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "demo/demoproject/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87"
},
{
"name": "HTML",
"bytes": "8706"
},
{
"name": "JavaScript",
"bytes": "117"
},
{
"name": "Makefile",
"bytes": "199"
},
{
"name": "Python",
"bytes": "42034"
}
],
"symlink_target": ""
}
|
import sys
import logging
from sentry_sdk import utils
from sentry_sdk.hub import Hub
from sentry_sdk.utils import logger
from sentry_sdk.client import _client_init_debug
from logging import LogRecord
class _HubBasedClientFilter(logging.Filter):
def filter(self, record):
# type: (LogRecord) -> bool
if _client_init_debug.get(False):
return True
hub = Hub.current
if hub is not None and hub.client is not None:
return hub.client.options["debug"]
return False
def init_debug_support():
# type: () -> None
if not logger.handlers:
configure_logger()
configure_debug_hub()
def configure_logger():
# type: () -> None
_handler = logging.StreamHandler(sys.stderr)
_handler.setFormatter(logging.Formatter(" [sentry] %(levelname)s: %(message)s"))
logger.addHandler(_handler)
logger.setLevel(logging.DEBUG)
logger.addFilter(_HubBasedClientFilter())
def configure_debug_hub():
# type: () -> None
def _get_debug_hub():
# type: () -> Hub
return Hub.current
utils._get_debug_hub = _get_debug_hub
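# Hypothetical usage sketch (not part of the original module): calling
# init_debug_support() once wires up the stderr handler and the hub-based
# filter, after which SDK debug output follows the active client's `debug`
# option:
#
#   init_debug_support()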
|
{
"content_hash": "06755f47684d1b6df80fbc9f084d2e81",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 84,
"avg_line_length": 25.727272727272727,
"alnum_prop": 0.651060070671378,
"repo_name": "liszd/whyliam.workflows.youdao",
"id": "fe8ae50cead3d8e1551427df36fc82393c8594f7",
"size": "1132",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sentry_sdk/debug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "196285"
}
],
"symlink_target": ""
}
|
import calendar
import datetime
import logging
import re
import socket
from typing import Optional, Dict
from urllib.parse import quote
from uuid import uuid4
import requests
from requests.exceptions import RequestException, HTTPError, SSLError
from requests.exceptions import ConnectionError
from requests.structures import CaseInsensitiveDict
from federation import __version__
logger = logging.getLogger("federation")
USER_AGENT = "python/federation/%s" % __version__
def fetch_content_type(url: str) -> Optional[str]:
"""
Fetch the HEAD of the remote url to determine the content type.
"""
try:
response = requests.head(url, headers={'user-agent': USER_AGENT}, timeout=10)
except RequestException as ex:
logger.warning("fetch_content_type - %s when fetching url %s", ex, url)
else:
return response.headers.get('Content-Type')
def fetch_document(url=None, host=None, path="/", timeout=10, raise_ssl_errors=True, extra_headers=None):
"""Helper method to fetch remote document.
Must be given either the ``url`` or ``host``.
If ``url`` is given, only that will be tried without falling back to http from https.
    If ``host`` is given, ``path`` will be added to it. Will fall back to http on non-success status code.
:arg url: Full url to fetch, including protocol
:arg host: Domain part only without path or protocol
:arg path: Path without domain (defaults to "/")
:arg timeout: Seconds to wait for response (defaults to 10)
:arg raise_ssl_errors: Pass False if you want to try HTTP even for sites with SSL errors (default True)
:arg extra_headers: Optional extra headers dictionary to add to requests
:returns: Tuple of document (str or None), status code (int or None) and error (an exception class instance or None)
:raises ValueError: If neither url nor host are given as parameters
"""
if not url and not host:
raise ValueError("Need url or host.")
logger.debug("fetch_document: url=%s, host=%s, path=%s, timeout=%s, raise_ssl_errors=%s",
url, host, path, timeout, raise_ssl_errors)
headers = {'user-agent': USER_AGENT}
if extra_headers:
headers.update(extra_headers)
if url:
# Use url since it was given
logger.debug("fetch_document: trying %s", url)
try:
response = requests.get(url, timeout=timeout, headers=headers)
logger.debug("fetch_document: found document, code %s", response.status_code)
response.raise_for_status()
return response.text, response.status_code, None
except RequestException as ex:
logger.debug("fetch_document: exception %s", ex)
return None, None, ex
# Build url with some little sanitizing
host_string = host.replace("http://", "").replace("https://", "").strip("/")
path_string = path if path.startswith("/") else "/%s" % path
url = "https://%s%s" % (host_string, path_string)
logger.debug("fetch_document: trying %s", url)
try:
response = requests.get(url, timeout=timeout, headers=headers)
logger.debug("fetch_document: found document, code %s", response.status_code)
response.raise_for_status()
return response.text, response.status_code, None
except (HTTPError, SSLError, ConnectionError) as ex:
if isinstance(ex, SSLError) and raise_ssl_errors:
logger.debug("fetch_document: exception %s", ex)
return None, None, ex
# Try http then
url = url.replace("https://", "http://")
logger.debug("fetch_document: trying %s", url)
try:
response = requests.get(url, timeout=timeout, headers=headers)
logger.debug("fetch_document: found document, code %s", response.status_code)
response.raise_for_status()
return response.text, response.status_code, None
except RequestException as ex:
logger.debug("fetch_document: exception %s", ex)
return None, None, ex
except RequestException as ex:
logger.debug("fetch_document: exception %s", ex)
return None, None, ex
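# Hypothetical usage sketch (illustrative host and handling only):
#
#   document, status, error = fetch_document(host="example.com", path="/.well-known/nodeinfo")
#   if error is None and status == 200:
#       handle(document)  # `handle` is a placeholder for caller-side logic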
def fetch_host_ip(host: str) -> str:
"""
    Resolve the IP address for a host, returning '' on failure.
"""
try:
ip = socket.gethostbyname(host)
except socket.gaierror:
return ''
return ip
def fetch_file(url: str, timeout: int = 30, extra_headers: Optional[Dict] = None) -> str:
"""
Download a file with a temporary name and return the name.
"""
headers = {'user-agent': USER_AGENT}
if extra_headers:
headers.update(extra_headers)
response = requests.get(url, timeout=timeout, headers=headers, stream=True)
response.raise_for_status()
name = f"/tmp/{str(uuid4())}"
with open(name, "wb") as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
return name
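# Hypothetical usage sketch: the caller owns the temporary file and should
# remove it when done (assumes `import os`; URL is illustrative):
#
#   path = fetch_file("https://example.com/image.png")
#   try:
#       process_image(path)  # placeholder for caller-side logic
#   finally:
#       os.unlink(path)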
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
Implementation copied from Django.
https://github.com/django/django/blob/master/django/utils/http.py#L157
License: BSD 3-clause
"""
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def send_document(url, data, timeout=10, method="post", *args, **kwargs):
"""Helper method to send a document via POST.
Additional ``*args`` and ``**kwargs`` will be passed on to ``requests.post``.
:arg url: Full url to send to, including protocol
:arg data: Dictionary (will be form-encoded), bytes, or file-like object to send in the body
:arg timeout: Seconds to wait for response (defaults to 10)
:arg method: Method to use, defaults to post
:returns: Tuple of status code (int or None) and error (exception class instance or None)
"""
logger.debug("send_document: url=%s, data=%s, timeout=%s, method=%s", url, data, timeout, method)
if not method:
method = "post"
headers = CaseInsensitiveDict({
'User-Agent': USER_AGENT,
})
if "headers" in kwargs:
# Update from kwargs
headers.update(kwargs.get("headers"))
kwargs.update({
"data": data, "timeout": timeout, "headers": headers
})
request_func = getattr(requests, method)
try:
response = request_func(url, *args, **kwargs)
logger.debug("send_document: response status code %s", response.status_code)
return response.status_code, None
# TODO support rate limit 429 code
except RequestException as ex:
logger.debug("send_document: exception %s", ex)
return None, ex
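# Hypothetical usage sketch (illustrative URL and handling only):
#
#   status, error = send_document("https://example.com/receive", {"payload": "..."})
#   if error is not None or status not in (200, 202):
#       schedule_retry()  # placeholder for caller-side logic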
def try_retrieve_webfinger_document(handle: str) -> Optional[str]:
"""
Try to retrieve an RFC7033 webfinger document. Does not raise if it fails.
"""
try:
host = handle.split("@")[1]
except AttributeError:
logger.warning("retrieve_webfinger_document: invalid handle given: %s", handle)
return None
document, code, exception = fetch_document(
host=host, path="/.well-known/webfinger?resource=acct:%s" % quote(handle),
)
if exception:
logger.debug("retrieve_webfinger_document: failed to fetch webfinger document: %s, %s", code, exception)
return document
|
{
"content_hash": "76a8376c1026000bc3f8252f0fd74d11",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 120,
"avg_line_length": 39.36,
"alnum_prop": 0.6317750677506775,
"repo_name": "jaywink/social-federation",
"id": "e341969204bb8dcc1bffe161ba4f67aab0b0b0a3",
"size": "8856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "federation/utils/network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2934"
},
{
"name": "Python",
"bytes": "124166"
}
],
"symlink_target": ""
}
|
"""
Script used to create template config.py files for Glue
"""
from __future__ import absolute_import, division, print_function
import os
import sys
from shutil import copyfile
import glue
from glue.external.six import input
def get_clobber():
    result = input("\nDestination file exists. Overwrite? [y/n] ")
while result not in ['y', 'n']:
print("\tPlease choose one of [y/n]")
result = input("\nDestination file exists. Overwrite? [y/n] ")
return result == 'y'
def main():
# Import at runtime because some tests change this value. We also don't
# just import the function directly otherwise it is cached.
from glue import config
dest = config.CFG_DIR
if not os.path.exists(dest):
print("Creating directory %s" % dest)
os.makedirs(dest)
infile = os.path.join(glue.__path__[0], 'default_config.py')
outfile = os.path.join(dest, 'config.py')
print("Creating file %s" % outfile)
if os.path.exists(outfile):
clobber = get_clobber()
if not clobber:
print("Exiting")
sys.exit(1)
copyfile(infile, outfile)
if __name__ == "__main__":
main()
|
{
"content_hash": "3c7540dfcf040402ca6a05546f61273d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 23.88,
"alnum_prop": 0.6306532663316583,
"repo_name": "saimn/glue",
"id": "808d3cfa13d77c5cd931d7de8698a27a5ed1c935",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/config_gen.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1609137"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
}
|
from os_client_config import cloud_config
from os_client_config import config
from os_client_config import exceptions
from os_client_config.tests import base
import fixtures
class TestEnviron(base.TestCase):
def setUp(self):
super(TestEnviron, self).setUp()
self.useFixture(
fixtures.EnvironmentVariable('OS_AUTH_URL', 'https://example.com'))
self.useFixture(
fixtures.EnvironmentVariable('OS_USERNAME', 'testuser'))
self.useFixture(
fixtures.EnvironmentVariable('OS_PASSWORD', 'testpass'))
self.useFixture(
fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'testproject'))
self.useFixture(
fixtures.EnvironmentVariable('NOVA_PROJECT_ID', 'testnova'))
def test_get_one_cloud(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertIsInstance(c.get_one_cloud(), cloud_config.CloudConfig)
def test_no_fallthrough(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertRaises(
exceptions.OpenStackConfigException, c.get_one_cloud, 'openstack')
def test_envvar_name_override(self):
self.useFixture(
fixtures.EnvironmentVariable('OS_CLOUD_NAME', 'override'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud('override')
self._assert_cloud_details(cc)
def test_envvar_prefer_ipv6_override(self):
self.useFixture(
fixtures.EnvironmentVariable('OS_PREFER_IPV6', 'false'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml])
cc = c.get_one_cloud('_test-cloud_')
self.assertFalse(cc.prefer_ipv6)
def test_environ_exists(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml])
cc = c.get_one_cloud('envvars')
self._assert_cloud_details(cc)
self.assertNotIn('auth_url', cc.config)
self.assertIn('auth_url', cc.config['auth'])
self.assertNotIn('project_id', cc.config['auth'])
self.assertNotIn('auth_url', cc.config)
cc = c.get_one_cloud('_test-cloud_')
self._assert_cloud_details(cc)
cc = c.get_one_cloud('_test_cloud_no_vendor')
self._assert_cloud_details(cc)
def test_environ_prefix(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
envvar_prefix='NOVA_',
secure_files=[self.secure_yaml])
cc = c.get_one_cloud('envvars')
self._assert_cloud_details(cc)
self.assertNotIn('auth_url', cc.config)
self.assertIn('auth_url', cc.config['auth'])
self.assertIn('project_id', cc.config['auth'])
self.assertNotIn('auth_url', cc.config)
cc = c.get_one_cloud('_test-cloud_')
self._assert_cloud_details(cc)
cc = c.get_one_cloud('_test_cloud_no_vendor')
self._assert_cloud_details(cc)
def test_get_one_cloud_with_config_files(self):
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
secure_files=[self.secure_yaml])
self.assertIsInstance(c.cloud_config, dict)
self.assertIn('cache', c.cloud_config)
self.assertIsInstance(c.cloud_config['cache'], dict)
self.assertIn('max_age', c.cloud_config['cache'])
self.assertIn('path', c.cloud_config['cache'])
cc = c.get_one_cloud('_test-cloud_')
self._assert_cloud_details(cc)
cc = c.get_one_cloud('_test_cloud_no_vendor')
self._assert_cloud_details(cc)
def test_config_file_override(self):
self.useFixture(
fixtures.EnvironmentVariable(
'OS_CLIENT_CONFIG_FILE', self.cloud_yaml))
c = config.OpenStackConfig(config_files=[],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud('_test-cloud_')
self._assert_cloud_details(cc)
class TestEnvvars(base.TestCase):
def test_no_envvars(self):
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertRaises(
exceptions.OpenStackConfigException, c.get_one_cloud, 'envvars')
def test_test_envvars(self):
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable('OS_STDERR_CAPTURE', 'True'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
self.assertRaises(
exceptions.OpenStackConfigException, c.get_one_cloud, 'envvars')
def test_incomplete_envvars(self):
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable('OS_USERNAME', 'user'))
config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
# This is broken due to an issue that's fixed in a subsequent patch
# commenting it out in this patch to keep the patch size reasonable
# self.assertRaises(
# keystoneauth1.exceptions.auth_plugins.MissingRequiredOptions,
# c.get_one_cloud, 'envvars')
def test_have_envvars(self):
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable('OS_AUTH_URL', 'http://example.com'))
self.useFixture(
fixtures.EnvironmentVariable('OS_USERNAME', 'user'))
self.useFixture(
fixtures.EnvironmentVariable('OS_PASSWORD', 'password'))
self.useFixture(
fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'project'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml])
cc = c.get_one_cloud('envvars')
self.assertEqual(cc.config['auth']['username'], 'user')
def test_old_envvars(self):
self.useFixture(
fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova'))
self.useFixture(
fixtures.EnvironmentVariable(
'NOVA_AUTH_URL', 'http://example.com'))
self.useFixture(
fixtures.EnvironmentVariable('NOVA_PASSWORD', 'password'))
self.useFixture(
fixtures.EnvironmentVariable('NOVA_PROJECT_NAME', 'project'))
c = config.OpenStackConfig(config_files=[self.cloud_yaml],
vendor_files=[self.vendor_yaml],
envvar_prefix='NOVA_')
cc = c.get_one_cloud('envvars')
self.assertEqual(cc.config['auth']['username'], 'nova')
|
{
"content_hash": "2352a4b987c54b443fde1020c3941b39",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 79,
"avg_line_length": 44.752941176470586,
"alnum_prop": 0.5901682439537329,
"repo_name": "openstack/os-client-config",
"id": "35ce2f2bf4a2b066a28b2dc4185d06d2193a4b51",
"size": "8220",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "os_client_config/tests/test_environ.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "120466"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse
from django.db.models import Count
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import ensure_csrf_cookie
from core.common import *
from posts.forms import PostForm
from posts.models import Post
from users.models import User
@ensure_csrf_cookie
def post_form(request, post_id=None):
user = User.objects.get(user=request.user)
post = Post.objects.get(pk=post_id)
if user == post.author:
form = PostForm(instance=post, request=request)
else:
return reject_user()
return render(request, 'posts/edit.html', {
'settings': settings,
'user': user,
'title': 'Edit ' + post.title,
'post': post,
'form': form,
})
@ensure_csrf_cookie
def gallery(request, query=None):
return render(request, 'posts/booru.html', {
'settings': settings,
'user': get_user(request.user),
'posts': Post.objects.exclude(media=False, hidden=False),
})
|
{
"content_hash": "f332139dd063797f9b340bb18a131af5",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 67,
"avg_line_length": 29.31578947368421,
"alnum_prop": 0.7459605026929982,
"repo_name": "PrincessTeruko/TsunArt",
"id": "b2b5ea845ea84b24a43d670472d05e7c9ca6d503",
"size": "1114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "posts/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23615"
},
{
"name": "HTML",
"bytes": "34809"
},
{
"name": "JavaScript",
"bytes": "47538"
},
{
"name": "Python",
"bytes": "114688"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
}
|
import vtk
def MakeScalars(dims, origin, spacing, scalars):
# Implicit function used to compute scalars
sphere = vtk.vtkSphere()
sphere.SetRadius(3)
sphere.SetCenter(5, 5, 5)
scalars.SetNumberOfTuples(dims[0] * dims[1] * dims[2])
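    # Fill the array in VTK point order (x varies fastest):
    # index = k * nx * ny + j * nx + i, with nx = dims[0], ny = dims[1].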
for k in range(0, dims[2]):
z = origin[2] + spacing[2] * k
for j in range(0, dims[1]):
y = origin[1] + spacing[1] * j
for i in range(0, dims[0]):
x = origin[0] + spacing[0] * i
scalars.SetValue(k * dims[0] * dims[1] + j * dims[0] + i, sphere.EvaluateFunction(x, y, z))
def main():
colors = vtk.vtkNamedColors()
# Create and populate the AMR dataset
# The dataset should look like
# Level 0
# uniform grid, dimensions 11, 11, 11, AMR box (0, 0, 0) - (9, 9, 9)
# Level 1 - refinement ratio : 2
# uniform grid, dimensions 11, 11, 11, AMR box (0, 0, 0) - (9, 9, 9)
# uniform grid, dimensions 11, 11, 11, AMR box (10, 10, 10) - (19, 19, 19)
# Use MakeScalars() above to fill the scalar arrays
amr = vtk.vtkOverlappingAMR()
blocksPerLevel = [1, 2]
amr.Initialize(2, blocksPerLevel)
origin = [0.0, 0.0, 0.0]
spacing = [1.0, 1.0, 1.0]
dims = [11, 11, 11]
ug1 = vtk.vtkUniformGrid()
# Geometry
ug1.SetOrigin(origin)
ug1.SetSpacing(spacing)
ug1.SetDimensions(dims)
# Data
scalars = vtk.vtkFloatArray()
ug1.GetPointData().SetScalars(scalars)
MakeScalars(dims, origin, spacing, scalars)
lo = [0, 0, 0]
hi = [9, 9, 9]
    box1 = vtk.vtkAMRBox(lo, hi)
amr.SetAMRBox(0, 0, box1)
amr.SetDataSet(0, 0, ug1)
spacing2 = [0.5, 0.5, 0.5]
ug2 = vtk.vtkUniformGrid()
# Geometry
ug2.SetOrigin(origin)
ug2.SetSpacing(spacing2)
ug2.SetDimensions(dims)
# Data
scalars = vtk.vtkFloatArray()
ug2.GetPointData().SetScalars(scalars)
MakeScalars(dims, origin, spacing2, scalars)
lo2 = [0, 0, 0]
hi2 = [9, 9, 9]
    box2 = vtk.vtkAMRBox(lo2, hi2)
amr.SetAMRBox(1, 0, box2)
amr.SetDataSet(1, 0, ug2)
origin3 = [5, 5, 5]
ug3 = vtk.vtkUniformGrid()
# Geometry
ug3.SetOrigin(origin3)
ug3.SetSpacing(spacing2)
ug3.SetDimensions(dims)
# Data
scalars = vtk.vtkFloatArray()
ug3.GetPointData().SetScalars(scalars)
MakeScalars(dims, origin3, spacing2, scalars)
lo3 = [10, 10, 10]
hi3 = [19, 19, 19]
    box3 = vtk.vtkAMRBox(lo3, hi3)
amr.SetAMRBox(1, 1, box3)
amr.SetDataSet(1, 1, ug3)
amr.SetRefinementRatio(0, 2)
# Render the amr data here.
of = vtk.vtkOutlineFilter()
of.SetInputData(amr)
geomFilter = vtk.vtkCompositeDataGeometryFilter()
geomFilter.SetInputConnection(of.GetOutputPort())
# Create an iso-surface - at 10.
cf = vtk.vtkContourFilter()
cf.SetInputData(amr)
cf.SetNumberOfContours(1)
cf.SetValue(0, 10.0)
geomFilter2 = vtk.vtkCompositeDataGeometryFilter()
geomFilter2.SetInputConnection(cf.GetOutputPort())
# Create the render window, renderer, and interactor.
aren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(aren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Associate the geometry with a mapper and the mapper to an actor.
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(geomFilter.GetOutputPort())
actor1 = vtk.vtkActor()
actor1.GetProperty().SetColor(colors.GetColor3d("Yellow"))
actor1.SetMapper(mapper)
# Associate the geometry with a mapper and the mapper to an actor.
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(geomFilter2.GetOutputPort())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
# Add the actor to the renderer and start handling events.
aren.AddActor(actor1)
aren.AddActor(actor2)
aren.SetBackground(colors.GetColor3d("CornflowerBlue"))
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
|
{
"content_hash": "3d78cfb2a16320eb2593eff47793b5f4",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 107,
"avg_line_length": 28.66187050359712,
"alnum_prop": 0.6352911646586346,
"repo_name": "lorensen/VTKExamples",
"id": "1b0c11acb083f5b8086714eba9e5973ce81d8355",
"size": "4006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Python/CompositeData/OverlappingAMR.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "322226"
},
{
"name": "C++",
"bytes": "4187688"
},
{
"name": "CMake",
"bytes": "155244"
},
{
"name": "CSS",
"bytes": "556"
},
{
"name": "G-code",
"bytes": "377583"
},
{
"name": "GLSL",
"bytes": "5375"
},
{
"name": "HTML",
"bytes": "635483160"
},
{
"name": "Java",
"bytes": "629442"
},
{
"name": "JavaScript",
"bytes": "18199"
},
{
"name": "Python",
"bytes": "1376010"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
}
|
import io
from django.test import TestCase
from cms.models import Page
from django.contrib.sites.models import Site
from django.core.management import call_command, CommandError
from email_user.tests.factories import EmailUserFactory
from service_info_cms import utils
class BaseMovePagesTest(TestCase):
def setUp(self):
self.user = EmailUserFactory(is_superuser=True)
# Use the existing sites; their use of hard-coded pks breaks the ability to
# add arbitrary additional sites for this test.
self.orig_site_domain = 'serviceinfo.rescue.org'
self.new_site_domain = 'serviceinfo-staging.rescue.org'
self.orig_site = Site.objects.get(domain=self.orig_site_domain)
self.new_site = Site.objects.get(domain=self.new_site_domain)
utils.create_essential_pages(self.user, self.orig_site)
def pre_move(self):
starting_counts = {
'orig_pages': Page.objects.filter(site=self.orig_site).count(),
'new_pages': Page.objects.filter(site=self.new_site).count(),
}
self.assertEqual(0, starting_counts['new_pages'])
self.assertGreater(starting_counts['orig_pages'], 0)
return starting_counts
def post_move(self, starting_counts):
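        # After the move the new site should hold every page and the original
        # site should be empty.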
ending_counts = {
'orig_pages': Page.objects.filter(site=self.orig_site).count(),
'new_pages': Page.objects.filter(site=self.new_site).count(),
}
        self.assertEqual(starting_counts['orig_pages'], ending_counts['new_pages'])
        self.assertEqual(0, ending_counts['orig_pages'])
class TestMovePagesFunction(BaseMovePagesTest):
def test_move_function(self):
starting_counts = self.pre_move()
utils.change_cms_site(self.orig_site, self.new_site)
self.post_move(starting_counts)
class TestMovePagesCommand(BaseMovePagesTest):
def test_move_command(self):
starting_counts = self.pre_move()
hide_output = io.StringIO()
call_command('change_cms_site', orig=self.orig_site_domain, to=self.new_site_domain,
stdout=hide_output)
self.post_move(starting_counts)
class TestBadMovePagesCommands(BaseMovePagesTest):
def test_same_domains(self):
with self.assertRaises(CommandError):
call_command('change_cms_site', orig=self.orig_site_domain, to=self.orig_site_domain)
def test_only_one_domain(self):
with self.assertRaises(CommandError):
call_command('change_cms_site', orig=self.orig_site_domain)
|
{
"content_hash": "715439b704e9d2aceb13e811acc28c02",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 97,
"avg_line_length": 37.14492753623188,
"alnum_prop": 0.6792820912992587,
"repo_name": "theirc/ServiceInfo",
"id": "a53c1bdd57bc559c5ce2b96235708965ff8817d1",
"size": "2563",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "service_info_cms/tests/test_change_site.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91208"
},
{
"name": "HTML",
"bytes": "169211"
},
{
"name": "JavaScript",
"bytes": "126261"
},
{
"name": "Python",
"bytes": "486647"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
}
|
import mock
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_db import exception as db_exc
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.callbacks import resources
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import utils
from neutron.db import provisioning_blocks
from neutron.extensions import portbindings
from neutron.tests import base
class TestDhcpRpcCallback(base.BaseTestCase):
def setUp(self):
super(TestDhcpRpcCallback, self).setUp()
self.plugin_p = mock.patch('neutron.manager.NeutronManager.get_plugin')
get_plugin = self.plugin_p.start()
self.plugin = mock.MagicMock()
get_plugin.return_value = self.plugin
self.callbacks = dhcp_rpc.DhcpRpcCallback()
self.log_p = mock.patch('neutron.api.rpc.handlers.dhcp_rpc.LOG')
self.log = self.log_p.start()
set_dirty_p = mock.patch('neutron.quota.resource_registry.'
'set_resources_dirty')
self.mock_set_dirty = set_dirty_p.start()
self.utils_p = mock.patch('neutron.plugins.common.utils.create_port')
self.utils = self.utils_p.start()
self.segment_p = mock.patch(
'neutron.manager.NeutronManager.get_service_plugins')
self.get_service_plugins = self.segment_p.start()
self.segment_plugin = mock.MagicMock()
def test_group_by_network_id(self):
port1 = {'network_id': 'a'}
port2 = {'network_id': 'b'}
port3 = {'network_id': 'a'}
grouped_ports = self.callbacks._group_by_network_id(
[port1, port2, port3])
expected = {'a': [port1, port3], 'b': [port2]}
self.assertEqual(expected, grouped_ports)
def test_get_active_networks_info(self):
plugin_retval = [{'id': 'a'}, {'id': 'b'}]
self.plugin.get_networks.return_value = plugin_retval
port = {'network_id': 'a'}
subnet = {'network_id': 'b', 'id': 'c'}
self.plugin.get_ports.return_value = [port]
self.plugin.get_subnets.return_value = [subnet]
networks = self.callbacks.get_active_networks_info(mock.Mock(),
host='host')
expected = [{'id': 'a', 'subnets': [], 'ports': [port]},
{'id': 'b', 'subnets': [subnet], 'ports': []}]
self.assertEqual(expected, networks)
def test_get_active_networks_info_with_routed_networks(self):
self.get_service_plugins.return_value = {
'segments': self.segment_plugin
}
plugin_retval = [{'id': 'a'}, {'id': 'b'}]
port = {'network_id': 'a'}
subnets = [{'network_id': 'b', 'id': 'c', 'segment_id': '1'},
{'network_id': 'a', 'id': 'e'},
{'network_id': 'b', 'id': 'd', 'segment_id': '3'}]
self.plugin.get_ports.return_value = [port]
self.plugin.get_networks.return_value = plugin_retval
hostseg_retval = ['1', '2']
self.segment_plugin.get_segments_by_hosts.return_value = hostseg_retval
self.plugin.get_subnets.return_value = subnets
networks = self.callbacks.get_active_networks_info(mock.Mock(),
host='host')
expected = [{'id': 'a', 'subnets': [subnets[1]], 'ports': [port]},
{'id': 'b', 'subnets': [subnets[0]], 'ports': []}]
self.assertEqual(expected, networks)
def _test__port_action_with_failures(self, exc=None, action=None):
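        # The DHCP RPC callback is expected to swallow these errors and
        # return None rather than propagate them back to the agent.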
port = {
'network_id': 'foo_network_id',
'device_owner': constants.DEVICE_OWNER_DHCP,
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
}
self.plugin.create_port.side_effect = exc
self.utils.side_effect = exc
self.assertIsNone(self.callbacks._port_action(self.plugin,
mock.Mock(),
{'port': port},
action))
def _test__port_action_good_action(self, action, port, expected_call):
self.callbacks._port_action(self.plugin, mock.Mock(),
port, action)
if action == 'create_port':
self.utils.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
else:
self.plugin.assert_has_calls([expected_call])
def test_port_action_create_port(self):
self._test__port_action_good_action(
'create_port', mock.Mock(),
mock.call.create_port(mock.ANY, mock.ANY))
def test_port_action_update_port(self):
fake_port = {'id': 'foo_port_id', 'port': mock.Mock()}
self._test__port_action_good_action(
'update_port', fake_port,
mock.call.update_port(mock.ANY, 'foo_port_id', mock.ANY))
def test__port_action_bad_action(self):
self.assertRaises(
n_exc.Invalid,
self._test__port_action_with_failures,
exc=None,
action='foo_action')
def test_create_port_catch_network_not_found(self):
self._test__port_action_with_failures(
exc=n_exc.NetworkNotFound(net_id='foo_network_id'),
action='create_port')
def test_create_port_catch_subnet_not_found(self):
self._test__port_action_with_failures(
exc=n_exc.SubnetNotFound(subnet_id='foo_subnet_id'),
action='create_port')
def test_create_port_catch_db_reference_error(self):
self._test__port_action_with_failures(
exc=db_exc.DBReferenceError('a', 'b', 'c', 'd'),
action='create_port')
def test_create_port_catch_ip_generation_failure_reraise(self):
self.assertRaises(
n_exc.IpAddressGenerationFailure,
self._test__port_action_with_failures,
exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'),
action='create_port')
def test_create_port_catch_and_handle_ip_generation_failure(self):
self.plugin.get_subnet.side_effect = (
n_exc.SubnetNotFound(subnet_id='foo_subnet_id'))
self._test__port_action_with_failures(
exc=n_exc.IpAddressGenerationFailure(net_id='foo_network_id'),
action='create_port')
self._test__port_action_with_failures(
exc=n_exc.InvalidInput(error_message='sorry'),
action='create_port')
def test_update_port_missing_port_on_get(self):
self.plugin.get_port.side_effect = n_exc.PortNotFound(port_id='66')
self.assertIsNone(self.callbacks.update_dhcp_port(
context='ctx', host='host', port_id='66',
port={'port': {'network_id': 'a'}}))
def test_update_port_missing_port_on_update(self):
self.plugin.get_port.return_value = {
'device_id': n_const.DEVICE_ID_RESERVED_DHCP_PORT}
self.plugin.update_port.side_effect = n_exc.PortNotFound(port_id='66')
self.assertIsNone(self.callbacks.update_dhcp_port(
context='ctx', host='host', port_id='66',
port={'port': {'network_id': 'a'}}))
def test_get_network_info_return_none_on_not_found(self):
self.plugin.get_network.side_effect = n_exc.NetworkNotFound(net_id='a')
retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
self.assertIsNone(retval)
def _test_get_network_info(self, segmented_network=False,
routed_network=False):
network_retval = dict(id='a')
if not routed_network:
subnet_retval = [dict(id='a'), dict(id='c'), dict(id='b')]
else:
subnet_retval = [dict(id='c', segment_id='1'),
dict(id='a', segment_id='1')]
port_retval = mock.Mock()
self.plugin.get_network.return_value = network_retval
self.plugin.get_subnets.return_value = subnet_retval
self.plugin.get_ports.return_value = port_retval
if segmented_network:
self.segment_plugin.get_segments.return_value = [dict(id='1'),
dict(id='2')]
self.segment_plugin.get_segments_by_hosts.return_value = ['1']
retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
self.assertEqual(retval, network_retval)
if not routed_network:
sorted_subnet_retval = [dict(id='a'), dict(id='b'), dict(id='c')]
else:
sorted_subnet_retval = [dict(id='a', segment_id='1'),
dict(id='c', segment_id='1')]
self.assertEqual(retval['subnets'], sorted_subnet_retval)
self.assertEqual(retval['ports'], port_retval)
def test_get_network_info(self):
self._test_get_network_info()
def test_get_network_info_with_routed_network(self):
self.get_service_plugins.return_value = {
'segments': self.segment_plugin
}
self._test_get_network_info(segmented_network=True,
routed_network=True)
def test_get_network_info_with_segmented_network_but_not_routed(self):
self.get_service_plugins.return_value = {
'segments': self.segment_plugin
}
self._test_get_network_info(segmented_network=True)
def test_get_network_info_with_non_segmented_network(self):
self.get_service_plugins.return_value = {
'segments': self.segment_plugin
}
self._test_get_network_info()
def test_update_dhcp_port_verify_port_action_port_dict(self):
port = {'port': {'network_id': 'foo_network_id',
'device_owner': constants.DEVICE_OWNER_DHCP,
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
}
expected_port = {'port': {'network_id': 'foo_network_id',
'device_owner': constants.DEVICE_OWNER_DHCP,
portbindings.HOST_ID: 'foo_host',
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
},
'id': 'foo_port_id'
}
def _fake_port_action(plugin, context, port, action):
self.assertEqual(expected_port, port)
self.plugin.get_port.return_value = {
'device_id': n_const.DEVICE_ID_RESERVED_DHCP_PORT}
self.callbacks._port_action = _fake_port_action
self.callbacks.update_dhcp_port(mock.Mock(),
host='foo_host',
port_id='foo_port_id',
port=port)
def test_update_reserved_dhcp_port(self):
port = {'port': {'network_id': 'foo_network_id',
'device_owner': constants.DEVICE_OWNER_DHCP,
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
}
expected_port = {'port': {'network_id': 'foo_network_id',
'device_owner': constants.DEVICE_OWNER_DHCP,
portbindings.HOST_ID: 'foo_host',
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
},
'id': 'foo_port_id'
}
def _fake_port_action(plugin, context, port, action):
self.assertEqual(expected_port, port)
self.plugin.get_port.return_value = {
'device_id': utils.get_dhcp_agent_device_id('foo_network_id',
'foo_host')}
self.callbacks._port_action = _fake_port_action
self.callbacks.update_dhcp_port(
mock.Mock(), host='foo_host', port_id='foo_port_id', port=port)
self.plugin.get_port.return_value = {
'device_id': 'other_id'}
self.assertRaises(exceptions.DhcpPortInUse,
self.callbacks.update_dhcp_port,
mock.Mock(),
host='foo_host',
port_id='foo_port_id',
port=port)
def test_update_dhcp_port(self):
port = {'port': {'network_id': 'foo_network_id',
'device_owner': constants.DEVICE_OWNER_DHCP,
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]}
}
expected_port = {'port': {'network_id': 'foo_network_id',
'device_owner': constants.DEVICE_OWNER_DHCP,
portbindings.HOST_ID: 'foo_host',
'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
},
'id': 'foo_port_id'
}
self.plugin.get_port.return_value = {
'device_id': n_const.DEVICE_ID_RESERVED_DHCP_PORT}
self.callbacks.update_dhcp_port(mock.Mock(),
host='foo_host',
port_id='foo_port_id',
port=port)
self.plugin.assert_has_calls([
mock.call.update_port(mock.ANY, 'foo_port_id', expected_port)])
def test_release_dhcp_port(self):
port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')])
self.plugin.get_ports.return_value = [port_retval]
self.callbacks.release_dhcp_port(mock.ANY, network_id='netid',
device_id='devid')
self.plugin.assert_has_calls([
mock.call.delete_ports_by_device_id(mock.ANY, 'devid', 'netid')])
def test_dhcp_ready_on_ports(self):
context = mock.Mock()
port_ids = range(10)
with mock.patch.object(provisioning_blocks,
'provisioning_complete') as pc:
self.callbacks.dhcp_ready_on_ports(context, port_ids)
calls = [mock.call(context, port_id, resources.PORT,
provisioning_blocks.DHCP_ENTITY)
for port_id in port_ids]
pc.assert_has_calls(calls)
|
{
"content_hash": "6a9ad65ba6be9f623c9cea9072fd1f81",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 79,
"avg_line_length": 45.75873015873016,
"alnum_prop": 0.5364922991536006,
"repo_name": "sebrandon1/neutron",
"id": "c8f28e84550921949e04d8a92d757dc62ed1a4e4",
"size": "15005",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9903006"
},
{
"name": "Shell",
"bytes": "14339"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
admin.autodiscover()
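# Pre-Django-1.8 URLconf style: patterns() and string view paths come from
# django.conf.urls.defaults, which was removed in later Django releases.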
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
url(r'^testit/', 'imageutil.tests.testapp.views.crop_images', name='testit'),
url(r'^%s/(?P<path>.*)$' % settings.MEDIA_URL.strip("/"), 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
{
"content_hash": "426321d2a02dc4b1bd1cdcd8cd36e1b1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 131,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6923076923076923,
"repo_name": "gregplaysguitar/glamkit",
"id": "a0366c83736468541aba0d02a2ce4a45eb1f05d8",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glamkit/incubated/imageutil/tests/testapp/test_urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "25519"
},
{
"name": "Python",
"bytes": "111853"
}
],
"symlink_target": ""
}
|
"""Gradients for operators defined in array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import ceil
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Pack")
def _PackGrad(op, grad):
"""Gradient for pack op."""
return array_ops.unstack(grad, num=op.get_attr("N"), axis=op.get_attr("axis"))
@ops.RegisterGradient("Unpack")
def _UnpackGrad(op, *grads):
"""Gradient for unpack op."""
return array_ops.stack(grads, axis=op.get_attr("axis"))
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
"""Gradient for concat op.
Args:
op: An operation.
grad: `Tensor` or `IndexedSlices` representing the gradients with respect
to each output of the op.
start_value_index: An integer index of the first value in the op.inputs.
end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.
Returns:
    Tensors representing the partial gradients with respect to each input
of the op.
Raises:
ValueError: if concat_dim/axis is not statically known.
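  For example, concatenating inputs of shapes [2, 3] and [2, 5] along axis 1
  produces a [2, 8] output, so the incoming [2, 8] gradient is sliced back
  into a [2, 3] piece and a [2, 5] piece, one per input.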
"""
def _CreateDenseMaskAndBegin(sizes, concat_dim):
"""Create variables for iteratively slicing a dense gradients tensor."""
# Since shape is 1-D, shape_of_shape = [rank-of-inputs]
shape_of_shape = array_ops.shape(sizes[0])
# Make a vector of length equal to the input's dimensions,
# with 0's everywhere and 1 in the concat dim position.
# Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
mask = array_ops.concat([
array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1],
array_ops.fill(shape_of_shape - concat_dim - 1, 0)
], 0)
begin = array_ops.fill(shape_of_shape, 0)
return mask, begin
def _ExtractInputShapes(inputs):
"""Extract the shapes of a set of input tensors."""
sizes = []
fully_known = True
for x in inputs:
input_shape = array_ops.shape(x)
if not isinstance(input_shape,
ops.Tensor) or input_shape.op.type != "Const":
fully_known = False
break
else:
sizes.append(input_shape)
if fully_known:
return sizes
else:
return array_ops.shape_n(inputs)
# Degenerate concatenation, just return grad.
if len(op.inputs) == 2:
return grad + [None] if end_value_index <= dim_index else [None] + grad
concat_dim = op.inputs[dim_index]
input_values = op.inputs[start_value_index:end_value_index]
# Using mod here for convenience since concat_dim is already verified
# in concat implementation to be within the allowed [-rank, rank) range.
non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])
out_grads = []
if isinstance(grad, ops.Tensor):
# Get the inputs' tensor shapes
sizes = _ExtractInputShapes(input_values)
# The magic number of 16 was found through benchmarking a range of sizes
# on CPUs and a Maxwell TitanX. A speedup was seen in a large majority of
# cases when switching implementations at N=16, but it is possible that
# there will be a small number of performance regressions.
# pylint: disable=protected-access
if len(sizes) > 16:
# extract the size of each input along the concat dimension
sizes = array_ops.squeeze(
array_ops.slice(
array_ops.stack(
sizes, axis=1), [non_neg_concat_dim, 0], [1, -1]))
out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
else:
offset = gen_array_ops._concat_offset(non_neg_concat_dim, sizes)
for (begin, size) in zip(offset, sizes):
out_grads.append(array_ops.slice(grad, begin, size))
# pylint: enable=protected-access
elif isinstance(grad, ops.IndexedSlices):
concat_dim_static = tensor_util.constant_value(concat_dim)
if concat_dim_static is None:
raise ValueError("Can only compute IndexedSlices gradient with "
"statically-known concat_dim")
if concat_dim_static < 0:
rank = tensor_util.constant_value(array_ops.rank(input_values[0]))
if rank is None:
raise ValueError("Can only compute IndexedSlices gradient with "
"negative concat_dim when first value rank is "
"statically-known.")
concat_dim_static %= rank
# Get the inputs' tensor shapes
sizes = [array_ops.shape(x) for x in input_values]
if concat_dim_static > 0:
# IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices
# gradients with all the indices, but with grad.values sliced accordingly.
# This is like the Tensor case, except shape(grad.values)[0] is not equal
# to shape(sizes[i])[0], since only a subset of the dim-0 values are
# stored.
mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim)
for size in sizes:
new_values = array_ops.slice(
grad.values, begin,
array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0))
out_grads.append(
ops.IndexedSlices(new_values, grad.indices, size))
# Lint complains begin = begin + ...
begin = math_ops.add(begin, size * mask)
else:
# IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
# only for the relevant indices.
start = constant_op.constant(0, dtype=grad.indices.dtype)
for size in sizes:
size_concat_dim = array_ops.gather(size, non_neg_concat_dim)
if size_concat_dim.dtype != grad.indices.dtype:
size_concat_dim = math_ops.cast(size_concat_dim,
dtype=grad.indices.dtype)
end = start + size_concat_dim
# Compute the 1-D Tensor of indices relevant for this input.
indices_to_select = array_ops.squeeze(
array_ops.where(math_ops.logical_and(grad.indices >= start,
grad.indices < end)),
squeeze_dims=[1])
new_indices = array_ops.gather(grad.indices, indices_to_select) - start
new_values = array_ops.gather(grad.values, indices_to_select)
out_grads.append(
ops.IndexedSlices(new_values, new_indices, size))
start = end
else:
raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))
return (out_grads + [None] if end_value_index <= dim_index
else [None] + out_grads)
@ops.RegisterGradient("Concat")
def _ConcatGrad(op, grad):
return _ConcatGradHelper(
op, grad, start_value_index=1, end_value_index=len(op.inputs),
dim_index=0)
@ops.RegisterGradient("ConcatV2")
def _ConcatGradV2(op, grad):
return _ConcatGradHelper(
op, grad, start_value_index=0, end_value_index=-1, dim_index=-1)
ops.NotDifferentiable("ConcatOffset")
@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
"""Gradient for Slice op."""
# Create an Nx2 padding where the first column represents how many
# zeros are to be prepended for each dimension, and the second
# column indicates how many zeros are appended.
#
# The number of zeros to append is the shape of the input
# elementwise-subtracted by both the begin vector and sizes vector.
#
# Some more reshaping is needed to assemble this tensor with the
# right dimensions.
input_vec = op.inputs[0]
begin_vec = op.inputs[1]
input_rank = array_ops.rank(input_vec)
slice_size = array_ops.shape(op.outputs[0])
shape = array_ops.stack([input_rank, 1])
before_pad = array_ops.reshape(begin_vec, shape)
after_pad = array_ops.reshape(
array_ops.shape(input_vec) - slice_size - begin_vec, shape)
paddings = array_ops.concat([before_pad, after_pad], 1)
return array_ops.pad(grad, paddings), None, None
@ops.RegisterGradient("StridedSlice")
def _StridedSliceGrad(op, grad):
"""Gradient for StridedSlice op."""
x = array_ops.shape(op.inputs[0])
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return array_ops.strided_slice_grad(
x,
begin,
end,
strides,
grad,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask")), None, None, None
@ops.RegisterGradient("StridedSliceGrad")
def _StridedSliceGradGrad(op, grad):
"""Gradient for StridedSliceGrad op."""
begin = op.inputs[1]
end = op.inputs[2]
strides = op.inputs[3]
return None, None, None, None, array_ops.strided_slice(
grad,
begin,
end,
strides,
begin_mask=op.get_attr("begin_mask"),
end_mask=op.get_attr("end_mask"),
ellipsis_mask=op.get_attr("ellipsis_mask"),
new_axis_mask=op.get_attr("new_axis_mask"),
shrink_axis_mask=op.get_attr("shrink_axis_mask"))
@ops.RegisterGradient("Split")
def _SplitGrad(op, *grads):
return None, array_ops.concat(list(grads), op.inputs[0])
@ops.RegisterGradient("SplitV")
def _SplitVGrad(op, *grads):
returnval = array_ops.concat(list(grads), op.inputs[2])
returnval = [returnval] + [None,] * (len(op.inputs) - 1)
return returnval
ops.NotDifferentiable("Const")
@ops.RegisterGradient("Diag")
def _DiagGrad(_, grad):
return array_ops.diag_part(grad)
@ops.RegisterGradient("DiagPart")
def _DiagPartGrad(_, grad):
return array_ops.diag(grad)
@ops.RegisterGradient("MatrixDiag")
def _MatrixDiagGrad(_, grad):
return array_ops.matrix_diag_part(grad)
@ops.RegisterGradient("MatrixDiagPart")
def _MatrixDiagPartGrad(op, grad):
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined() and matrix_shape[0] == matrix_shape[1]:
return array_ops.matrix_diag(grad)
else:
return array_ops.matrix_set_diag(array_ops.zeros_like(op.inputs[0]), grad)
@ops.RegisterGradient("MatrixSetDiag")
def _MatrixSetDiagGrad(op, grad):
"""Gradient for MatrixSetDiag."""
input_shape = op.inputs[0].get_shape().merge_with(grad.get_shape())
diag_shape = op.inputs[1].get_shape()
batch_shape = input_shape[:-2].merge_with(diag_shape[:-1])
matrix_shape = input_shape[-2:]
if batch_shape.is_fully_defined() and matrix_shape.is_fully_defined():
diag_shape = batch_shape.as_list() + [min(matrix_shape.as_list())]
else:
with ops.colocate_with(grad):
grad_shape = array_ops.shape(grad)
grad_rank = array_ops.rank(grad)
batch_shape = array_ops.slice(grad_shape, [0], [grad_rank - 2])
matrix_shape = array_ops.slice(grad_shape, [grad_rank - 2], [2])
min_dim = math_ops.reduce_min(matrix_shape)
diag_shape = array_ops.concat([batch_shape, [min_dim]], 0)
grad_input = array_ops.matrix_set_diag(
grad, array_ops.zeros(
diag_shape, dtype=grad.dtype))
grad_diag = array_ops.matrix_diag_part(grad)
return (grad_input, grad_diag)
@ops.RegisterGradient("MatrixBandPart")
def _MatrixBandPartGrad(op, grad):
num_lower = op.inputs[1]
num_upper = op.inputs[2]
return (array_ops.matrix_band_part(grad, num_lower, num_upper), None, None)
# Edit Distance has no gradient (but can be used to eval seq2seq or CTC).
ops.NotDifferentiable("EditDistance")
@ops.RegisterGradient("Fill")
def _FillGrad(_, grad):
return None, math_ops.reduce_sum(grad)
ops.NotDifferentiable("ZerosLike")
ops.NotDifferentiable("OnesLike")
@ops.RegisterGradient("PreventGradient")
def _PreventGradientGrad(op, _):
raise LookupError(
"Gradient explicitly disabled. Reason: %s" % op.get_attr("message"))
@ops.RegisterGradient("Gather")
def _GatherGrad(op, grad):
"""Gradient for Gather op."""
# params can be large, so colocate the shape calculation with it.
#
# params can be very large for sparse model, array_ops.shape raises
# exception on the Windows platform when any dimension is larger than
# int32. params_shape is not used in optimizer apply_sparse gradients,
# so it's fine to convert it back to int32 regardless of truncation.
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.to_int32(params_shape)
# Build appropriately shaped IndexedSlices
indices = op.inputs[1]
size = array_ops.expand_dims(array_ops.size(indices), 0)
values_shape = array_ops.concat([size, params_shape[1:]], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, size)
return [ops.IndexedSlices(values, indices, params_shape), None]
@ops.RegisterGradient("GatherNd")
def _GatherNdGrad(op, grad):
ref = op.inputs[0]
indices = op.inputs[1]
ref_shape = array_ops.shape(ref, out_type=indices.dtype)
ref_grad = array_ops.scatter_nd(indices, grad, ref_shape)
return [ref_grad, None]
@ops.RegisterGradient("CheckNumerics")
def _CheckNumericsGrad(_, grad):
"""Gradient for check_numerics op."""
return array_ops.check_numerics(
grad, "Not a number (NaN) or infinity (Inf) values detected in gradient.")
@ops.RegisterGradient("PlaceholderWithDefault")
@ops.RegisterGradient("Identity")
def _IdGrad(_, grad):
return grad
@ops.RegisterGradient("RefIdentity")
def _RefIdGrad(_, grad):
return grad
ops.NotDifferentiable("StopGradient")
@ops.RegisterGradient("Reshape")
def _ReshapeGrad(op, grad):
return [array_ops.reshape(grad, array_ops.shape(op.inputs[0])), None]
ops.NotDifferentiable("InvertPermutation")
def _ReshapeToInput(op, grad):
"""Reshapes the gradient to the shape of the original input."""
return array_ops.reshape(grad, array_ops.shape(op.inputs[0]))
@ops.RegisterGradient("ExpandDims")
def _ExpandDimsGrad(op, grad):
return [_ReshapeToInput(op, grad), None]
@ops.RegisterGradient("Squeeze")
def _SqueezeGrad(op, grad):
return _ReshapeToInput(op, grad)
@ops.RegisterGradient("Transpose")
def _TransposeGrad(op, grad):
"""Returns unshuffle(grad)."""
p = op.inputs[1]
return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None]
ops.NotDifferentiable("Shape")
ops.NotDifferentiable("ShapeN")
ops.NotDifferentiable("Rank")
ops.NotDifferentiable("Size")
@ops.RegisterGradient("Tile")
def _TileGrad(op, grad):
"""Sum reduces grad along the tiled dimensions."""
assert isinstance(grad, ops.Tensor)
input_shape = array_ops.shape(op.inputs[0])
# We interleave multiples and input_shape to get split_shape,
# reshape grad to split_shape, and reduce along all even
# dimensions (the tiled dimensions) to get the result
# with shape input_shape. For example
# input_shape = [20, 30, 40]
# multiples = [2, 3, 4]
# split_shape = [2, 20, 3, 30, 4, 40]
# axes = [0, 2, 4]
split_shape = array_ops.reshape(
array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
axes = math_ops.range(0, array_ops.size(split_shape), 2)
input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
# Fix shape inference
input_grad.set_shape(op.inputs[0].get_shape())
return [input_grad, None]
ops.NotDifferentiable("BroadcastGradientArgs")
def _PadGrad(op, grad):
"""Gradient for Pad."""
# Pad introduces values around the original tensor, so the gradient function
  # slices the original shape out of the gradient.
x = op.inputs[0]
a = op.inputs[1] # [Rank(x), 2]
# Takes a slice of a. The 1st column. [Rank(x), 1].
pad_before = array_ops.slice(a, [0, 0],
array_ops.stack([array_ops.rank(x), 1]))
# Make it a 1-D tensor.
begin = array_ops.reshape(pad_before, [-1])
sizes = array_ops.shape(x)
x_grad = array_ops.slice(grad, begin, sizes)
if len(op.inputs) == 3:
return x_grad, None, None
else:
return x_grad, None
ops.RegisterGradient("Pad")(_PadGrad)
ops.RegisterGradient("PadV2")(_PadGrad)
# ReverseSequence is just a permutation. The gradient permutes back.
@ops.RegisterGradient("ReverseSequence")
def _ReverseSequenceGrad(op, grad):
seq_lengths = op.inputs[1]
return [
array_ops.reverse_sequence(
grad,
batch_axis=op.get_attr("batch_dim"),
seq_axis=op.get_attr("seq_dim"),
seq_lengths=seq_lengths), None
]
@ops.RegisterGradient("Reverse")
def _ReverseGrad(op, grad):
reverse_dims = op.inputs[1]
# pylint: disable=protected-access
return gen_array_ops._reverse(grad, reverse_dims), None
# pylint: enable=protected-access
@ops.RegisterGradient("ReverseV2")
def _ReverseV2Grad(op, grad):
axis = op.inputs[1]
return array_ops.reverse_v2(grad, axis), None
@ops.RegisterGradient("SpaceToBatch")
def _SpaceToBatchGrad(op, grad):
# Its gradient is the opposite op: BatchToSpace.
block_size = op.get_attr("block_size")
return [array_ops.batch_to_space(grad, op.inputs[1], block_size=block_size),
None]
@ops.RegisterGradient("SpaceToBatchND")
def _SpaceToBatchNDGrad(op, grad):
# Its gradient is the opposite op: BatchToSpaceND.
return [array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]),
None, None]
@ops.RegisterGradient("BatchToSpace")
def _BatchToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatch.
block_size = op.get_attr("block_size")
return [array_ops.space_to_batch(grad, op.inputs[1], block_size=block_size),
None]
@ops.RegisterGradient("BatchToSpaceND")
def _BatchToSpaceNDGrad(op, grad):
# Its gradient is the opposite op: SpaceToBatchND.
return [array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]),
None, None]
@ops.RegisterGradient("SpaceToDepth")
def _SpaceToDepthGrad(op, grad):
# Its gradient is the opposite op: DepthToSpace.
block_size = op.get_attr("block_size")
return array_ops.depth_to_space(grad, block_size)
@ops.RegisterGradient("DepthToSpace")
def _DepthToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToDepth.
block_size = op.get_attr("block_size")
return array_ops.space_to_depth(grad, block_size)
ops.NotDifferentiable("OneHot")
@ops.RegisterGradient("MirrorPad")
def _MirrorPadGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("MirrorPadGrad")
def _MirrorPadGradGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("QuantizeAndDequantize")
def _QuantizeAndDequantizeGrad(_, grad):
return grad
@ops.RegisterGradient("QuantizeAndDequantizeV2")
def _QuantizeAndDequantizeV2Grad(_, grad):
return [grad, None, None]
@ops.RegisterGradient("QuantizeAndDequantizeV3")
def _QuantizeAndDequantizeV3Grad(_, grad):
# Only propagate the gradient for the unquantized input.
return [grad, None, None, None]
@ops.RegisterGradient("ExtractImagePatches")
def _ExtractImagePatchesGrad(op, grad):
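  # Strategy: build a sparse 0/1 matrix that maps each input pixel to every
  # patch position containing it, then multiply it with the flattened
  # incoming gradient to scatter-add patch gradients back onto the image.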
batch_size, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].get_shape()
]
input_bhwc = array_ops.shape(op.inputs[0])
batch_size = input_bhwc[0]
channels = input_bhwc[3]
_, rows_out, cols_out, _ = [
dim.value for dim in op.outputs[0].get_shape()
]
_, ksize_r, ksize_c, _ = op.get_attr('ksizes')
_, stride_r, stride_h, _ = op.get_attr('strides')
_, rate_r, rate_c, _ = op.get_attr('rates')
padding = op.get_attr('padding')
ksize_r_eff = ksize_r + (ksize_r - 1) * (rate_r - 1)
ksize_c_eff = ksize_c + (ksize_c - 1) * (rate_c - 1)
if padding == b'SAME':
rows_out = int(ceil(rows_in / stride_r))
cols_out = int(ceil(cols_in / stride_h))
pad_rows = ((rows_out - 1) * stride_r + ksize_r_eff - rows_in) // 2
pad_cols = ((cols_out - 1) * stride_h + ksize_c_eff - cols_in) // 2
elif padding == b'VALID':
rows_out = int(ceil((rows_in - ksize_r_eff + 1) / stride_r))
cols_out = int(ceil((cols_in - ksize_c_eff + 1) / stride_h))
pad_rows = (rows_out - 1) * stride_r + ksize_r_eff - rows_in
pad_cols = (cols_out - 1) * stride_h + ksize_c_eff - cols_in
pad_rows, pad_cols = max(0, pad_rows), max(0, pad_cols)
grad_expanded = array_ops.transpose(
array_ops.reshape(grad, (batch_size, rows_out,
cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5)
)
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
row_steps = range(0, rows_out * stride_r, stride_r)
col_steps = range(0, cols_out * stride_h, stride_h)
idx = []
for i in range(rows_out):
for j in range(cols_out):
r_low, c_low = row_steps[i] - pad_rows, col_steps[j] - pad_cols
r_high, c_high = r_low + ksize_r_eff, c_low + ksize_c_eff
idx.extend([(r * (cols_in) + c,
i * (cols_out * ksize_r * ksize_c) +
j * (ksize_r * ksize_c) +
ri * (ksize_c) + ci)
for (ri, r) in enumerate(range(r_low, r_high, rate_r))
for (ci, c) in enumerate(range(c_low, c_high, rate_c))
                    if 0 <= r < rows_in and 0 <= c < cols_in
])
sp_shape = (rows_in * cols_in,
rows_out * cols_out * ksize_r * ksize_c)
sp_mat = sparse_tensor.SparseTensor(
array_ops.constant(idx, dtype=ops.dtypes.int64),
array_ops.ones((len(idx),), dtype=ops.dtypes.float32),
sp_shape
)
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(
jac, (rows_in, cols_in, batch_size, channels)
)
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out]
@ops.RegisterGradient("ScatterNd")
def _ScatterNdGrad(op, grad):
indices = op.inputs[0]
updates_grad = array_ops.gather_nd(grad, indices)
return [None, updates_grad, None]
@ops.RegisterGradient("ScatterNdNonAliasingAdd")
def _ScatterNdNonAliasingAddGrad(op, grad):
indices = op.inputs[1]
updates_grad = array_ops.gather_nd(grad, indices)
return [grad, None, updates_grad]
|
{
"content_hash": "921c6b0008a5fcc3b6acdd9401fa10df",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 80,
"avg_line_length": 33.25887573964497,
"alnum_prop": 0.6696170439887915,
"repo_name": "andrewcmyers/tensorflow",
"id": "73a4b7db9f0fb8742e0445c43504ee8b49ad0c94",
"size": "23173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/array_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "194808"
},
{
"name": "C++",
"bytes": "27141925"
},
{
"name": "CMake",
"bytes": "176726"
},
{
"name": "Go",
"bytes": "926344"
},
{
"name": "Java",
"bytes": "333968"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37293"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "250935"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "23695367"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "336974"
}
],
"symlink_target": ""
}
|
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import uuidutils
from tacker._i18n import _
from tacker.common import exceptions
import tacker.context
from tacker.db import api as db_api
from tacker.db.db_sqlalchemy import api
from tacker.db.db_sqlalchemy import models
from tacker.objects import base
from tacker.objects import fields
LOG = logging.getLogger(__name__)
@db_api.context_manager.writer
def _vnf_package_vnfd_create(context, values):
vnf_package_vnfd = models.VnfPackageVnfd()
vnf_package_vnfd.update(values)
try:
vnf_package_vnfd.save(context.session)
except db_exc.DBDuplicateEntry as e:
if 'vnfd_id' in e.columns:
raise exceptions.VnfPackageVnfdIdDuplicate(
vnfd_id=values.get('vnfd_id'))
return vnf_package_vnfd
@db_api.context_manager.reader
def _get_vnf_package_vnfd(context, id, package_uuid=None, del_flg=None):
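    # 'id' is a package UUID when package_uuid is set, otherwise a vnfd_id;
    # del_flg=True also returns soft-deleted rows for the package lookup.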
if package_uuid and not del_flg:
query = api.model_query(
context,
models.VnfPackageVnfd).filter_by(
package_uuid=id).filter_by(
deleted=0)
elif package_uuid and del_flg:
query = api.model_query(
context, models.VnfPackageVnfd).filter_by(
package_uuid=id)
else:
query = api.model_query(
context,
models.VnfPackageVnfd).filter_by(
vnfd_id=id).filter_by(
deleted=0)
    result_line = ""
    try:
        result = query.all()
        for line in result:
            result_line = line
except Exception:
LOG.error("select vnf_package_vnfd failed")
if result_line:
return result_line
else:
return None
@db_api.context_manager.writer
def _vnf_package_vnfd_delete(context, id):
try:
api.model_query(
context, models.VnfPackageVnfd).filter_by(
package_uuid=id).delete()
except Exception:
LOG.error("delete vnf_package_vnfd failed")
@db_api.context_manager.reader
def _vnf_package_vnfd_get_by_id(context, vnfd_id):
query = api.model_query(context, models.VnfPackageVnfd,
read_deleted="no", project_only=False). \
filter_by(vnfd_id=vnfd_id).\
join((models.VnfPackage, models.VnfPackage.id ==
models.VnfPackageVnfd.package_uuid))
if tacker.context.is_user_context(context):
query = query.filter(models.VnfPackage.tenant_id == context.project_id)
result = query.first()
if not result:
raise exceptions.VnfPackageVnfdNotFound(id=vnfd_id)
return result
@db_api.context_manager.reader
def _vnf_package_vnfd_get_by_packageId(context, packageId):
query = api.model_query(
context,
models.VnfPackageVnfd,
read_deleted="no",
project_only=True).filter_by(
package_uuid=packageId)
result = query.first()
if not result:
return None
return result
@db_api.context_manager.reader
def _vnf_package_vnfd_get_by_vnfdId(context, vnfdId):
query = api.model_query(context,
models.VnfPackageVnfd,
read_deleted="no",
project_only=True).filter_by(vnfd_id=vnfdId)
result = query.first()
if not result:
return None
return result
@db_api.context_manager.reader
def _get_vnf_package_vnfd_by_vnfid(context, vnfpkgid):
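    # Raw join of vnf_package_vnfd with vnf on vnfd_id; returns the first
    # matching row for the given VNF instance id, or None if nothing matches.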
sql = ("select"
" t1.vnfd_id,"
" t1.vnf_provider,"
" t1.vnf_product_name,"
" t1.vnf_software_version,"
" t1.vnfd_version,"
" t2.name"
" from "
" vnf_package_vnfd t1,"
" vnf t2 "
" where"
" t1.vnfd_id=t2.vnfd_id"
" and"
" t2.id= :vnfpkgid")
result = context.session.execute(sql, {'vnfpkgid': vnfpkgid})
for line in result:
return line
@base.TackerObjectRegistry.register
class VnfPackageVnfd(base.TackerObject, base.TackerObjectDictCompat,
base.TackerPersistentObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.UUIDField(nullable=False),
'package_uuid': fields.UUIDField(nullable=False),
'vnfd_id': fields.UUIDField(nullable=False),
'vnf_provider': fields.StringField(nullable=False),
'vnf_product_name': fields.StringField(nullable=False),
'vnf_software_version': fields.StringField(nullable=False),
'vnfd_version': fields.StringField(nullable=False),
}
@staticmethod
def _from_db_object(context, vnf_package_vnfd, db_vnf_package_vnfd):
for key in vnf_package_vnfd.fields:
if db_vnf_package_vnfd[key]:
setattr(vnf_package_vnfd, key, db_vnf_package_vnfd[key])
vnf_package_vnfd._context = context
vnf_package_vnfd.obj_reset_changes()
return vnf_package_vnfd
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='create',
reason=_('already created'))
updates = self.obj_get_changes()
if 'id' not in updates:
updates['id'] = uuidutils.generate_uuid()
self.id = updates['id']
updates = self.obj_get_changes()
db_vnf_package_vnfd = _vnf_package_vnfd_create(
self._context, updates)
self._from_db_object(self._context, self, db_vnf_package_vnfd)
@base.remotable
def get_vnf_package_vnfd(self, id, package_uuid=None, del_flg=None):
return _get_vnf_package_vnfd(self._context, id, package_uuid, del_flg)
@base.remotable_classmethod
    def get_vnf_package_vnfd_by_vnfid(cls, context, vnfid):
return _get_vnf_package_vnfd_by_vnfid(context, vnfid)
@base.remotable
def delete(self, id):
_vnf_package_vnfd_delete(self._context, id)
@classmethod
def obj_from_db_obj(cls, context, db_obj):
return cls._from_db_object(context, cls(), db_obj)
@base.remotable_classmethod
def get_by_id(cls, context, id):
db_vnf_package_vnfd = _vnf_package_vnfd_get_by_id(context, id)
return cls._from_db_object(context, cls(), db_vnf_package_vnfd)
@base.remotable_classmethod
def get_by_vnfdId(cls, context, id):
db_vnf_package_vnfd = _vnf_package_vnfd_get_by_vnfdId(
context, id)
if not db_vnf_package_vnfd:
return db_vnf_package_vnfd
return cls._from_db_object(context, cls(), db_vnf_package_vnfd)
@base.remotable_classmethod
def get_by_packageId(cls, context, id):
db_vnf_package_vnfd = _vnf_package_vnfd_get_by_packageId(
context, id)
if not db_vnf_package_vnfd:
return db_vnf_package_vnfd
return cls._from_db_object(context, cls(), db_vnf_package_vnfd)
|
{
"content_hash": "ce569fc4ea842d1faf0e743db12c3e87",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 30.152838427947597,
"alnum_prop": 0.6149167270094135,
"repo_name": "stackforge/tacker",
"id": "d5676a69c14ca011cc98c6ff0f541cabb456781c",
"size": "7510",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tacker/objects/vnf_package_vnfd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1143026"
},
{
"name": "Shell",
"bytes": "26584"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._configuration_profile_hci_assignments_operations import build_create_or_update_request, build_delete_request, build_get_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConfigurationProfileHCIAssignmentsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.automanage.aio.AutomanageClient`'s
:attr:`configuration_profile_hci_assignments` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
cluster_name: str,
configuration_profile_assignment_name: str,
parameters: _models.ConfigurationProfileAssignment,
**kwargs: Any
) -> _models.ConfigurationProfileAssignment:
"""Creates an association between a AzureStackHCI cluster and Automanage configuration profile.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: The name of the Arc machine.
:type cluster_name: str
:param configuration_profile_assignment_name: Name of the configuration profile assignment.
Only default is supported.
:type configuration_profile_assignment_name: str
:param parameters: Parameters supplied to the create or update configuration profile
assignment.
:type parameters: ~azure.mgmt.automanage.models.ConfigurationProfileAssignment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationProfileAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.automanage.models.ConfigurationProfileAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-04")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.ConfigurationProfileAssignment]
_json = self._serialize.body(parameters, 'ConfigurationProfileAssignment')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cluster_name=cluster_name,
configuration_profile_assignment_name=configuration_profile_assignment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConfigurationProfileAssignment', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConfigurationProfileAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHci/clusters/{clusterName}/providers/Microsoft.Automanage/configurationProfileAssignments/{configurationProfileAssignmentName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
cluster_name: str,
configuration_profile_assignment_name: str,
**kwargs: Any
) -> _models.ConfigurationProfileAssignment:
"""Get information about a configuration profile assignment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: The name of the Arc machine.
:type cluster_name: str
:param configuration_profile_assignment_name: The configuration profile assignment name.
:type configuration_profile_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfigurationProfileAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.automanage.models.ConfigurationProfileAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-04")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.ConfigurationProfileAssignment]
request = build_get_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
cluster_name=cluster_name,
configuration_profile_assignment_name=configuration_profile_assignment_name,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConfigurationProfileAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHci/clusters/{clusterName}/providers/Microsoft.Automanage/configurationProfileAssignments/{configurationProfileAssignmentName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cluster_name: str,
configuration_profile_assignment_name: str,
**kwargs: Any
) -> None:
"""Delete a configuration profile assignment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param cluster_name: The name of the Arc machine.
:type cluster_name: str
:param configuration_profile_assignment_name: Name of the configuration profile assignment.
:type configuration_profile_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-04")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cluster_name=cluster_name,
configuration_profile_assignment_name=configuration_profile_assignment_name,
api_version=api_version,
template_url=self.delete.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHci/clusters/{clusterName}/providers/Microsoft.Automanage/configurationProfileAssignments/{configurationProfileAssignmentName}"} # type: ignore
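# --- Editor's hedged usage sketch (not part of the generated client) ---------
# How a caller might drive the async get/delete operations above. Every value
# below (subscription id, resource group, cluster, assignment name) is a
# placeholder, and the operations-group attribute name is assumed from this
# module's path. Requires azure-identity alongside azure-mgmt-automanage.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.automanage.aio import AutomanageClient
    async def _demo() -> None:
        async with DefaultAzureCredential() as credential:
            async with AutomanageClient(credential, subscription_id="<subscription-id>") as client:
                # Fetch the assignment, then remove it.
                assignment = await client.configuration_profile_hci_assignments.get(
                    resource_group_name="<resource-group>",
                    cluster_name="<hci-cluster>",
                    configuration_profile_assignment_name="default",
                )
                print(assignment.id)
                await client.configuration_profile_hci_assignments.delete(
                    resource_group_name="<resource-group>",
                    cluster_name="<hci-cluster>",
                    configuration_profile_assignment_name="default",
                )
    asyncio.run(_demo())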
|
{
"content_hash": "f3bf0284a1d9e7824e58a4e3aeee0e45",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 282,
"avg_line_length": 47.310483870967744,
"alnum_prop": 0.6749339469871303,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ec6cbe1e365fc50e977e97703d9c22398bc600e6",
"size": "12233",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/automanage/azure-mgmt-automanage/azure/mgmt/automanage/aio/operations/_configuration_profile_hci_assignments_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Basic collect and runtest protocol implementations."""
import bdb
import os
import sys
from typing import Callable
from typing import cast
from typing import Dict
from typing import Generic
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import attr
from .reports import BaseReport
from .reports import CollectErrorRepr
from .reports import CollectReport
from .reports import TestReport
from _pytest import timing
from _pytest._code.code import ExceptionChainRepr
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import TerminalRepr
from _pytest.compat import final
from _pytest.config.argparsing import Parser
from _pytest.deprecated import check_ispytest
from _pytest.nodes import Collector
from _pytest.nodes import Item
from _pytest.nodes import Node
from _pytest.outcomes import Exit
from _pytest.outcomes import OutcomeException
from _pytest.outcomes import Skipped
from _pytest.outcomes import TEST_OUTCOME
if TYPE_CHECKING:
from typing_extensions import Literal
from _pytest.main import Session
from _pytest.terminal import TerminalReporter
#
# pytest plugin hooks.
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("terminal reporting", "Reporting", after="general")
group.addoption(
"--durations",
action="store",
type=int,
default=None,
metavar="N",
help="Show N slowest setup/test durations (N=0 for all)",
)
group.addoption(
"--durations-min",
action="store",
type=float,
default=0.005,
metavar="N",
help="Minimal duration in seconds for inclusion in slowest list. "
"Default: 0.005.",
)
def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None:
durations = terminalreporter.config.option.durations
durations_min = terminalreporter.config.option.durations_min
verbose = terminalreporter.config.getvalue("verbose")
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration, reverse=True) # type: ignore[no-any-return]
if not durations:
tr.write_sep("=", "slowest durations")
else:
tr.write_sep("=", "slowest %s durations" % durations)
dlist = dlist[:durations]
for i, rep in enumerate(dlist):
if verbose < 2 and rep.duration < durations_min:
tr.write_line("")
tr.write_line(
"(%s durations < %gs hidden. Use -vv to show these durations.)"
% (len(dlist) - i, durations_min)
)
break
tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}")
def pytest_sessionstart(session: "Session") -> None:
session._setupstate = SetupState()
def pytest_sessionfinish(session: "Session") -> None:
session._setupstate.teardown_exact(None)
def pytest_runtest_protocol(item: Item, nextitem: Optional[Item]) -> bool:
ihook = item.ihook
ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
runtestprotocol(item, nextitem=nextitem)
ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
return True
def runtestprotocol(
item: Item, log: bool = True, nextitem: Optional[Item] = None
) -> List[TestReport]:
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request: # type: ignore[attr-defined]
# This only happens if the item is re-run, as is done by
# pytest-rerunfailures.
item._initrequest() # type: ignore[attr-defined]
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
if item.config.getoption("setupshow", False):
show_test_item(item)
if not item.config.getoption("setuponly", False):
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
# After all teardown hooks have been called
# want funcargs and request info to go away.
if hasrequest:
item._request = False # type: ignore[attr-defined]
item.funcargs = None # type: ignore[attr-defined]
return reports
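# Editor's hedged sketch (helper, not part of pytest): for a passing item,
# runtestprotocol() yields one TestReport per phase, in order; a setup failure
# would skip the "call" phase.
def _report_phases_demo(item: Item) -> List[str]:
    return [rep.when for rep in runtestprotocol(item, log=False)]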
def show_test_item(item: Item) -> None:
"""Show test function, parameters and the fixtures of the test item."""
tw = item.config.get_terminal_writer()
tw.line()
tw.write(" " * 8)
tw.write(item.nodeid)
used_fixtures = sorted(getattr(item, "fixturenames", []))
if used_fixtures:
tw.write(" (fixtures used: {})".format(", ".join(used_fixtures)))
tw.flush()
def pytest_runtest_setup(item: Item) -> None:
_update_current_test_var(item, "setup")
item.session._setupstate.setup(item)
def pytest_runtest_call(item: Item) -> None:
_update_current_test_var(item, "call")
try:
del sys.last_type
del sys.last_value
del sys.last_traceback
except AttributeError:
pass
try:
item.runtest()
except Exception as e:
# Store trace info to allow postmortem debugging
sys.last_type = type(e)
sys.last_value = e
assert e.__traceback__ is not None
# Skip *this* frame
sys.last_traceback = e.__traceback__.tb_next
raise e
def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None:
_update_current_test_var(item, "teardown")
item.session._setupstate.teardown_exact(nextitem)
_update_current_test_var(item, None)
def _update_current_test_var(
item: Item, when: Optional["Literal['setup', 'call', 'teardown']"]
) -> None:
"""Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage.
If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment.
"""
var_name = "PYTEST_CURRENT_TEST"
if when:
value = f"{item.nodeid} ({when})"
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace("\x00", "(null)")
os.environ[var_name] = value
else:
os.environ.pop(var_name)
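# Editor's hedged sketch (helper, not part of pytest): PYTEST_CURRENT_TEST is
# ordinary environment data, so a fixture or an external watchdog process can
# read it; the value looks like "path/to/test_mod.py::test_name (call)".
def _read_current_test_demo() -> Optional[str]:
    return os.environ.get("PYTEST_CURRENT_TEST")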
def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
return None
#
# Implementation
def call_and_report(
item: Item, when: "Literal['setup', 'call', 'teardown']", log: bool = True, **kwds
) -> TestReport:
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report: TestReport = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call: "CallInfo[object]", report: BaseReport) -> bool:
"""Check whether the call raised an exception that should be reported as
interactive."""
if call.excinfo is None:
# Didn't raise.
return False
if hasattr(report, "wasxfail"):
# Exception was expected.
return False
if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)):
# Special control flow exception.
return False
return True
def call_runtest_hook(
item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds
) -> "CallInfo[None]":
if when == "setup":
ihook: Callable[..., None] = item.ihook.pytest_runtest_setup
elif when == "call":
ihook = item.ihook.pytest_runtest_call
elif when == "teardown":
ihook = item.ihook.pytest_runtest_teardown
else:
assert False, f"Unhandled runtest hook case: {when}"
reraise: Tuple[Type[BaseException], ...] = (Exit,)
if not item.config.getoption("usepdb", False):
reraise += (KeyboardInterrupt,)
return CallInfo.from_call(
lambda: ihook(item=item, **kwds), when=when, reraise=reraise
)
TResult = TypeVar("TResult", covariant=True)
@final
@attr.s(repr=False, init=False, auto_attribs=True)
class CallInfo(Generic[TResult]):
"""Result/Exception info of a function invocation."""
_result: Optional[TResult]
#: The captured exception of the call, if it raised.
excinfo: Optional[ExceptionInfo[BaseException]]
#: The system time when the call started, in seconds since the epoch.
start: float
#: The system time when the call ended, in seconds since the epoch.
stop: float
#: The call duration, in seconds.
duration: float
#: The context of invocation: "collect", "setup", "call" or "teardown".
when: "Literal['collect', 'setup', 'call', 'teardown']"
def __init__(
self,
result: Optional[TResult],
excinfo: Optional[ExceptionInfo[BaseException]],
start: float,
stop: float,
duration: float,
when: "Literal['collect', 'setup', 'call', 'teardown']",
*,
_ispytest: bool = False,
) -> None:
check_ispytest(_ispytest)
self._result = result
self.excinfo = excinfo
self.start = start
self.stop = stop
self.duration = duration
self.when = when
@property
def result(self) -> TResult:
"""The return value of the call, if it didn't raise.
Can only be accessed if excinfo is None.
"""
if self.excinfo is not None:
raise AttributeError(f"{self!r} has no valid result")
# The cast is safe because an exception wasn't raised, hence
# _result has the expected function return type (which may be
# None, that's why a cast and not an assert).
return cast(TResult, self._result)
@classmethod
def from_call(
cls,
func: "Callable[[], TResult]",
when: "Literal['collect', 'setup', 'call', 'teardown']",
reraise: Optional[
Union[Type[BaseException], Tuple[Type[BaseException], ...]]
] = None,
) -> "CallInfo[TResult]":
"""Call func, wrapping the result in a CallInfo.
:param func:
The function to call. Called without arguments.
:param when:
The phase in which the function is called.
:param reraise:
Exception or exceptions that shall propagate if raised by the
function, instead of being wrapped in the CallInfo.
"""
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
result: Optional[TResult] = func()
except BaseException:
excinfo = ExceptionInfo.from_current()
if reraise is not None and isinstance(excinfo.value, reraise):
raise
result = None
# use the perf counter
precise_stop = timing.perf_counter()
duration = precise_stop - precise_start
stop = timing.time()
return cls(
start=start,
stop=stop,
duration=duration,
when=when,
result=result,
excinfo=excinfo,
_ispytest=True,
)
def __repr__(self) -> str:
if self.excinfo is None:
return f"<CallInfo when={self.when!r} result: {self._result!r}>"
return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
return TestReport.from_item_and_call(item, call)
def pytest_make_collect_report(collector: Collector) -> CollectReport:
call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
longrepr: Union[None, Tuple[str, int, str], str, TerminalRepr] = None
if not call.excinfo:
outcome: Literal["passed", "skipped", "failed"] = "passed"
else:
skip_exceptions = [Skipped]
unittest = sys.modules.get("unittest")
if unittest is not None:
# Type ignored because unittest is loaded dynamically.
skip_exceptions.append(unittest.SkipTest) # type: ignore
if isinstance(call.excinfo.value, tuple(skip_exceptions)):
outcome = "skipped"
r_ = collector._repr_failure_py(call.excinfo, "line")
assert isinstance(r_, ExceptionChainRepr), repr(r_)
r = r_.reprcrash
assert r
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
assert isinstance(errorinfo, str)
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
result = call.result if not call.excinfo else None
rep = CollectReport(collector.nodeid, outcome, longrepr, result)
rep.call = call # type: ignore # see collect_one_node
return rep
class SetupState:
"""Shared state for setting up/tearing down test items or collectors
in a session.
Suppose we have a collection tree as follows:
<Session session>
<Module mod1>
<Function item1>
<Module mod2>
<Function item2>
The SetupState maintains a stack. The stack starts out empty:
[]
During the setup phase of item1, setup(item1) is called. What it does
is:
push session to stack, run session.setup()
push mod1 to stack, run mod1.setup()
push item1 to stack, run item1.setup()
The stack is:
[session, mod1, item1]
While the stack is in this shape, it is allowed to add finalizers to
each of session, mod1, item1 using addfinalizer().
During the teardown phase of item1, teardown_exact(item2) is called,
where item2 is the next item to item1. What it does is:
pop item1 from stack, run its teardowns
pop mod1 from stack, run its teardowns
mod1 was popped because it ended its purpose with item1. The stack is:
[session]
During the setup phase of item2, setup(item2) is called. What it does
is:
push mod2 to stack, run mod2.setup()
push item2 to stack, run item2.setup()
Stack:
[session, mod2, item2]
During the teardown phase of item2, teardown_exact(None) is called,
because item2 is the last item. What it does is:
pop item2 from stack, run its teardowns
pop mod2 from stack, run its teardowns
pop session from stack, run its teardowns
Stack:
[]
The end!
"""
def __init__(self) -> None:
# The stack is in the dict insertion order.
self.stack: Dict[
Node,
Tuple[
# Node's finalizers.
List[Callable[[], object]],
# Node's exception, if its setup raised.
Optional[Union[OutcomeException, Exception]],
],
] = {}
def setup(self, item: Item) -> None:
"""Setup objects along the collector chain to the item."""
needed_collectors = item.listchain()
# If a collector fails its setup, fail its entire subtree of items.
# The setup is not retried for each item - the same exception is used.
for col, (finalizers, exc) in self.stack.items():
assert col in needed_collectors, "previous item was not torn down properly"
if exc:
raise exc
for col in needed_collectors[len(self.stack) :]:
assert col not in self.stack
# Push onto the stack.
self.stack[col] = ([col.teardown], None)
try:
col.setup()
except TEST_OUTCOME as exc:
self.stack[col] = (self.stack[col][0], exc)
raise exc
def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None:
"""Attach a finalizer to the given node.
The node must be currently active in the stack.
"""
assert node and not isinstance(node, tuple)
assert callable(finalizer)
assert node in self.stack, (node, self.stack)
self.stack[node][0].append(finalizer)
def teardown_exact(self, nextitem: Optional[Item]) -> None:
"""Teardown the current stack up until reaching nodes that nextitem
also descends from.
When nextitem is None (meaning we're at the last item), the entire
stack is torn down.
"""
needed_collectors = nextitem and nextitem.listchain() or []
exc = None
while self.stack:
if list(self.stack.keys()) == needed_collectors[: len(self.stack)]:
break
node, (finalizers, _) = self.stack.popitem()
while finalizers:
fin = finalizers.pop()
try:
fin()
except TEST_OUTCOME as e:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = e
if exc:
raise exc
if nextitem is None:
assert not self.stack
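# Editor's illustration of the stack protocol described in the SetupState
# docstring (toy trace over the mod1/mod2/item1/item2 tree shown there):
#   state.setup(item1)           # stack: [session, mod1, item1]
#   state.teardown_exact(item2)  # pops item1, mod1 -> stack: [session]
#   state.setup(item2)           # stack: [session, mod2, item2]
#   state.teardown_exact(None)   # pops item2, mod2, session -> stack: []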
def collect_one_node(collector: Collector) -> CollectReport:
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep: CollectReport = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep
|
{
"content_hash": "5b123075fbfc11b04cb3780e03288006",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 87,
"avg_line_length": 33.31734317343174,
"alnum_prop": 0.6210543803300477,
"repo_name": "markshao/pytest",
"id": "584c3229d5f416c862a4eacbb434e53e6f334432",
"size": "18058",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/_pytest/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2619027"
}
],
"symlink_target": ""
}
|
import unittest
from spyne.test.protocol._test_dictdoc import TDictDocumentTest
from spyne.protocol.yaml import YamlDocument
from spyne import MethodContext
from spyne.application import Application
from spyne.decorator import srpc
from spyne.service import ServiceBase
from spyne.server import ServerBase
from spyne.protocol.yaml import yaml
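# PyYAML exposes dump/load rather than json-style dumps/loads; alias them so
# the shared TDictDocumentTest harness can treat yaml like a json-ish module.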
yaml.dumps = yaml.dump
yaml.loads = yaml.load
TestYamlDocument = TDictDocumentTest(yaml, YamlDocument, YamlDocument().out_kwargs)
class Test(unittest.TestCase):
def test_invalid_input(self):
class SomeService(ServiceBase):
@srpc()
def yay():
pass
app = Application([SomeService], 'tns',
in_protocol=YamlDocument(),
out_protocol=YamlDocument())
server = ServerBase(app)
initial_ctx = MethodContext(server, MethodContext.SERVER)
initial_ctx.in_string = [b'{']
ctx, = server.generate_contexts(initial_ctx)
assert ctx.in_error.faultcode == 'Client.YamlDecodeError'
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "b8303a30744015828a1784ab212a8d7e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 83,
"avg_line_length": 28.871794871794872,
"alnum_prop": 0.6669626998223801,
"repo_name": "deevarvar/myLab",
"id": "399d943df5f7569a25a370155c2c76780a15caf2",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baidu_code/soap_mockserver/spyne/test/protocol/test_yaml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "850"
},
{
"name": "C",
"bytes": "856044"
},
{
"name": "C++",
"bytes": "2988"
},
{
"name": "CSS",
"bytes": "6488"
},
{
"name": "DIGITAL Command Language",
"bytes": "282400"
},
{
"name": "HTML",
"bytes": "119253"
},
{
"name": "JavaScript",
"bytes": "445705"
},
{
"name": "Makefile",
"bytes": "20119"
},
{
"name": "Objective-C",
"bytes": "108"
},
{
"name": "PHP",
"bytes": "2502"
},
{
"name": "Python",
"bytes": "2305843"
},
{
"name": "Roff",
"bytes": "106"
},
{
"name": "Ruby",
"bytes": "478"
},
{
"name": "Shell",
"bytes": "68858"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import subprocess
from django.http import HttpResponse
from django.utils.timezone import now as timezone_now
from zerver.lib.test_helpers import (
most_recent_message,
most_recent_usermessage,
POSTRequestMock)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import (
get_display_recipient,
get_realm,
get_stream,
get_client,
Recipient,
UserProfile,
UserActivity,
Realm
)
from zerver.lib.actions import (
encode_email_address,
do_create_user
)
from zerver.lib.email_mirror import (
process_message, process_stream_message, ZulipEmailForwardError,
create_missed_message_address,
get_missed_message_token_from_address,
)
from zerver.lib.digest import handle_digest_email, enqueue_emails
from zerver.lib.send_email import FromAddress
from zerver.lib.notifications import (
handle_missedmessage_emails,
)
from zerver.management.commands import email_mirror
from email.mime.text import MIMEText
import datetime
import time
import re
import ujson
import mock
import os
import sys
from six.moves import cStringIO as StringIO
from django.conf import settings
from zerver.lib.str_utils import force_str
from typing import Any, Callable, Dict, Mapping, Union, Text
class TestEmailMirrorLibrary(ZulipTestCase):
def test_get_missed_message_token(self):
# type: () -> None
def get_token(address):
# type: (Text) -> Text
with self.settings(EMAIL_GATEWAY_PATTERN="%s@example.com"):
return get_missed_message_token_from_address(address)
address = 'mm' + ('x' * 32) + '@example.com'
token = get_token(address)
self.assertEqual(token, 'x' * 32)
# This next section was a bug at one point--we'd treat ordinary
# user addresses that happened to begin with "mm" as being
# the special mm+32chars tokens.
address = 'mmathers@example.com'
with self.assertRaises(ZulipEmailForwardError):
get_token(address)
# Now test the case where our address does not match the
# EMAIL_GATEWAY_PATTERN.
# This used to crash in an ugly way; we want to throw a proper
# exception.
address = 'alice@not-the-domain-we-were-expecting.com'
with self.assertRaises(ZulipEmailForwardError):
get_token(address)
class TestStreamEmailMessagesSuccess(ZulipTestCase):
def test_receive_stream_email_messages_success(self):
# type: () -> None
# build dummy messages for stream
# test valid incoming stream message is processed properly
user_profile = self.example_user('hamlet')
self.login(user_profile.email)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
incoming_valid_message = MIMEText('TestStreamEmailMessages Body') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestStreamEmailMessages Body")
self.assertEqual(get_display_recipient(message.recipient), stream.name)
self.assertEqual(message.topic_name(), incoming_valid_message['Subject'])
class TestStreamEmailMessagesEmptyBody(ZulipTestCase):
def test_receive_stream_email_messages_empty_body(self):
# type: () -> None
# build dummy messages for stream
# test message with empty body is not sent
user_profile = self.example_user('hamlet')
self.login(user_profile.email)
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
headers = {}
headers['Reply-To'] = self.example_email('othello')
# empty body
incoming_valid_message = MIMEText('') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
exception_message = ""
debug_info = {} # type: Dict[str, Any]
# process_message eats the exception & logs an error which can't be parsed here
# so calling process_stream_message directly
try:
process_stream_message(incoming_valid_message['To'],
incoming_valid_message['Subject'],
incoming_valid_message,
debug_info)
except ZulipEmailForwardError as e:
# empty body throws exception
exception_message = str(e)
self.assertEqual(exception_message, "Unable to find plaintext or HTML message body")
class TestMissedPersonalMessageEmailMessages(ZulipTestCase):
def test_receive_missed_personal_message_email_messages(self):
# type: () -> None
# build dummy messages for missed messages email reply
# have Hamlet send Othello a PM. Othello will reply via email
# Hamlet will receive the message.
email = self.example_email('hamlet')
self.login(email)
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": self.example_email('othello')})
self.assert_json_success(result)
user_profile = self.example_user('othello')
usermessage = most_recent_usermessage(user_profile)
# we don't want to send actual emails but we do need to create and store the
# token for looking up who did reply.
mm_address = create_missed_message_address(user_profile, usermessage.message)
incoming_valid_message = MIMEText('TestMissedMessageEmailMessages Body') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestMissedMessageEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('othello')
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# confirm that Hamlet got the message
user_profile = self.example_user('hamlet')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('othello'))
self.assertEqual(message.recipient.id, user_profile.id)
self.assertEqual(message.recipient.type, Recipient.PERSONAL)
class TestMissedHuddleMessageEmailMessages(ZulipTestCase):
def test_receive_missed_huddle_message_email_messages(self):
# type: () -> None
# build dummy messages for missed messages email reply
# have Othello send Iago and Cordelia a PM. Cordelia will reply via email
# Iago and Othello will receive the message.
email = self.example_email('othello')
self.login(email)
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": ujson.dumps([self.example_email('cordelia'),
self.example_email('iago')])})
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
usermessage = most_recent_usermessage(user_profile)
# we don't want to send actual emails but we do need to create and store the
# token for looking up who did reply.
mm_address = create_missed_message_address(user_profile, usermessage.message)
incoming_valid_message = MIMEText('TestMissedHuddleMessageEmailMessages Body') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestMissedHuddleMessageEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('cordelia')
incoming_valid_message['To'] = mm_address
incoming_valid_message['Reply-to'] = self.example_email('cordelia')
process_message(incoming_valid_message)
# Confirm Iago received the message.
user_profile = self.example_user('iago')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedHuddleMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('cordelia'))
self.assertEqual(message.recipient.type, Recipient.HUDDLE)
# Confirm Othello received the message.
user_profile = self.example_user('othello')
message = most_recent_message(user_profile)
self.assertEqual(message.content, "TestMissedHuddleMessageEmailMessages Body")
self.assertEqual(message.sender, self.example_user('cordelia'))
self.assertEqual(message.recipient.type, Recipient.HUDDLE)
class TestEmptyGatewaySetting(ZulipTestCase):
def test_missed_message(self):
# type: () -> None
email = self.example_email('othello')
self.login(email)
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": ujson.dumps([self.example_email('cordelia'),
self.example_email('iago')])})
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
usermessage = most_recent_usermessage(user_profile)
with self.settings(EMAIL_GATEWAY_PATTERN=''):
mm_address = create_missed_message_address(user_profile, usermessage.message)
self.assertEqual(mm_address, FromAddress.NOREPLY)
def test_encode_email_addr(self):
# type: () -> None
stream = get_stream("Denmark", get_realm("zulip"))
with self.settings(EMAIL_GATEWAY_PATTERN=''):
test_address = encode_email_address(stream)
self.assertEqual(test_address, '')
class TestDigestEmailMessages(ZulipTestCase):
@mock.patch('zerver.lib.digest.enough_traffic')
@mock.patch('zerver.lib.digest.send_future_email')
def test_receive_digest_email_messages(self, mock_send_future_email, mock_enough_traffic):
# type: (mock.MagicMock, mock.MagicMock) -> None
# build dummy messages for missed messages email reply
# have Hamlet send Othello a PM. Othello will reply via email
# Hamlet will receive the message.
email = self.example_email('hamlet')
self.login(email)
result = self.client_post("/json/messages", {"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": self.example_email('othello')})
self.assert_json_success(result)
user_profile = self.example_user('othello')
cutoff = time.mktime(datetime.datetime(year=2016, month=1, day=1).timetuple())
handle_digest_email(user_profile.id, cutoff)
self.assertEqual(mock_send_future_email.call_count, 1)
self.assertEqual(mock_send_future_email.call_args[1]['to_user_id'], user_profile.id)
@mock.patch('zerver.lib.digest.queue_digest_recipient')
@mock.patch('zerver.lib.digest.timezone_now')
def test_inactive_users_queued_for_digest(self, mock_django_timezone, mock_queue_digest_recipient):
# type: (mock.MagicMock, mock.MagicMock) -> None
cutoff = timezone_now()
# Test Tuesday
mock_django_timezone.return_value = datetime.datetime(year=2016, month=1, day=5)
# Mock user activity for each user
realm = get_realm("zulip")
for realm in Realm.objects.filter(deactivated=False, show_digest_email=True):
for user_profile in UserProfile.objects.filter(realm=realm):
UserActivity.objects.create(
last_visit=cutoff - datetime.timedelta(days=1),
user_profile=user_profile,
count=0,
client=get_client('test_client'))
# Check that inactive users are enqueued
enqueue_emails(cutoff)
self.assertEqual(mock_queue_digest_recipient.call_count, 13)
@mock.patch('zerver.lib.digest.queue_digest_recipient')
@mock.patch('zerver.lib.digest.timezone_now')
def test_active_users_not_enqueued(self, mock_django_timezone, mock_queue_digest_recipient):
# type: (mock.MagicMock, mock.MagicMock) -> None
cutoff = timezone_now()
# A Tuesday
mock_django_timezone.return_value = datetime.datetime(year=2016, month=1, day=5)
for realm in Realm.objects.filter(deactivated=False, show_digest_email=True):
for user_profile in UserProfile.objects.filter(realm=realm):
UserActivity.objects.create(
last_visit=cutoff + datetime.timedelta(days=1),
user_profile=user_profile,
count=0,
client=get_client('test_client'))
# Check that an active user is not enqueued
enqueue_emails(cutoff)
self.assertEqual(mock_queue_digest_recipient.call_count, 0)
@mock.patch('zerver.lib.digest.queue_digest_recipient')
@mock.patch('zerver.lib.digest.timezone_now')
def test_only_enqueue_on_valid_day(self, mock_django_timezone, mock_queue_digest_recipient):
# type: (mock.MagicMock, mock.MagicMock) -> None
# Not a Tuesday
mock_django_timezone.return_value = datetime.datetime(year=2016, month=1, day=6)
# Check that digests are not sent on days other than Tuesday.
cutoff = timezone_now()
enqueue_emails(cutoff)
self.assertEqual(mock_queue_digest_recipient.call_count, 0)
@mock.patch('zerver.lib.digest.queue_digest_recipient')
@mock.patch('zerver.lib.digest.timezone_now')
def test_no_email_digest_for_bots(self, mock_django_timezone, mock_queue_digest_recipient):
# type: (mock.MagicMock, mock.MagicMock) -> None
cutoff = timezone_now()
# A Tuesday
mock_django_timezone.return_value = datetime.datetime(year=2016, month=1, day=5)
bot = do_create_user('some_bot@example.com', 'password', get_realm('zulip'), 'some_bot', '',
bot_type=UserProfile.DEFAULT_BOT)
UserActivity.objects.create(
last_visit=cutoff - datetime.timedelta(days=1),
user_profile=bot,
count=0,
client=get_client('test_client'))
# Check that bots are not sent emails
enqueue_emails(cutoff)
for arg in mock_queue_digest_recipient.call_args_list:
user = arg[0][0]
self.assertNotEqual(user.id, bot.id)
class TestReplyExtraction(ZulipTestCase):
def test_reply_is_extracted_from_plain(self):
# type: () -> None
# build dummy messages for stream
# test valid incoming stream message is processed properly
email = self.example_email('hamlet')
self.login(email)
user_profile = self.example_user('hamlet')
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
text = """Reply
-----Original Message-----
Quote"""
incoming_valid_message = MIMEText(text) # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, "Reply")
def test_reply_is_extracted_from_html(self):
# type: () -> None
# build dummy messages for stream
# test valid incoming stream message is processed properly
email = self.example_email('hamlet')
self.login(email)
user_profile = self.example_user('hamlet')
self.subscribe(user_profile, "Denmark")
stream = get_stream("Denmark", user_profile.realm)
stream_to_address = encode_email_address(stream)
html = """
<html>
<body>
<p>Reply</p>
<blockquote>
<div>
On 11-Apr-2011, at 6:54 PM, Bob <bob@example.com> wrote:
</div>
<div>
Quote
</div>
</blockquote>
</body>
</html>
"""
incoming_valid_message = MIMEText(html, 'html') # type: Any # https://github.com/python/typeshed/issues/275
incoming_valid_message['Subject'] = 'TestStreamEmailMessages Subject'
incoming_valid_message['From'] = self.example_email('hamlet')
incoming_valid_message['To'] = stream_to_address
incoming_valid_message['Reply-to'] = self.example_email('othello')
process_message(incoming_valid_message)
# Hamlet is subscribed to this stream so should see the email message from Othello.
message = most_recent_message(user_profile)
self.assertEqual(message.content, 'Reply')
MAILS_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "fixtures", "email")
class TestScriptMTA(ZulipTestCase):
def test_success(self):
# type: () -> None
script = os.path.join(os.path.dirname(__file__),
'../../scripts/lib/email-mirror-postfix')
sender = self.example_email('hamlet')
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
template_path = os.path.join(MAILS_DIR, "simple.txt")
with open(template_path) as template_file:
mail_template = template_file.read()
mail = mail_template.format(stream_to_address=stream_to_address, sender=sender)
read_pipe, write_pipe = os.pipe()
os.write(write_pipe, mail.encode())
os.close(write_pipe)
subprocess.check_call(
[script, '-r', force_str(stream_to_address), '-s', settings.SHARED_SECRET, '-t'],
stdin=read_pipe)
def test_error_no_recipient(self):
# type: () -> None
script = os.path.join(os.path.dirname(__file__),
'../../scripts/lib/email-mirror-postfix')
sender = self.example_email('hamlet')
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
template_path = os.path.join(MAILS_DIR, "simple.txt")
with open(template_path) as template_file:
mail_template = template_file.read()
mail = mail_template.format(stream_to_address=stream_to_address, sender=sender)
read_pipe, write_pipe = os.pipe()
os.write(write_pipe, mail.encode())
os.close(write_pipe)
success_call = True
try:
subprocess.check_output([script, '-s', settings.SHARED_SECRET, '-t'],
stdin=read_pipe)
except subprocess.CalledProcessError as e:
self.assertEqual(
e.output,
b'5.1.1 Bad destination mailbox address: No missed message email address.\n'
)
self.assertEqual(e.returncode, 67)
success_call = False
self.assertFalse(success_call)
class TestEmailMirrorTornadoView(ZulipTestCase):
def send_private_message(self):
# type: () -> Text
email = self.example_email('othello')
self.login(email)
result = self.client_post(
"/json/messages",
{
"type": "private",
"content": "test_receive_missed_message_email_messages",
"client": "test suite",
"to": ujson.dumps([self.example_email('cordelia'), self.example_email('iago')])
})
self.assert_json_success(result)
user_profile = self.example_user('cordelia')
user_message = most_recent_usermessage(user_profile)
return create_missed_message_address(user_profile, user_message.message)
@mock.patch('zerver.lib.email_mirror.queue_json_publish')
def send_offline_message(self, to_address, sender, mock_queue_json_publish):
# type: (str, str, mock.Mock) -> HttpResponse
template_path = os.path.join(MAILS_DIR, "simple.txt")
with open(template_path) as template_file:
mail_template = template_file.read()
mail = mail_template.format(stream_to_address=to_address, sender=sender)
def check_queue_json_publish(queue_name, event, processor):
# type: (str, Union[Mapping[str, Any], str], Callable[[Any], None]) -> None
self.assertEqual(queue_name, "email_mirror")
self.assertEqual(event, {"rcpt_to": to_address, "message": mail})
mock_queue_json_publish.side_effect = check_queue_json_publish
request_data = {
"recipient": to_address,
"msg_text": mail
}
post_data = dict(
data=ujson.dumps(request_data),
secret=settings.SHARED_SECRET
)
return self.client_post('/email_mirror_message', post_data)
def test_success_stream(self):
# type: () -> None
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
result = self.send_offline_message(stream_to_address, self.example_email('hamlet'))
self.assert_json_success(result)
def test_error_to_stream_with_wrong_address(self):
# type: () -> None
stream = get_stream("Denmark", get_realm("zulip"))
stream_to_address = encode_email_address(stream)
stream_to_address = stream_to_address.replace("Denmark", "Wrong_stream")
result = self.send_offline_message(stream_to_address, self.example_email('hamlet'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: "
"Please use the address specified in your Streams page.")
def test_success_to_private(self):
# type: () -> None
mm_address = self.send_private_message()
result = self.send_offline_message(mm_address, self.example_email('cordelia'))
self.assert_json_success(result)
def test_using_mm_address_twice(self):
# type: () -> None
mm_address = self.send_private_message()
self.send_offline_message(mm_address, self.example_email('cordelia'))
result = self.send_offline_message(mm_address, self.example_email('cordelia'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: Bad or expired missed message address.")
def test_wrong_missed_email_private_message(self):
# type: () -> None
self.send_private_message()
mm_address = 'mm' + ('x' * 32) + '@testserver'
result = self.send_offline_message(mm_address, self.example_email('cordelia'))
self.assert_json_error(
result,
"5.1.1 Bad destination mailbox address: Bad or expired missed message address.")
|
{
"content_hash": "a089508054cb324aa6c62be312de4f96",
"timestamp": "",
"source": "github",
"line_count": 590,
"max_line_length": 147,
"avg_line_length": 42.383050847457625,
"alnum_prop": 0.6220906982324242,
"repo_name": "verma-varsha/zulip",
"id": "83a48fca9fc64684cc2fbc3b7d3abdb133f84fa1",
"size": "25030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_email_mirror.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "426706"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "489996"
},
{
"name": "JavaScript",
"bytes": "2151770"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "85239"
},
{
"name": "Python",
"bytes": "3780334"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "45134"
}
],
"symlink_target": ""
}
|
from tests.test_helper import *
class TestCreditCard(unittest.TestCase):
def test_create_adds_credit_card_to_existing_customer(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
})
self.assertTrue(result.is_success)
credit_card = result.credit_card
self.assertTrue(re.search("\A\w{4,5}\Z", credit_card.token) != None)
self.assertEquals("411111", credit_card.bin)
self.assertEquals("1111", credit_card.last_4)
self.assertEquals("05", credit_card.expiration_month)
self.assertEquals("2009", credit_card.expiration_year)
self.assertEquals("05/2009", credit_card.expiration_date)
self.assertEquals("John Doe", credit_card.cardholder_name)
def test_create_and_make_default(self):
customer = Customer.create().customer
card1 = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
}).credit_card
self.assertTrue(card1.default)
card2 = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe",
"options":
{"make_default": True}
}).credit_card
card1 = CreditCard.find(card1.token)
self.assertFalse(card1.default)
self.assertTrue(card2.default)
def test_create_with_expiration_month_and_year(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_month": "05",
"expiration_year": "2009",
"cvv": "100",
"cardholder_name": "John Doe"
})
self.assertTrue(result.is_success)
credit_card = result.credit_card
self.assertEquals("05/2009", credit_card.expiration_date)
def test_create_can_specify_the_desired_token(self):
token = str(random.randint(1, 1000000))
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"token": token
})
self.assertTrue(result.is_success)
credit_card = result.credit_card
self.assertEquals(token, credit_card.token)
def test_create_with_billing_address(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"billing_address": {
"street_address": "123 Abc Way",
"locality": "Chicago",
"region": "Illinois",
"postal_code": "60622",
"country_code_alpha2": "MX",
"country_code_alpha3": "MEX",
"country_code_numeric": "484",
"country_name": "Mexico"
}
})
self.assertTrue(result.is_success)
address = result.credit_card.billing_address
self.assertEquals("123 Abc Way", address.street_address)
self.assertEquals("Chicago", address.locality)
self.assertEquals("Illinois", address.region)
self.assertEquals("60622", address.postal_code)
self.assertEquals("MX", address.country_code_alpha2)
self.assertEquals("MEX", address.country_code_alpha3)
self.assertEquals("484", address.country_code_numeric)
self.assertEquals("Mexico", address.country_name)
def test_create_with_billing_address_id(self):
customer = Customer.create().customer
address = Address.create({
"customer_id": customer.id,
"street_address": "123 Abc Way"
}).address
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"billing_address_id": address.id
})
self.assertTrue(result.is_success)
billing_address = result.credit_card.billing_address
self.assertEquals(address.id, billing_address.id)
self.assertEquals("123 Abc Way", billing_address.street_address)
def test_create_without_billing_address_still_has_billing_address_method(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
})
self.assertTrue(result.is_success)
self.assertEquals(None, result.credit_card.billing_address)
def test_create_with_card_verification(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4000111111111115",
"expiration_date": "05/2009",
"options": {"verify_card": True}
})
self.assertFalse(result.is_success)
verification = result.credit_card_verification
self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, verification.status)
self.assertEquals("2000", verification.processor_response_code)
self.assertEquals("Do Not Honor", verification.processor_response_text)
self.assertEquals("I", verification.cvv_response_code)
self.assertEquals(None, verification.avs_error_response_code)
self.assertEquals("I", verification.avs_postal_code_response_code)
self.assertEquals("I", verification.avs_street_address_response_code)
self.assertEquals(TestHelper.default_merchant_account_id, verification.merchant_account_id)
def test_create_with_card_verification_and_non_default_merchant_account(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4000111111111115",
"expiration_date": "05/2009",
"options": {
"verification_merchant_account_id": TestHelper.non_default_merchant_account_id,
"verify_card": True
}
})
self.assertFalse(result.is_success)
verification = result.credit_card_verification
self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, verification.status)
self.assertEquals(None, verification.gateway_rejection_reason)
self.assertEquals(TestHelper.non_default_merchant_account_id, verification.merchant_account_id)
def test_verify_gateway_rejected_responds_to_processor_response_code(self):
old_merchant_id = Configuration.merchant_id
old_public_key = Configuration.public_key
old_private_key = Configuration.private_key
try:
Configuration.merchant_id = "processing_rules_merchant_id"
Configuration.public_key = "processing_rules_public_key"
Configuration.private_key = "processing_rules_private_key"
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"billing_address": {
"postal_code": "20000"
},
"options": {
"verify_card": True
}
})
self.assertFalse(result.is_success)
self.assertEquals('1000', result.credit_card_verification.processor_response_code)
self.assertEquals('Approved', result.credit_card_verification.processor_response_text)
finally:
Configuration.merchant_id = old_merchant_id
Configuration.public_key = old_public_key
Configuration.private_key = old_private_key
def test_expose_gateway_rejection_reason_on_verification(self):
old_merchant_id = Configuration.merchant_id
old_public_key = Configuration.public_key
old_private_key = Configuration.private_key
try:
Configuration.merchant_id = "processing_rules_merchant_id"
Configuration.public_key = "processing_rules_public_key"
Configuration.private_key = "processing_rules_private_key"
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "200",
"options": {
"verify_card": True
}
})
self.assertFalse(result.is_success)
verification = result.credit_card_verification
self.assertEquals(Transaction.GatewayRejectionReason.Cvv, verification.gateway_rejection_reason)
finally:
Configuration.merchant_id = old_merchant_id
Configuration.public_key = old_public_key
Configuration.private_key = old_private_key
def test_create_with_card_verification_set_to_false(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4000111111111115",
"expiration_date": "05/2009",
"options": {"verify_card": False}
})
self.assertTrue(result.is_success)
def test_create_with_invalid_options(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "invalid_date",
})
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.CreditCard.ExpirationDateIsInvalid, result.errors.for_object("credit_card").on("expiration_date")[0].code)
self.assertEquals("Expiration date is invalid.", result.message)
def test_create_with_invalid_country_codes(self):
customer = Customer.create().customer
result = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2012",
"billing_address": {
"country_code_alpha2": "ZZ",
"country_code_alpha3": "ZZZ",
"country_code_numeric": "000",
"country_name": "zzzzzzz"
}
})
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.Address.CountryCodeAlpha2IsNotAccepted,
result.errors.for_object("credit_card").for_object("billing_address").on("country_code_alpha2")[0].code
)
self.assertEquals(
ErrorCodes.Address.CountryCodeAlpha3IsNotAccepted,
result.errors.for_object("credit_card").for_object("billing_address").on("country_code_alpha3")[0].code
)
self.assertEquals(
ErrorCodes.Address.CountryCodeNumericIsNotAccepted,
result.errors.for_object("credit_card").for_object("billing_address").on("country_code_numeric")[0].code
)
self.assertEquals(
ErrorCodes.Address.CountryNameIsNotAccepted,
result.errors.for_object("credit_card").for_object("billing_address").on("country_name")[0].code
)
def test_update_with_valid_options(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
}).credit_card
result = CreditCard.update(credit_card.token, {
"number": "5105105105105100",
"expiration_date": "06/2010",
"cvv": "123",
"cardholder_name": "Jane Jones"
})
self.assertTrue(result.is_success)
credit_card = result.credit_card
self.assertTrue(re.search("\A\w{4,5}\Z", credit_card.token) != None)
self.assertEquals("510510", credit_card.bin)
self.assertEquals("5100", credit_card.last_4)
self.assertEquals("06", credit_card.expiration_month)
self.assertEquals("2010", credit_card.expiration_year)
self.assertEquals("06/2010", credit_card.expiration_date)
self.assertEquals("Jane Jones", credit_card.cardholder_name)
def test_update_billing_address_creates_new_by_default(self):
customer = Customer.create().customer
initial_credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"billing_address": {
"street_address": "123 Nigeria Ave",
}
}).credit_card
updated_credit_card = CreditCard.update(initial_credit_card.token, {
"billing_address": {
"region": "IL",
"country_code_alpha2": "NG",
"country_code_alpha3": "NGA",
"country_code_numeric": "566",
"country_name": "Nigeria"
}
}).credit_card
self.assertEquals("IL", updated_credit_card.billing_address.region)
self.assertEquals("NG", updated_credit_card.billing_address.country_code_alpha2)
self.assertEquals("NGA", updated_credit_card.billing_address.country_code_alpha3)
self.assertEquals("566", updated_credit_card.billing_address.country_code_numeric)
self.assertEquals("Nigeria", updated_credit_card.billing_address.country_name)
self.assertEquals(None, updated_credit_card.billing_address.street_address)
self.assertNotEquals(initial_credit_card.billing_address.id, updated_credit_card.billing_address.id)
def test_update_billing_address_when_update_existing_is_True(self):
customer = Customer.create().customer
initial_credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"billing_address": {
"street_address": "123 Nigeria Ave",
}
}).credit_card
updated_credit_card = CreditCard.update(initial_credit_card.token, {
"billing_address": {
"region": "IL",
"options": {
"update_existing": True
}
}
}).credit_card
self.assertEquals("IL", updated_credit_card.billing_address.region)
self.assertEquals("123 Nigeria Ave", updated_credit_card.billing_address.street_address)
self.assertEquals(initial_credit_card.billing_address.id, updated_credit_card.billing_address.id)
def test_update_and_make_default(self):
customer = Customer.create().customer
card1 = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
}).credit_card
card2 = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
}).credit_card
self.assertTrue(card1.default)
self.assertFalse(card2.default)
result = CreditCard.update(card2.token, {
"options": {
"make_default": True
}
})
self.assertFalse(CreditCard.find(card1.token).default)
self.assertTrue(CreditCard.find(card2.token).default)
def test_update_verifies_card_if_option_is_provided(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
}).credit_card
result = CreditCard.update(credit_card.token, {
"number": "4000111111111115",
"expiration_date": "06/2010",
"cvv": "123",
"cardholder_name": "Jane Jones",
"options": {"verify_card": True}
})
self.assertFalse(result.is_success)
self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, result.credit_card_verification.status)
def test_update_verifies_card_with_non_default_merchant_account(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
}).credit_card
result = CreditCard.update(credit_card.token, {
"number": "4000111111111115",
"expiration_date": "06/2010",
"cvv": "123",
"cardholder_name": "Jane Jones",
"options": {
"verification_merchant_account_id": TestHelper.non_default_merchant_account_id,
"verify_card": True
}
})
self.assertFalse(result.is_success)
self.assertEquals(CreditCardVerification.Status.ProcessorDeclined, result.credit_card_verification.status)
def test_update_billing_address(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"billing_address": {
"street_address": "321 Xyz Way",
"locality": "Chicago",
"region": "Illinois",
"postal_code": "60621"
}
}).credit_card
result = CreditCard.update(credit_card.token, {
"billing_address": {
"street_address": "123 Abc Way",
"locality": "Chicago",
"region": "Illinois",
"postal_code": "60622"
}
})
self.assertTrue(result.is_success)
address = result.credit_card.billing_address
self.assertEquals("123 Abc Way", address.street_address)
self.assertEquals("Chicago", address.locality)
self.assertEquals("Illinois", address.region)
self.assertEquals("60622", address.postal_code)
def test_update_returns_error_if_invalid(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009"
}).credit_card
result = CreditCard.update(credit_card.token, {
"expiration_date": "invalid_date"
})
self.assertFalse(result.is_success)
self.assertEquals(ErrorCodes.CreditCard.ExpirationDateIsInvalid, result.errors.for_object("credit_card").on("expiration_date")[0].code)
def test_delete_with_valid_token(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009"
}).credit_card
result = CreditCard.delete(credit_card.token)
self.assertTrue(result.is_success)
@raises(NotFoundError)
def test_delete_raises_error_when_deleting_twice(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009"
}).credit_card
CreditCard.delete(credit_card.token)
CreditCard.delete(credit_card.token)
@raises(NotFoundError)
def test_delete_with_invalid_token(self):
CreditCard.delete("notreal")
def test_find_with_valid_token(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009"
}).credit_card
found_credit_card = CreditCard.find(credit_card.token)
self.assertTrue(re.search(r"\A\w{4,5}\Z", found_credit_card.token) is not None)
self.assertEquals("411111", found_credit_card.bin)
self.assertEquals("1111", found_credit_card.last_4)
self.assertEquals("05", found_credit_card.expiration_month)
self.assertEquals("2009", found_credit_card.expiration_year)
self.assertEquals("05/2009", found_credit_card.expiration_date)
def test_find_returns_associated_subscriptions(self):
customer = Customer.create().customer
credit_card = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009"
}).credit_card
id = "id_" + str(random.randint(1, 1000000))
subscription = Subscription.create({
"id": id,
"plan_id": "integration_trialless_plan",
"payment_method_token": credit_card.token,
"price": Decimal("1.00")
}).subscription
found_credit_card = CreditCard.find(credit_card.token)
self.assertEquals(id, found_credit_card.subscriptions[0].id)
self.assertEquals(Decimal("1.00"), found_credit_card.subscriptions[0].price)
self.assertEquals(credit_card.token, found_credit_card.subscriptions[0].payment_method_token)
def test_find_with_invalid_token(self):
try:
CreditCard.find("bad_token")
self.fail("expected a not-found error for a bad token")
except Exception as e:
self.assertEquals("payment method with token bad_token not found", str(e))
def test_create_from_transparent_redirect(self):
customer = Customer.create().customer
tr_data = {
"credit_card": {
"customer_id": customer.id
}
}
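# Transparent redirect flow: tr_data is generated and signed server-side,
# the card fields are posted by the (simulated) browser form, and the
# result is then confirmed from the redirect's query string.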
post_params = {
"tr_data": CreditCard.tr_data_for_create(tr_data, "http://example.com/path?foo=bar"),
"credit_card[cardholder_name]": "Card Holder",
"credit_card[number]": "4111111111111111",
"credit_card[expiration_date]": "05/2012",
"credit_card[billing_address][country_code_alpha2]": "MX",
"credit_card[billing_address][country_code_alpha3]": "MEX",
"credit_card[billing_address][country_code_numeric]": "484",
"credit_card[billing_address][country_name]": "Mexico",
}
query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_create_url())
result = CreditCard.confirm_transparent_redirect(query_string)
self.assertTrue(result.is_success)
credit_card = result.credit_card
self.assertEquals("411111", credit_card.bin)
self.assertEquals("1111", credit_card.last_4)
self.assertEquals("05", credit_card.expiration_month)
self.assertEquals("2012", credit_card.expiration_year)
self.assertEquals(customer.id, credit_card.customer_id)
self.assertEquals("MX", credit_card.billing_address.country_code_alpha2)
self.assertEquals("MEX", credit_card.billing_address.country_code_alpha3)
self.assertEquals("484", credit_card.billing_address.country_code_numeric)
self.assertEquals("Mexico", credit_card.billing_address.country_name)
def test_create_from_transparent_redirect_and_make_default(self):
customer = Customer.create().customer
card1 = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
}).credit_card
self.assertTrue(card1.default)
tr_data = {
"credit_card": {
"customer_id": customer.id,
"options": {
"make_default": True
}
}
}
post_params = {
"tr_data": CreditCard.tr_data_for_create(tr_data, "http://example.com/path?foo=bar"),
"credit_card[cardholder_name]": "Card Holder",
"credit_card[number]": "4111111111111111",
"credit_card[expiration_date]": "05/2012",
}
query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_create_url())
card2 = CreditCard.confirm_transparent_redirect(query_string).credit_card
self.assertFalse(CreditCard.find(card1.token).default)
self.assertTrue(card2.default)
def test_create_from_transparent_redirect_with_error_result(self):
customer = Customer.create().customer
tr_data = {
"credit_card": {
"customer_id": customer.id
}
}
post_params = {
"tr_data": CreditCard.tr_data_for_create(tr_data, "http://example.com/path"),
"credit_card[cardholder_name]": "Card Holder",
"credit_card[number]": "eleventy",
"credit_card[expiration_date]": "y2k"
}
query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_create_url())
result = CreditCard.confirm_transparent_redirect(query_string)
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.CreditCard.NumberHasInvalidLength,
result.errors.for_object("credit_card").on("number")[0].code
)
self.assertEquals(
ErrorCodes.CreditCard.ExpirationDateIsInvalid,
result.errors.for_object("credit_card").on("expiration_date")[0].code
)
def test_update_from_transparent_redirect_with_successful_result(self):
old_token = str(random.randint(1, 1000000))
new_token = str(random.randint(1, 1000000))
credit_card = Customer.create({
"credit_card": {
"cardholder_name": "Old Cardholder Name",
"number": "4111111111111111",
"expiration_date": "05/2012",
"token": old_token
}
}).customer.credit_cards[0]
tr_data = {
"payment_method_token": old_token,
"credit_card": {
"token": new_token
}
}
post_params = {
"tr_data": CreditCard.tr_data_for_update(tr_data, "http://example.com/path"),
"credit_card[cardholder_name]": "New Cardholder Name",
"credit_card[expiration_date]": "05/2014"
}
query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_update_url())
result = CreditCard.confirm_transparent_redirect(query_string)
self.assertTrue(result.is_success)
credit_card = result.credit_card
self.assertEquals(new_token, credit_card.token)
self.assertEquals("411111", credit_card.bin)
self.assertEquals("1111", credit_card.last_4)
self.assertEquals("05", credit_card.expiration_month)
self.assertEquals("2014", credit_card.expiration_year)
def test_update_from_transparent_redirect_and_make_default(self):
customer = Customer.create({
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2012"
}
}).customer
card1 = customer.credit_cards[0]
card2 = CreditCard.create({
"customer_id": customer.id,
"number": "4111111111111111",
"expiration_date": "05/2009",
}).credit_card
self.assertTrue(card1.default)
self.assertFalse(card2.default)
tr_data = {
"payment_method_token": card2.token,
"credit_card": {
"options": {
"make_default": True
}
}
}
post_params = {
"tr_data": CreditCard.tr_data_for_update(tr_data, "http://example.com/path"),
"credit_card[cardholder_name]": "New Cardholder Name",
"credit_card[expiration_date]": "05/2014"
}
query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_update_url())
result = CreditCard.confirm_transparent_redirect(query_string)
self.assertFalse(CreditCard.find(card1.token).default)
self.assertTrue(CreditCard.find(card2.token).default)
def test_update_from_transparent_redirect_and_update_existing_billing_address(self):
customer = Customer.create({
"credit_card": {
"number": "4111111111111111",
"expiration_date": "05/2012",
"billing_address": {
"street_address": "123 Old St",
"locality": "Chicago",
"region": "Illinois",
"postal_code": "60621"
}
}
}).customer
card = customer.credit_cards[0]
tr_data = {
"payment_method_token": card.token,
"credit_card": {
"billing_address": {
"street_address": "123 New St",
"locality": "Columbus",
"region": "Ohio",
"postal_code": "43215",
"options": {
"update_existing": True
}
}
}
}
post_params = {
"tr_data": CreditCard.tr_data_for_update(tr_data, "http://example.com/path")
}
query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_update_url())
result = CreditCard.confirm_transparent_redirect(query_string)
self.assertEquals(1, len(Customer.find(customer.id).addresses))
updated_card = CreditCard.find(card.token)
self.assertEquals("123 New St", updated_card.billing_address.street_address)
self.assertEquals("Columbus", updated_card.billing_address.locality)
self.assertEquals("Ohio", updated_card.billing_address.region)
self.assertEquals("43215", updated_card.billing_address.postal_code)
def test_update_from_transparent_redirect_with_error_result(self):
old_token = str(random.randint(1, 1000000))
credit_card = Customer.create({
"credit_card": {
"cardholder_name": "Old Cardholder Name",
"number": "4111111111111111",
"expiration_date": "05/2012",
"token": old_token
}
}).customer.credit_cards[0]
tr_data = {
"payment_method_token": old_token,
"credit_card": {
"token": "bad token"
}
}
post_params = {
"tr_data": CreditCard.tr_data_for_update(tr_data, "http://example.com/path"),
"credit_card[cardholder_name]": "New Cardholder Name",
"credit_card[expiration_date]": "05/2014"
}
query_string = TestHelper.simulate_tr_form_post(post_params, CreditCard.transparent_redirect_update_url())
result = CreditCard.confirm_transparent_redirect(query_string)
self.assertFalse(result.is_success)
self.assertEquals(
ErrorCodes.CreditCard.TokenInvalid,
result.errors.for_object("credit_card").on("token")[0].code
)
def test_expired_can_iterate_over_all_items(self):
customer_id = Customer.all().first.id
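# Pad the vault with enough expired cards that the collection grows past
# 100 items, so the iteration below exercises more than one page of results.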
for i in range(110 - CreditCard.expired().maximum_size):
CreditCard.create({
"customer_id": customer_id,
"number": "4111111111111111",
"expiration_date": "05/2009",
"cvv": "100",
"cardholder_name": "John Doe"
})
collection = CreditCard.expired()
self.assertTrue(collection.maximum_size > 100)
credit_card_tokens = [credit_card.token for credit_card in collection.items]
self.assertEquals(collection.maximum_size, len(TestHelper.unique(credit_card_tokens)))
self.assertEquals(set([True]), TestHelper.unique([credit_card.is_expired for credit_card in collection.items]))
def test_expiring_between(self):
customer_id = Customer.all().first.id
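# Same padding trick: make sure the expiring_between collection holds more
# than 100 cards before iterating over it.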
for i in range(110 - CreditCard.expiring_between(date(2010, 1, 1), date(2010, 12, 31)).maximum_size):
CreditCard.create({
"customer_id": customer_id,
"number": "4111111111111111",
"expiration_date": "05/2010",
"cvv": "100",
"cardholder_name": "John Doe"
})
collection = CreditCard.expiring_between(date(2010, 1, 1), date(2010, 12, 31))
self.assertTrue(collection.maximum_size > 100)
credit_card_tokens = [credit_card.token for credit_card in collection.items]
self.assertEquals(collection.maximum_size, len(TestHelper.unique(credit_card_tokens)))
self.assertEquals(set(['2010']), TestHelper.unique([credit_card.expiration_year for credit_card in collection.items]))
|
{
"content_hash": "e539d1b33d26982f0314876571c9956f",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 143,
"avg_line_length": 40.36504161712247,
"alnum_prop": 0.588770730845141,
"repo_name": "eldarion/braintree_python",
"id": "ebba18528a7542752f5d4766273452cab2d974f0",
"size": "33947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_credit_card.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "394920"
},
{
"name": "Ruby",
"bytes": "3033"
}
],
"symlink_target": ""
}
|
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.utils.test import get_crawler
from tests.spiders import FollowAllSpider, ItemSpider, ErrorSpider
from tests.mockserver import MockServer
class TestCloseSpider(TestCase):
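# MockServer's context-manager protocol is driven by hand here so the
# server's lifetime spans each test via setUp/tearDown rather than a
# with-block.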
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_closespider_itemcount(self):
close_on = 5
crawler = get_crawler(ItemSpider, {'CLOSESPIDER_ITEMCOUNT': close_on})
yield crawler.crawl()
reason = crawler.spider.meta['close_reason']
self.assertEqual(reason, 'closespider_itemcount')
itemcount = crawler.stats.get_value('item_scraped_count')
self.assertTrue(itemcount >= close_on)
@defer.inlineCallbacks
def test_closespider_pagecount(self):
close_on = 5
crawler = get_crawler(FollowAllSpider, {'CLOSESPIDER_PAGECOUNT': close_on})
yield crawler.crawl()
reason = crawler.spider.meta['close_reason']
self.assertEqual(reason, 'closespider_pagecount')
pagecount = crawler.stats.get_value('response_received_count')
self.assertTrue(pagecount >= close_on)
@defer.inlineCallbacks
def test_closespider_errorcount(self):
close_on = 5
crawler = get_crawler(ErrorSpider, {'CLOSESPIDER_ERRORCOUNT': close_on})
yield crawler.crawl(total=1000000)
self.flushLoggedErrors(crawler.spider.exception_cls)
reason = crawler.spider.meta['close_reason']
self.assertEqual(reason, 'closespider_errorcount')
key = 'spider_exceptions/{name}'\
.format(name=crawler.spider.exception_cls.__name__)
errorcount = crawler.stats.get_value(key)
self.assertTrue(errorcount >= close_on)
@defer.inlineCallbacks
def test_closespider_timeout(self):
close_on = 0.1
crawler = get_crawler(FollowAllSpider, {'CLOSESPIDER_TIMEOUT': close_on})
yield crawler.crawl(total=1000000)
reason = crawler.spider.meta['close_reason']
self.assertEqual(reason, 'closespider_timeout')
stats = crawler.stats
start = stats.get_value('start_time')
stop = stats.get_value('finish_time')
diff = stop - start
total_seconds = diff.seconds + diff.microseconds
self.assertTrue(total_seconds >= close_on)
|
{
"content_hash": "c6831f03473dbf28316b3ae084f85821",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 83,
"avg_line_length": 39.903225806451616,
"alnum_prop": 0.6709781729991916,
"repo_name": "jorik041/scrapy",
"id": "1700a861ea6f85baf33a0dde7d25a6b1e4496e1c",
"size": "2474",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "tests/test_closespider.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Groff",
"bytes": "2008"
},
{
"name": "HTML",
"bytes": "1809"
},
{
"name": "Python",
"bytes": "1216572"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
}
|
import re
import subprocess
from oslo.config import cfg
import tempest.cli
from tempest.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SimpleReadOnlyGlanceClientTest(tempest.cli.ClientTestBase):
"""Basic, read-only tests for Glance CLI client.
Checks return values and output of read-only commands.
These tests do not presume any content, nor do they create
their own. They only verify the structure of output if present.
"""
def test_glance_fake_action(self):
self.assertRaises(subprocess.CalledProcessError,
self.glance,
'this-does-not-exist')
def test_glance_image_list(self):
out = self.glance('image-list')
endpoints = self.parser.listing(out)
self.assertTableStruct(endpoints, [
'ID', 'Name', 'Disk Format', 'Container Format',
'Size', 'Status'])
def test_glance_member_list(self):
tenant_name = '--tenant-id %s' % self.identity.admin_tenant_name
out = self.glance('member-list',
params=tenant_name)
endpoints = self.parser.listing(out)
self.assertTableStruct(endpoints,
['Image ID', 'Member ID', 'Can Share'])
def test_glance_help(self):
help_text = self.glance('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: glance')
commands = []
cmds_start = lines.index('Positional arguments:')
cmds_end = lines.index('Optional arguments:')
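# Collect the four-space-indented subcommand names listed between the
# 'Positional arguments:' and 'Optional arguments:' sections of the help.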
command_pattern = re.compile(r'^ {4}([a-z0-9\-\_]+)')
for line in lines[cmds_start:cmds_end]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('image-create', 'image-delete', 'help',
'image-download', 'image-show', 'image-update',
'member-add', 'member-create', 'member-delete',
'member-list'))
self.assertFalse(wanted_commands - commands)
# Optional arguments:
def test_glance_version(self):
self.glance('', flags='--version')
def test_glance_debug_list(self):
self.glance('image-list', flags='--debug')
def test_glance_timeout(self):
self.glance('image-list', flags='--timeout %d' % CONF.cli.timeout)
|
{
"content_hash": "8db6e73ebd758233b3ceea5c43b3317b",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 34.861111111111114,
"alnum_prop": 0.5920318725099601,
"repo_name": "adkerr/tempest",
"id": "a5a229c196eb170c72fd9f16adace78815ae5e18",
"size": "3191",
"binary": false,
"copies": "3",
"ref": "refs/heads/netapp/akerr",
"path": "tempest/cli/simple_read_only/test_glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1855736"
},
{
"name": "Shell",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
## batteries
import os
import sys
import pytest
## 3rd party
import numpy as np
import pandas as pd
## package
from SIPSim import Utils
from SIPSim.Commands import OTU_PCR as OTU_PCR_CMD
# data dir
test_dir = os.path.join(os.path.dirname(__file__))
data_dir = os.path.join(test_dir, 'data')
# tests
def test_cmd():
otu_file = os.path.join(data_dir, 'ampFrag_OTU_n2_abs1e9.txt')
args = [otu_file]
OTU_PCR_CMD.opt_parse(args)
|
{
"content_hash": "7c230e6229ddedb3fe3a1cc850502e94",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 21.681818181818183,
"alnum_prop": 0.7044025157232704,
"repo_name": "nick-youngblut/SIPSim",
"id": "63801b217cea6b4a5526dcb118d298861feb85e9",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_OTU_PCR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3184"
},
{
"name": "HTML",
"bytes": "237223"
},
{
"name": "Jupyter Notebook",
"bytes": "195945288"
},
{
"name": "Python",
"bytes": "581527"
}
],
"symlink_target": ""
}
|
import sys
class RedirectStdoutTo:
def __init__(self, out_new):
self.out_new = out_new
def __enter__(self):
self.out_old = sys.stdout
sys.stdout = self.out_new
def __exit__(self, *args):
sys.stdout = self.out_old
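# Note: Python 3.4+ ships an equivalent helper in the standard library,
# contextlib.redirect_stdout; a minimal sketch of the same idea:
#
#   import contextlib
#   with open('out.log', mode='w', encoding='utf-8') as log_file, \
#           contextlib.redirect_stdout(log_file):
#       print('this goes to out.log')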
print('A')
with open('out.log', mode='w', encoding='utf-8') as a_file, RedirectStdoutTo(a_file):
print('B')
print('C')
|
{
"content_hash": "9c1b74e0ffeac51bb8276aafc0c1e5b8",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 85,
"avg_line_length": 19.35,
"alnum_prop": 0.5788113695090439,
"repo_name": "ctasims/Dive-Into-Python-3",
"id": "84085f66ead6d806617daa969306c3c5291059b9",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch11-files/stdout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "0"
},
{
"name": "Python",
"bytes": "1053014"
}
],
"symlink_target": ""
}
|
from lib.samlib import samlib
client = samlib()
resp_login = client.login()
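# "Ogiltig login" is Swedish for "invalid login"; the service returns that
# phrase (or an empty body) when the credentials are rejected.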
if "Ogiltig login" in resp_login or resp_login == "":
exit("Wrong username/password")
else:
print "Logged in successfully"
client.menu()
|
{
"content_hash": "777c0a08e1016d17164e871b6dc1dbb7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 53,
"avg_line_length": 21.8,
"alnum_prop": 0.7201834862385321,
"repo_name": "flawwan/Samarbeta-Helper",
"id": "734b8bdc895a5e72917e29ce3fb93561d210c35c",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samarbeta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11059"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[4])
self.assertEqual(63.0, sess.run(v))
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEquals(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEquals(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
{'a': a.name, 'c': c, 'b': b}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
{'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(DEFG(d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={'a': a, 'c': c, 'b': b}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({'d': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {'a': a.name, 'c': c, 'b': b}})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(
sp, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(
sp2, {sp: sparse_tensor.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32,
shape=shape,
name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
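# Gate every worker on one event so all 100 threads evaluate the same
# tensor at roughly the same time, exercising concurrent use of a Session.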
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
    # This test currently does not link in any GPU kernels, which is
    # why placing on /gpu:0 is invalid. If GPU kernels are ever added
    # to this test, a different op/device combination should be chosen.
with ops.device('/gpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
    # This test currently does not link in any GPU kernels, which is
    # why placing on /gpu:0 is invalid. If GPU kernels are ever added
    # to this test, a different op/device combination should be chosen.
with ops.device('/gpu:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
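        # Include zero-sized shapes to exercise empty-tensor feeds.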
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={feed_t: np_array}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={feed_t: np_array}))
# Also check that we can get both back.
out_v, feed_v = sess.run([out_t, feed_t],
feed_dict={feed_t: np_array})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
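  # Feeding an int64 value that exceeds int32 range must raise rather
  # than silently truncate.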
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
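  # Embedded NUL bytes must round-trip through a string feed unchanged.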
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01',
u'\u26a3 unicode', u'\U0001f60e deal with it']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
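  # partial_run_setup declares every fetch and feed up front; the feeds
  # can then be supplied incrementally across partial_run calls that
  # share the same handle.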
def runTestPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def runTestPartialRunIncomplete(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def runTestConcurrentPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def runTestManyPartialRun(self, sess):
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.multiply(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def runTestRunAndPartialRun(self, sess):
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = sess.run([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def runTestPartialRunMissingPlaceholderFeedException(self, sess):
x = array_ops.placeholder(dtypes.float32, shape=())
fetches = [x * 2, x * 3]
handle = sess.partial_run_setup(fetches=fetches, feeds=[])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'You must feed a value for placeholder'):
sess.partial_run(handle, fetches[0])
def testPartialRunDirect(self):
self.runTestPartialRun(session.Session())
def testPartialRunIncompleteDirect(self):
self.runTestPartialRunIncomplete(session.Session())
def testConcurrentPartialRunDirect(self):
self.runTestConcurrentPartialRun(session.Session())
def testManyPartialRunDirect(self):
self.runTestManyPartialRun(session.Session())
def testRunAndPartialRunDirect(self):
self.runTestRunAndPartialRun(session.Session())
def testPartialRunMissingPlaceholderFeedExceptionDirect(self):
self.runTestPartialRunMissingPlaceholderFeedException(session.Session())
def testPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRun(session.Session(server.target))
def testPartialRunIncompleteDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRunIncomplete(session.Session(server.target))
def testConcurrentPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestConcurrentPartialRun(session.Session(server.target))
def testManyPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestManyPartialRun(session.Session(server.target))
def testRunAndPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.runTestRunAndPartialRun(session.Session(server.target))
def testPartialRunMissingPlaceholderFeedExceptionDist(self):
server = server_lib.Server.create_local_server()
self.runTestPartialRunMissingPlaceholderFeedException(
session.Session(server.target))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
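  # step_stats should be populated only when FULL_TRACE options are
  # passed explicitly for that particular run.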
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
        # All four combinations of options and run_metadata are valid.
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(constant_op.constant(1.0), options=None,
run_metadata=run_metadata)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=None)
        self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(ValueError, 'may not be fed'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
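  # build_cost_model=100 should yield a cost graph exactly once, on the
  # 100th run (step index 99).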
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/gpu:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(d, feed_dict={a: 1.0},
options=run_options, run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(
capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
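    # fetch_fn expands a SquaredTensor into real fetches plus a function
    # that rebuilds the result; the feed functions map it onto feedable
    # tensors. Registering the same type twice must fail.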
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor,
fetch_fn, feed_fn1, feed_fn2)
with self.test_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
      squared_eval = sess.run(
          squared_tensor, feed_dict={squared_tensor: np1 * np1})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
if __name__ == '__main__':
googletest.main()
|
{
"content_hash": "722480de0e6d1add1fa366e7b6b60594",
"timestamp": "",
"source": "github",
"line_count": 1630,
"max_line_length": 80,
"avg_line_length": 40.26503067484663,
"alnum_prop": 0.6225926377376889,
"repo_name": "chenjun0210/tensorflow",
"id": "13c2736609075a1a100440cfdae968d0a21f5d81",
"size": "66322",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/client/session_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "177254"
},
{
"name": "C++",
"bytes": "22819759"
},
{
"name": "CMake",
"bytes": "140276"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "794578"
},
{
"name": "HTML",
"bytes": "595822"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "13906"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37240"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "210350"
},
{
"name": "Python",
"bytes": "20069220"
},
{
"name": "Shell",
"bytes": "331908"
},
{
"name": "TypeScript",
"bytes": "790493"
}
],
"symlink_target": ""
}
|
from setuptools import setup
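# Single-module distribution: gfm converts GitHub-Flavored Markdown to
# HTML on top of the markdown package (see install_requires below).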
setup(
name='gfm',
version='0.0.3',
description='Convert GitHub-Flavored Markdown to HTML',
long_description=open('README.rst').read(),
url='http://github.com/stewart/gfm/',
license=open('LICENSE').read(),
author='Andrew Stewart',
author_email='andrew@stwrt.ca',
py_modules=['gfm'],
include_package_data=True,
install_requires=['markdown'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
{
"content_hash": "abf5f0f7ab8afdcfe9f1b6771e4b357c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6132478632478633,
"repo_name": "stewart/gfm",
"id": "82e556b4ffc309cd8a97499d60eb9313054320d0",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4019"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
__version__ = None
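# Execute __version.py to populate __version__ without importing the
# package itself.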
with open(path.join(here, 'cpc_api', '__version.py')) as __version:
exec(__version.read())
assert __version__ is not None
with open(path.join(here, 'README.md')) as readme:
LONG_DESC = readme.read()
# On Python 2, decode the long description to unicode; on Python 3,
# str has no .decode(), so the AttributeError below is ignored.
try:
LONG_DESC = LONG_DESC.decode('utf-8')
except AttributeError:
    # Nothing to do: on Python 3, LONG_DESC is already a str.
pass
setup(
name='cpc_api',
version=__version__,
description='Python api for nosdeputes.fr and nossenateurs.fr',
long_description=LONG_DESC,
license="MIT",
url='https://github.com/regardscitoyens/cpc-api',
author='Regards Citoyens',
author_email='contact@regardscitoyens.org',
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
keywords='api nosdeputes.fr nossenateurs.fr',
packages=['cpc_api'],
install_requires=['requests', 'fuzzywuzzy'],
)
|
{
"content_hash": "d07abf5c99aeefb13a7baf64ab642a9a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 70,
"avg_line_length": 28.26530612244898,
"alnum_prop": 0.6418772563176895,
"repo_name": "regardscitoyens/cpc-api",
"id": "37cf8b6371c68d20e082fbb0f49258f78c1be1fc",
"size": "1412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5066"
}
],
"symlink_target": ""
}
|