text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""This example gets all packages in progress.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
    """Print every package that is currently in progress.

    Args:
        client: an initialized ad_manager.AdManagerClient.
    """
    # Initialize appropriate service.
    pkg_service = client.GetService('PackageService', version='v201805')

    # Create a statement to select packages.
    statement = (ad_manager.StatementBuilder(version='v201805')
                 .Where('status = :status')
                 .WithBindVariable('status', 'IN_PROGRESS'))

    # Retrieve a small amount of packages at a time, paging
    # through until all packages have been retrieved.
    while True:
        response = pkg_service.getPackagesByStatement(statement.ToStatement())
        if 'results' in response and len(response['results']):
            for pkg in response['results']:
                # Print out some information for each package.
                print(
                    'Package with ID "%d", name "%s", and proposal ID "%d" was found.\n'
                    % (pkg['id'], pkg['name'], pkg['proposalId']))
            # Advance to the next page of results.
            statement.offset += statement.limit
        else:
            break

    # BUG FIX: this line was a Python 2 print *statement* in a file that
    # otherwise uses Python 3 print() calls — a SyntaxError on Python 3.
    print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
    # Initialize client object.
    ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
    main(ad_manager_client)
|
{
"content_hash": "d470593fb53e371fc8e1a1ff8d60bef1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 80,
"avg_line_length": 35.25,
"alnum_prop": 0.6713947990543735,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "1093281b91ce95fe081be729a6e704a1fd59a2b6",
"size": "1890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ad_manager/v201805/package_service/get_in_progress_packages.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
}
|
import time
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from engine.models import WebPage
from engine.crawler import get_page
from engine.forms import WebPageForm
from engine.tasks import crawler_v2
import hashlib
def simple_crawl(page):
    """Fetch a single WebPage, register its outbound links and save the result.

    Args:
        page: a WebPage model instance; fetched only when page.crawled is falsy.

    NOTE(review): the source had its indentation stripped; the nesting below
    was reconstructed from the logic — confirm against upstream history.
    """
    # Crawl URL
    # page, created = WebPage.objects.get_or_create(url=page_url)
    print('hello')
    print('URL', page.url)
    if not page.crawled:  # idiom fix: was `page.crawled == False`
        results = get_page(page.url)
        print('CRAWLING: ', page.url)
        print('CRAWLING STATUS: ', results[1])
        # Defaults recorded even when the fetch failed.
        page_dict = {
            'raw_html': None,
            'status': 0,
            'content_type': None,
        }
        if results[0]:
            resp = results[2]
            if 'html' in resp['content_type']:
                # Ensure every outbound link exists as a WebPage row so it
                # can be crawled later.
                for out_link in resp['out_links']:
                    page_out, created_out = WebPage.objects.get_or_create(url=out_link)
                page_dict['raw_html'] = resp['raw']
            print('CRAWLING TYPE: ', resp['content_type'])
            page_dict['content_type'] = resp['content_type']
        # Record the HTTP status whether or not the fetch succeeded.
        page_dict['status'] = results[1]
        page_form = WebPageForm(data=page_dict, instance=page)
        if page_form.is_valid():
            page = page_form.save()
            print('## Saved Page')
        else:
            print('## Error saving page')
    else:
        print('already crawled')
class Command(BaseCommand):
    """Queue a crawl task for every WebPage that has not been crawled yet."""

    def handle(self, *args, **options):
        # PERF FIX: the original ran `len(WebPage.objects.filter(status=0))`
        # (loads every row just to count) and then issued a second identical
        # query. Reuse one queryset and let the database count.
        pages = WebPage.objects.filter(status=0)
        print('Uncrawled pages: %d' % pages.count())
        for i, page in enumerate(pages, start=1):
            print('page: %d' % i)
            crawler_v2.delay(page.url)
        # 19:06 -> 41889
|
{
"content_hash": "033ed78cc3fc4364b71a0adf03dc317c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 84,
"avg_line_length": 26.319444444444443,
"alnum_prop": 0.5641160949868074,
"repo_name": "tanguy-s/ucl-search-engine",
"id": "af8f64c700619c541c2a4342f1ad41dfe3766f9f",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engine/management/commands/add_uncrawled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106515"
},
{
"name": "Shell",
"bytes": "10385"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import argparse
import logging
import re
from streamlink.compat import parse_qsl, is_py2
from streamlink.plugin import Plugin, PluginError, PluginArguments, PluginArgument
from streamlink.plugin.api import validate, useragents
from streamlink.plugin.api.utils import itertags, parse_query
from streamlink.stream import HTTPStream, HLSStream
from streamlink.stream.ffmpegmux import MuxedStream
from streamlink.utils import parse_json, search_dict
from streamlink.utils.encoding import maybe_decode
log = logging.getLogger(__name__)
def parse_stream_map(stream_map):
    """Split a comma-separated stream map and parse each entry as a query string."""
    parsed = []
    if stream_map:
        for entry in stream_map.split(","):
            parsed.append(parse_query(entry))
    return parsed
def parse_fmt_list(formatsmap):
    """Parse a fmt_list string ("itag/WIDTHxHEIGHT/...") into {itag: "HEIGHTp"}.

    Returns an empty dict for a falsy input.
    """
    formats = {}
    if not formatsmap:
        return formats
    # Renamed loop variable: `format` shadowed the builtin.
    for fmt in formatsmap.split(","):
        fields = fmt.split("/")
        (_, height) = fields[1].split("x")
        formats[int(fields[0])] = "{0}p".format(height)
    return formats
# Schema for the parsed get_video_info "config" query response.
_config_schema = validate.Schema(
    {
        # itag -> "<height>p" labels (see parse_fmt_list).
        validate.optional("fmt_list"): validate.all(
            validate.text,
            validate.transform(parse_fmt_list)
        ),
        # Progressive (audio+video combined) streams.
        validate.optional("url_encoded_fmt_stream_map"): validate.all(
            validate.text,
            validate.transform(parse_stream_map),
            [{
                "itag": validate.all(
                    validate.text,
                    validate.transform(int)
                ),
                "quality": validate.text,
                "url": validate.url(scheme="http"),
                # "s" present -> stream is signature-protected.
                validate.optional("s"): validate.text,
                validate.optional("stereo3d"): validate.all(
                    validate.text,
                    validate.transform(int),
                    validate.transform(bool)
                ),
            }]
        ),
        # Adaptive (DASH, audio-only or video-only) streams.
        validate.optional("adaptive_fmts"): validate.all(
            validate.text,
            validate.transform(parse_stream_map),
            [{
                validate.optional("s"): validate.text,
                # "audio/mp4;codecs=..." -> ["audio", "mp4"]
                "type": validate.all(
                    validate.text,
                    validate.transform(lambda t: t.split(";")[0].split("/")),
                    [validate.text, validate.text]
                ),
                "url": validate.all(
                    validate.url(scheme="http")
                )
            }]
        ),
        validate.optional("hlsvp"): validate.text,
        validate.optional("live_playback"): validate.transform(bool),
        validate.optional("reason"): validate.all(validate.text, validate.transform(maybe_decode)),
        validate.optional("livestream"): validate.text,
        # NOTE(review): "live_playback" appears twice in this dict literal;
        # this later text validator silently replaces the transform(bool)
        # entry above — confirm which validator is intended.
        validate.optional("live_playback"): validate.text,
        validate.optional("author"): validate.all(validate.text,
                                                  validate.transform(maybe_decode)),
        validate.optional("title"): validate.all(validate.text,
                                                 validate.transform(maybe_decode)),
        "status": validate.text
    }
)
_ytdata_re = re.compile(r'window\["ytInitialData"\]\s*=\s*({.*?});', re.DOTALL)
_url_re = re.compile(r"""(?x)https?://(?:\w+\.)?youtube\.com
(?:
(?:
/(?:watch.+v=|embed/(?!live_stream)|v/)
(?P<video_id>[0-9A-z_-]{11})
)
|
(?:
/(?:
(?:user|channel)/
|
embed/live_stream\?channel=
)(?P<user>[^/?&]+)
)
|
(?:
/(?:c/)?(?P<liveChannel>[^/?]+)/live/?$
)
)
""")
class YouTube(Plugin):
    """Streamlink plugin for youtube.com video and live-stream pages."""

    _oembed_url = "https://www.youtube.com/oembed"
    _video_info_url = "https://youtube.com/get_video_info"

    # oEmbed response: author/title, decoded to text on py2.
    _oembed_schema = validate.Schema(
        {
            "author_name": validate.all(validate.text,
                                        validate.transform(maybe_decode)),
            "title": validate.all(validate.text,
                                  validate.transform(maybe_decode))
        }
    )

    # High-quality video itags only available as adaptive (DASH) formats.
    adp_video = {
        137: "1080p",
        303: "1080p60",  # HFR
        299: "1080p60",  # HFR
        264: "1440p",
        308: "1440p60",  # HFR
        266: "2160p",
        315: "2160p60",  # HFR
        138: "2160p",
        302: "720p60",  # HFR
    }
    # Audio itag -> bitrate; used to pick the best audio stream for muxing.
    adp_audio = {
        140: 128,
        141: 256,
        171: 128,
        249: 48,
        250: 64,
        251: 160,
        256: 256,
        258: 258,
    }

    arguments = PluginArguments(
        PluginArgument(
            "api-key",
            sensitive=True,
            help=argparse.SUPPRESS  # no longer used
        )
    )

    def __init__(self, url):
        super(YouTube, self).__init__(url)
        self.author = None
        self.title = None
        self.video_id = None
        self.session.http.headers.update({'User-Agent': useragents.CHROME})

    def get_author(self):
        """Return the channel name, fetching it via oEmbed on first use."""
        if self.author is None:
            self.get_oembed  # property access populates author/title
        return self.author

    def get_title(self):
        """Return the video title, fetching it via oEmbed on first use."""
        if self.title is None:
            self.get_oembed
        return self.title

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, stream):
        """Weight 3D streams below, and HFR streams above, their normal peers."""
        match_3d = re.match(r"(\w+)_3d", stream)
        match_hfr = re.match(r"(\d+p)(\d+)", stream)
        if match_3d:
            weight, group = Plugin.stream_weight(match_3d.group(1))
            weight -= 1
            group = "youtube_3d"
        elif match_hfr:
            weight, group = Plugin.stream_weight(match_hfr.group(1))
            weight += 1
            group = "high_frame_rate"
        else:
            weight, group = Plugin.stream_weight(stream)
        return weight, group

    @property
    def get_oembed(self):
        """Populate self.author and self.title from the oEmbed endpoint."""
        if self.video_id is None:
            self.video_id = self._find_video_id(self.url)

        params = {
            "url": "https://www.youtube.com/watch?v={0}".format(self.video_id),
            "format": "json"
        }
        res = self.session.http.get(self._oembed_url, params=params)
        data = self.session.http.json(res, schema=self._oembed_schema)
        self.author = data["author_name"]
        self.title = data["title"]

    def _create_adaptive_streams(self, info, streams, protected):
        """Add DASH streams to *streams*, muxing the best audio with video.

        Returns the updated (streams, protected) pair.
        """
        adaptive_streams = {}
        best_audio_itag = None

        # Extract audio streams from the DASH format list
        for stream_info in info.get("adaptive_fmts", []):
            if stream_info.get("s"):
                protected = True
                continue

            stream_params = dict(parse_qsl(stream_info["url"]))
            if "itag" not in stream_params:
                continue
            itag = int(stream_params["itag"])
            # extract any high quality streams only available in adaptive formats
            adaptive_streams[itag] = stream_info["url"]

            stream_type, stream_format = stream_info["type"]
            if stream_type == "audio":
                stream = HTTPStream(self.session, stream_info["url"])
                name = "audio_{0}".format(stream_format)
                streams[name] = stream

                # find the best quality audio stream m4a, opus or vorbis
                if best_audio_itag is None or self.adp_audio[itag] > self.adp_audio[best_audio_itag]:
                    best_audio_itag = itag

        if best_audio_itag and adaptive_streams and MuxedStream.is_usable(self.session):
            aurl = adaptive_streams[best_audio_itag]
            for itag, name in self.adp_video.items():
                if itag in adaptive_streams:
                    vurl = adaptive_streams[itag]
                    log.debug("MuxedStream: v {video} a {audio} = {name}".format(
                        audio=best_audio_itag,
                        name=name,
                        video=itag,
                    ))
                    streams[name] = MuxedStream(self.session,
                                                HTTPStream(self.session, vurl),
                                                HTTPStream(self.session, aurl))

        return streams, protected

    def _find_video_id(self, url):
        """Resolve *url* to an 11-character video ID, or raise PluginError."""
        m = _url_re.match(url)
        if m.group("video_id"):
            log.debug("Video ID from URL")
            return m.group("video_id")

        res = self.session.http.get(url)
        datam = _ytdata_re.search(res.text)
        if datam:
            data = parse_json(datam.group(1))
            # find the videoRenderer object, where there is a LIVE NOW badge
            for vid_ep in search_dict(data, 'currentVideoEndpoint'):
                video_id = vid_ep.get("watchEndpoint", {}).get("videoId")
                if video_id:
                    log.debug("Video ID from currentVideoEndpoint")
                    return video_id
            for x in search_dict(data, 'videoRenderer'):
                for bstyle in search_dict(x.get("badges", {}), "style"):
                    if bstyle == "BADGE_STYLE_TYPE_LIVE_NOW":
                        if x.get("videoId"):
                            log.debug("Video ID from videoRenderer (live)")
                            return x["videoId"]

        if "/embed/live_stream" in url:
            for link in itertags(res.text, "link"):
                if link.attributes.get("rel") == "canonical":
                    canon_link = link.attributes.get("href")
                    if canon_link != url:
                        log.debug("Re-directing to canonical URL: {0}".format(canon_link))
                        return self._find_video_id(canon_link)

        raise PluginError("Could not find a video on this page")

    def _get_stream_info(self, video_id):
        """Query get_video_info, trying progressively less restricted params."""
        # normal
        _params_1 = {"el": "detailpage"}
        # age restricted
        _params_2 = {"el": "embedded"}
        # embedded restricted
        _params_3 = {"eurl": "https://youtube.googleapis.com/v/{0}".format(video_id)}

        count = 0
        info_parsed = None
        for _params in (_params_1, _params_2, _params_3):
            count += 1
            params = {"video_id": video_id}
            params.update(_params)

            res = self.session.http.get(self._video_info_url, params=params)
            info_parsed = parse_query(res.content if is_py2 else res.text, name="config", schema=_config_schema)
            if info_parsed.get("status") == "fail":
                log.debug("get_video_info - {0}: {1}".format(
                    count, info_parsed.get("reason"))
                )
                continue
            self.author = info_parsed.get("author")
            self.title = info_parsed.get("title")
            log.debug("get_video_info - {0}: Found data".format(count))
            break

        return info_parsed

    def _get_streams(self):
        """Build the stream dict (progressive, DASH, HLS) for the current URL."""
        is_live = False

        self.video_id = self._find_video_id(self.url)
        # BUG FIX: the str.format-style placeholder was passed as a lazy
        # %-style logging argument, so "{0}" was emitted literally.
        log.debug("Using video ID: {0}".format(self.video_id))

        info = self._get_stream_info(self.video_id)
        if info and info.get("status") == "fail":
            log.error("Could not get video info: {0}".format(info.get("reason")))
            return
        elif not info:
            log.error("Could not get video info")
            return

        if info.get("livestream") == '1' or info.get("live_playback") == '1':
            log.debug("This video is live.")
            is_live = True

        formats = info.get("fmt_list")
        streams = {}
        protected = False
        for stream_info in info.get("url_encoded_fmt_stream_map", []):
            if stream_info.get("s"):
                protected = True
                continue

            stream = HTTPStream(self.session, stream_info["url"])
            name = formats.get(stream_info["itag"]) or stream_info["quality"]

            if stream_info.get("stereo3d"):
                name += "_3d"

            streams[name] = stream

        if not is_live:
            streams, protected = self._create_adaptive_streams(info, streams, protected)

        hls_playlist = info.get("hlsvp")
        if hls_playlist:
            try:
                hls_streams = HLSStream.parse_variant_playlist(
                    self.session, hls_playlist, namekey="pixels"
                )
                streams.update(hls_streams)
            except IOError as err:
                # BUG FIX: same lazy-%-args issue as above — format explicitly.
                log.warning("Failed to extract HLS streams: {0}".format(err))

        if not streams and protected:
            raise PluginError("This plugin does not support protected videos, "
                              "try youtube-dl instead")

        return streams
__plugin__ = YouTube
|
{
"content_hash": "885079da50956d325482715dbc138b9b",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 112,
"avg_line_length": 34.24119241192412,
"alnum_prop": 0.5193510091017016,
"repo_name": "back-to/streamlink",
"id": "b1081c13cae8df702ea6aaa44ae86f9be686529e",
"size": "12635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/youtube.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1451380"
},
{
"name": "Shell",
"bytes": "18044"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
# Command-line interface: optional file list plus configuration flags.
parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')

# Default rootdir is the repository root, two levels above this script.
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")

default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)

parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")

args = parser.parse_args()

# PORTABILITY FIX: use os.devnull instead of the POSIX-only "/dev/null".
verbose_out = sys.stderr if args.verbose else open(os.devnull, "w")
def get_refs():
    """Load boilerplate reference headers, keyed by file extension.

    Reads every boilerplate.<ext>.txt in args.boilerplate_dir and returns
    {ext: list-of-header-lines}.
    """
    refs = {}
    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]
        # FIX: context manager guarantees the file is closed even on error.
        with open(path, 'r') as ref_file:
            refs[extension] = ref_file.read().splitlines()
    return refs
def is_generated_file(filename, data, regexs):
    """Return a truthy regex match when *data* looks auto-generated.

    Files listed in skipped_ungenerated_files contain the generated-file
    marker but are hand-written, so they are never treated as generated.
    """
    if any(d in filename for d in skipped_ungenerated_files):
        return False
    return regexs["generated"].search(data)
def file_passes(filename, refs, regexs):
    """Return True when *filename* carries the expected boilerplate header."""
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False

    data = f.read()
    f.close()

    # determine if the file is automatically generated
    generated = is_generated_file(filename, data, regexs)

    basename = os.path.basename(filename)
    # Generated Go files use their own reference header.
    if generated:
        extension = "generatego"
    else:
        extension = file_extension(filename)
    # Extension-less files (e.g. Makefile) are looked up by basename.
    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]

    # remove extra content from the top of files
    if extension == "go" or extension == "generatego":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    elif extension == "sh":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    # The literal "YEAR" placeholder must never survive into a real file.
    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but missing the year of date' % filename, file=verbose_out)
            return False

    if not generated:
        # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break

    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True
def file_extension(filename):
    """Return the lower-cased final extension of *filename*, without the dot."""
    _, ext = os.path.splitext(filename)
    return ext.split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
"vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
"pkg/generated/bindata.go",
"cluster-autoscaler/cloudprovider/aws/aws-sdk-go",
"cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3",
"cluster-autoscaler/cloudprovider/bizflycloud/gobizfly",
"cluster-autoscaler/cloudprovider/brightbox/gobrightbox",
"cluster-autoscaler/cloudprovider/brightbox/k8ssdk",
"cluster-autoscaler/cloudprovider/brightbox/linkheader",
"cluster-autoscaler/cloudprovider/brightbox/go-cache",
"cluster-autoscaler/cloudprovider/digitalocean/godo",
"cluster-autoscaler/cloudprovider/magnum/gophercloud",
"cluster-autoscaler/cloudprovider/ionoscloud/ionos-cloud-sdk-go",
"cluster-autoscaler/cloudprovider/hetzner/hcloud-go",
"cluster-autoscaler/cloudprovider/oci"]
# list all the files contain 'DO NOT EDIT', but are not generated
skipped_ungenerated_files = ['hack/build-ui.sh', 'hack/lib/swagger.sh',
'hack/boilerplate/boilerplate.py',
'cluster-autoscaler/cloudprovider/aws/ec2_instance_types/gen.go',
'cluster-autoscaler/cloudprovider/azure/azure_instance_types/gen.go']
def normalize_files(files):
    """Drop skipped paths and absolutize the rest relative to args.rootdir."""
    kept = [p for p in files if not any(skip in p for skip in skipped_dirs)]
    return [
        p if os.path.isabs(p) else os.path.join(args.rootdir, p)
        for p in kept
    ]
def get_files(extensions):
    """Return candidate files whose extension or basename is in *extensions*."""
    if args.filenames:
        files = args.filenames
    else:
        files = []
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # Pruning skipped dirs here is purely a performance win;
            # normalize_files() would filter them out later anyway, but this
            # avoids walking those subtrees at all.
            dirs[:] = [d for d in dirs if d not in skipped_dirs]
            files.extend(os.path.join(root, name) for name in walkfiles)

    outfiles = []
    for pathname in normalize_files(files):
        basename = os.path.basename(pathname)
        if file_extension(pathname) in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles
def get_dates():
    """Return a regex alternation of years 2014..current, e.g. "(2014|2015|...)"."""
    current_year = datetime.datetime.now().year
    alternatives = '|'.join(str(year) for year in range(2014, current_year + 1))
    return '(%s)' % alternatives
def get_regexs():
    """Compile and return the regexes used by the boilerplate checks."""
    return {
        # "YEAR" exists in the boilerplate templates, never in real files.
        "year": re.compile('YEAR'),
        # Alternation of 2014..current year; holder names can be anything.
        "date": re.compile(get_dates()),
        # Strip //go:build or // +build constraint blocks from Go files.
        "go_build_constraints": re.compile(
            r"^(//(go:build| \+build).*\n)+\n", re.MULTILINE),
        # Strip #!... shebang lines from shell scripts.
        "shebang": re.compile(r"^(#!.*\n)\n*", re.MULTILINE),
        # Generated files carry a DO NOT EDIT marker.
        "generated": re.compile('DO NOT EDIT'),
    }
def main():
    """Check every candidate file and print the ones with bad headers."""
    regexs = get_regexs()
    refs = get_refs()
    for filename in get_files(refs.keys()):
        # Vendored _override trees carry foreign headers; skip them.
        if "/cluster-autoscaler/_override/" in filename:
            continue
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)
    return 0
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "92781ee88daaa9ab8aaaa3e4bcfb25ed",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 124,
"avg_line_length": 34.559471365638764,
"alnum_prop": 0.6108349267049076,
"repo_name": "kubernetes/autoscaler",
"id": "467bf3dd8868954d2709fb74deeac33d6f79497a",
"size": "8457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hack/boilerplate/boilerplate.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5407"
},
{
"name": "Go",
"bytes": "19468437"
},
{
"name": "Makefile",
"bytes": "19380"
},
{
"name": "Mustache",
"bytes": "4034"
},
{
"name": "Python",
"bytes": "20902"
},
{
"name": "Roff",
"bytes": "1730"
},
{
"name": "Ruby",
"bytes": "1255"
},
{
"name": "Shell",
"bytes": "53412"
}
],
"symlink_target": ""
}
|
"""Generates JavaScript source files from a mojom.Module."""
from generate import mojom
from generate import mojom_pack
from generate import mojom_generator
from generate.template_expander import UseJinja
_kind_to_javascript_default_value = {
mojom.BOOL: "false",
mojom.INT8: "0",
mojom.UINT8: "0",
mojom.INT16: "0",
mojom.UINT16: "0",
mojom.INT32: "0",
mojom.UINT32: "0",
mojom.FLOAT: "0",
mojom.HANDLE: "core.kInvalidHandle",
mojom.DCPIPE: "core.kInvalidHandle",
mojom.DPPIPE: "core.kInvalidHandle",
mojom.MSGPIPE: "core.kInvalidHandle",
mojom.INT64: "0",
mojom.UINT64: "0",
mojom.DOUBLE: "0",
mojom.STRING: '""',
}
def JavaScriptDefaultValue(field):
    """Return the JavaScript default-value literal for a mojom field.

    Raises for fields with explicit defaults (handled by the template layer).
    """
    if field.default:
        raise Exception("Default values should've been handled in jinja.")
    if field.kind in mojom.PRIMITIVES:
        return _kind_to_javascript_default_value[field.kind]
    if isinstance(field.kind, mojom.Struct):
        return "null"  # stray C-style semicolon removed
    if isinstance(field.kind, mojom.Array):
        return "[]"
    if isinstance(field.kind, mojom.Interface):
        return _kind_to_javascript_default_value[mojom.MSGPIPE]
def JavaScriptPayloadSize(packed):
    """Return the 8-byte-aligned payload size of a packed mojom struct."""
    packed_fields = packed.packed_fields
    if not packed_fields:
        return 0  # stray C-style semicolons removed throughout
    last_field = packed_fields[-1]
    offset = last_field.offset + last_field.size
    pad = mojom_pack.GetPad(offset, 8)
    return offset + pad
_kind_to_javascript_type = {
mojom.BOOL: "codec.Uint8",
mojom.INT8: "codec.Int8",
mojom.UINT8: "codec.Uint8",
mojom.INT16: "codec.Int16",
mojom.UINT16: "codec.Uint16",
mojom.INT32: "codec.Int32",
mojom.UINT32: "codec.Uint32",
mojom.FLOAT: "codec.Float",
mojom.HANDLE: "codec.Handle",
mojom.DCPIPE: "codec.Handle",
mojom.DPPIPE: "codec.Handle",
mojom.MSGPIPE: "codec.Handle",
mojom.INT64: "codec.Int64",
mojom.UINT64: "codec.Uint64",
mojom.DOUBLE: "codec.Double",
mojom.STRING: "codec.String",
}
def GetJavaScriptType(kind):
    """Map a mojom kind to the codec type expression used in generated JS."""
    if kind in mojom.PRIMITIVES:
        return _kind_to_javascript_type[kind]
    if isinstance(kind, mojom.Struct):
        inner = GetJavaScriptType(kind.name)
        return "new codec.PointerTo({0})".format(inner)
    if isinstance(kind, mojom.Array):
        element = GetJavaScriptType(kind.kind)
        return "new codec.ArrayOf({0})".format(element)
    if isinstance(kind, mojom.Interface):
        return GetJavaScriptType(mojom.MSGPIPE)
    # Unrecognized kinds pass through unchanged.
    return kind
_kind_to_javascript_decode_snippet = {
mojom.BOOL: "read8() & 1",
mojom.INT8: "read8()",
mojom.UINT8: "read8()",
mojom.INT16: "read16()",
mojom.UINT16: "read16()",
mojom.INT32: "read32()",
mojom.UINT32: "read32()",
mojom.FLOAT: "decodeFloat()",
mojom.HANDLE: "decodeHandle()",
mojom.DCPIPE: "decodeHandle()",
mojom.DPPIPE: "decodeHandle()",
mojom.MSGPIPE: "decodeHandle()",
mojom.INT64: "read64()",
mojom.UINT64: "read64()",
mojom.DOUBLE: "decodeDouble()",
mojom.STRING: "decodeStringPointer()",
}
def JavaScriptDecodeSnippet(kind):
    """Return the decoder call snippet for a mojom kind."""
    if kind in mojom.PRIMITIVES:
        return _kind_to_javascript_decode_snippet[kind]
    if isinstance(kind, mojom.Struct):
        return "decodeStructPointer(%s)" % GetJavaScriptType(kind.name)  # ';' removed
    if isinstance(kind, mojom.Array):
        return "decodeArrayPointer(%s)" % GetJavaScriptType(kind.kind)
    if isinstance(kind, mojom.Interface):
        return JavaScriptDecodeSnippet(mojom.MSGPIPE)
_kind_to_javascript_encode_snippet = {
mojom.BOOL: "write8(1 & ",
mojom.INT8: "write8(",
mojom.UINT8: "write8(",
mojom.INT16: "write16(",
mojom.UINT16: "write16(",
mojom.INT32: "write32(",
mojom.UINT32: "write32(",
mojom.FLOAT: "encodeFloat(",
mojom.HANDLE: "encodeHandle(",
mojom.DCPIPE: "encodeHandle(",
mojom.DPPIPE: "encodeHandle(",
mojom.MSGPIPE: "encodeHandle(",
mojom.INT64: "write64(",
mojom.UINT64: "write64(",
mojom.DOUBLE: "encodeDouble(",
mojom.STRING: "encodeStringPointer(",
}
def JavaScriptEncodeSnippet(kind):
    """Return the (open-ended) encoder call snippet for a mojom kind."""
    if kind in mojom.PRIMITIVES:
        return _kind_to_javascript_encode_snippet[kind]
    if isinstance(kind, mojom.Struct):
        return "encodeStructPointer(%s, " % GetJavaScriptType(kind.name)  # ';' removed
    if isinstance(kind, mojom.Array):
        return "encodeArrayPointer(%s, " % GetJavaScriptType(kind.kind)
    if isinstance(kind, mojom.Interface):
        return JavaScriptEncodeSnippet(mojom.MSGPIPE)
def GetConstants(module):
    """Returns a generator that enumerates all constants that can be referenced
    from this module."""
    class Constant:
        pass

    def _make(namespace, is_current, import_item, enum, field):
        # One Constant per enum field, tagged with where it came from.
        constant = Constant()
        constant.namespace = namespace
        constant.is_current_namespace = is_current
        constant.import_item = import_item
        constant.name = (enum.name, field.name)
        return constant

    for enum in module.enums:
        for field in enum.fields:
            yield _make(module.namespace, True, None, enum, field)

    for each in module.imports:
        for enum in each["module"].enums:
            for field in enum.fields:
                yield _make(each["namespace"],
                            each["namespace"] == module.namespace,
                            each, enum, field)
def TranslateConstants(value, module):
    """Resolve *value* as a possibly namespace-qualified constant reference.

    We're assuming we're dealing with an identifier, but that may not be
    the case. If we're not, we just won't find any matches and the value
    is returned unchanged.
    """
    if "." in value:
        namespace, identifier = value.split(".")
    else:
        namespace, identifier = "", value

    for constant in GetConstants(module):
        in_scope = (namespace == constant.namespace or
                    (namespace == "" and constant.is_current_namespace))
        if in_scope and constant.name[1] == identifier:
            if constant.import_item:
                return "%s.%s.%s" % (constant.import_item["unique_name"],
                                     constant.name[0], constant.name[1])
            return "%s.%s" % (constant.name[0], constant.name[1])
    return value
def ExpressionToText(value, module):
    """Render an EXPRESSION token list to JavaScript source text."""
    if value[0] != "EXPRESSION":
        # FIX: the old message used `+ value`, which raised TypeError for any
        # non-string value (value is indexable, so usually a list/tuple) and
        # lacked a separating space. %r always works and shows the value.
        raise Exception("Expected EXPRESSION, got %r" % (value,))
    return "".join(mojom_generator.ExpressionMapper(
        value, lambda token: TranslateConstants(token, module)))
def JavascriptType(kind):
    """Return the JS name for a kind, qualified by its import alias if any."""
    if kind.imported_from:
        return "{0}.{1}".format(kind.imported_from["unique_name"], kind.name)
    return kind.name
class Generator(mojom_generator.Generator):
    """Generates JavaScript source files from a mojom.Module."""

    # Jinja filters made available to js_templates/module.js.tmpl.
    js_filters = {
        "camel_to_underscores": mojom_generator.CamelToUnderscores,
        "default_value": JavaScriptDefaultValue,
        "payload_size": JavaScriptPayloadSize,
        "decode_snippet": JavaScriptDecodeSnippet,
        "encode_snippet": JavaScriptEncodeSnippet,
        "expression_to_text": ExpressionToText,
        "is_object_kind": mojom_generator.IsObjectKind,
        "is_string_kind": mojom_generator.IsStringKind,
        "is_array_kind": lambda kind: isinstance(kind, mojom.Array),
        "js_type": JavascriptType,
        "stylize_method": mojom_generator.StudlyCapsToCamel,
        "verify_token_type": mojom_generator.VerifyTokenType,
    }

    @UseJinja("js_templates/module.js.tmpl", filters=js_filters)
    def GenerateJsModule(self):
        # Template context handed to the module template.
        return {
            "imports": self.GetImports(),
            "kinds": self.module.kinds,
            "enums": self.module.enums,
            "module": self.module,
            "structs": self.GetStructs() + self.GetStructsFromMethods(),
            "interfaces": self.module.interfaces,
        }

    def GenerateFiles(self):
        # Writes "<module name>.js" via the base class Write helper.
        self.Write(self.GenerateJsModule(), "%s.js" % self.module.name)

    def GetImports(self):
        # Since each import is assigned a variable in JS, they need to have
        # unique names.
        counter = 1
        for each in self.module.imports:
            each["unique_name"] = "import" + str(counter)
            counter += 1
        return self.module.imports
|
{
"content_hash": "9d2286dcd8141e2498f52d6aba22b1ec",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 78,
"avg_line_length": 31.189300411522634,
"alnum_prop": 0.6796411136033778,
"repo_name": "ChromiumWebApps/chromium",
"id": "252e4ce831b8a1578feb686523a0a9ae2b2404bd",
"size": "7742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mojo/public/bindings/generators/mojom_js_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42286199"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "198616766"
},
{
"name": "CSS",
"bytes": "937333"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5695686"
},
{
"name": "JavaScript",
"bytes": "21967126"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2262"
},
{
"name": "Objective-C",
"bytes": "7602057"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1210885"
},
{
"name": "Python",
"bytes": "10774996"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1316721"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15243"
}
],
"symlink_target": ""
}
|
# Sphinx configuration for the wagtail-schema.org documentation build.
from wagtailschemaorg._version import version_bits

# -- General configuration ------------------------------------------------
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
]
source_suffix = '.rst'
master_doc = 'index'

project = 'Wagtail Schema.org JSON-LD tags'
copyright = '2016, Takeflight'
author = 'Takeflight'

# The short X.Y version.
version = '.'.join(map(str, version_bits[:2]))
# The full version, including alpha/beta/rc tags.
release = '.'.join(map(str, version_bits))

pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------
htmlhelp_basename = 'WagtailSchemaOrg'

# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
latex_documents = [
    (master_doc, 'WagtailSchemaOrg.tex', 'Wagtail Schema.org JSON-LD tags documentation',
     'Takeflight', 'manual'),
]

# -- Options for manual page output ---------------------------------------
man_pages = [
    (master_doc, 'wagtailschemaorg', 'Wagtail Schema.org JSON-LD tags Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'WagtailSchemaorg', 'Wagtail Schema.org JSON-LD tags Documentation',
     author, 'WagtailSchemaorg', 'Schema.org JSON-LD tags for Wagtail sites',
     'Miscellaneous'),
]

# -- Options for intersphinx ----------------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.5/', None),
    'django': ('https://docs.djangoproject.com/en/dev/', 'https://docs.djangoproject.com/en/dev/_objects/'),
}
|
{
"content_hash": "8944068a9a18f351455858ba0a7abb9b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 108,
"avg_line_length": 28.205479452054796,
"alnum_prop": 0.5881495871782418,
"repo_name": "takeflight/wagtail-schema.org",
"id": "b2f0a4fca893ab2ba1b3137f96b2607a19a7a558",
"size": "2107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "22675"
}
],
"symlink_target": ""
}
|
'''Reader for Neurolucida .ASC files, v3, reversed engineered from looking at output from
Neuroludica
'''
import logging
import warnings
from io import open
import numpy as np
from neurom._compat import StringType
from neurom.core.dataformat import COLS, POINT_TYPE
from .datawrapper import DataWrapper

# Section tags that carry geometry, mapped to the neurom point type they produce.
WANTED_SECTIONS = {
    'CellBody': POINT_TYPE.SOMA,
    'Axon': POINT_TYPE.AXON,
    'Dendrite': POINT_TYPE.BASAL_DENDRITE,
    'Apical': POINT_TYPE.APICAL_DENDRITE,
}
# Section tags that are skipped entirely while parsing.
# (A duplicate 'FilledUpTriangle' entry was removed; the dict built below
# de-duplicated it anyway, so behavior is unchanged.)
UNWANTED_SECTION_NAMES = [
    # Meta-data?
    'Closed', 'Color', 'FillDensity', 'GUID', 'ImageCoords', 'MBFObjectType',
    'Marker', 'Name', 'Resolution', 'Set', 'Description',
    # Marker names?
    'Asterisk', 'Cross', 'Dot', 'DoubleCircle', 'FilledCircle', 'FilledDownTriangle',
    'FilledSquare', 'FilledStar', 'FilledUpTriangle', 'Flower',
    'Flower2', 'OpenCircle', 'OpenDiamond', 'OpenDownTriangle', 'OpenSquare', 'OpenStar',
    'OpenUpTriangle', 'Plus', 'ShadedStar', 'Splat', 'TriStar',
]
# A dict (not a set) because _match_section returns the looked-up value.
UNWANTED_SECTIONS = {name: True for name in UNWANTED_SECTION_NAMES}
L = logging.getLogger(__name__)
def _match_section(section, match):
    '''checks whether the `type` of section is in the `match` dictionary
    Works around the unknown ordering of s-expressions in each section.
    For instance, the `type` is the 3-rd one in for CellBodies
        ("CellBody"
         (Color Yellow)
         (CellBody)
         (Set "cell10")
        )
    Returns:
        value associated with match[section_type], None if no match
    '''
    # The type tag can appear anywhere within the first five members.
    for member in section[:5]:
        if isinstance(member, StringType) and member in match:
            return match[member]
    return None
def _get_tokens(morph_fd):
'''split a file-like into tokens: split on whitespace
Note: this also strips newlines and comments
'''
for line in morph_fd:
line = line.rstrip() # remove \r\n
line = line.split(';', 1)[0] # strip comments
squash_token = [] # quoted strings get squashed into one token
if '<(' in line: # skip spines, which exist on a single line
assert ')>' in line, 'Missing end of spine'
continue
for token in line.replace('(', ' ( ').replace(')', ' ) ').split():
if squash_token:
squash_token.append(token)
if token.endswith('"'):
token = ' '.join(squash_token)
squash_token = []
yield token
elif token.startswith('"') and not token.endswith('"'):
squash_token.append(token)
else:
yield token
def _parse_section(token_iter):
    '''take a stream of tokens, and create the tree structure that is defined
    by the s-expressions
    '''
    contents = []
    for token in token_iter:
        if token == ')':
            # end of the current s-expression
            return contents
        if token == '(':
            # recurse into a child s-expression, discarding unwanted ones
            child = _parse_section(token_iter)
            if not _match_section(child, UNWANTED_SECTIONS):
                contents.append(child)
        else:
            contents.append(token)
    return contents
def _parse_sections(morph_fd):
    '''returns array of all the sections that exist
    The format is nested lists that correspond to the s-expressions
    '''
    sections = []
    tokens = _get_tokens(morph_fd)
    for token in tokens:
        # only an opening parenthesis starts a top-level section
        if token != '(':
            continue
        section = _parse_section(tokens)
        if not _match_section(section, UNWANTED_SECTIONS):
            sections.append(section)
    return sections
def _flatten_subsection(subsection, _type, offset, parent):
    '''Flatten a subsection from its nested version
    Args:
        subsection: Nested subsection as produced by _parse_section, except one level in
        _type: type of section, ie: AXON, etc
        parent: first element has this as it's parent
        offset: position in the final array of the first element
    Returns:
        Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
    '''
    for row in subsection:
        # TODO: Figure out what these correspond to in neurolucida
        if row in ('Low', 'Generated', 'High', ):
            continue
        elif isinstance(row[0], StringType):
            if len(row) in (4, 5, ):
                if len(row) == 5:
                    assert row[4][0] == 'S', \
                        'Only known usage of a fifth member is Sn, found: %s' % row[4][0]
                # data row (x, y, z, diameter[, Sn]); the 4th member is halved
                # to fill the R column of the output row
                yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2.,
                       _type, offset, parent)
                # the next point in this run is chained to the one just yielded
                parent = offset
                offset += 1
        elif isinstance(row[0], list):
            # nested list: presumably a branch point, with sub-branches
            # separated by '|' tokens, all parented on the last yielded point
            # -- TODO confirm against Neurolucida output
            split_parent = offset - 1
            # NOTE(review): start_offset is always 0 here, so it never changes
            # the slices below -- looks vestigial; confirm before removing.
            start_offset = 0
            slices = []
            start = 0
            for i, value in enumerate(row):
                if value == '|':
                    slices.append(slice(start + start_offset, i))
                    start = i + 1
            slices.append(slice(start + start_offset, len(row)))
            for split_slice in slices:
                for _row in _flatten_subsection(row[split_slice], _type, offset,
                                                split_parent):
                    offset += 1
                    yield _row
def _extract_section(section):
    '''Find top level sections, and get their flat contents, and append them all
    Returns a numpy array with the row format:
        [X, Y, Z, R, TYPE, ID, PARENT_ID]
    Note: PARENT_ID starts at -1 for soma and 0 for neurites
    '''
    if len(section) == 1:
        # sections with only one element carry no geometry and are skipped
        assert section[0] == 'Sections', \
            ('Only known usage of a single Section content is "Sections", found %s' %
             section[0])
        return None
    # The type tag is usually the first element, but CellBody blocks often
    # start with [['"CellBody"'], ['CellBody'], ...] so fall back to the second.
    section_type = WANTED_SECTIONS.get(section[0][0], None)
    data_start = 1
    if section_type is None:
        section_type = WANTED_SECTIONS.get(section[1][0], None)
        if section_type is None:
            return None  # can't determine the type
        data_start = 2
    first_parent = -1 if section_type == POINT_TYPE.SOMA else 0
    rows = _flatten_subsection(section[data_start:], section_type,
                               offset=0, parent=first_parent)
    return np.array(list(rows))
def _sections_to_raw_data(sections):
    '''convert list of sections into the `raw_data` format used in neurom
    This finds the soma, and attaches the neurites
    Raises:
        AssertionError: if there is no soma, or more than one soma
    '''
    soma = None
    neurites = []
    # partition the extracted sections into the (single) soma and the neurites
    for section in sections:
        neurite = _extract_section(section)
        if neurite is None:
            continue
        elif neurite[0][COLS.TYPE] == POINT_TYPE.SOMA:
            assert soma is None, 'Multiple somas defined in file'
            soma = neurite
        else:
            neurites.append(neurite)
    assert soma is not None, 'Missing CellBody element (ie. soma)'
    total_length = len(soma) + sum(len(neurite) for neurite in neurites)
    ret = np.zeros((total_length, 7,), dtype=np.float64)
    # the soma always occupies the first rows of the combined array
    pos = len(soma)
    ret[0:pos, :] = soma
    for neurite in neurites:
        end = pos + len(neurite)
        ret[pos:end, :] = neurite
        # each neurite's IDs/parents are 0-based within the neurite; shift
        # them so they are unique within the combined array
        ret[pos:end, COLS.P] += pos
        ret[pos:end, COLS.ID] += pos
        # TODO: attach the neurite at the closest point on the soma
        # (for now the neurite root is re-parented onto the last soma point)
        ret[pos, COLS.P] = len(soma) - 1
        pos = end
    return ret
def read(morph_file, data_wrapper=DataWrapper):
    '''return a 'raw_data' np.array with the full neuron, and the format of the file
    suitable to be wrapped by DataWrapper
    '''
    msg = ('This is an experimental reader. '
           'There are no guarantees regarding ability to parse '
           'Neurolucida .asc files or correctness of output.')
    # surface the warning both to Python's warning machinery and the module log
    warnings.warn(msg)
    L.warning(msg)
    with open(morph_file, encoding='utf-8', errors='replace') as morph_fd:
        sections = _parse_sections(morph_fd)
    return data_wrapper(_sections_to_raw_data(sections), 'NL-ASCII')
|
{
"content_hash": "497478d2a41f3c551fb54895cc6913c4",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 90,
"avg_line_length": 33.62753036437247,
"alnum_prop": 0.5833132675174573,
"repo_name": "mgeplf/NeuroM",
"id": "cfd182d813532d4e352308d617dc549997e38fc5",
"size": "10000",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neurom/io/neurolucida.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "697203"
},
{
"name": "Jupyter Notebook",
"bytes": "2138829"
},
{
"name": "Python",
"bytes": "572707"
}
],
"symlink_target": ""
}
|
"""
Export a sensor object to a JSON file, adding ' API TEST' to the name of the sensor before exporting the JSON file and deleting any pre-existing sensor with the same (new) name, then create a new sensor object from the exported JSON file
"""
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
handler_args['trusted_certs'] = "certs"
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler.get() method
get_kwargs = {}
get_kwargs["objtype"] = u'sensor'
get_kwargs["id"] = 381
# get objects to use as an export to JSON file
print "...CALLING: handler.get() with args: {}".format(get_kwargs)
orig_objs = handler.get(**get_kwargs)
# set the attribute name and value we want to add to the original objects
# this is necessarry to avoid name conflicts when adding the new object
attr_name = u'name'
attr_value = u' API TEST'
# modify the orig_objs to add attr_value to attr_name
for x in orig_objs:
new_attr = getattr(x, attr_name)
new_attr += attr_value
setattr(x, attr_name, new_attr)
# delete the object in case it already exists
del_kwargs = {}
del_kwargs[attr_name] = new_attr
del_kwargs['objtype'] = u'sensor'
print "...CALLING: handler.delete() with args: {}".format(del_kwargs)
try:
handler.delete(**del_kwargs)
except Exception as e:
print "...EXCEPTION: {}".format(e)
# export orig_objs to a json file
export_kwargs = {}
export_kwargs['obj'] = orig_objs
export_kwargs['export_format'] = 'json'
export_kwargs['report_dir'] = tempfile.gettempdir()
print "...CALLING: handler.export_to_report_file() with args: {}".format(export_kwargs)
json_file, results = handler.export_to_report_file(**export_kwargs)
# create the object from the exported JSON file
create_kwargs = {}
create_kwargs['objtype'] = u'sensor'
create_kwargs['json_file'] = json_file
print "...CALLING: handler.create_from_json() with args {}".format(create_kwargs)
response = handler.create_from_json(**create_kwargs)
print "...OUTPUT: Type of response: ", type(response)
print "...OUTPUT: print of response:"
print response
# call the export_obj() method to convert response to JSON and store it in out
export_kwargs = {}
export_kwargs['obj'] = response
export_kwargs['export_format'] = 'json'
print "...CALLING: handler.export_obj() with args {}".format(export_kwargs)
out = handler.export_obj(**export_kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print "...OUTPUT: print the objects returned in JSON format:"
print out
'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.get() with args: {'objtype': u'sensor', 'id': 381}
...CALLING: handler.delete() with args: {'objtype': u'sensor', u'name': u'Is Mac API TEST'}
...CALLING: handler.export_to_report_file() with args: {'report_dir': '/var/folders/dk/vjr1r_c53yx6k6gzp2bbt_c40000gn/T', 'export_format': 'json', 'obj': <taniumpy.object_types.sensor_list.SensorList object at 0x102b503d0>}
...CALLING: handler.create_from_json() with args {'objtype': u'sensor', 'json_file': '/var/folders/dk/vjr1r_c53yx6k6gzp2bbt_c40000gn/T/SensorList_2015_09_14-15_58_03-EDT.json'}
...OUTPUT: Type of response: <class 'taniumpy.object_types.sensor_list.SensorList'>
...OUTPUT: print of response:
SensorList, len: 1
...CALLING: handler.export_obj() with args {'export_format': 'json', 'obj': <taniumpy.object_types.sensor_list.SensorList object at 0x102b50cd0>}
...OUTPUT: print the objects returned in JSON format:
{
"_type": "sensors",
"sensor": [
{
"_type": "sensor",
"category": "Operating System",
"creation_time": "2015-09-14T19:57:39",
"delimiter": ",",
"description": "Returns whether the machine is a Mac. True if so, False if not.\nExample: True",
"exclude_from_parse_flag": 0,
"hash": 2387245230,
"hidden_flag": 0,
"id": 670,
"ignore_case_flag": 1,
"last_modified_by": "Administrator",
..trimmed for brevity..
'''
'''STDERR from running this:
'''
|
{
"content_hash": "46ad06f8c2444341a16d9016e2097425",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 237,
"avg_line_length": 37.93333333333333,
"alnum_prop": 0.7026681578526921,
"repo_name": "tanium/pytan",
"id": "c4810e9704749607e4d61bc32dd3e64588dceed5",
"size": "6281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EXAMPLES/PYTAN_API/create_sensor_from_json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13251"
},
{
"name": "CSS",
"bytes": "32442"
},
{
"name": "HTML",
"bytes": "1232764"
},
{
"name": "JavaScript",
"bytes": "375167"
},
{
"name": "Makefile",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "2541262"
},
{
"name": "Shell",
"bytes": "3194"
}
],
"symlink_target": ""
}
|
import sys
from glumpy import gl
from glumpy.log import log
from glumpy.app import configuration
from glumpy.app.window import window
# Backend name
__name__ = "FreeGLUT"
# Backend version (if available)
__version__ = ""
# Backend availability (set True by the availability probe below when
# OpenGL.GLUT imports successfully)
__availability__ = False
# Whether the framework has been initialized
__initialized__ = False
# Active windows
__windows__ = []
# ---------------------------------------------------- convenient functions ---
def name():
    """Return the name of this backend."""
    return __name__


def version():
    """Return the version string of this backend."""
    return __version__


def available():
    """Return whether this backend can be used."""
    return __availability__
# --------------------------------------------------------------- init/exit ---
def __init__():
    """Initialize GLUT exactly once (safe to call repeatedly)."""
    global __initialized__
    if __initialized__:
        return
    glut.glutInit(sys.argv)
    __initialized__ = True


def __exit__():
    """Leave the framework marked as initialized."""
    global __initialized__
    # Not an error, we cannot really terminate glut
    __initialized__ = True
# ------------------------------------------------------------ availability ---
# Probe whether PyOpenGL's GLUT bindings are importable; this backend only
# reports itself available on OSX ('darwin').
try:
    import sys
    import OpenGL.GLUT as glut
    if sys.platform == 'darwin':
        import OpenGL.platform as platform
        try:
            # glutCheckLoop is an Apple-GLUT entry point not exposed by default
            glutCheckLoop = platform.createBaseFunction(
                'glutCheckLoop', dll=platform.GLUT, resultType=None,
                argTypes=[], doc='glutCheckLoop( ) -> None', argNames=(), )
        except AttributeError:
            # NOTE(review): these two assignments are immediately clobbered by
            # the unconditional `__availability__ = True` below -- it looks
            # like this failure path was meant to bail out; confirm intent.
            __availability__ = False
            __version__ = None
        __availability__ = True
        __version__ = "%d" % glut.GLUT_API_VERSION
        __init__()
        # GLUT -> glumpy translation table for mouse buttons
        __mouse_map__ = { glut.GLUT_LEFT_BUTTON: window.mouse.LEFT,
                          glut.GLUT_MIDDLE_BUTTON: window.mouse.MIDDLE,
                          glut.GLUT_RIGHT_BUTTON: window.mouse.RIGHT }
        # GLUT -> glumpy translation table for control and special keys
        __key_map__ = { 0x008: window.key.BACKSPACE,
                        0x009: window.key.TAB,
                        0x00A: window.key.LINEFEED,
                        0x00C: window.key.CLEAR,
                        0x00D: window.key.RETURN,
                        0x018: window.key.CANCEL,
                        0x01B: window.key.ESCAPE,
                        glut.GLUT_KEY_F1: window.key.F1,
                        glut.GLUT_KEY_F2: window.key.F2,
                        glut.GLUT_KEY_F3: window.key.F3,
                        glut.GLUT_KEY_F4: window.key.F4,
                        glut.GLUT_KEY_F5: window.key.F5,
                        glut.GLUT_KEY_F6: window.key.F6,
                        glut.GLUT_KEY_F7: window.key.F7,
                        glut.GLUT_KEY_F8: window.key.F8,
                        glut.GLUT_KEY_F9: window.key.F9,
                        glut.GLUT_KEY_F10: window.key.F10,
                        glut.GLUT_KEY_F11: window.key.F11,
                        glut.GLUT_KEY_F12: window.key.F12,
                        glut.GLUT_KEY_LEFT: window.key.LEFT,
                        glut.GLUT_KEY_UP: window.key.UP,
                        glut.GLUT_KEY_RIGHT: window.key.RIGHT,
                        glut.GLUT_KEY_DOWN: window.key.DOWN,
                        glut.GLUT_KEY_PAGE_UP: window.key.PAGEUP,
                        glut.GLUT_KEY_PAGE_DOWN: window.key.PAGEDOWN,
                        glut.GLUT_KEY_HOME: window.key.HOME,
                        glut.GLUT_KEY_END: window.key.END,
                        glut.GLUT_KEY_INSERT: window.key.INSERT }
    else:
        # non-darwin platforms: backend reported unavailable
        __availability__ = False
        __version__ = None
except ImportError:
    # PyOpenGL (OpenGL.GLUT) is not installed
    __availability__ = False
    __version__ = None
# -------------------------------------------------------------- capability ---
# Feature matrix advertised by this backend.
capability = {
    "Window position get/set" : True,
    "Window size get/set"     : True,
    "Multiple windows"        : False,
    "Mouse scroll events"     : False,
    "Non-decorated window"    : True,
    "Non-sizeable window"     : False,
    "Fullscreen mode"         : True,
    "Unicode processing"      : False,
    "Set GL version"          : False,
    "Set GL profile"          : False,
    "Share GL context"        : False,
}
# ------------------------------------------------------- set_configuration ---
def set_configuration(config):
    """ Set gl configuration """
    # Build the freeglut display-string describing the requested framebuffer.
    parts = ["acca=0",  # No accum buffer
             "red>=%d" % config.red_size,
             "green>=%d" % config.green_size,
             "blue>=%d" % config.blue_size,
             "alpha>=%d" % config.alpha_size,
             "depth>=%d" % config.depth_size,
             "stencil~%d" % config.stencil_size,
             "double=1" if config.double_buffer else "single=1",
             "stereo=%d" % config.stereo,
             "samples~%d" % config.samples]
    # Join with spaces; the trailing space matches the original string exactly.
    glut.glutInitDisplayString(" ".join(parts) + " ")
# ------------------------------------------------------------------ Window ---
class Window(window.Window):
    """GLUT-backed implementation of the abstract glumpy Window.

    Note: this backend reliably supports only a single window; creating a
    second one aborts the process (see __init__).
    """

    def __init__( self, width=256, height=256, title=None, visible=True, aspect=None,
                  decoration=True, fullscreen=False, config=None, context=None, color=(0,0,0,1)):
        if len(__windows__) > 0:
            log.critical(
                """OSXGLUT backend is unstable with more than one window.\n"""
                """Exiting...""")
            sys.exit(0)
        window.Window.__init__(self, width=width,
                               height=height,
                               title=title,
                               visible=visible,
                               aspect=aspect,
                               decoration=decoration,
                               fullscreen=fullscreen,
                               config=config,
                               context=context,
                               color=color)
        if config is None:
            config = configuration.Configuration()
        set_configuration(config)
        self._native_window = glut.glutCreateWindow( self._title )
        if bool(glut.glutSetOption):
            glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
                               glut.GLUT_ACTION_CONTINUE_EXECUTION)
            glut.glutSetOption(glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS,
                               glut.GLUT_ACTION_CONTINUE_EXECUTION)
        # Wire all GLUT callbacks to the private handlers below
        glut.glutWMCloseFunc( self._close )
        glut.glutDisplayFunc( self._display )
        glut.glutReshapeFunc( self._reshape )
        glut.glutKeyboardFunc( self._keyboard )
        glut.glutKeyboardUpFunc( self._keyboard_up )
        glut.glutMouseFunc( self._mouse )
        glut.glutMotionFunc( self._motion )
        glut.glutPassiveMotionFunc( self._passive_motion )
        glut.glutVisibilityFunc( self._visibility )
        glut.glutEntryFunc( self._entry )
        glut.glutSpecialFunc( self._special )
        glut.glutSpecialUpFunc( self._special_up )
        glut.glutReshapeWindow( self._width, self._height )
        if visible:
            glut.glutShowWindow()
        else:
            glut.glutHideWindow()
        # This ensures glutCheckLoop never blocks
        def on_idle(): pass
        glut.glutIdleFunc(on_idle)
        __windows__.append(self)

    def _keyboard( self, code, x, y ):
        """GLUT keyboard-press callback -> 'on_key_press' event."""
        symbol = self._keyboard_translate(code)
        modifiers = glut.glutGetModifiers()
        modifiers = self._modifiers_translate(modifiers)
        self.dispatch_event('on_key_press', symbol, modifiers)

    def _keyboard_up( self, code, x, y ):
        """GLUT keyboard-release callback -> 'on_key_release' event."""
        modifiers = glut.glutGetModifiers()
        self.dispatch_event('on_key_release',
                            self._keyboard_translate(code),
                            self._modifiers_translate(modifiers))

    def _special( self, code, x, y ):
        """GLUT special-key press callback -> 'on_key_press' event."""
        modifiers = glut.glutGetModifiers()
        self.dispatch_event('on_key_press',
                            self._keyboard_translate(code),
                            self._modifiers_translate(modifiers))

    def _special_up( self, code, x, y ):
        """GLUT special-key release callback -> 'on_key_release' event."""
        modifiers = glut.glutGetModifiers()
        self.dispatch_event('on_key_release',
                            self._keyboard_translate(code),
                            self._modifiers_translate(modifiers))

    def _modifiers_translate( self, modifiers ):
        """Translate a GLUT modifier bitmask into glumpy's key.MOD_* mask."""
        _modifiers = 0
        if modifiers & glut.GLUT_ACTIVE_SHIFT:
            _modifiers |= window.key.MOD_SHIFT
        if modifiers & glut.GLUT_ACTIVE_CTRL:
            _modifiers |= window.key.MOD_CTRL
        if modifiers & glut.GLUT_ACTIVE_ALT:
            _modifiers |= window.key.MOD_ALT
        return _modifiers

    def _keyboard_translate( self, code ):
        """Translate a GLUT key code into a glumpy key symbol."""
        # NOTE(review): assumes `code` has a .lower() method (bytes/str); GLUT
        # special keys are delivered via _special and looked up in __key_map__.
        ascii = ord(code.lower())
        if (0x020 <= ascii <= 0x040) or (0x05b <= ascii <= 0x07e):
            return ascii
        elif ascii <= 0x020:
            code = ascii
        return __key_map__.get(code, window.key.UNKNOWN)

    def _display( self ):
        pass

    def _close( self ):
        """GLUT close callback: hide the window and dispatch 'on_close'."""
        __windows__.remove(self)
        # WARNING: This does not work on OSX 10.9 (seg fault or bus error)
        # glut.glutDestroyWindow(self._native_window)
        glut.glutSetWindow(self._native_window)
        glut.glutHideWindow()
        for i in range(len(self._timer_stack)):
            handler, interval = self._timer_stack[i]
            self._clock.unschedule(handler)
        self.dispatch_event('on_close')

    def _reshape(self, width, height):
        """GLUT reshape callback -> 'on_resize' event (queries actual size)."""
        self._width = glut.glutGet(glut.GLUT_WINDOW_WIDTH)
        self._height = glut.glutGet(glut.GLUT_WINDOW_HEIGHT)
        self.dispatch_event('on_resize', self._width, self._height)

    def _visibility(self, state):
        """GLUT visibility callback -> 'on_show'/'on_hide' events."""
        if state == glut.GLUT_VISIBLE:
            self.dispatch_event('on_show')
        elif state == glut.GLUT_NOT_VISIBLE:
            self.dispatch_event('on_hide')

    def _entry(self, state):
        """GLUT cursor enter/leave callback -> 'on_enter'/'on_leave' events."""
        if state == glut.GLUT_ENTERED:
            self.dispatch_event('on_enter')
        elif state == glut.GLUT_LEFT:
            self.dispatch_event('on_leave')

    def _mouse(self, button, state, x, y):
        """GLUT mouse callback -> press/release/scroll events."""
        button = __mouse_map__.get(button, window.mouse.UNKNOWN)
        if state == glut.GLUT_UP:
            self._button = 0
            self._mouse_x = x
            self._mouse_y = y
            self.dispatch_event('on_mouse_release', x, y, button)
        elif state == glut.GLUT_DOWN:
            self._button = button
            self._mouse_x = x
            self._mouse_y = y
            # GLUT reports wheel motion as button 3 (up) / 4 (down) presses
            if button == 3:
                self._button = 0
                self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
            elif button == 4:
                self._button = 0
                self.dispatch_event('on_mouse_scroll', x, y, 0, -1)
            else:
                self.dispatch_event('on_mouse_press', x, y, button)

    def _motion(self, x, y):
        """GLUT motion-with-button callback -> 'on_mouse_drag' event."""
        dx = x - self._mouse_x
        dy = y - self._mouse_y
        self._mouse_x = x
        self._mouse_y = y
        self.dispatch_event('on_mouse_drag', x, y, dx, dy, self._button)

    def _passive_motion(self, x, y):
        """GLUT motion-without-button callback -> 'on_mouse_motion' event."""
        dx = x - self._mouse_x
        dy = y - self._mouse_y
        self._mouse_x = x
        self._mouse_y = y
        self.dispatch_event('on_mouse_motion', x, y, dx, dy)

    def close(self):
        self._close()

    def show(self):
        """Make the window visible and dispatch 'on_show'."""
        self.activate()
        glut.glutShowWindow()
        self.dispatch_event('on_show')

    def hide(self):
        """Hide the window and dispatch 'on_hide'."""
        self.activate()
        glut.glutHideWindow()
        self.dispatch_event('on_hide')

    def set_title(self, title):
        """Set the window title."""
        self.activate()
        glut.glutSetWindowTitle( title )
        self._title = title

    def get_title(self, title=None):
        """Return the window title.

        The ``title`` argument was never used; it now defaults to None so
        existing callers keep working.
        """
        return self._title

    def set_size(self, width, height):
        """Request a new window size."""
        self.activate()
        glut.glutReshapeWindow(width, height)

    def get_size(self):
        """Return the actual (width, height) as reported by GLUT."""
        self.activate()
        self._width = glut.glutGet( glut.GLUT_WINDOW_WIDTH )
        self._height = glut.glutGet( glut.GLUT_WINDOW_HEIGHT )
        return self._width, self._height

    def set_position(self, x, y):
        """Move the window to screen position (x, y)."""
        glut.glutPositionWindow( x, y )

    def get_position(self):
        """Return the window's (x, y) screen position."""
        glut.glutSetWindow( self._native_window )
        # BUG FIX: was glut.GLUT_WINDOW_W, which does not exist in GLUT and
        # raised AttributeError; the horizontal position is GLUT_WINDOW_X.
        self._x = glut.glutGet( glut.GLUT_WINDOW_X )
        self._y = glut.glutGet( glut.GLUT_WINDOW_Y )
        return self._x, self._y

    def swap(self):
        """Swap front and back buffers."""
        glut.glutSwapBuffers()

    def activate(self):
        """Make this window the current GLUT window."""
        glut.glutSetWindow(self._native_window)
# ----------------------------------------------------------------- windows ---
def windows():
    """Return the list of windows created with this backend."""
    return __windows__
# ----------------------------------------------------------------- process ---
def process(dt):
    """Run one iteration of the event loop and return the window count."""
    # Poll for and process pending GLUT events
    glut.glutMainLoopEvent()
    # (loop variable renamed from `window`, which shadowed the imported module)
    for win in __windows__:
        win.activate()
        # Dispatch the main draw event
        win.dispatch_event('on_draw', dt)
        # Dispatch the idle event
        win.dispatch_event('on_idle', dt)
        # Swap buffers
        win.swap()
    return len(__windows__)
|
{
"content_hash": "5d1022076bebac8226641421b9c4788e",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 97,
"avg_line_length": 34.187012987012984,
"alnum_prop": 0.5064579851086461,
"repo_name": "duyuan11/glumpy",
"id": "d16fea680e5805b88a1b9ef2a62b6b2a68d98ac9",
"size": "13461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glumpy/app/window/backends/backend_freeglut.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "GLSL",
"bytes": "165997"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1201174"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
import numpy as np
from astropy import units as u
from astropy import constants
# NOTE(review): this module-level `c` is never used below -- both functions
# use astropy's `constants.c` instead; confirm before removing.
c = 299792458 * u.meter / u.second
# wavelength grid (nanometres) and a test radial velocity used at the bottom
wavelength = np.arange(400, 5000) * u.nanometer
rv = 500000 * u.meter / u.second
def doppler_factor_shift(wave, velocity):
    """Doppler shift using doppler factor (relativistic form)."""
    beta = velocity / constants.c
    factor = ((1 + beta) / (1 - beta)) ** 0.5
    return wave * factor
def doppler_shift(wave, velocity):
    """Doppler shift using non-realtivistically (first-order form)."""
    return wave * (1 + velocity / constants.c)
# Compare the relativistic and first-order shifts over the whole grid; the
# printed difference shows the size of the relativistic correction.
shift_factor = doppler_factor_shift(wavelength, rv)
shift = doppler_shift(wavelength, rv)
print(shift - shift_factor)
|
{
"content_hash": "1f01116ff2b8783a6395066be33ef5e8",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 53,
"avg_line_length": 22.818181818181817,
"alnum_prop": 0.6772908366533864,
"repo_name": "jason-neal/equanimous-octo-tribble",
"id": "78d5e29a2330b7d595eff56aebdb22af2d1db706",
"size": "922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octotribble/Doppler_factor_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "909669"
},
{
"name": "Jupyter Notebook",
"bytes": "253842"
},
{
"name": "Python",
"bytes": "225047"
},
{
"name": "Shell",
"bytes": "6909"
}
],
"symlink_target": ""
}
|
"""
This module implements a transaction manager that can be used to define
transaction handling in a request or view function. It is used by transaction
control middleware and decorators.
The transaction manager can be in managed or in auto state. Auto state means the
system is using a commit-on-save strategy (actually it's more like
commit-on-change). As soon as the .save() or .delete() (or related) methods are
called, a commit is made.
Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks.
"""
import warnings
from functools import wraps
from django.db import (
connections, DEFAULT_DB_ALIAS,
DatabaseError, ProgrammingError)
from django.utils.decorators import available_attrs
class TransactionManagementError(ProgrammingError):
    """Raised when transaction management is used improperly."""
################
# Private APIs #
################
def get_connection(using=None):
    """
    Get a database connection by name, or the default database connection
    if no name is provided.
    """
    alias = DEFAULT_DB_ALIAS if using is None else using
    return connections[alias]
###########################
# Deprecated private APIs #
###########################
def abort(using=None):
    """
    Roll back any ongoing transactions and clean the transaction management
    state of the connection.
    This method is to be used only in cases where using balanced
    leave_transaction_management() calls isn't possible. For example after a
    request has finished, the transaction state isn't known, yet the connection
    must be cleaned up for the next request.
    """
    connection = get_connection(using)
    connection.abort()


def enter_transaction_management(managed=True, using=None, forced=False):
    """
    Enters transaction management for a running thread. It must be balanced with
    the appropriate leave_transaction_management call, since the actual state is
    managed as a stack.
    The state and dirty flag are carried over from the surrounding block or
    from the settings, if there is no surrounding block (dirty is always false
    when no current block is running).
    """
    connection = get_connection(using)
    connection.enter_transaction_management(managed, forced)


def leave_transaction_management(using=None):
    """
    Leaves transaction management for a running thread. A dirty flag is carried
    over to the surrounding block, as a commit will commit all changes, even
    those from outside. (Commits are on connection level.)
    """
    connection = get_connection(using)
    connection.leave_transaction_management()


def is_dirty(using=None):
    """
    Returns True if the current transaction requires a commit for changes to
    happen.
    """
    connection = get_connection(using)
    return connection.is_dirty()


def set_dirty(using=None):
    """
    Sets a dirty flag for the current thread and code streak. This can be used
    to decide in a managed block of code to decide whether there are open
    changes waiting for commit.
    """
    connection = get_connection(using)
    connection.set_dirty()


def set_clean(using=None):
    """
    Resets a dirty flag for the current thread and code streak. This can be used
    to decide in a managed block of code to decide whether a commit or rollback
    should happen.
    """
    connection = get_connection(using)
    connection.set_clean()
def is_managed(using=None):
    """Deprecated no-op kept only to emit a DeprecationWarning."""
    warnings.warn("'is_managed' is deprecated.",
                  category=DeprecationWarning, stacklevel=2)


def managed(flag=True, using=None):
    """Deprecated no-op kept only to emit a DeprecationWarning."""
    warnings.warn("'managed' no longer serves a purpose.",
                  category=DeprecationWarning, stacklevel=2)


def commit_unless_managed(using=None):
    """Deprecated no-op kept only to emit a DeprecationWarning."""
    warnings.warn("'commit_unless_managed' is now a no-op.",
                  category=DeprecationWarning, stacklevel=2)


def rollback_unless_managed(using=None):
    """Deprecated no-op kept only to emit a DeprecationWarning."""
    warnings.warn("'rollback_unless_managed' is now a no-op.",
                  category=DeprecationWarning, stacklevel=2)
###############
# Public APIs #
###############
def get_autocommit(using=None):
    """
    Get the autocommit status of the connection.
    """
    connection = get_connection(using)
    return connection.get_autocommit()


def set_autocommit(autocommit, using=None):
    """
    Set the autocommit status of the connection.
    """
    connection = get_connection(using)
    return connection.set_autocommit(autocommit)


def commit(using=None):
    """
    Commits a transaction and resets the dirty flag.
    """
    connection = get_connection(using)
    connection.commit()


def rollback(using=None):
    """
    Rolls back a transaction and resets the dirty flag.
    """
    connection = get_connection(using)
    connection.rollback()


def savepoint(using=None):
    """
    Creates a savepoint (if supported and required by the backend) inside the
    current transaction. Returns an identifier for the savepoint that will be
    used for the subsequent rollback or commit.
    """
    connection = get_connection(using)
    return connection.savepoint()


def savepoint_rollback(sid, using=None):
    """
    Rolls back the most recent savepoint (if one exists). Does nothing if
    savepoints are not supported.
    """
    connection = get_connection(using)
    connection.savepoint_rollback(sid)


def savepoint_commit(sid, using=None):
    """
    Commits the most recent savepoint (if one exists). Does nothing if
    savepoints are not supported.
    """
    connection = get_connection(using)
    connection.savepoint_commit(sid)


def clean_savepoints(using=None):
    """
    Resets the counter used to generate unique savepoint ids in this thread.
    """
    connection = get_connection(using)
    connection.clean_savepoints()


def get_rollback(using=None):
    """
    Gets the "needs rollback" flag -- for *advanced use* only.
    """
    connection = get_connection(using)
    return connection.get_rollback()


def set_rollback(rollback, using=None):
    """
    Sets or unsets the "needs rollback" flag -- for *advanced use* only.
    When `rollback` is `True`, it triggers a rollback when exiting the
    innermost enclosing atomic block that has `savepoint=True` (that's the
    default). Use this to force a rollback without raising an exception.
    When `rollback` is `False`, it prevents such a rollback. Use this only
    after rolling back to a known-good state! Otherwise, you break the atomic
    block and data corruption may occur.
    """
    connection = get_connection(using)
    return connection.set_rollback(rollback)
#################################
# Decorators / context managers #
#################################
class Atomic(object):
    """
    This class guarantees the atomic execution of a given block.
    An instance can be used either as a decorator or as a context manager.
    When it's used as a decorator, __call__ wraps the execution of the
    decorated function in the instance itself, used as a context manager.
    When it's used as a context manager, __enter__ creates a transaction or a
    savepoint, depending on whether a transaction is already in progress, and
    __exit__ commits the transaction or releases the savepoint on normal exit,
    and rolls back the transaction or to the savepoint on exceptions.
    It's possible to disable the creation of savepoints if the goal is to
    ensure that some code runs within a transaction without creating overhead.
    A stack of savepoints identifiers is maintained as an attribute of the
    connection. None denotes the absence of a savepoint.
    This allows reentrancy even if the same AtomicWrapper is reused. For
    example, it's possible to define `oa = @atomic('other')` and use `@oa` or
    `with oa:` multiple times.
    Since database connections are thread-local, this is thread-safe.
    """
    def __init__(self, using, savepoint):
        # `using` is a database alias; `savepoint` controls whether nested
        # blocks are allowed to create savepoints.
        self.using = using
        self.savepoint = savepoint
    def __enter__(self):
        """Open a transaction (outermost use) or a savepoint (nested use)."""
        connection = get_connection(self.using)
        if not connection.in_atomic_block:
            # Reset state when entering an outermost atomic block.
            connection.commit_on_exit = True
            connection.needs_rollback = False
            if not connection.get_autocommit():
                # Some database adapters (namely sqlite3) don't handle
                # transactions and savepoints properly when autocommit is off.
                # Turning autocommit back on isn't an option; it would trigger
                # a premature commit. Give up if that happens.
                if connection.features.autocommits_when_autocommit_is_off:
                    raise TransactionManagementError(
                        "Your database backend doesn't behave properly when "
                        "autocommit is off. Turn it on before using 'atomic'.")
                # When entering an atomic block with autocommit turned off,
                # Django should only use savepoints and shouldn't commit.
                # This requires at least a savepoint for the outermost block.
                if not self.savepoint:
                    raise TransactionManagementError(
                        "The outermost 'atomic' block cannot use "
                        "savepoint = False when autocommit is off.")
                # Pretend we're already in an atomic block to bypass the code
                # that disables autocommit to enter a transaction, and make a
                # note to deal with this case in __exit__.
                connection.in_atomic_block = True
                connection.commit_on_exit = False
        if connection.in_atomic_block:
            # We're already in a transaction; create a savepoint, unless we
            # were told not to or we're already waiting for a rollback. The
            # second condition avoids creating useless savepoints and prevents
            # overwriting needs_rollback until the rollback is performed.
            if self.savepoint and not connection.needs_rollback:
                sid = connection.savepoint()
                connection.savepoint_ids.append(sid)
            else:
                # None marks a nesting level with no savepoint of its own.
                connection.savepoint_ids.append(None)
        else:
            # We aren't in a transaction yet; create one.
            # The usual way to start a transaction is to turn autocommit off.
            # However, some database adapters (namely sqlite3) don't handle
            # transactions and savepoints properly when autocommit is off.
            # In such cases, start an explicit transaction instead, which has
            # the side-effect of disabling autocommit.
            if connection.features.autocommits_when_autocommit_is_off:
                connection._start_transaction_under_autocommit()
                connection.autocommit = False
            else:
                connection.set_autocommit(False)
            connection.in_atomic_block = True
    def __exit__(self, exc_type, exc_value, traceback):
        """Commit/release on success, roll back on failure, and restore the
        autocommit state when leaving the outermost block."""
        connection = get_connection(self.using)
        if connection.savepoint_ids:
            sid = connection.savepoint_ids.pop()
        else:
            # Prematurely unset this flag to allow using commit or rollback.
            connection.in_atomic_block = False
        try:
            if exc_type is None and not connection.needs_rollback:
                if connection.in_atomic_block:
                    # Release savepoint if there is one
                    if sid is not None:
                        try:
                            connection.savepoint_commit(sid)
                        except DatabaseError:
                            connection.savepoint_rollback(sid)
                            raise
                else:
                    # Commit transaction
                    try:
                        connection.commit()
                    except DatabaseError:
                        connection.rollback()
                        raise
            else:
                # This flag will be set to True again if there isn't a savepoint
                # allowing to perform the rollback at this level.
                connection.needs_rollback = False
                if connection.in_atomic_block:
                    # Roll back to savepoint if there is one, mark for rollback
                    # otherwise.
                    if sid is None:
                        connection.needs_rollback = True
                    else:
                        connection.savepoint_rollback(sid)
                else:
                    # Roll back transaction
                    connection.rollback()
        finally:
            # Outermost block exit when autocommit was enabled.
            if not connection.in_atomic_block:
                if connection.features.autocommits_when_autocommit_is_off:
                    connection.autocommit = True
                else:
                    connection.set_autocommit(True)
            # Outermost block exit when autocommit was disabled.
            elif not connection.savepoint_ids and not connection.commit_on_exit:
                connection.in_atomic_block = False
    def __call__(self, func):
        # Decorator form: run the wrapped callable inside `with self`.
        @wraps(func, assigned=available_attrs(func))
        def inner(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return inner
def atomic(using=None, savepoint=True):
    """Create an atomic scope, usable as a decorator or context manager."""
    # Support the bare-decorator form @atomic: in that case the first
    # positional argument is really the function being decorated, not an
    # alias name.
    if not callable(using):
        # @atomic(...) or `with atomic(...):`
        return Atomic(using, savepoint)
    return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = set([using])
return view
def non_atomic_requests(using=None):
    """Decorator marking a view as exempt from atomic request handling."""
    # Bare form @non_atomic_requests: `using` is the view itself.
    if callable(using):
        return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
    alias = DEFAULT_DB_ALIAS if using is None else using
    def decorator(view):
        return _non_atomic_requests(view, alias)
    return decorator
############################################
# Deprecated decorators / context managers #
############################################
class Transaction(object):
    """
    Dual-purpose decorator / context manager used by the legacy transaction
    helpers.  The ``entering`` callback runs before the wrapped block or
    function and the ``exiting`` callback runs after it, on both success and
    failure.  autocommit, commit_on_success, and commit_manually supply the
    concrete entering/exiting implementations.
    """
    def __init__(self, entering, exiting, using):
        self.entering = entering
        self.exiting = exiting
        self.using = using
    def __enter__(self):
        self.entering(self.using)
    def __exit__(self, exc_type, exc_value, traceback):
        # Returning None (falsy) means exceptions are never suppressed here;
        # `exiting` only gets the exception type for its bookkeeping.
        self.exiting(exc_type, self.using)
    def __call__(self, func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapped
def _transaction_func(entering, exiting, using):
    """Build a Transaction from an entering and an exiting callback.

    ``using`` may be: None (meaning DEFAULT_DB_ALIAS), a database alias, or
    a callable -- the latter happens when a decorator such as @autocommit is
    applied without parentheses, in which case the already-wrapped function
    is returned directly.
    """
    if using is None:
        using = DEFAULT_DB_ALIAS
    if not callable(using):
        return Transaction(entering, exiting, using)
    # `using` is actually the decorated function; wrap it immediately.
    return Transaction(entering, exiting, DEFAULT_DB_ALIAS)(using)
def autocommit(using=None):
    """Deprecated decorator restoring Django's default commit-on-save
    behavior inside a globally transaction-managed setup."""
    warnings.warn("autocommit is deprecated in favor of set_autocommit.",
                  DeprecationWarning, stacklevel=2)
    def _enter(using):
        enter_transaction_management(managed=False, using=using)
    def _exit(exc_type, using):
        leave_transaction_management(using=using)
    return _transaction_func(_enter, _exit, using)
def commit_on_success(using=None):
    """
    This decorator activates commit on response. This way, if the view function
    runs successfully, a commit is made; if the viewfunc produces an exception,
    a rollback is made. This is one of the most common ways to do transaction
    control in Web apps.
    """
    warnings.warn("commit_on_success is deprecated in favor of atomic.",
                  DeprecationWarning, stacklevel=2)
    def entering(using):
        enter_transaction_management(using=using)
    def exiting(exc_type, using):
        # On exception: roll back any pending changes.  On success: try to
        # commit pending changes, rolling back (and re-raising) if the commit
        # itself fails.  Transaction management is always left, even if the
        # commit/rollback raises.
        try:
            if exc_type is not None:
                if is_dirty(using=using):
                    rollback(using=using)
            else:
                if is_dirty(using=using):
                    try:
                        commit(using=using)
                    except:
                        rollback(using=using)
                        raise
        finally:
            leave_transaction_management(using=using)
    return _transaction_func(entering, exiting, using)
def commit_manually(using=None):
    """Deprecated decorator enabling fully manual transaction control.

    Automatic transaction handling is disabled; the caller is responsible
    for issuing commit/rollback themselves.
    """
    warnings.warn("commit_manually is deprecated in favor of set_autocommit.",
                  DeprecationWarning, stacklevel=2)
    def _enter(using):
        enter_transaction_management(using=using)
    def _exit(exc_type, using):
        leave_transaction_management(using=using)
    return _transaction_func(_enter, _exit, using)
def commit_on_success_unless_managed(using=None, savepoint=False):
    """Transitory API kept for backwards-compatibility during refactoring.

    Once the legacy transaction management is fully deprecated, this should
    simply be replaced by atomic. Until then, it's necessary to guarantee
    that a commit occurs on exit, which atomic doesn't do when it's nested.
    Unlike atomic, savepoint defaults to False because that's closer to the
    legacy behavior.
    """
    connection = get_connection(using)
    legacy_managed = (not connection.get_autocommit()
                      and not connection.in_atomic_block)
    if not legacy_managed:
        return atomic(using, savepoint)
    # Legacy managed mode: don't commit, just flag the connection dirty so
    # the legacy machinery commits later.
    def _enter(using):
        pass
    def _exit(exc_type, using):
        set_dirty(using=using)
    return _transaction_func(_enter, _exit, using)
|
{
"content_hash": "ec76fa45acd59324797a1935f1f07331",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 80,
"avg_line_length": 35.36261682242991,
"alnum_prop": 0.6414715365505577,
"repo_name": "beckastar/django",
"id": "25ec81d87a0eb5217a9f67151f3ad9f581ff5e74",
"size": "18919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/transaction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42830"
},
{
"name": "HTML",
"bytes": "173972"
},
{
"name": "JavaScript",
"bytes": "102432"
},
{
"name": "Makefile",
"bytes": "140"
},
{
"name": "Python",
"bytes": "9484114"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from mcmodels import Rocket, Arrow
from mcpi.minecraft import Minecraft
from time import sleep
mc = Minecraft.create()

# Anchor both models at the player's tile position; the arrow is raised
# 10 blocks so the two models don't overlap.
pos = mc.player.getTilePos()
pos2 = pos.clone()
pos2.y += 10  # BUG FIX: was the no-op expression `pos2.y + 10`

print("create rocket")
r = Rocket(mc, pos)
sleep(5)
print("create arrow")  # BUG FIX: was a copy-pasted "create rocket"
a = Arrow(mc, pos2)
sleep(5)
print("clear arrow")
a.clear()
sleep(5)
print("clear rocket")
r.clear()
|
{
"content_hash": "e526f25e351cd20e7dcd65d03c89b6d5",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 36,
"avg_line_length": 12.620689655172415,
"alnum_prop": 0.6994535519125683,
"repo_name": "martinohanlon/SpaceCRAFT",
"id": "7270a35e0f1e59ce62fdec783b845c157309b0e5",
"size": "382",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "poc/testmodels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "133848"
},
{
"name": "Shell",
"bytes": "949"
}
],
"symlink_target": ""
}
|
# Python 2-style exception syntax (`except ImportError, e`); this module
# predates Python 3 compatibility.
try:
    import subprocess
except ImportError, e:
    # Best-effort: swallow the failure here; the classes below will fail at
    # runtime if subprocess is genuinely unavailable.
    # NOTE(review): the bound exception `e` is never used.
    pass
class GetInterfaceList(object):
    """Discover wireless interfaces by parsing `/sbin/iwconfig` output.

    NOTE(review): Python 2 module; parsing assumes `communicate()` returns
    str, and depends on the exact text layout of the local iwconfig version
    -- confirm against the target distribution.
    """
    def __init__(self):
        # Initiating an empty tuple to hold available wireless interfaces
        self.interfaces = ()
        # Calls 'iwconfig' command in system and pipes the output as stdout.
        proc = subprocess.Popen(['/sbin/iwconfig'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.stdout, self.stderr = proc.communicate()
        # NOTE(review): `newline` and `lines` below are never read afterwards
        # -- they look like leftovers.
        newline = self.stdout.count('\n')
        lines = []
    # function to handle the output from 'iwconfig' and return tuple of available interfaces with their corresponding mode of operation.
    def getIface(self):
        """Return a tuple of [interface_name, mode] pairs.

        Assumes interfaces are separated by a '\n \n' record delimiter and
        that the Mode token is whitespace-field 3 for monitor-mode entries
        and field 4 otherwise -- TODO confirm for the deployed iwconfig.
        """
        lines = self.stdout.split('\n \n')
        lines.remove('')
        for i in range(len(lines)):
            words = lines[i].split()
            interface = words[0]
            if "Monitor" in lines[i]:
                iface_mode = words[3].split(':')
            else:
                iface_mode = words[4].split(':')
            # e.g. "Mode:Managed" -> "managed"
            mode = iface_mode[1].lower()
            # Since we cannot add elements into tuple, we first convert it into list, insert elements and finally, convert it back into tuple.
            self.interfaces = list(self.interfaces)
            self.interfaces.insert(i, [interface, mode])
            self.interfaces = tuple(self.interfaces)
        # Returns the tuple of interfaces to caller's location
        return self.interfaces
class ListInterfaces(object):
    """List the system's wired (non-wireless) network interfaces."""
    def getAllInterfaces(self):
        """Return interface names from `ifconfig -s` minus the wireless ones
        reported by GetInterfaceList."""
        proc = subprocess.Popen(['/sbin/ifconfig', '-s'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        rows = out.split('\n')
        # Drop the column-header row and the trailing empty split fragment.
        rows.remove(rows[0])
        rows.remove('')
        names = [row.split()[0] for row in rows]
        # Subtract every wireless interface from the full list.
        for wireless_name, wireless_mode in GetInterfaceList().getIface():
            if wireless_name in names:
                names.remove(wireless_name)
        return names
'''
if __name__ == '__main__':
getifacelist = ListInterfaces().getAllInterfaces()
for iface in getifacelist:
print "Interface: %s" % iface
getwiface = GetInterfaceList().getIface()
for wiface, mode in getwiface:
print "Wireless interface: %s, Mode: %s" % (wiface, mode)
'''
|
{
"content_hash": "c259083610c4804783847b2d37baf92f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 133,
"avg_line_length": 33.04761904761905,
"alnum_prop": 0.6892411143131604,
"repo_name": "sajjanbh/WLAN-Monitoring",
"id": "5e9abf7c41d3efe94d5af0fd45a073b5cf5974c9",
"size": "2105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wireless/iface_list.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "816"
},
{
"name": "Python",
"bytes": "65863"
}
],
"symlink_target": ""
}
|
import os
import unittest
from ample import constants
from ample.util import rio
class TestContacts(unittest.TestCase):
    """Unit tests for rio contact parsing and analysis of ncont log files.

    The seven parse tests shared an identical copy-pasted body differing only
    in the log name and the four expected counts; that body now lives in the
    `_parse_log` / `_assert_counts` helpers.
    """

    @classmethod
    def setUpClass(cls):
        cls.thisd = os.path.abspath(os.path.dirname(__file__))
        cls.ample_share = constants.SHARE_DIR
        cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')

    def _parse_log(self, logname):
        """Parse *logname* from the testfiles dir and return analysed RioData."""
        logfile = os.path.join(self.testfiles_dir, logname)
        c = rio.Rio()
        contactData = rio.RioData()
        c.parseNcontLog(contactData, logfile=logfile, clean_up=False)
        c.analyseRio(contactData)
        return contactData

    def _assert_counts(self, data, num, in_register, oo_register, backwards):
        """Assert the four headline counts on an analysed RioData."""
        self.assertEqual(data.numContacts, num)
        self.assertEqual(data.rioInRegister, in_register)
        self.assertEqual(data.rioOoRegister, oo_register)
        self.assertEqual(data.rioBackwards, backwards)

    def test_parse1(self):
        self._assert_counts(self._parse_log("ncont1.log"), 26, 0, 0, 0)

    def test_parse2(self):
        self._assert_counts(self._parse_log("ncont2.log"), 10, 0, 7, 7)

    def test_parse3(self):
        self._assert_counts(self._parse_log("ncont3.log"), 14, 0, 10, 0)

    def test_parse4(self):
        self._assert_counts(self._parse_log("ncont4.log"), 56, 0, 55, 0)

    def test_parse5(self):
        self._assert_counts(self._parse_log("ncont5.log"), 77, 19, 54, 16)

    def test_parse7(self):
        self._assert_counts(self._parse_log("ncont7.log"), 18, 0, 0, 0)

    def test_parse8(self):
        self._assert_counts(self._parse_log("ncont8.log"), 9, 0, 0, 0)

    def test_helix5(self):
        """Helix sequence reconstruction from the ncont5 contacts + DSSP."""
        logfile = os.path.join(self.testfiles_dir, "ncont5.log")
        dssplog = os.path.join(self.testfiles_dir, "3RA3.dssp")
        c = rio.Rio()
        contactData = rio.RioData()
        c.parseNcontLog(contactData, logfile=logfile, clean_up=False)
        sequence = c.helixFromContacts(contactData.contacts, dssplog)
        self.assertEqual("NARLKQEIAALEYEIAAL", sequence)
# Allow running this test module directly: `python test_rio.py`.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "c5263fe3feea8d42a58a6a8fee8ae50e",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 70,
"avg_line_length": 40.3921568627451,
"alnum_prop": 0.6716019417475728,
"repo_name": "rigdenlab/ample",
"id": "ed0910e6fd8159694f423031b5e0c7cf8647266f",
"size": "4120",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ample/util/tests/test_rio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "45"
},
{
"name": "CMake",
"bytes": "426"
},
{
"name": "Fortran",
"bytes": "52396"
},
{
"name": "Python",
"bytes": "1088422"
},
{
"name": "Shell",
"bytes": "1022"
},
{
"name": "TeX",
"bytes": "10539"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the template Building for this player house (swgpy codegen stub).

    :param kernel: engine kernel passed by the template loader (unused here).
    :returns: a configured Building instance.
    """
    result = Building()
    result.template = "object/building/player/shared_player_house_corellia_large_style_02.iff"
    result.attribute_template_id = -1
    result.stfName("building_name","housing_corellia_large_style_2")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
|
{
"content_hash": "bfdf8ea0fdbed12df70a00d058738287",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 91,
"avg_line_length": 26.615384615384617,
"alnum_prop": 0.7138728323699421,
"repo_name": "obi-two/Rebelion",
"id": "1c9ec403150804bf726693dea04d3924da7ea212",
"size": "491",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/building/player/shared_player_house_corellia_large_style_02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""Test Home Assistant pressure utility functions."""
import pytest
from homeassistant.const import (
PRESSURE_CBAR,
PRESSURE_HPA,
PRESSURE_INHG,
PRESSURE_KPA,
PRESSURE_MBAR,
PRESSURE_MMHG,
PRESSURE_PA,
PRESSURE_PSI,
)
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util.pressure as pressure_util
# Symbols used by the validation tests below.
INVALID_SYMBOL = "bob"  # not a recognized pressure unit
VALID_SYMBOL = PRESSURE_PA
def test_raise_deprecation_warning(caplog: pytest.LogCaptureFixture) -> None:
    """Calling the legacy convert() must log a deprecation message."""
    result = pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA)
    assert result == 2
    assert "use unit_conversion.PressureConverter instead" in caplog.text
def test_convert_same_unit():
    """Converting between identical units must return the value unchanged."""
    samples = (
        (2, PRESSURE_PA),
        (3, PRESSURE_HPA),
        (4, PRESSURE_MBAR),
        (5, PRESSURE_INHG),
        (6, PRESSURE_KPA),
        (7, PRESSURE_CBAR),
        (8, PRESSURE_MMHG),
    )
    for value, unit in samples:
        assert pressure_util.convert(value, unit, unit) == value
def test_convert_invalid_unit():
    """An unrecognized unit symbol on either side must raise."""
    for from_unit, to_unit in (
        (INVALID_SYMBOL, VALID_SYMBOL),
        (VALID_SYMBOL, INVALID_SYMBOL),
    ):
        with pytest.raises(HomeAssistantError, match="is not a recognized .* unit"):
            pressure_util.convert(5, from_unit, to_unit)
def test_convert_nonnumeric_value():
    """Passing a non-numeric value must raise TypeError."""
    bad_value = "a"
    with pytest.raises(TypeError):
        pressure_util.convert(bad_value, PRESSURE_HPA, PRESSURE_INHG)
def test_convert_from_hpascals():
    """Check hPa conversions to the other units against known values."""
    hpascals = 1000
    expected = {
        PRESSURE_PSI: 14.5037743897,
        PRESSURE_INHG: 29.5299801647,
        PRESSURE_PA: 100000,
        PRESSURE_KPA: 100,
        PRESSURE_MBAR: 1000,
        PRESSURE_CBAR: 100,
    }
    for unit, value in expected.items():
        assert pressure_util.convert(hpascals, PRESSURE_HPA, unit) == pytest.approx(
            value
        )
def test_convert_from_kpascals():
    """Test conversion from kPa to other units."""
    # BUG FIX: the docstring was a copy-paste saying "from hPA" although this
    # function exercises kPa conversions.
    kpascals = 100
    expected = {
        PRESSURE_PSI: 14.5037743897,
        PRESSURE_INHG: 29.5299801647,
        PRESSURE_PA: 100000,
        PRESSURE_HPA: 1000,
        PRESSURE_MBAR: 1000,
        PRESSURE_CBAR: 100,
    }
    for unit, value in expected.items():
        assert pressure_util.convert(kpascals, PRESSURE_KPA, unit) == pytest.approx(
            value
        )
def test_convert_from_inhg():
    """Check inHg conversions to the other units against known values."""
    inhg = 30
    expected = {
        PRESSURE_PSI: 14.7346266155,
        PRESSURE_KPA: 101.59167,
        PRESSURE_HPA: 1015.9167,
        PRESSURE_PA: 101591.67,
        PRESSURE_MBAR: 1015.9167,
        PRESSURE_CBAR: 101.59167,
        PRESSURE_MMHG: 762,
    }
    for unit, value in expected.items():
        assert pressure_util.convert(inhg, PRESSURE_INHG, unit) == pytest.approx(
            value
        )
def test_convert_from_mmhg():
    """Test conversion from mmHg to other units."""
    # BUG FIX: the local was copy-pasted as `inhg` although it holds a mmHg
    # value and every conversion below starts from PRESSURE_MMHG.
    mmhg = 30
    expected = {
        PRESSURE_PSI: 0.580103,
        PRESSURE_KPA: 3.99967,
        PRESSURE_HPA: 39.9967,
        PRESSURE_PA: 3999.67,
        PRESSURE_MBAR: 39.9967,
        PRESSURE_CBAR: 3.99967,
        PRESSURE_INHG: 1.181102,
    }
    for unit, value in expected.items():
        assert pressure_util.convert(mmhg, PRESSURE_MMHG, unit) == pytest.approx(
            value
        )
|
{
"content_hash": "095652dadef217d0bd2bdd1c0fa7d7be",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 88,
"avg_line_length": 34.567567567567565,
"alnum_prop": 0.6792415949960907,
"repo_name": "w1ll1am23/home-assistant",
"id": "f87b89df3f76932f658425e63db3cb11a773c239",
"size": "5116",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/util/test_pressure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Utilities related to SSH connection management."""
import os
import string
from eventlet import pools
from oslo_config import cfg
import paramiko
from cinder import exception
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ssh_opts = [
cfg.BoolOpt('strict_ssh_host_key_policy',
default=False,
help='Option to enable strict host key checking. When '
'set to "True" Cinder will only connect to systems '
'with a host key present in the configured '
'"ssh_hosts_key_file". When set to "False" the host key '
'will be saved upon first connection and used for '
'subsequent connections. Default=False'),
cfg.StrOpt('ssh_hosts_key_file',
default='$state_path/ssh_known_hosts',
help='File containing SSH host keys for the systems with which '
'Cinder needs to communicate. OPTIONAL: '
'Default=$state_path/ssh_known_hosts'),
]
CONF = cfg.CONF
CONF.register_opts(ssh_opts)
class SSHPool(pools.Pool):
    """A simple eventlet pool to hold ssh connections."""
    def __init__(self, ip, port, conn_timeout, login, password=None,
                 privatekey=None, *args, **kwargs):
        """Store connection parameters and validate host-key configuration.

        :param ip: host to connect to
        :param port: SSH port
        :param conn_timeout: connect timeout, also reused as the keepalive
            interval; a falsy value disables both
        :param login: SSH user name
        :param password: password for password authentication
        :param privatekey: path to an RSA private key file (used when no
            password is given)

        An optional ``hosts_key_file`` kwarg names a secondary known-hosts
        file loaded in addition to CONF.ssh_hosts_key_file.
        """
        self.ip = ip
        self.port = port
        self.login = login
        self.password = password
        self.conn_timeout = conn_timeout if conn_timeout else None
        self.privatekey = privatekey
        self.hosts_key_file = None
        # Validate good config setting here.
        # Paramiko handles the case where the file is inaccessible.
        if not CONF.ssh_hosts_key_file:
            raise exception.ParameterNotFound(param='ssh_hosts_key_file')
        elif not os.path.isfile(CONF.ssh_hosts_key_file):
            # If using the default path, just create the file.
            if CONF.state_path in CONF.ssh_hosts_key_file:
                open(CONF.ssh_hosts_key_file, 'a').close()
            else:
                msg = (_("Unable to find ssh_hosts_key_file: %s") %
                       CONF.ssh_hosts_key_file)
                raise exception.InvalidInput(reason=msg)
        if 'hosts_key_file' in kwargs.keys():
            self.hosts_key_file = kwargs.pop('hosts_key_file')
            LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be "
                         "loaded along with %(conf)s from /etc/cinder.conf.") %
                     {'kwargs': self.hosts_key_file,
                      'conf': CONF.ssh_hosts_key_file})
        LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' "
                  "using ssh_hosts_key_file '%(key_file)s'." %
                  {'policy': CONF.strict_ssh_host_key_policy,
                   'key_file': CONF.ssh_hosts_key_file})
        self.strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy
        # hosts_key_file ends up either the configured file alone or
        # "secondary,configured" as a comma-separated pair (split in create()).
        if not self.hosts_key_file:
            self.hosts_key_file = CONF.ssh_hosts_key_file
        else:
            self.hosts_key_file += ',' + CONF.ssh_hosts_key_file
        super(SSHPool, self).__init__(*args, **kwargs)
    def create(self):
        """Create and return a new connected paramiko SSHClient.

        Raises paramiko.SSHException on any connection/setup failure.
        """
        try:
            ssh = paramiko.SSHClient()
            if ',' in self.hosts_key_file:
                files = string.split(self.hosts_key_file, ',')
                for f in files:
                    ssh.load_host_keys(f)
            else:
                ssh.load_host_keys(self.hosts_key_file)
            # If strict_ssh_host_key_policy is set we want to reject, by
            # default if there is not entry in the known_hosts file.
            # Otherwise we use AutoAddPolicy which accepts on the first
            # Connect but fails if the keys change.  load_host_keys can
            # handle hashed known_host entries.
            if self.strict_ssh_host_key_policy:
                ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
            else:
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            if self.password:
                ssh.connect(self.ip,
                            port=self.port,
                            username=self.login,
                            password=self.password,
                            timeout=self.conn_timeout)
            elif self.privatekey:
                pkfile = os.path.expanduser(self.privatekey)
                privatekey = paramiko.RSAKey.from_private_key_file(pkfile)
                ssh.connect(self.ip,
                            port=self.port,
                            username=self.login,
                            pkey=privatekey,
                            timeout=self.conn_timeout)
            else:
                msg = _("Specify a password or private_key")
                raise exception.CinderException(msg)
            # Paramiko by default sets the socket timeout to 0.1 seconds,
            # ignoring what we set through the sshclient. This doesn't help for
            # keeping long lived connections. Hence we have to bypass it, by
            # overriding it after the transport is initialized. We are setting
            # the sockettimeout to None and setting a keepalive packet so that,
            # the server will keep the connection open. All that does is send
            # a keepalive packet every ssh_conn_timeout seconds.
            if self.conn_timeout:
                transport = ssh.get_transport()
                transport.sock.settimeout(None)
                transport.set_keepalive(self.conn_timeout)
            return ssh
        except Exception as e:
            msg = _("Error connecting via ssh: %s") % e
            LOG.error(msg)
            raise paramiko.SSHException(msg)
    def get(self):
        """Return an item from the pool, when one is available.
        This may cause the calling greenthread to block. Check if a
        connection is active before returning it.
        For dead connections create and return a new connection.
        """
        conn = super(SSHPool, self).get()
        if conn:
            if conn.get_transport().is_active():
                return conn
            else:
                conn.close()
        return self.create()
    def remove(self, ssh):
        """Close an ssh client and remove it from free_items."""
        ssh.close()
        # NOTE(review): `ssh` is rebound to None before the membership test
        # below, so the free_items lookup/pop can never match the closed
        # client -- this looks like dead code; confirm intent.
        ssh = None
        if ssh in self.free_items:
            self.free_items.pop(ssh)
        if self.current_size > 0:
            self.current_size -= 1
|
{
"content_hash": "6fb0f46778cbdc108788d7a01a05a09b",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 79,
"avg_line_length": 40.96875,
"alnum_prop": 0.5600305110602594,
"repo_name": "Accelerite/cinder",
"id": "5ce4b1cf232cf5a69cdef4cad6b53ea62f2f7188",
"size": "7356",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/ssh_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10152545"
},
{
"name": "Shell",
"bytes": "9905"
}
],
"symlink_target": ""
}
|
import json
import logging
import datetime
import random
from flask import url_for, current_app
from flask.ext.security import current_user
from flask.ext.admin.babel import lazy_gettext
from quokka.core.db import db
from quokka import admin
from quokka.core.admin.models import ModelAdmin
from quokka.modules.accounts.models import User
from quokka.utils.text import slugify
logger = logging.getLogger()
###############################################################
# Commom extendable base classes
###############################################################
class Publishable(object):
    """Mixin adding publication state plus audit timestamps and authorship.

    NOTE(review): plain mixin -- the db fields only take effect when mixed
    into a mongoengine Document subclass.
    """
    published = db.BooleanField(default=False)
    available_at = db.DateTimeField(default=datetime.datetime.now)
    created_at = db.DateTimeField(default=datetime.datetime.now)
    updated_at = db.DateTimeField(default=datetime.datetime.now)
    created_by = db.ReferenceField(User, reverse_delete_rule=db.DENY)
    last_updated_by = db.ReferenceField(User, reverse_delete_rule=db.DENY)
    def save(self, *args, **kwargs):
        """Stamp audit fields from the request user, then delegate to the
        real Document.save()."""
        self.updated_at = datetime.datetime.now()
        try:
            user = User.objects.get(id=current_user.id)
            # Only the very first save records the creator.
            if not self.id:
                self.created_by = user
            self.last_updated_by = user
        except Exception as e:
            # Best-effort: saving outside a request context (no authenticated
            # user) must not fail, only log.
            logger.warning("No user to save the model: %s" % e.message)
        super(Publishable, self).save(*args, **kwargs)
class Slugged(object):
    """Mixin adding slug fields and materialized-path helpers.

    ``long_slug`` is the full URL path of the document; ``mpath`` is a
    comma-delimited materialized path used for subtree queries.
    """
    slug = db.StringField(max_length=255)
    long_slug = db.StringField()
    mpath = db.StringField()
    def _create_mpath_long_slug(self):
        """Derive long_slug/mpath from the parent, or from the channel when
        the document has no usable parent."""
        try:
            if self.parent and self.parent != self:
                self.long_slug = "/".join(
                    [self.parent.long_slug, self.slug]
                )
                self.mpath = "".join(
                    [self.parent.mpath, self.slug, ',']
                )
            else:
                self.long_slug = self.slug
                self.mpath = ",%s," % self.slug
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.  Documents without a `parent`
            # attribute fall back to slugging under their channel.
            logger.info("excepting to content validate_long_slug")
            self.long_slug = "/".join(
                [self.channel.long_slug, self.slug]
            )
            self.mpath = "".join([self.channel.mpath, self.slug, ','])
    def validate_long_slug(self):
        """Ensure long_slug is unique.

        With SMART_SLUG_ENABLED a random suffix de-duplicates the slug;
        otherwise a db.ValidationError is raised.
        """
        self._create_mpath_long_slug()
        filters = dict(long_slug=self.long_slug)
        if self.id:
            # Exclude ourselves when re-validating an existing document.
            filters['id__ne'] = self.id
        exist = self.__class__.objects(**filters)
        if exist.count():
            if current_app.config.get('SMART_SLUG_ENABLED', False):
                self.slug = "{}-{}".format(self.slug, random.getrandbits(32))
                self._create_mpath_long_slug()
            else:
                raise db.ValidationError(
                    lazy_gettext("%(slug)s slug already exists",
                                 slug=self.long_slug)
                )
    def validate_slug(self, title=None):
        """Slugify the explicit slug, or derive one from the given/own title."""
        if self.slug:
            self.slug = slugify(self.slug)
        else:
            self.slug = slugify(title or self.title)
class Comment(db.EmbeddedDocument):
    """An embedded reader comment on a piece of content."""
    body = db.StringField(verbose_name="Comment", required=True)
    author = db.StringField(verbose_name="Name", max_length=255, required=True)
    published = db.BooleanField(default=True)
    created_at = db.DateTimeField(default=datetime.datetime.now)
    created_by = db.ReferenceField(User)
    def __unicode__(self):
        # e.g. "alice-first ten ..." -- author plus a 10-char body preview.
        return "{}-{}...".format(self.author, self.body[:10])
    # mongoengine metadata: newest comments first.
    # NOTE(review): the '-available_at' index refers to a field this embedded
    # document does not define -- confirm it is intentional.
    meta = {
        'indexes': ['-created_at', '-available_at'],
        'ordering': ['-created_at']
    }
class Commentable(object):
    # Mixin: embeds a list of Comment documents on the host document.
    comments = db.ListField(db.EmbeddedDocumentField(Comment))
class Imaged(object):
    # Mixin: main image (stored as a string path/URL) plus its caption.
    main_image = db.StringField()
    main_image_caption = db.StringField()
class Tagged(object):
    # Mixin: free-form tag list, each tag capped at 50 characters.
    tags = db.ListField(db.StringField(max_length=50))
class CustomValue(db.EmbeddedDocument):
    """A named value stored as a raw string plus the format to decode it.

    ``rawvalue`` always holds text; ``formatter`` selects how it is
    (de)serialized via the ``value`` property.
    """
    FORMATS = (
        ('json', "json"),
        ('text', "text"),
        ('int', "int"),
        ('float', "float"),
    )
    # Kept as a class attribute for backward compatibility.  NOTE: reading
    # this through ``self`` binds the lambda as a method, which makes it
    # uncallable with a single argument -- so the property below uses a
    # local identity function as its fallback instead.
    DEFAULT_FORMATTER = lambda value: value
    FORMATTERS = {
        'json': lambda value: json.loads(value),
        'text': DEFAULT_FORMATTER,
        'int': lambda value: int(value),
        'float': lambda value: float(value)
    }
    REVERSE_FORMATTERS = {
        'json': lambda value:
        value if isinstance(value, str) else json.dumps(value),
        'text': DEFAULT_FORMATTER,
        'int': DEFAULT_FORMATTER,
        'float': DEFAULT_FORMATTER
    }
    name = db.StringField(max_length=50, required=True)
    rawvalue = db.StringField(verbose_name=lazy_gettext("Value"),
                              required=True)
    formatter = db.StringField(choices=FORMATS, default="text", required=True)
    @property
    def value(self):
        """Return ``rawvalue`` decoded according to ``formatter``."""
        decode = self.FORMATTERS.get(self.formatter, lambda v: v)
        return decode(self.rawvalue)
    @value.setter
    def value(self, value):  # lint:ok
        # BUG FIX: the original fell back to ``self.STR_FORMATTER``, an
        # attribute that does not exist anywhere in the class, so an
        # unknown formatter raised AttributeError on assignment.  Fall
        # back to identity instead, mirroring the getter.
        encode = self.REVERSE_FORMATTERS.get(self.formatter, lambda v: v)
        self.rawvalue = encode(value)
    def clean(self):
        # Fail validation early if rawvalue cannot be decoded.
        try:
            self.value
        except Exception as e:
            # NOTE(review): ``e.message`` is Python-2-only.
            raise Exception(e.message)
        super(CustomValue, self).clean()
    def __unicode__(self):
        return self.name
class HasCustomValue(object):
    """Mixin adding a list of CustomValue entries with unique names."""
    values = db.ListField(db.EmbeddedDocumentField(CustomValue))
    def clean(self):
        # Reject duplicate names, reporting the earliest name in list
        # order that occurs more than once.
        names = [item.name for item in self.values]
        duplicated = next(
            (candidate for candidate in names
             if names.count(candidate) > 1),
            None
        )
        if duplicated is not None:
            raise Exception(lazy_gettext("%(name)s already exists",
                                         name=duplicated))
        super(HasCustomValue, self).clean()
class Channel(HasCustomValue, Publishable, Slugged, db.DynamicDocument):
    """Hierarchical content channel (section/category) with its own URL."""
    title = db.StringField(max_length=255, required=True)
    description = db.StringField()
    # Navigation / publication flags.
    show_in_menu = db.BooleanField(default=False)
    is_homepage = db.BooleanField(default=False)
    include_in_rss = db.BooleanField(default=False)
    indexable = db.BooleanField(default=True)
    canonical_url = db.StringField()
    order = db.IntField(default=0)
    # MPTT
    # Self-reference builds the channel tree; DENY prevents deleting a
    # channel that still has children.
    parent = db.ReferenceField('self', required=False, default=None,
                               reverse_delete_rule=db.DENY)
    @classmethod
    def get_homepage(cls, attr=None):
        """Return the homepage channel, one of its attributes, or None."""
        try:
            homepage = cls.objects.get(is_homepage=True)
        except Exception as e:
            # No channel flagged as homepage (or more than one matched).
            logger.info("There is no homepage: %s" % e.message)
            return None
        else:
            if not attr:
                return homepage
            else:
                # Fall back to the channel itself if the attr is missing.
                return getattr(homepage, attr, homepage)
    def __unicode__(self):
        return "{}-{}".format(self.title, self.long_slug)
    def clean(self):
        # At most one channel may be flagged as the homepage.
        homepage = Channel.objects(is_homepage=True)
        if self.is_homepage and homepage and not self in homepage:
            raise db.ValidationError(lazy_gettext("Home page already exists"))
        super(Channel, self).clean()
    def save(self, *args, **kwargs):
        # Slug fields must be consistent before hitting the database.
        self.validate_slug()
        self.validate_long_slug()
        super(Channel, self).save(*args, **kwargs)
class Channeling(object):
    """Mixin linking content to one main channel plus optional related ones."""
    # DENY: a channel with content pointing at it cannot be deleted.
    channel = db.ReferenceField(Channel, required=True,
                                reverse_delete_rule=db.DENY)
    # Objects live in exactly one main channel (which provides their URL),
    # but they may also be related to any number of other channels.
    related_channels = db.ListField(
        db.ReferenceField('Channel', reverse_delete_rule=db.NULLIFY)
    )
    show_on_channel = db.BooleanField(default=True)
###############################################################
# Base Content for every new content to extend. inheritance=True
###############################################################
class Config(HasCustomValue, Publishable, db.DynamicDocument):
    """Named group of site configuration values (see HasCustomValue)."""
    group = db.StringField(max_length=255)
    description = db.StringField()
    def __unicode__(self):
        return self.group
class Content(HasCustomValue, Imaged, Publishable, Slugged, Commentable,
              Channeling, Tagged, db.DynamicDocument):
    """Base document for all content types; subclasses inherit storage
    (``allow_inheritance``) and share the indexes declared below."""
    title = db.StringField(max_length=255, required=True)
    summary = db.StringField(required=False)
    meta = {
        'allow_inheritance': True,
        'indexes': ['-created_at', 'slug'],
        'ordering': ['-created_at']
    }
    def get_absolute_url(self, endpoint='detail'):
        """Build the public URL for this content.

        Content in the homepage channel uses its bare slug; everything
        else uses the full ``long_slug`` path.
        """
        if self.channel.is_homepage:
            long_slug = self.slug
        else:
            long_slug = self.long_slug
        try:
            # Subclasses may define URL_NAMESPACE to route to their own view.
            return url_for(self.URL_NAMESPACE, long_slug=long_slug)
        except Exception:
            # FIX: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.  Exception still covers the
            # expected AttributeError (no URL_NAMESPACE) and BuildError.
            return url_for(endpoint, long_slug=long_slug)
    def __unicode__(self):
        return self.title
    @property
    def content_type(self):
        """Name of the concrete content subclass (e.g. for templates)."""
        return self.__class__.__name__
    def save(self, *args, **kwargs):
        # Slug fields must be consistent before hitting the database.
        self.validate_slug()
        self.validate_long_slug()
        super(Content, self).save(*args, **kwargs)
###############################################################
# Admin views
###############################################################
class ConfigAdmin(ModelAdmin):
    """Admin CRUD view for Config; restricted to admin/developer roles."""
    roles_accepted = ('admin', 'developer')
    column_list = ("group", "description", "published",
                   "created_at", "updated_at")
    column_filters = ("group", "description")
    form_columns = ("group", "description", "published", "values")
# Expose the view under the "Settings" admin category.
admin.register(Config, ConfigAdmin, category="Settings")
class ChannelAdmin(ModelAdmin):
    """Admin CRUD view for Channel; restricted to admin/editor roles."""
    roles_accepted = ('admin', 'editor')
    column_list = ('title', 'long_slug', 'is_homepage', 'published')
    column_filters = ['published', 'is_homepage', 'include_in_rss',
                     'show_in_menu', 'indexable']
    column_searchable_list = ('title', 'description')
    form_columns = ['title', 'slug', 'description', 'parent', 'is_homepage',
                    'include_in_rss', 'indexable', 'show_in_menu', 'order',
                    'published', 'canonical_url', 'values']
# Expose the view under the "Content" admin category.
admin.register(Channel, ChannelAdmin, category="Content")
|
{
"content_hash": "0e84e5327c0d1a66ce69e9bcbb049b36",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 79,
"avg_line_length": 31.91194968553459,
"alnum_prop": 0.5782420181316515,
"repo_name": "eltonsantos/quokka",
"id": "0761ee87129a6e74cb84ff096ad5a5fd4deb403f",
"size": "10195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quokka/core/models.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Module containing filter functions that allow code to be highlighted
from within Jinja templates.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
# pygments must not be imported at the module level
# because errors should be raised at runtime if it's actually needed,
# not import time, when it may not be needed.
from nbconvert.utils.base import NbConvertBase
from warnings import warn
# Output mime/format names whose payloads may span multiple lines.
# NOTE(review): not referenced anywhere in this module -- presumably
# consumed by other nbconvert filter modules; confirm before removing.
MULTILINE_OUTPUTS = ['text', 'html', 'svg', 'latex', 'javascript', 'json']
# Public API of this module.
__all__ = [
    'Highlight2HTML',
    'Highlight2Latex'
]
class Highlight2HTML(NbConvertBase):
    """Callable filter rendering notebook source as highlighted HTML."""

    def __init__(self, pygments_lexer=None, **kwargs):
        # Default to the IPython 3 lexer when none is configured.
        self.pygments_lexer = pygments_lexer or 'ipython3'
        super(Highlight2HTML, self).__init__(**kwargs)

    def _default_language_changed(self, name, old, new):
        # Trait-change hook kept for backwards compatibility with the old
        # default_language config option.
        warn('Setting default_language in config is deprecated, '
             'please use language_info metadata instead.')
        self.pygments_lexer = new

    def __call__(self, source, language=None, metadata=None):
        """Highlight *source* and return it as an HTML fragment.

        Parameters
        ----------
        source : str
            source of the cell to highlight
        language : str
            language to highlight the syntax of
        metadata : NotebookNode cell metadata
            metadata of the cell to highlight
        """
        from pygments.formatters import HtmlFormatter
        lexer_name = language if language else self.pygments_lexer
        # Embedding the language in the css class helps post processors.
        formatter = HtmlFormatter(cssclass=" highlight hl-" + lexer_name)
        # Pygments needs at least one character of input.
        text = source if len(source) > 0 else ' '
        return _pygments_highlight(text, formatter, lexer_name, metadata)
class Highlight2Latex(NbConvertBase):
    """Callable filter rendering notebook source as highlighted LaTeX."""

    def __init__(self, pygments_lexer=None, **kwargs):
        # Default to the IPython 3 lexer when none is configured.
        self.pygments_lexer = pygments_lexer or 'ipython3'
        super(Highlight2Latex, self).__init__(**kwargs)

    def _default_language_changed(self, name, old, new):
        # Trait-change hook kept for backwards compatibility with the old
        # default_language config option.
        warn('Setting default_language in config is deprecated, '
             'please use language_info metadata instead.')
        self.pygments_lexer = new

    def __call__(self, source, language=None, metadata=None, strip_verbatim=False):
        """Highlight *source* and return it as LaTeX markup.

        Parameters
        ----------
        source : str
            source of the cell to highlight
        language : str
            language to highlight the syntax of
        metadata : NotebookNode cell metadata
            metadata of the cell to highlight
        strip_verbatim : bool
            remove the Verbatim environment that pygments provides by default
        """
        from pygments.formatters import LatexFormatter
        lexer_name = language if language else self.pygments_lexer
        latex = _pygments_highlight(source, LatexFormatter(), lexer_name,
                                    metadata)
        if not strip_verbatim:
            return latex
        # Drop the surrounding \begin{Verbatim}...\end{Verbatim} wrapper.
        stripped = latex.replace(r'\begin{Verbatim}[commandchars=\\\{\}]' + '\n', '')
        return stripped.replace('\n\\end{Verbatim}\n', '')
def _pygments_highlight(source, output_formatter, language='ipython', metadata=None):
    """
    Return a syntax-highlighted version of the input source

    Parameters
    ----------
    source : str
        source of the cell to highlight
    output_formatter : Pygments formatter
    language : str
        language to highlight the syntax of
    metadata : NotebookNode cell metadata
        metadata of the cell to highlight
    """
    # Imports are deferred so pygments is only required when highlighting
    # actually happens (errors at runtime, not import time).
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.util import ClassNotFound
    # If the cell uses a magic extension language,
    # use the magic language instead.
    if language.startswith('ipython') \
            and metadata \
            and 'magics_language' in metadata:
        language = metadata['magics_language']
    # IPython dialects need the dedicated lexers shipped with IPython;
    # everything else resolves through the normal pygments registry.
    if language == 'ipython2':
        from IPython.lib.lexers import IPythonLexer
        lexer = IPythonLexer()
    elif language == 'ipython3':
        from IPython.lib.lexers import IPython3Lexer
        lexer = IPython3Lexer()
    else:
        try:
            lexer = get_lexer_by_name(language, stripall=True)
        except ClassNotFound:
            # Fall back to plain text rather than failing the conversion.
            warn("No lexer found for language %r. Treating as plain text." % language)
            from pygments.lexers.special import TextLexer
            lexer = TextLexer()
    return highlight(source, lexer, output_formatter)
|
{
"content_hash": "ae6b056146cf49df6fb75d02698c3903",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 86,
"avg_line_length": 34.213235294117645,
"alnum_prop": 0.632495164410058,
"repo_name": "ArcherSys/ArcherSys",
"id": "e7abd0dbee442760b04df67087d83e85b8bfdd79",
"size": "4653",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/site-packages/nbconvert/filters/highlight.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import random
import unittest
from pymongo import MongoClient
from service.UserService import UserService, DuplicateUserException
class test_user_service(unittest.TestCase):
    """Integration tests for UserService (requires a local MongoDB)."""
    def setUp(self):
        # Dedicated test database, emptied before every test so tests are
        # isolated from previous runs.
        self.dbClient = MongoClient()
        self.db = self.dbClient.test_foodbeazt_database
        self.db.user_collection.drop()
        self.service = UserService(self.db)
    def get_model(self, email):
        """Return a minimal valid user document keyed by *email*."""
        item = {"name": "test", "username": email, "email": email, "auth_type": "google",
                "registered_ip": "10.0.0.1"}
        return item
    def test_create_user(self):
        # Random suffix avoids colliding with users created by other tests.
        no = str(random.randint(1, 10000))
        item = self.get_model("test" + no + "@test.com")
        # Renamed local from 'id' to avoid shadowing the builtin.
        user_id = self.service.create(item)
        assert user_id is not None
        return user_id
    def test_duplicate_user(self):
        item = self.get_model("test@test.com")
        self.service.create(item)
        # FIX: replaced the try/'assert False'/except/'assert True'
        # anti-pattern with assertRaises, which states the intent and
        # reports a clear failure when no exception is raised.
        with self.assertRaises(DuplicateUserException):
            self.service.create(item)
    def test_get_user_by_email(self):
        item = self.get_model("test@test.com")
        self.service.create(item)
        item = self.service.get_by_email("test@test.com")
        assert item is not None
        assert item["email"] == "test@test.com"
    def test_get_all_users(self):
        self.test_create_user()
        items = self.service.search()
        assert items is not None
        assert len(items) > 0
    def test_delete_user(self):
        user_id = self.test_create_user()
        # NOTE(review): only verifies delete() does not raise; the service
        # API for fetching a missing user is not visible here, so no
        # post-delete assertion is made.
        self.service.delete(str(user_id))
    def test_update_user(self):
        user_id = self.test_create_user()
        item = self.test_get_by_id(str(user_id))
        item['name'] = "updated test name"
        item = self.service.update(item)
        assert item is not None
        assert item['name'] == 'updated test name'
        assert 'updated_at' in item
    def test_get_by_id(self, id=None):
        # 'id' kept in the signature for backward compatibility with
        # existing callers, despite shadowing the builtin.
        if not id:
            id = self.test_create_user()
        item = self.service.get_by_id(id)
        assert item is not None
        return item
|
{
"content_hash": "41f9df9b6d2235fe0c5bc8c8e2d65a22",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 89,
"avg_line_length": 31.16417910447761,
"alnum_prop": 0.5953065134099617,
"repo_name": "cackharot/fbeazt",
"id": "ffde9a0add8df7c6d13ed1a0c26009072b77c93d",
"size": "2088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/test_user_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "255804"
},
{
"name": "Dockerfile",
"bytes": "6751"
},
{
"name": "HTML",
"bytes": "260498"
},
{
"name": "JavaScript",
"bytes": "87570"
},
{
"name": "Python",
"bytes": "218650"
},
{
"name": "Shell",
"bytes": "2290"
},
{
"name": "TypeScript",
"bytes": "87378"
}
],
"symlink_target": ""
}
|
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, is_
from sqlalchemy.ext import declarative as decl
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey, select, func
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import relationship, create_session, class_mapper, \
configure_mappers, clear_mappers, \
deferred, column_property, Session, base as orm_base
from sqlalchemy.util import classproperty
from sqlalchemy.ext.declarative import declared_attr, declarative_base
from sqlalchemy.orm import events as orm_events
from sqlalchemy.testing import fixtures, mock
from sqlalchemy.testing.util import gc_collect
Base = None
class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults):
    """Shared fixture: fresh declarative Base per test, full cleanup after."""
    def setup(self):
        # Rebind the module-level Base so each test defines its classes
        # against a clean MetaData/registry.
        global Base
        Base = decl.declarative_base(testing.db)
    def teardown(self):
        # Close sessions, unmap everything and drop all tables created by
        # the test to leave the database pristine.
        Session.close_all()
        clear_mappers()
        Base.metadata.drop_all()
class DeclarativeMixinTest(DeclarativeTestBase):
    def test_simple(self):
        # A plain mixin contributes both a column and an ordinary method.
        class MyMixin(object):
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            def foo(self):
                return 'bar' + str(self.id)
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            name = Column(String(100), nullable=False, index=True)
        Base.metadata.create_all()
        session = create_session()
        session.add(MyModel(name='testing'))
        session.flush()
        session.expunge_all()
        obj = session.query(MyModel).one()
        eq_(obj.id, 1)
        eq_(obj.name, 'testing')
        eq_(obj.foo(), 'bar1')
    def test_unique_column(self):
        # unique=True declared on the mixin survives onto the real table.
        class MyMixin(object):
            id = Column(Integer, primary_key=True)
            value = Column(String, unique=True)
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
        assert MyModel.__table__.c.value.unique
    def test_hierarchical_bases(self):
        # Columns/methods are gathered across a mixin inheritance chain.
        class MyMixinParent:
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            def foo(self):
                return 'bar' + str(self.id)
        class MyMixin(MyMixinParent):
            baz = Column(String(100), nullable=False, index=True)
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            name = Column(String(100), nullable=False, index=True)
        Base.metadata.create_all()
        session = create_session()
        session.add(MyModel(name='testing', baz='fu'))
        session.flush()
        session.expunge_all()
        obj = session.query(MyModel).one()
        eq_(obj.id, 1)
        eq_(obj.name, 'testing')
        eq_(obj.foo(), 'bar1')
        eq_(obj.baz, 'fu')
    def test_mixin_overrides(self):
        """test a mixin that overrides a column on a superclass."""
        class MixinA(object):
            foo = Column(String(50))
        class MixinB(MixinA):
            foo = Column(Integer)
        class MyModelA(Base, MixinA):
            __tablename__ = 'testa'
            id = Column(Integer, primary_key=True)
        class MyModelB(Base, MixinB):
            __tablename__ = 'testb'
            id = Column(Integer, primary_key=True)
        eq_(MyModelA.__table__.c.foo.type.__class__, String)
        eq_(MyModelB.__table__.c.foo.type.__class__, Integer)
    def test_not_allowed(self):
        # Constructs needing a class context (ForeignKey, relationship,
        # deferred, column_property) may not sit directly on a mixin.
        class MyMixin:
            foo = Column(Integer, ForeignKey('bar.id'))
        def go():
            class MyModel(Base, MyMixin):
                __tablename__ = 'foo'
        assert_raises(sa.exc.InvalidRequestError, go)
        class MyRelMixin:
            foo = relationship('Bar')
        def go():
            class MyModel(Base, MyRelMixin):
                __tablename__ = 'foo'
        assert_raises(sa.exc.InvalidRequestError, go)
        class MyDefMixin:
            foo = deferred(Column('foo', String))
        def go():
            class MyModel(Base, MyDefMixin):
                __tablename__ = 'foo'
        assert_raises(sa.exc.InvalidRequestError, go)
        class MyCPropMixin:
            foo = column_property(Column('foo', String))
        def go():
            class MyModel(Base, MyCPropMixin):
                __tablename__ = 'foo'
        assert_raises(sa.exc.InvalidRequestError, go)
    def test_table_name_inherited(self):
        # A declared_attr __tablename__ fires once per mapped subclass.
        class MyMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()
            id = Column(Integer, primary_key=True)
        class MyModel(Base, MyMixin):
            pass
        eq_(MyModel.__table__.name, 'mymodel')
    def test_classproperty_still_works(self):
        # Legacy classproperty is honored the same way as declared_attr.
        class MyMixin(object):
            @classproperty
            def __tablename__(cls):
                return cls.__name__.lower()
            id = Column(Integer, primary_key=True)
        class MyModel(Base, MyMixin):
            __tablename__ = 'overridden'
        eq_(MyModel.__table__.name, 'overridden')
    def test_table_name_not_inherited(self):
        # An explicit __tablename__ on the class wins over the mixin's.
        class MyMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()
            id = Column(Integer, primary_key=True)
        class MyModel(Base, MyMixin):
            __tablename__ = 'overridden'
        eq_(MyModel.__table__.name, 'overridden')
    def test_table_name_inheritance_order(self):
        # MRO order decides which mixin's __tablename__ applies.
        class MyMixin1:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower() + '1'
        class MyMixin2:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower() + '2'
        class MyModel(Base, MyMixin1, MyMixin2):
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__table__.name, 'mymodel1')
    def test_table_name_dependent_on_subclass(self):
        # declared_attr may read attributes supplied by the subclass.
        class MyHistoryMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.parent_name + '_changelog'
        class MyModel(Base, MyHistoryMixin):
            parent_name = 'foo'
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__table__.name, 'foo_changelog')
    def test_table_args_inherited(self):
        # Plain dict __table_args__ from a mixin reaches the Table.
        class MyMixin:
            __table_args__ = {'mysql_engine': 'InnoDB'}
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__table__.kwargs, {'mysql_engine': 'InnoDB'})
    def test_table_args_inherited_descriptor(self):
        # declared_attr __table_args__ may depend on the target class.
        class MyMixin:
            @declared_attr
            def __table_args__(cls):
                return {'info': cls.__name__}
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__table__.info, 'MyModel')
    def test_table_args_inherited_single_table_inheritance(self):
        class MyMixin:
            __table_args__ = {'mysql_engine': 'InnoDB'}
        class General(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
            type_ = Column(String(50))
            # NOTE(review): '__mapper__args' is misspelled (should be
            # '__mapper_args__'), so polymorphic_on is never applied; the
            # assertions below do not depend on it -- confirm intent.
            __mapper__args = {'polymorphic_on': type_}
        class Specific(General):
            __mapper_args__ = {'polymorphic_identity': 'specific'}
        assert Specific.__table__ is General.__table__
        eq_(General.__table__.kwargs, {'mysql_engine': 'InnoDB'})
    def test_columns_single_table_inheritance(self):
        """Test a column on a mixin with an alternate attribute name,
        mapped to a superclass and single-table inheritance subclass.
        The superclass table gets the column, the subclass shares
        the MapperProperty.
        """
        class MyMixin(object):
            foo = Column('foo', Integer)
            bar = Column('bar_newname', Integer)
        class General(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
            type_ = Column(String(50))
            # NOTE(review): '__mapper__args' is misspelled (should be
            # '__mapper_args__') and is therefore ignored -- confirm intent.
            __mapper__args = {'polymorphic_on': type_}
        class Specific(General):
            __mapper_args__ = {'polymorphic_identity': 'specific'}
        assert General.bar.prop.columns[0] is General.__table__.c.bar_newname
        assert len(General.bar.prop.columns) == 1
        assert Specific.bar.prop is General.bar.prop
    @testing.skip_if(lambda: testing.against('oracle'),
                     "Test has an empty insert in it at the moment")
    def test_columns_single_inheritance_conflict_resolution(self):
        """Test that a declared_attr can return the existing column and it will
        be ignored.  this allows conditional columns to be added.
        See [ticket:2472].
        """
        class Person(Base):
            __tablename__ = 'person'
            id = Column(Integer, primary_key=True)
        class Mixin(object):
            @declared_attr
            def target_id(cls):
                # Return the column already present on the table (if any)
                # instead of a fresh, conflicting one.
                return cls.__table__.c.get(
                    'target_id',
                    Column(Integer, ForeignKey('other.id'))
                )
            @declared_attr
            def target(cls):
                return relationship("Other")
        class Engineer(Mixin, Person):
            """single table inheritance"""
        class Manager(Mixin, Person):
            """single table inheritance"""
        class Other(Base):
            __tablename__ = 'other'
            id = Column(Integer, primary_key=True)
        is_(
            Engineer.target_id.property.columns[0],
            Person.__table__.c.target_id
        )
        is_(
            Manager.target_id.property.columns[0],
            Person.__table__.c.target_id
        )
        # do a brief round trip on this
        Base.metadata.create_all()
        session = Session()
        o1, o2 = Other(), Other()
        session.add_all([
            Engineer(target=o1),
            Manager(target=o2),
            Manager(target=o1)
        ])
        session.commit()
        eq_(session.query(Engineer).first().target, o1)
    def test_columns_joined_table_inheritance(self):
        """Test a column on a mixin with an alternate attribute name,
        mapped to a superclass and joined-table inheritance subclass.
        Both tables get the column, in the case of the subclass the two
        columns are joined under one MapperProperty.
        """
        class MyMixin(object):
            foo = Column('foo', Integer)
            bar = Column('bar_newname', Integer)
        class General(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
            type_ = Column(String(50))
            # NOTE(review): '__mapper__args' is misspelled (should be
            # '__mapper_args__') and is therefore ignored -- confirm intent.
            __mapper__args = {'polymorphic_on': type_}
        class Specific(General):
            __tablename__ = 'sub'
            id = Column(Integer, ForeignKey('test.id'), primary_key=True)
            __mapper_args__ = {'polymorphic_identity': 'specific'}
        assert General.bar.prop.columns[0] is General.__table__.c.bar_newname
        assert len(General.bar.prop.columns) == 1
        assert Specific.bar.prop is General.bar.prop
        eq_(len(Specific.bar.prop.columns), 1)
        assert Specific.bar.prop.columns[0] is General.__table__.c.bar_newname
    def test_column_join_checks_superclass_type(self):
        """Test that the logic which joins subclass props to those
        of the superclass checks that the superclass property is a column.
        """
        class General(Base):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
            general_id = Column(Integer, ForeignKey('test.id'))
            type_ = relationship("General")
        class Specific(General):
            __tablename__ = 'sub'
            id = Column(Integer, ForeignKey('test.id'), primary_key=True)
            type_ = Column('foob', String(50))
        assert isinstance(General.type_.property, sa.orm.RelationshipProperty)
        assert Specific.type_.property.columns[0] is Specific.__table__.c.foob
    def test_column_join_checks_subclass_type(self):
        """Test that the logic which joins subclass props to those
        of the superclass checks that the subclass property is a column.
        """
        def go():
            class General(Base):
                __tablename__ = 'test'
                id = Column(Integer, primary_key=True)
                type_ = Column('foob', Integer)
            class Specific(General):
                __tablename__ = 'sub'
                id = Column(Integer, ForeignKey('test.id'), primary_key=True)
                specific_id = Column(Integer, ForeignKey('sub.id'))
                type_ = relationship("Specific")
        assert_raises_message(
            sa.exc.ArgumentError, "column 'foob' conflicts with property", go
        )
    def test_table_args_overridden(self):
        # __table_args__ on the class wins over the mixin's version.
        class MyMixin:
            __table_args__ = {'mysql_engine': 'Foo'}
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            __table_args__ = {'mysql_engine': 'InnoDB'}
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__table__.kwargs, {'mysql_engine': 'InnoDB'})
    @testing.teardown_events(orm_events.MapperEvents)
    def test_declare_first_mixin(self):
        # __declare_first__/__declare_last__ hooks fire around
        # configure_mappers() when defined on a mixin.
        canary = mock.Mock()
        class MyMixin(object):
            @classmethod
            def __declare_first__(cls):
                canary.declare_first__(cls)
            @classmethod
            def __declare_last__(cls):
                canary.declare_last__(cls)
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        configure_mappers()
        eq_(
            canary.mock_calls,
            [
                mock.call.declare_first__(MyModel),
                mock.call.declare_last__(MyModel),
            ]
        )
    @testing.teardown_events(orm_events.MapperEvents)
    def test_declare_first_base(self):
        # Same hooks, defined on a custom declarative base class.
        canary = mock.Mock()
        class MyMixin(object):
            @classmethod
            def __declare_first__(cls):
                canary.declare_first__(cls)
            @classmethod
            def __declare_last__(cls):
                canary.declare_last__(cls)
        class Base(MyMixin):
            pass
        Base = declarative_base(cls=Base)
        class MyModel(Base):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        configure_mappers()
        eq_(
            canary.mock_calls,
            [
                mock.call.declare_first__(MyModel),
                mock.call.declare_last__(MyModel),
            ]
        )
    @testing.teardown_events(orm_events.MapperEvents)
    def test_declare_first_direct(self):
        # Same hooks, defined directly on the mapped class itself.
        canary = mock.Mock()
        class MyOtherModel(Base):
            __tablename__ = 'test2'
            id = Column(Integer, primary_key=True)
            @classmethod
            def __declare_first__(cls):
                canary.declare_first__(cls)
            @classmethod
            def __declare_last__(cls):
                canary.declare_last__(cls)
        configure_mappers()
        eq_(
            canary.mock_calls,
            [
                mock.call.declare_first__(MyOtherModel),
                mock.call.declare_last__(MyOtherModel)
            ]
        )
    def test_mapper_args_declared_attr(self):
        # declared_attr __mapper_args__ computed per target class.
        class ComputedMapperArgs:
            @declared_attr
            def __mapper_args__(cls):
                if cls.__name__ == 'Person':
                    return {'polymorphic_on': cls.discriminator}
                else:
                    return {'polymorphic_identity': cls.__name__}
        class Person(Base, ComputedMapperArgs):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True)
            discriminator = Column('type', String(50))
        class Engineer(Person):
            pass
        configure_mappers()
        assert class_mapper(Person).polymorphic_on \
            is Person.__table__.c.type
        eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer')
    def test_mapper_args_declared_attr_two(self):
        # same as test_mapper_args_declared_attr, but we repeat
        # ComputedMapperArgs on both classes for no apparent reason.
        class ComputedMapperArgs:
            @declared_attr
            def __mapper_args__(cls):
                if cls.__name__ == 'Person':
                    return {'polymorphic_on': cls.discriminator}
                else:
                    return {'polymorphic_identity': cls.__name__}
        class Person(Base, ComputedMapperArgs):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True)
            discriminator = Column('type', String(50))
        class Engineer(Person, ComputedMapperArgs):
            pass
        configure_mappers()
        assert class_mapper(Person).polymorphic_on \
            is Person.__table__.c.type
        eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer')
    def test_table_args_composite(self):
        # A declared_attr on the class can merge __table_args__ coming
        # from several mixins.
        class MyMixin1:
            __table_args__ = {'info': {'baz': 'bob'}}
        class MyMixin2:
            __table_args__ = {'info': {'foo': 'bar'}}
        class MyModel(Base, MyMixin1, MyMixin2):
            __tablename__ = 'test'
            @declared_attr
            def __table_args__(self):
                info = {}
                args = dict(info=info)
                info.update(MyMixin1.__table_args__['info'])
                info.update(MyMixin2.__table_args__['info'])
                return args
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__table__.info, {'foo': 'bar', 'baz': 'bob'})
    def test_mapper_args_inherited(self):
        # Plain dict __mapper_args__ from a mixin reaches the mapper.
        class MyMixin:
            __mapper_args__ = {'always_refresh': True}
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__mapper__.always_refresh, True)
    def test_mapper_args_inherited_descriptor(self):
        # declared_attr __mapper_args__ is evaluated against the class
        # actually being mapped, not against the mixin.
        class MyMixin:
            @declared_attr
            def __mapper_args__(cls):
                # tenuous, but illustrates the problem!
                if cls.__name__ == 'MyModel':
                    return dict(always_refresh=True)
                else:
                    return dict(always_refresh=False)
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__mapper__.always_refresh, True)
    def test_mapper_args_polymorphic_on_inherited(self):
        # A polymorphic_on column declared on the mixin ends up bound to
        # the model's own table.
        class MyMixin:
            type_ = Column(String(50))
            __mapper_args__ = {'polymorphic_on': type_}
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        col = MyModel.__mapper__.polymorphic_on
        eq_(col.name, 'type_')
        assert col.table is not None
    def test_mapper_args_overridden(self):
        # __mapper_args__ on the class wins over the mixin's version.
        class MyMixin:
            __mapper_args__ = dict(always_refresh=True)
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            __mapper_args__ = dict(always_refresh=False)
            id = Column(Integer, primary_key=True)
        eq_(MyModel.__mapper__.always_refresh, False)
    def test_mapper_args_composite(self):
        # A declared_attr merging __mapper_args__ from multiple mixins,
        # adjusting per subclass.
        class MyMixin1:
            type_ = Column(String(50))
            __mapper_args__ = {'polymorphic_on': type_}
        class MyMixin2:
            __mapper_args__ = {'always_refresh': True}
        class MyModel(Base, MyMixin1, MyMixin2):
            __tablename__ = 'test'
            @declared_attr
            def __mapper_args__(cls):
                args = {}
                args.update(MyMixin1.__mapper_args__)
                args.update(MyMixin2.__mapper_args__)
                if cls.__name__ != 'MyModel':
                    args.pop('polymorphic_on')
                    args['polymorphic_identity'] = cls.__name__
                return args
            id = Column(Integer, primary_key=True)
        class MySubModel(MyModel):
            pass
        eq_(
            MyModel.__mapper__.polymorphic_on.name,
            'type_'
        )
        assert MyModel.__mapper__.polymorphic_on.table is not None
        eq_(MyModel.__mapper__.always_refresh, True)
        eq_(MySubModel.__mapper__.always_refresh, True)
        eq_(MySubModel.__mapper__.polymorphic_identity, 'MySubModel')
    def test_mapper_args_property(self):
        # All three declarative directives as declared_attr on the base
        # model; subclasses recompute them individually.
        class MyModel(Base):
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()
            @declared_attr
            def __table_args__(cls):
                return {'mysql_engine': 'InnoDB'}
            @declared_attr
            def __mapper_args__(cls):
                args = {}
                args['polymorphic_identity'] = cls.__name__
                return args
            id = Column(Integer, primary_key=True)
        class MySubModel(MyModel):
            id = Column(Integer, ForeignKey('mymodel.id'), primary_key=True)
        class MySubModel2(MyModel):
            __tablename__ = 'sometable'
            id = Column(Integer, ForeignKey('mymodel.id'), primary_key=True)
        eq_(MyModel.__mapper__.polymorphic_identity, 'MyModel')
        eq_(MySubModel.__mapper__.polymorphic_identity, 'MySubModel')
        eq_(MyModel.__table__.kwargs['mysql_engine'], 'InnoDB')
        eq_(MySubModel.__table__.kwargs['mysql_engine'], 'InnoDB')
        eq_(MySubModel2.__table__.kwargs['mysql_engine'], 'InnoDB')
        eq_(MyModel.__table__.name, 'mymodel')
        eq_(MySubModel.__table__.name, 'mysubmodel')
def test_mapper_args_custom_base(self):
"""test the @declared_attr approach from a custom base."""
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __table_args__(cls):
return {'mysql_engine': 'InnoDB'}
@declared_attr
def id(self):
return Column(Integer, primary_key=True)
Base = decl.declarative_base(cls=Base)
class MyClass(Base):
pass
class MyOtherClass(Base):
pass
eq_(MyClass.__table__.kwargs['mysql_engine'], 'InnoDB')
eq_(MyClass.__table__.name, 'myclass')
eq_(MyOtherClass.__table__.name, 'myotherclass')
assert MyClass.__table__.c.id.table is MyClass.__table__
assert MyOtherClass.__table__.c.id.table is MyOtherClass.__table__
def test_single_table_no_propagation(self):
class IdColumn:
id = Column(Integer, primary_key=True)
class Generic(Base, IdColumn):
__tablename__ = 'base'
discriminator = Column('type', String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
value = Column(Integer())
class Specific(Generic):
__mapper_args__ = dict(polymorphic_identity='specific')
assert Specific.__table__ is Generic.__table__
eq_(list(Generic.__table__.c.keys()), ['id', 'type', 'value'])
assert class_mapper(Specific).polymorphic_on \
is Generic.__table__.c.type
eq_(class_mapper(Specific).polymorphic_identity, 'specific')
def test_joined_table_propagation(self):
class CommonMixin:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {'mysql_engine': 'InnoDB'}
timestamp = Column(Integer)
id = Column(Integer, primary_key=True)
class Generic(Base, CommonMixin):
discriminator = Column('python_type', String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
class Specific(Generic):
__mapper_args__ = dict(polymorphic_identity='specific')
id = Column(Integer, ForeignKey('generic.id'),
primary_key=True)
eq_(Generic.__table__.name, 'generic')
eq_(Specific.__table__.name, 'specific')
eq_(list(Generic.__table__.c.keys()), ['timestamp', 'id',
'python_type'])
eq_(list(Specific.__table__.c.keys()), ['id'])
eq_(Generic.__table__.kwargs, {'mysql_engine': 'InnoDB'})
eq_(Specific.__table__.kwargs, {'mysql_engine': 'InnoDB'})
    def test_some_propagation(self):
        """A subclass can opt out of the mixin's __tablename__ by setting it
        to None (single-table inheritance), while a sibling that keeps it
        becomes joined-table with its own table; __table_args__ propagate to
        every generated table."""
        class CommonMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()
            __table_args__ = {'mysql_engine': 'InnoDB'}
            timestamp = Column(Integer)
        class BaseType(Base, CommonMixin):
            discriminator = Column('type', String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id = Column(Integer, primary_key=True)
            value = Column(Integer())
        class Single(BaseType):
            # __tablename__ = None overrides the mixin: share the base table.
            __tablename__ = None
            __mapper_args__ = dict(polymorphic_identity='type1')
        class Joined(BaseType):
            __mapper_args__ = dict(polymorphic_identity='type2')
            id = Column(Integer, ForeignKey('basetype.id'),
                        primary_key=True)
        eq_(BaseType.__table__.name, 'basetype')
        eq_(list(BaseType.__table__.c.keys()), ['timestamp', 'type', 'id',
                                                'value'])
        eq_(BaseType.__table__.kwargs, {'mysql_engine': 'InnoDB'})
        assert Single.__table__ is BaseType.__table__
        eq_(Joined.__table__.name, 'joined')
        eq_(list(Joined.__table__.c.keys()), ['id'])
        eq_(Joined.__table__.kwargs, {'mysql_engine': 'InnoDB'})
    def test_col_copy_vs_declared_attr_joined_propagation(self):
        """Neither a plain mixin column nor a declared_attr column is copied
        onto a joined-table subclass's table; both live only on the base."""
        class Mixin(object):
            a = Column(Integer)
            @declared_attr
            def b(cls):
                return Column(Integer)
        class A(Mixin, Base):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
        class B(A):
            __tablename__ = 'b'
            id = Column(Integer, ForeignKey('a.id'), primary_key=True)
        assert 'a' in A.__table__.c
        assert 'b' in A.__table__.c
        assert 'a' not in B.__table__.c
        assert 'b' not in B.__table__.c
    def test_col_copy_vs_declared_attr_joined_propagation_newname(self):
        """Same as the joined-propagation test, but with mixin columns whose
        database names ('a1'/'b1') differ from the attribute keys: the
        explicit column names end up on the base table only."""
        class Mixin(object):
            a = Column('a1', Integer)
            @declared_attr
            def b(cls):
                return Column('b1', Integer)
        class A(Mixin, Base):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
        class B(A):
            __tablename__ = 'b'
            id = Column(Integer, ForeignKey('a.id'), primary_key=True)
        assert 'a1' in A.__table__.c
        assert 'b1' in A.__table__.c
        assert 'a1' not in B.__table__.c
        assert 'b1' not in B.__table__.c
    def test_col_copy_vs_declared_attr_single_propagation(self):
        """With single-table inheritance, mixin columns (plain and
        declared_attr) appear once on the shared base table."""
        class Mixin(object):
            a = Column(Integer)
            @declared_attr
            def b(cls):
                return Column(Integer)
        class A(Mixin, Base):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
        class B(A):
            pass
        assert 'a' in A.__table__.c
        assert 'b' in A.__table__.c
    def test_non_propagating_mixin(self):
        """A __tablename__ declared_attr that returns None for inheriting
        classes produces single-table inheritance for every subclass."""
        class NoJoinedTableNameMixin:
            @declared_attr
            def __tablename__(cls):
                if decl.has_inherited_table(cls):
                    return None
                return cls.__name__.lower()
        class BaseType(Base, NoJoinedTableNameMixin):
            discriminator = Column('type', String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id = Column(Integer, primary_key=True)
            value = Column(Integer())
        class Specific(BaseType):
            __mapper_args__ = dict(polymorphic_identity='specific')
        eq_(BaseType.__table__.name, 'basetype')
        eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'value'])
        assert Specific.__table__ is BaseType.__table__
        assert class_mapper(Specific).polymorphic_on \
            is BaseType.__table__.c.type
        eq_(class_mapper(Specific).polymorphic_identity, 'specific')
    def test_non_propagating_mixin_used_for_joined(self):
        """The mixin returns None only when it is inherited indirectly;
        re-applying it as a direct base of the subclass gives that subclass
        its own table (joined-table inheritance)."""
        class TableNameMixin:
            @declared_attr
            def __tablename__(cls):
                # Direct bases get a name; transitive inheritors get None.
                if decl.has_inherited_table(cls) and TableNameMixin \
                        not in cls.__bases__:
                    return None
                return cls.__name__.lower()
        class BaseType(Base, TableNameMixin):
            discriminator = Column('type', String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id = Column(Integer, primary_key=True)
            value = Column(Integer())
        class Specific(BaseType, TableNameMixin):
            __mapper_args__ = dict(polymorphic_identity='specific')
            id = Column(Integer, ForeignKey('basetype.id'),
                        primary_key=True)
        eq_(BaseType.__table__.name, 'basetype')
        eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'value'])
        eq_(Specific.__table__.name, 'specific')
        eq_(list(Specific.__table__.c.keys()), ['id'])
    def test_single_back_propagate(self):
        """A mixin column introduced on a single-table subclass is added back
        onto the base's table, after the base's own columns."""
        class ColumnMixin:
            timestamp = Column(Integer)
        class BaseType(Base):
            __tablename__ = 'foo'
            discriminator = Column('type', String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id = Column(Integer, primary_key=True)
        class Specific(BaseType, ColumnMixin):
            __mapper_args__ = dict(polymorphic_identity='specific')
        eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'timestamp'])
    def test_table_in_model_and_same_column_in_mixin(self):
        """When a class supplies an explicit __table__, a same-named mixin
        column is ignored in favor of the table's own column (same name and
        type class, but a distinct Column object)."""
        class ColumnMixin:
            data = Column(Integer)
        class Model(Base, ColumnMixin):
            __table__ = Table('foo', Base.metadata,
                              Column('data', Integer),
                              Column('id', Integer, primary_key=True))
        model_col = Model.__table__.c.data
        mixin_col = ColumnMixin.data
        assert model_col is not mixin_col
        eq_(model_col.name, 'data')
        assert model_col.type.__class__ is mixin_col.type.__class__
    def test_table_in_model_and_different_named_column_in_mixin(self):
        """A mixin column that is not present in an explicit __table__ raises
        ArgumentError: columns can't be added to a pre-built Table."""
        class ColumnMixin:
            tada = Column(Integer)
        def go():
            class Model(Base, ColumnMixin):
                __table__ = Table('foo', Base.metadata,
                                  Column('data', Integer),
                                  Column('id', Integer, primary_key=True))
                foo = relationship("Dest")
        assert_raises_message(sa.exc.ArgumentError,
                              "Can't add additional column 'tada' when "
                              "specifying __table__", go)
    def test_table_in_model_and_different_named_alt_key_column_in_mixin(self):
        """A mixin column whose *database name* differs from its attribute
        key is rejected against an explicit __table__, even when the table
        already has a column under the attribute key."""
        # here, the __table__ has a column 'tada'. We disallow
        # the add of the 'foobar' column, even though it's
        # keyed to 'tada'.
        class ColumnMixin:
            tada = Column('foobar', Integer)
        def go():
            class Model(Base, ColumnMixin):
                __table__ = Table('foo', Base.metadata,
                                  Column('data', Integer),
                                  Column('tada', Integer),
                                  Column('id', Integer, primary_key=True))
                foo = relationship("Dest")
        assert_raises_message(sa.exc.ArgumentError,
                              "Can't add additional column 'foobar' when "
                              "specifying __table__", go)
    def test_table_in_model_overrides_different_typed_column_in_mixin(self):
        """An explicit __table__ column wins over a same-named mixin column
        even when their types differ: the table's type (Integer) is kept."""
        class ColumnMixin:
            data = Column(String)
        class Model(Base, ColumnMixin):
            __table__ = Table('foo', Base.metadata,
                              Column('data', Integer),
                              Column('id', Integer, primary_key=True))
        model_col = Model.__table__.c.data
        mixin_col = ColumnMixin.data
        assert model_col is not mixin_col
        eq_(model_col.name, 'data')
        assert model_col.type.__class__ is Integer
    def test_mixin_column_ordering(self):
        """Columns contributed by multiple mixins keep per-mixin declaration
        order, in MRO order of the mixins, with the class's own columns
        last."""
        class Foo(object):
            col1 = Column(Integer)
            col3 = Column(Integer)
        class Bar(object):
            col2 = Column(Integer)
            col4 = Column(Integer)
        class Model(Base, Foo, Bar):
            id = Column(Integer, primary_key=True)
            __tablename__ = 'model'
        eq_(list(Model.__table__.c.keys()), ['col1', 'col3', 'col2', 'col4',
                                             'id'])
    def test_honor_class_mro_one(self):
        """A declared_attr column from a parent's mixin is not re-applied to
        a joined-table child: the child's table has no 'x' column."""
        class HasXMixin(object):
            @declared_attr
            def x(self):
                return Column(Integer)
        class Parent(HasXMixin, Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True)
        class Child(Parent):
            __tablename__ = 'child'
            id = Column(Integer, ForeignKey('parent.id'), primary_key=True)
        assert "x" not in Child.__table__.c
    def test_honor_class_mro_two(self):
        """A plain method defined on the class shadows a mixin declared_attr
        of the same name, per normal MRO; subclasses see the method."""
        class HasXMixin(object):
            @declared_attr
            def x(self):
                return Column(Integer)
        class Parent(HasXMixin, Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True)
            def x(self):
                return "hi"
        class C(Parent):
            __tablename__ = 'c'
            id = Column(Integer, ForeignKey('parent.id'), primary_key=True)
        assert C().x() == 'hi'
    def test_arbitrary_attrs_one(self):
        """declared_attr works for arbitrary (non-Column) attributes: the
        computed value is set directly in the mapped class's __dict__."""
        class HasMixin(object):
            @declared_attr
            def some_attr(cls):
                return cls.__name__ + "SOME ATTR"
        class Mapped(HasMixin, Base):
            __tablename__ = 't'
            id = Column(Integer, primary_key=True)
        eq_(Mapped.some_attr, "MappedSOME ATTR")
        eq_(Mapped.__dict__['some_attr'], "MappedSOME ATTR")
    def test_arbitrary_attrs_two(self):
        """declared_attr can build per-class relationships and association
        proxies from a class-level attribute (filter_class); constructing
        via the proxy name works for each mapped class."""
        from sqlalchemy.ext.associationproxy import association_proxy
        class FilterA(Base):
            __tablename__ = 'filter_a'
            id = Column(Integer(), primary_key=True)
            parent_id = Column(Integer(),
                               ForeignKey('type_a.id'))
            filter = Column(String())
            def __init__(self, filter_, **kw):
                self.filter = filter_
        class FilterB(Base):
            __tablename__ = 'filter_b'
            id = Column(Integer(), primary_key=True)
            parent_id = Column(Integer(),
                               ForeignKey('type_b.id'))
            filter = Column(String())
            def __init__(self, filter_, **kw):
                self.filter = filter_
        class FilterMixin(object):
            @declared_attr
            def _filters(cls):
                # each mapped class points the relationship at its own
                # filter_class
                return relationship(cls.filter_class,
                                    cascade='all,delete,delete-orphan')
            @declared_attr
            def filters(cls):
                return association_proxy('_filters', 'filter')
        class TypeA(Base, FilterMixin):
            __tablename__ = 'type_a'
            filter_class = FilterA
            id = Column(Integer(), primary_key=True)
        class TypeB(Base, FilterMixin):
            __tablename__ = 'type_b'
            filter_class = FilterB
            id = Column(Integer(), primary_key=True)
        TypeA(filters=['foo'])
        TypeB(filters=['foo'])
class DeclarativeMixinPropertyTest(DeclarativeTestBase):
    """Tests for ORM-level properties (column_property, deferred,
    relationship) supplied by mixins via @declared_attr."""
    def test_column_property(self):
        """Each mapped class invoking a mixin column_property gets its own
        distinct Column and property objects, queryable independently."""
        class MyMixin(object):
            @declared_attr
            def prop_hoho(cls):
                return column_property(Column('prop', String(50)))
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
        class MyOtherModel(Base, MyMixin):
            __tablename__ = 'othertest'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
        assert MyModel.__table__.c.prop is not None
        assert MyOtherModel.__table__.c.prop is not None
        assert MyModel.__table__.c.prop \
            is not MyOtherModel.__table__.c.prop
        assert MyModel.prop_hoho.property.columns \
            == [MyModel.__table__.c.prop]
        assert MyOtherModel.prop_hoho.property.columns \
            == [MyOtherModel.__table__.c.prop]
        assert MyModel.prop_hoho.property \
            is not MyOtherModel.prop_hoho.property
        Base.metadata.create_all()
        sess = create_session()
        m1, m2 = MyModel(prop_hoho='foo'), MyOtherModel(prop_hoho='bar')
        sess.add_all([m1, m2])
        sess.flush()
        eq_(sess.query(MyModel).filter(MyModel.prop_hoho == 'foo'
                                       ).one(), m1)
        eq_(sess.query(MyOtherModel).filter(MyOtherModel.prop_hoho
                                            == 'bar').one(), m2)
    def test_doc(self):
        """test documentation transfer.
        the documentation situation with @declared_attr is problematic.
        at least see if mapped subclasses get the doc.
        """
        class MyMixin(object):
            @declared_attr
            def type_(cls):
                """this is a document."""
                return Column(String(50))
            @declared_attr
            def t2(cls):
                """this is another document."""
                return column_property(Column(String(50)))
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        configure_mappers()
        eq_(MyModel.type_.__doc__, """this is a document.""")
        eq_(MyModel.t2.__doc__, """this is another document.""")
    def test_column_in_mapper_args(self):
        """A declared_attr column may be referenced directly inside a mixin's
        __mapper_args__ (here as polymorphic_on) and ends up bound to the
        mapped table."""
        class MyMixin(object):
            @declared_attr
            def type_(cls):
                return Column(String(50))
            __mapper_args__ = {'polymorphic_on': type_}
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
        configure_mappers()
        col = MyModel.__mapper__.polymorphic_on
        eq_(col.name, 'type_')
        assert col.table is not None
    def test_column_in_mapper_args_used_multiple_times(self):
        """A plain mixin column referenced in __mapper_args__ (version_id_col)
        resolves to each mapped class's own copy of the column."""
        class MyMixin(object):
            version_id = Column(Integer)
            __mapper_args__ = {'version_id_col': version_id}
        class ModelOne(Base, MyMixin):
            __tablename__ = 'm1'
            id = Column(Integer, primary_key=True)
        class ModelTwo(Base, MyMixin):
            __tablename__ = 'm2'
            id = Column(Integer, primary_key=True)
        is_(
            ModelOne.__mapper__.version_id_col,
            ModelOne.__table__.c.version_id
        )
        is_(
            ModelTwo.__mapper__.version_id_col,
            ModelTwo.__table__.c.version_id
        )
    def test_deferred(self):
        """A mixin-supplied deferred() column is not loaded with the row and
        loads on first attribute access."""
        class MyMixin(object):
            @declared_attr
            def data(cls):
                return deferred(Column('data', String(50)))
        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
        Base.metadata.create_all()
        sess = create_session()
        sess.add_all([MyModel(data='d1'), MyModel(data='d2')])
        sess.flush()
        sess.expunge_all()
        d1, d2 = sess.query(MyModel).order_by(MyModel.data)
        assert 'data' not in d1.__dict__
        assert d1.data == 'd1'
        assert 'data' in d1.__dict__
    def _test_relationship(self, usestring):
        """Shared body for relationship tests: a mixin provides target_id and
        a 'target' relationship, optionally with a string primaryjoin built
        from the invoking class's name."""
        class RefTargetMixin(object):
            @declared_attr
            def target_id(cls):
                return Column('target_id', ForeignKey('target.id'))
            if usestring:
                @declared_attr
                def target(cls):
                    return relationship('Target',
                                        primaryjoin='Target.id==%s.target_id'
                                        % cls.__name__)
            else:
                @declared_attr
                def target(cls):
                    return relationship('Target')
        class Foo(Base, RefTargetMixin):
            __tablename__ = 'foo'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
        class Bar(Base, RefTargetMixin):
            __tablename__ = 'bar'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
        class Target(Base):
            __tablename__ = 'target'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
        Base.metadata.create_all()
        sess = create_session()
        t1, t2 = Target(), Target()
        f1, f2, b1 = Foo(target=t1), Foo(target=t2), Bar(target=t1)
        sess.add_all([f1, f2, b1])
        sess.flush()
        eq_(sess.query(Foo).filter(Foo.target == t2).one(), f2)
        eq_(sess.query(Bar).filter(Bar.target == t2).first(), None)
        sess.expire_all()
        eq_(f1.target, t1)
    def test_relationship(self):
        self._test_relationship(False)
    def test_relationship_primryjoin(self):
        self._test_relationship(True)
class DeclaredAttrTest(DeclarativeTestBase, testing.AssertsCompiledSQL):
    """Tests for @declared_attr invocation semantics: per-class singleton
    caching, .cascading, garbage collection, and pre-map access."""
    __dialect__ = 'default'
    def test_singleton_behavior_within_decl(self):
        """During declaration of one class, a declared_attr is invoked
        exactly once and its result is memoized, so sibling declared_attrs
        see the same Column object."""
        counter = mock.Mock()
        class Mixin(object):
            @declared_attr
            def my_prop(cls):
                counter(cls)
                return Column('x', Integer)
        class A(Base, Mixin):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
            @declared_attr
            def my_other_prop(cls):
                return column_property(cls.my_prop + 5)
        eq_(counter.mock_calls, [mock.call(A)])
        class B(Base, Mixin):
            __tablename__ = 'b'
            id = Column(Integer, primary_key=True)
            @declared_attr
            def my_other_prop(cls):
                return column_property(cls.my_prop + 5)
        eq_(
            counter.mock_calls,
            [mock.call(A), mock.call(B)])
        # this is why we need singleton-per-class behavior. We get
        # an un-bound "x" column otherwise here, because my_prop() generates
        # multiple columns.
        a_col = A.my_other_prop.__clause_element__().element.left
        b_col = B.my_other_prop.__clause_element__().element.left
        is_(a_col.table, A.__table__)
        is_(b_col.table, B.__table__)
        is_(a_col, A.__table__.c.x)
        is_(b_col, B.__table__.c.x)
        s = Session()
        self.assert_compile(
            s.query(A),
            "SELECT a.x AS a_x, a.x + :x_1 AS anon_1, a.id AS a_id FROM a"
        )
        self.assert_compile(
            s.query(B),
            "SELECT b.x AS b_x, b.x + :x_1 AS anon_1, b.id AS b_id FROM b"
        )
    @testing.requires.predictable_gc
    def test_singleton_gc(self):
        """The per-class memoization does not keep a dead class alive: after
        del + gc the class is gone from the declarative registry."""
        counter = mock.Mock()
        class Mixin(object):
            @declared_attr
            def my_prop(cls):
                counter(cls.__name__)
                return Column('x', Integer)
        class A(Base, Mixin):
            __tablename__ = 'b'
            id = Column(Integer, primary_key=True)
            @declared_attr
            def my_other_prop(cls):
                return column_property(cls.my_prop + 5)
        eq_(counter.mock_calls, [mock.call("A")])
        del A
        gc_collect()
        assert "A" not in Base._decl_class_registry
    def test_can_we_access_the_mixin_straight(self):
        """Accessing a declared_attr directly on an unmapped mixin emits a
        SAWarning rather than silently invoking it."""
        class Mixin(object):
            @declared_attr
            def my_prop(cls):
                return Column('x', Integer)
        assert_raises_message(
            sa.exc.SAWarning,
            "Unmanaged access of declarative attribute my_prop "
            "from non-mapped class Mixin",
            getattr, Mixin, "my_prop"
        )
    def test_non_decl_access(self):
        """A declared_attr __tablename__ is memoized during declaration
        (one call covers both sibling accesses), but accessing it after the
        class is mapped re-invokes it each time."""
        counter = mock.Mock()
        class Mixin(object):
            @declared_attr
            def __tablename__(cls):
                counter(cls)
                return "foo"
        class Foo(Mixin, Base):
            id = Column(Integer, primary_key=True)
            @declared_attr
            def x(cls):
                cls.__tablename__
            @declared_attr
            def y(cls):
                cls.__tablename__
        eq_(
            counter.mock_calls,
            [mock.call(Foo)]
        )
        # post-mapping accesses are not memoized: two more calls below.
        eq_(Foo.__tablename__, 'foo')
        eq_(Foo.__tablename__, 'foo')
        eq_(
            counter.mock_calls,
            [mock.call(Foo), mock.call(Foo), mock.call(Foo)]
        )
    def test_property_noncascade(self):
        """A plain declared_attr is invoked for the class that declares it,
        not for single-table subclasses."""
        counter = mock.Mock()
        class Mixin(object):
            @declared_attr
            def my_prop(cls):
                counter(cls)
                return column_property(cls.x + 2)
        class A(Base, Mixin):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
            x = Column(Integer)
        class B(A):
            pass
        eq_(counter.mock_calls, [mock.call(A)])
    def test_property_cascade(self):
        """declared_attr.cascading re-invokes the attribute for each
        subclass in the hierarchy."""
        counter = mock.Mock()
        class Mixin(object):
            @declared_attr.cascading
            def my_prop(cls):
                counter(cls)
                return column_property(cls.x + 2)
        class A(Base, Mixin):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
            x = Column(Integer)
        class B(A):
            pass
        eq_(counter.mock_calls, [mock.call(A), mock.call(B)])
    def test_col_prop_attrs_associated_w_class_for_mapper_args(self):
        """With .cascading, a __mapper_args__ declared_attr on each class
        sees that class's own copy of the cascaded column attribute."""
        from sqlalchemy import Column
        import collections
        asserted = collections.defaultdict(set)
        class Mixin(object):
            @declared_attr.cascading
            def my_attr(cls):
                if decl.has_inherited_table(cls):
                    id = Column(ForeignKey('a.my_attr'), primary_key=True)
                    asserted['b'].add(id)
                else:
                    id = Column(Integer, primary_key=True)
                    asserted['a'].add(id)
                return id
        class A(Base, Mixin):
            __tablename__ = 'a'
            @declared_attr
            def __mapper_args__(cls):
                asserted['a'].add(cls.my_attr)
                return {}
        # here:
        # 1. A is mapped. so A.my_attr is now the InstrumentedAttribute.
        # 2. B wants to call my_attr also. Due to .cascading, it has been
        # invoked specific to B, and is present in the dict_ that will
        # be used when we map the class. But except for the
        # special setattr() we do in _scan_attributes() in this case, would
        # otherwise not been set on the class as anything from this call;
        # the usual mechanics of calling it from the descriptor also do not
        # work because A is fully mapped and because A set it up, is currently
        # that non-expected InstrumentedAttribute and replaces the
        # descriptor from being invoked.
        class B(A):
            __tablename__ = 'b'
            @declared_attr
            def __mapper_args__(cls):
                asserted['b'].add(cls.my_attr)
                return {}
        eq_(
            asserted,
            {
                'a': set([A.my_attr.property.columns[0]]),
                'b': set([B.my_attr.property.columns[0]])
            }
        )
    def test_column_pre_map(self):
        """A declared_attr column is invoked before the class is mapped
        (no mapper exists yet inside the callable)."""
        counter = mock.Mock()
        class Mixin(object):
            @declared_attr
            def my_col(cls):
                counter(cls)
                assert not orm_base._mapper_or_none(cls)
                return Column('x', Integer)
        class A(Base, Mixin):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
        eq_(counter.mock_calls, [mock.call(A)])
    def test_mixin_attr_refers_to_column_copies(self):
        """By declaration time of the declared_attr, mixin columns have
        already been copied onto the class, so cls.id is the class's own
        column (usable in a correlated subquery)."""
        # this @declared_attr can refer to User.id
        # freely because we now do the "copy column" operation
        # before the declared_attr is invoked.
        counter = mock.Mock()
        class HasAddressCount(object):
            id = Column(Integer, primary_key=True)
            @declared_attr
            def address_count(cls):
                counter(cls.id)
                return column_property(
                    select([func.count(Address.id)]).
                    where(Address.user_id == cls.id).
                    as_scalar()
                )
        class Address(Base):
            __tablename__ = 'address'
            id = Column(Integer, primary_key=True)
            user_id = Column(ForeignKey('user.id'))
        class User(Base, HasAddressCount):
            __tablename__ = 'user'
        eq_(
            counter.mock_calls,
            [mock.call(User.id)]
        )
        sess = Session()
        self.assert_compile(
            sess.query(User).having(User.address_count > 5),
            'SELECT (SELECT count(address.id) AS '
            'count_1 FROM address WHERE address.user_id = "user".id) '
            'AS anon_1, "user".id AS user_id FROM "user" '
            'HAVING (SELECT count(address.id) AS '
            'count_1 FROM address WHERE address.user_id = "user".id) '
            '> :param_1'
        )
class AbstractTest(DeclarativeTestBase):
    """Tests for __abstract__ classes in declarative hierarchies."""
    def test_abstract_boolean(self):
        """__abstract__ = True skips table creation even when __tablename__
        is present; False (or absent) maps normally."""
        class A(Base):
            __abstract__ = True
            __tablename__ = 'x'
            id = Column(Integer, primary_key=True)
        class B(Base):
            __abstract__ = False
            __tablename__ = 'y'
            id = Column(Integer, primary_key=True)
        class C(Base):
            __abstract__ = False
            __tablename__ = 'z'
            id = Column(Integer, primary_key=True)
        class D(Base):
            __tablename__ = 'q'
            id = Column(Integer, primary_key=True)
        eq_(set(Base.metadata.tables), set(['y', 'z', 'q']))
    def test_middle_abstract_attributes(self):
        """Columns declared on an abstract class in the middle of a
        hierarchy are inherited by the mapped subclass."""
        # test for [ticket:3219]
        class A(Base):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
            name = Column(String)
        class B(A):
            __abstract__ = True
            data = Column(String)
        class C(B):
            c_value = Column(String)
        eq_(
            sa.inspect(C).attrs.keys(), ['id', 'name', 'data', 'c_value']
        )
    def test_middle_abstract_inherits(self):
        """An abstract class between base and subclass is transparent to
        mapper inheritance: both subclasses inherit from A's mapper."""
        # test for [ticket:3240]
        class A(Base):
            __tablename__ = 'a'
            id = Column(Integer, primary_key=True)
        class AAbs(A):
            __abstract__ = True
        class B1(A):
            __tablename__ = 'b1'
            id = Column(ForeignKey('a.id'), primary_key=True)
        class B2(AAbs):
            __tablename__ = 'b2'
            id = Column(ForeignKey('a.id'), primary_key=True)
        assert B1.__mapper__.inherits is A.__mapper__
        assert B2.__mapper__.inherits is A.__mapper__
|
{
"content_hash": "35b3b58d7731dcc08ccad46fdd005be5",
"timestamp": "",
"source": "github",
"line_count": 1731,
"max_line_length": 79,
"avg_line_length": 30.291161178509533,
"alnum_prop": 0.5270625929740245,
"repo_name": "bdupharm/sqlalchemy",
"id": "1f9fa1dfaf0c2b43067363c2ff4ba771c7af821b",
"size": "52434",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/ext/declarative/test_mixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "Python",
"bytes": "8859450"
}
],
"symlink_target": ""
}
|
"""Tests for the Connector Webex Teams class."""
import asyncio
import unittest
import asynctest
import asynctest.mock as amock
from opsdroid.core import OpsDroid
from opsdroid.connector.webexteams import ConnectorWebexTeams
from opsdroid.events import Message
from opsdroid.cli.start import configure_lang
class TestConnectorCiscoWebexTeams(unittest.TestCase):
    """Synchronous tests for the opsdroid Webex Teams connector class."""
    def setUp(self):
        # Fresh event loop and locale data for every test case.
        self.loop = asyncio.new_event_loop()
        configure_lang({})
    def test_init(self):
        """Test that the connector is initialised properly."""
        teams_connector = ConnectorWebexTeams({})
        self.assertEqual("webexteams", teams_connector.name)
        self.assertEqual("opsdroid", teams_connector.bot_name)
    def test_webhook_url_is_valid(self):
        """A configured webhook URL is stored as given and uses HTTPS."""
        teams_connector = ConnectorWebexTeams({"webhook-url": "https://example.com"})
        webhook_url = teams_connector.config.get("webhook-url")
        assert webhook_url.startswith("https")
    def test_missing_api_key(self):
        """Test that creating without an API without config raises an error."""
        with self.assertRaises(TypeError):
            ConnectorWebexTeams()
class TestConnectorCiscoSparkAsync(asynctest.TestCase):
    """Test the async methods of the opsdroid webex teams connector class."""
    async def setUp(self):
        # Only locale data is needed; the connector itself is built per test.
        configure_lang({})
    async def test_connect(self):
        """connect() cleans up old webhooks, subscribes to rooms and looks
        up the bot's own id."""
        connector = ConnectorWebexTeams({"token": "abc123"}, opsdroid=OpsDroid())
        opsdroid = amock.CoroutineMock()
        opsdroid.eventloop = self.loop
        connector.clean_up_webhooks = amock.CoroutineMock()
        connector.subscribe_to_rooms = amock.CoroutineMock()
        connector.set_own_id = amock.CoroutineMock()
        with amock.patch("websockets.connect", new=amock.CoroutineMock()):
            await connector.connect()
        self.assertTrue(connector.clean_up_webhooks.called)
        self.assertTrue(connector.subscribe_to_rooms.called)
        self.assertTrue(connector.set_own_id.called)
    async def test_message_handler(self):
        """The webhook handler fetches the message and sender via the API,
        replies 201 "Received", and logs (rather than raises) when parsing
        fails."""
        connector = ConnectorWebexTeams({"token": "abc123"})
        connector.opsdroid = OpsDroid()
        connector.bot_spark_id = "spark123"
        connector.api = amock.CoroutineMock()
        request = amock.Mock()
        request.json = amock.CoroutineMock()
        request.json.return_value = {
            "data": {"id": "3vABZrQgDzfcz7LZi", "personId": "21ABZrQgDzfcz7Lsi"}
        }
        message = amock.Mock()
        connector.api.messages.get = amock.Mock()
        message.text = "Hello"
        message.roomId = "90ABCrWgrzfcz7LZi"
        message.roomType = "general"
        connector.api.messages.get.return_value = message
        connector.get_person = amock.CoroutineMock()
        person = amock.CoroutineMock()
        person.displayName = "Himanshu"
        connector.get_person.return_value = person
        response = await connector.webexteams_message_handler(request)
        self.assertLogs("_LOGGER", "debug")
        self.assertEqual(201, response.status)
        self.assertEqual('"Received"', response.text)
        self.assertTrue(connector.api.messages.get.called)
        self.assertTrue(connector.get_person.called)
        # A KeyError from opsdroid.parse must be caught and logged.
        connector.opsdroid = amock.CoroutineMock()
        connector.opsdroid.parse = amock.CoroutineMock()
        connector.opsdroid.parse.side_effect = KeyError
        await connector.webexteams_message_handler(request)
        self.assertLogs("_LOGGER", "error")
    async def test_connect_fail_keyerror(self):
        """connect() with no token logs an error instead of raising."""
        connector = ConnectorWebexTeams({}, opsdroid=OpsDroid())
        connector.clean_up_webhooks = amock.CoroutineMock()
        connector.subscribe_to_rooms = amock.CoroutineMock()
        connector.set_own_id = amock.CoroutineMock()
        await connector.connect()
        self.assertLogs("_LOGGER", "error")
    async def test_listen(self):
        """Test the listen method.
        The Webex Teams connector listens using an API endoint and so the listen
        method should just pass and do nothing. We just need to test that it
        does not block.
        """
        connector = ConnectorWebexTeams({}, opsdroid=OpsDroid())
        self.assertEqual(await connector.listen(), None)
    async def test_respond(self):
        """send() forwards the message to the Webex messages API."""
        connector = ConnectorWebexTeams({"token": "abc123"})
        connector.api = amock.CoroutineMock()
        connector.api.messages.create = amock.CoroutineMock()
        message = Message(
            text="Hello",
            user="opsdroid",
            target={"id": "3vABZrQgDzfcz7LZi"},
            connector=None,
        )
        await connector.send(message)
        self.assertTrue(connector.api.messages.create.called)
    async def test_get_person(self):
        """get_person() caches each looked-up person in connector.people."""
        connector = ConnectorWebexTeams({"token": "abc123"})
        connector.api = amock.CoroutineMock()
        connector.api.messages.create = amock.CoroutineMock()
        connector.api.people.get = amock.CoroutineMock()
        connector.api.people.get.return_value = "Himanshu"
        self.assertEqual(len(connector.people), 0)
        await connector.get_person("3vABZrQgDzfcz7LZi")
        self.assertEqual(len(connector.people), 1)
    async def test_subscribe_to_rooms(self):
        """subscribe_to_rooms() creates an API webhook and registers the
        POST route on opsdroid's web app."""
        connector = ConnectorWebexTeams(
            {"token": "abc123", "webhook-url": "http://127.0.0.1"}
        )
        connector.api = amock.CoroutineMock()
        connector.opsdroid = amock.CoroutineMock()
        connector.opsdroid.web_server.web_app.router.add_post = amock.CoroutineMock()
        connector.api.webhooks.create = amock.CoroutineMock()
        await connector.subscribe_to_rooms()
        self.assertTrue(connector.api.webhooks.create.called)
        self.assertTrue(connector.opsdroid.web_server.web_app.router.add_post.called)
    async def test_clean_up_webhooks(self):
        """clean_up_webhooks() lists existing webhooks and deletes them."""
        connector = ConnectorWebexTeams({"token": "abc123"})
        connector.api = amock.CoroutineMock()
        x = amock.CoroutineMock()
        x.id = amock.CoroutineMock()
        connector.api.webhooks.list = amock.Mock()
        connector.api.webhooks.list.return_value = [x, x]
        connector.api.webhooks.delete = amock.Mock()
        await connector.clean_up_webhooks()
        self.assertTrue(connector.api.webhooks.list.called)
        self.assertTrue(connector.api.webhooks.delete.called)
    async def test_set_own_id(self):
        """set_own_id() stores the bot's id from the people API."""
        connector = ConnectorWebexTeams({"token": "abc123"})
        connector.api = amock.CoroutineMock()
        connector.api.people.me().id = "3vABZrQgDzfcz7LZi"
        await connector.set_own_id()
        self.assertTrue(connector.bot_webex_id, "3vABZrQgDzfcz7LZi")
|
{
"content_hash": "f03a4f28af30f582775d91e23957119e",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 85,
"avg_line_length": 40.5,
"alnum_prop": 0.6663655525444143,
"repo_name": "FabioRosado/opsdroid",
"id": "73b5c18c87c64a5915a2b97c81727b3d20ce769a",
"size": "6642",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_connector_webexteams.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1755"
},
{
"name": "Jinja",
"bytes": "2320"
},
{
"name": "Jupyter Notebook",
"bytes": "848"
},
{
"name": "Python",
"bytes": "1180611"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for scipy.spatial.

    Declares three C/C++ extensions: ``qhull`` (bundled Qhull sources,
    linked against the LAPACK info), ``ckdtree`` (C++ kd-tree), and
    ``_distance_wrap`` (distance metrics, using npymath).
    """
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    from numpy.distutils.misc_util import get_info as get_misc_info
    from numpy.distutils.system_info import get_info as get_sys_info
    from distutils.sysconfig import get_python_inc
    config = Configuration('spatial', parent_package, top_path)
    config.add_data_dir('tests')
    # qhull
    qhull_src = ['geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c',
                 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c',
                 'random.c', 'rboxlib.c', 'stat.c', 'user.c', 'usermem.c',
                 'userprintf.c', 'userprintf_rbox.c']
    qhull_src = [join('qhull', 'src', x) for x in qhull_src]
    inc_dirs = [get_python_inc()]
    if inc_dirs[0] != get_python_inc(plat_specific=1):
        inc_dirs.append(get_python_inc(plat_specific=1))
    # NOTE(review): get_numpy_include_dirs() returns a list, so this appends
    # a nested list element — presumably numpy.distutils flattens include
    # dirs; confirm against numpy.distutils behavior.
    inc_dirs.append(get_numpy_include_dirs())
    cfg = dict(get_sys_info('lapack_opt'))
    cfg.setdefault('include_dirs', []).extend(inc_dirs)
    # qh_QHpointer enables Qhull's reentrant-ish global-pointer mode.
    cfg.setdefault('define_macros', []).append(('qh_QHpointer','1'))
    config.add_extension('qhull',
                         sources=['qhull.c'] + qhull_src,
                         **cfg)
    # cKDTree
    ckdtree_src = ['query.cxx',
                   'build.cxx',
                   'globals.cxx',
                   'cpp_exc.cxx',
                   'query_pairs.cxx',
                   'count_neighbors.cxx',
                   'query_ball_point.cxx',
                   'query_ball_tree.cxx',
                   'sparse_distances.cxx']
    ckdtree_src = [join('ckdtree', 'src', x) for x in ckdtree_src]
    ckdtree_headers = ['ckdtree_decl.h',
                       'cpp_exc.h',
                       'ckdtree_methods.h',
                       'cpp_utils.h',
                       'rectangle.h',
                       'distance.h',
                       'distance_box.h',
                       'ordered_pair.h']
    ckdtree_headers = [join('ckdtree', 'src', x) for x in ckdtree_headers]
    ckdtree_dep = ['ckdtree.cxx'] + ckdtree_headers + ckdtree_src
    config.add_extension('ckdtree',
                         sources=['ckdtree.cxx'] + ckdtree_src,
                         depends=ckdtree_dep,
                         include_dirs=inc_dirs + [join('ckdtree','src')])
    # _distance_wrap
    config.add_extension('_distance_wrap',
                         sources=[join('src', 'distance_wrap.c')],
                         depends=[join('src', 'distance_impl.h')],
                         include_dirs=[get_numpy_include_dirs()],
                         extra_info=get_misc_info("npymath"))
    return config
# Standalone build entry point: runs setup() with this package's
# configuration when the file is executed directly.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(maintainer="SciPy Developers",
          author="Anne Archibald",
          maintainer_email="scipy-dev@scipy.org",
          description="Spatial algorithms and data structures",
          url="https://www.scipy.org",
          license="SciPy License (BSD Style)",
          **configuration(top_path='').todict()
          )
|
{
"content_hash": "4f4eb7a7aeb6f18ba5dafd298f9ba133",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 39.370370370370374,
"alnum_prop": 0.533082470994042,
"repo_name": "larsmans/scipy",
"id": "00c4e0f79d380ab6783b5e9f560b8a81dc7d6be4",
"size": "3212",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "scipy/spatial/setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4306240"
},
{
"name": "C++",
"bytes": "3692038"
},
{
"name": "FORTRAN",
"bytes": "5661284"
},
{
"name": "HTML",
"bytes": "124330"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "10440452"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
# Package metadata collected up front, then handed to setuptools in one call.
_metadata = dict(
    name='negative-cycles',
    # PEP440-compliant version; see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.4',
    description='For Negative Cycle Detection',
    long_description='',
    # Project homepage.
    url='https://github.com/mnpatil17/negative_cycles/',
    # Author details.
    author='Mihir Patil',
    author_email='mnpatil17@gmail.com',
    # License identifier.
    license='BSD 3-clause "New" or "Revised License"',
    # Trove classifiers: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='negative-cycle bellman-ford',
    # Auto-discover packages, excluding non-package directories.
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['numpy', 'nose'],
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
)
setup(**_metadata)
|
{
"content_hash": "420a15874278b64f5efd0e3b85fe0243",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 29.928571428571427,
"alnum_prop": 0.6634844868735084,
"repo_name": "mnpatil17/negative_cycles",
"id": "a6f38eae58e6a77f75a7d523e8c54bab5543efd2",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12654"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from sqlalchemy import func
from indico.modules.rb.models.rooms import Room
from indico.modules.rb.operations.rooms import get_managed_room_ids
from indico.modules.rb.settings import RoomEmailMode, rb_user_settings
from indico.modules.users import ExtraUserPreferences
from indico.util.i18n import _
from indico.web.forms.fields import IndicoEnumSelectField, IndicoQuerySelectMultipleField
class RBUserPreferences(ExtraUserPreferences):
    """Extra user-preference fields for room-booking notifications.

    Adds an email-mode selector and, for users who manage rooms, a room
    blacklist; values are stored via ``rb_user_settings``.
    """
    @property
    def fields(self):
        # Rooms the current user manages, natural-sorted by full name.
        query = (Room.query
                 .filter(~Room.is_deleted, Room.id.in_(get_managed_room_ids(self.user)))
                 .order_by(func.indico.natsort(Room.full_name)))
        fields = {
            'email_mode': IndicoEnumSelectField(_('Room notifications'), enum=RoomEmailMode,
                                                description=_(
                                                    'If you own or manage any rooms, you can choose whether to '
                                                    'receive notifications about activity related to them.')),
            'email_blacklist': IndicoQuerySelectMultipleField(_('Room blacklist'),
                                                              query_factory=lambda: query,
                                                              get_label='full_name', collection_class=set,
                                                              render_kw={'size': 10},
                                                              description=_(
                                                                  'Regardless of the room notifications selected '
                                                                  'above, you will never receive notifications '
                                                                  'for rooms selected in this list.'))
        }
        if not query.count():
            # don't show an empty select field if user doesn't manage any rooms
            del fields['email_blacklist']
        return fields
    def load(self):
        # Current stored values for the form fields.
        return {
            'email_mode': rb_user_settings.get(self.user, 'email_mode'),
            'email_blacklist': rb_user_settings.get(self.user, 'email_blacklist'),
        }
    def save(self, data):
        # Persist all submitted preference values at once.
        rb_user_settings.set_multi(self.user, data)
    @classmethod
    def is_active(cls, user):
        # Show the section if the user has any stored RB prefs or manages rooms.
        return (rb_user_settings.get(user, 'email_mode', None) is not None or
                rb_user_settings.get(user, 'email_blacklist', None) is not None or
                get_managed_room_ids(user))
|
{
"content_hash": "32707b14767264fd3b703484e08edec9",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 114,
"avg_line_length": 49.88461538461539,
"alnum_prop": 0.5239013107170393,
"repo_name": "OmeGak/indico",
"id": "1b2bfc07c1116d669b28d662f3279f894da80145",
"size": "2808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/rb/user_prefs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "547418"
},
{
"name": "HTML",
"bytes": "1366687"
},
{
"name": "JavaScript",
"bytes": "1678182"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4488419"
},
{
"name": "Shell",
"bytes": "2724"
},
{
"name": "TeX",
"bytes": "23051"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
number = raw_input('Enter a number to find a square : ')
try :
# In order to accept floating numbers, we are converting the varibale to float.
actualNumber = float(number)**2
print 'Square of the number is', actualNumber
except :
print 'Instead of typing number you entered -', number
|
{
"content_hash": "918a9b124b0d06ccf18316d84083bf66",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 83,
"avg_line_length": 42.714285714285715,
"alnum_prop": 0.7123745819397993,
"repo_name": "rahulbohra/Python-Basic",
"id": "b8ead120867c65f46e8fffd0c0e0223affb72253",
"size": "299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "11_first-try-except.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21539"
}
],
"symlink_target": ""
}
|
import html.parser
import re
import urllib.error
import urllib.parse
import urllib.request
from time import time
import dateutil.parser
import socks
from sockshandler import SocksiPyHandler
# Plugin configuration; `yui` is injected into the plugin namespace by the
# bot's plugin loader (it is not imported in this file).
USER_AGENT = yui.config_val('url', 'httpUserAgent', default='Yui')
PROXY_HOST = yui.config_val('url', 'socksProxyHost')
PROXY_PORT = yui.config_val('url', 'socksProxyPort')
PROXY_REGEX = yui.config_val('url', 'socksProxyRegex')
URLCACHE = {}  # url -> {'timestamp': ..., 'title': ...} cache used by url()
class TitleParser(html.parser.HTMLParser):
    """Extracts the text of the first <title> element from an HTML stream.

    After feeding, `done` tells whether a complete title was seen and
    `title` holds its accumulated text.
    """

    def __init__(self):
        super().__init__()
        self.reading = False
        self.done = False
        self.title = ''

    def handle_starttag(self, tag, attrs):
        # Start capturing only for the first <title> encountered.
        if tag != 'title' or self.done:
            return
        self.reading = True

    def handle_endtag(self, tag):
        if tag != 'title':
            return
        self.reading = False
        self.done = True

    def handle_data(self, data):
        if not self.reading:
            return
        self.title += data

    def handle_charref(self, ref):
        # Numeric character references are re-serialized via the entity path.
        self.handle_entityref('#' + ref)

    def handle_entityref(self, ref):
        if self.reading:
            self.title += '&{};'.format(ref)

    def error(self, message):
        pass
def humanify(num):
    """Render a byte count *num* as a human-readable string.

    Values below 1024 are shown as '<n> Byte'; larger values are scaled to
    KiB/MiB/GiB/TiB/PiB with two decimals. Always returns a string.
    """
    pre = ['KiB', 'MiB', 'GiB', 'TiB']
    if num < 1024:
        return '%d Byte' % num
    num /= 1024
    for p in pre:
        div = num / 1024
        if div < 1.0:
            return '%.2f%s' % (num, p)
        num = div
    # Fix: previously the loop fell through and implicitly returned None
    # for anything >= 1024 TiB; report such sizes in PiB instead.
    return '%.2f%s' % (num, 'PiB')
# Validate a candidate URL and return it percent-encoded, or None.
def get_encoded_url(url):
    """Return *url* with unsafe characters percent-encoded, or None when it
    is not a usable http(s) URL (missing/bad scheme, localhost, raw IP)."""
    parts = urllib.parse.urlparse(url)
    scheme, netloc = parts[0], parts[1]
    acceptable = (scheme in ('http', 'https')
                  and netloc
                  and netloc != 'localhost'
                  and not netloc.split('.')[-1].isdigit())
    if not acceptable:
        return None
    # Quote every component except the netloc so unicode paths/queries
    # become valid ASCII URLs.
    return urllib.parse.urlunparse(
        component if index == 1
        else urllib.parse.quote(component, safe='/%+#&,._-=')
        for index, component in enumerate(parts)
    )
def get_url_title(url):
    """Fetch *url* and return a short, human-readable description.

    Returns 'Title: ...' for HTML pages with a <title>, a 'Status:'/'Error:'
    string for HTTP/URL errors, a 'Type/Size/Modified' summary for other
    content, or None on unexpected failures (callers must handle None).
    """
    # Fallback encodings tried in order. Fix: 'iso-8859-1' was previously
    # misspelled 'iso-8869-1', so that codec lookup always raised and the
    # fallback was silently skipped.
    enc = ['utf8', 'iso-8859-1', 'shift-jis']
    title = ''
    headers = {
        'User-Agent': USER_AGENT
    }
    try:
        req = urllib.request.Request(url, data=None, headers=headers)
        host = urllib.request.urlparse(url).netloc.split(':')[0]
        # Route matching hosts through the configured SOCKS5 proxy.
        if PROXY_HOST and PROXY_PORT and re.match(PROXY_REGEX, host):
            opener = urllib.request.build_opener(SocksiPyHandler(socks.SOCKS5, PROXY_HOST, PROXY_PORT))
            resp = opener.open(req, timeout=5)
        else:
            resp = urllib.request.urlopen(req, timeout=5)
    except urllib.error.HTTPError as e:
        return 'Status: ' + str(e.code)
    except urllib.error.URLError as e:
        return 'Error: ' + str(e.reason)
    except Exception:
        return None
    # get the site's title, only in html content
    if 'content-type' in resp.headers and 'html' in resp.headers['content-type']:
        # try the charset set in the http header first, if there is one
        if 'charset=' in resp.headers['content-type']:
            enc = enc + [resp.headers['content-type'].split('charset=')[-1]]
        # read up to 1mb
        chunk = resp.read(1024 * 1024)
        parser = TitleParser()
        for e in enc:
            try:
                decoded_chunk = chunk.decode(e, 'ignore')
                parser.feed(decoded_chunk)
                if parser.done:
                    title = parser.title
                parser.close()
                if len(title) > 0:
                    # Fix: html.unescape() replaces the deprecated
                    # HTMLParser.unescape(), which was removed in Python 3.9.
                    esc = html.unescape(title)
                    return 'Title: ' + esc.strip()
            except Exception:
                pass
    # no title, try to output some other useful data
    info = []
    if 'content-type' in resp.headers:
        info.append('Type: ' + resp.headers['content-type'].split(';')[0])
    if 'content-length' in resp.headers:
        info.append('Size: ' + humanify(int(resp.headers['content-length'])))
    if 'last-modified' in resp.headers:
        d = resp.headers['last-modified']
        try:
            parsed_date = dateutil.parser.parse(d)
            d = parsed_date.strftime('%F %T') + ' ' + parsed_date.tzname()
        except ValueError:
            pass
        info.append('Modified: ' + d)
    return ', '.join(info)
@yui.event('msg_recv')
def url(msg, channel, is_cmd):
    """Channel-message hook: fetch titles for up to three URLs in *msg* and
    post them back to the channel. Results are cached for one hour."""
    if is_cmd:
        return
    # find urls in channel message
    words = msg.split()
    titles = []
    max_urls = 3
    for w in words:
        enc_url = get_encoded_url(w)
        if not enc_url:
            continue
        if enc_url in URLCACHE and URLCACHE[enc_url]['timestamp'] > (time() - 60 * 60):
            title = URLCACHE[enc_url]['title']
        else:
            # Fix: get_url_title() may return None on unexpected errors;
            # calling .strip() on None crashed the handler.
            title = get_url_title(enc_url)
            title = title.strip() if title else ''
            URLCACHE[enc_url] = {
                'timestamp': time(),
                'title': title
            }
        if title:
            titles.append(title)
        if len(titles) >= max_urls:
            break
    # don't say anything, if we couldn't get any titles
    if len(titles) > 0:
        concat = ' \x035|\x03 '.join(titles)
        yui.send_msg(channel, concat)
|
{
"content_hash": "63d8f7763e750def65dab6492fbc9773",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 103,
"avg_line_length": 29.5,
"alnum_prop": 0.5568463149876214,
"repo_name": "Rouji/Yui",
"id": "88eeffe692aa11535eed88344a13d15cd925cf90",
"size": "5267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88675"
}
],
"symlink_target": ""
}
|
"""Implementation of the TinyQuery service."""
import collections
import compiler
import context
import evaluator
import tq_types
class TinyQueryError(Exception):
    """User-level error raised by the fake BigQuery service (e.g. disposition
    violations during table copies)."""
    # TODO: Use BigQuery-specific error codes here.
    pass
class TinyQuery(object):
    """In-memory fake of the BigQuery service.

    Holds tables and views keyed by 'dataset.table' and supports running
    query and copy jobs against them. Written for Python 2 (iteritems etc.).
    """
    def __init__(self):
        # Maps 'dataset.table' -> Table or View.
        self.tables_by_name = {}
        # Monotonic counter used to mint job IDs.
        self.next_job_num = 0
        # Maps job ID -> QueryJob / CopyJob.
        self.job_map = {}
    def load_table_or_view(self, table):
        """Create a table."""
        self.tables_by_name[table.name] = table
    def load_table_from_csv(self, table_name, raw_schema, filename):
        """Load a headerless CSV file into a new table with *raw_schema*."""
        result_table = self.make_empty_table(table_name, raw_schema)
        with open(filename, 'r') as f:
            for line in f:
                # Strip a single trailing newline, if present.
                if line[-1] == '\n':
                    line = line[:-1]
                tokens = line.split(',')
                assert len(tokens) == len(result_table.columns), (
                    'Expected {} tokens on line {}, but got {}'.format(
                        len(result_table.columns), line, len(tokens)))
                for token, column in zip(tokens,
                                         result_table.columns.itervalues()):
                    # Coerce the raw string according to the column type;
                    # the literal 'null' becomes None for non-numeric columns.
                    if column.type == tq_types.INT:
                        token = int(token)
                    elif column.type == tq_types.FLOAT:
                        token = float(token)
                    elif token == 'null':
                        token = None
                    column.values.append(token)
                result_table.num_rows += 1
        self.load_table_or_view(result_table)
    def make_empty_table(self, table_name, raw_schema):
        """Build a zero-row Table from a BigQuery-style schema dict."""
        columns = collections.OrderedDict()
        for field in raw_schema['fields']:
            # TODO: Handle the mode here. We should default to NULLABLE, but
            # allow other specifiers.
            # TODO: Validate that the type is legal. Currently we take
            # advantage of the fact that type names match the types defined in
            # tq_types.py.
            columns[field['name']] = context.Column(field['type'], [])
        return Table(table_name, 0, columns)
    def make_view(self, view_name, query):
        """Validate *query* by compiling it and return a View for it."""
        # TODO: Figure out the schema by compiling the query, and refactor the
        # code so that the compiler can use the schema instead of expecting
        # every TableId to have actual Columns. For now, we just validate that
        # the view works, and things will break later if the view is actually
        # used.
        compiler.compile_text(query, self.tables_by_name)
        return View(view_name, query)
    def get_all_tables(self):
        """Return the full name -> Table/View mapping."""
        return self.tables_by_name
    def get_table_names_for_dataset(self, dataset):
        """Return table names (without the dataset prefix) in *dataset*."""
        # TODO(alan): Improve this to use a more first-class dataset structure.
        return [full_table[len(dataset + '.'):]
                for full_table in self.tables_by_name.iterkeys()
                if full_table.startswith(dataset + '.')]
    def get_all_table_info_in_dataset(self, project_id, dataset):
        """Gets a "table info" dictionary for each table, sorted by name.
        In practice, this is a bit wasteful when it is used for multiple pages.
        """
        return [self.get_short_table_info(project_id, dataset, table)
                for table in sorted(self.get_table_names_for_dataset(dataset))]
    def get_short_table_info(self, project_id, dataset, table_name):
        """Returns the format from bq_service.tables().list()."""
        return {
            'tableReference': {
                'projectId': project_id,
                'datasetId': dataset,
                'tableId': table_name
            }
        }
    def get_table_info(self, project, dataset, table_name):
        """Return full table metadata (schema + reference), mirroring the
        bq_service.tables().get() format."""
        # TODO(alan): Don't just ignore the project parameter.
        # Will throw KeyError if the table doesn't exist.
        table = self.tables_by_name[dataset + '.' + table_name]
        schema_fields = []
        for col_name, column in table.columns.iteritems():
            schema_fields.append({
                'name': col_name,
                'type': column.type,
                'mode': 'NULLABLE'
            })
        return {
            'schema': {
                'fields': schema_fields
            },
            'tableReference': {
                'projectId': project,
                'datasetId': dataset,
                'tableId': table_name
            }
        }
    def get_table(self, dataset, table_name):
        """Returns the tinyquery.Table with the given dataset and name."""
        return self.tables_by_name[dataset + '.' + table_name]
    def delete_table(self, dataset, table_name):
        """Remove a table; raises KeyError if it does not exist."""
        del self.tables_by_name[dataset + '.' + table_name]
    def evaluate_query(self, query):
        """Compile and evaluate *query*, returning a result Context."""
        select_ast = compiler.compile_text(query, self.tables_by_name)
        select_evaluator = evaluator.Evaluator(self.tables_by_name)
        return select_evaluator.evaluate_select(select_ast)
    def create_job(self, project_id, job_object):
        """Create a job with the given status and return the info for it."""
        job_id = 'job:%s' % self.next_job_num
        self.next_job_num += 1
        job_object.job_info['jobReference'] = {
            'projectId': project_id,
            'jobId': job_id
        }
        self.job_map[job_id] = job_object
        return job_object.job_info
    def run_query_job(self, project_id, query, dest_dataset, dest_table_name,
                      create_disposition, write_disposition):
        """Run a query synchronously; optionally write results to a
        destination table; return the (already-DONE) job info."""
        query_result_context = self.evaluate_query(query)
        query_result_table = self.table_from_context('query_results',
                                                     query_result_context)
        if dest_dataset is not None and dest_table_name is not None:
            dest_full_table_name = dest_dataset + '.' + dest_table_name
            self.copy_table(query_result_table, dest_full_table_name,
                            create_disposition, write_disposition)
        return self.create_job(project_id, QueryJob({
            'status': {
                'state': 'DONE'
            },
            'statistics': {
                'query': {
                    'totalBytesProcessed': '0'
                }
            }
        }, query_result_table))
    @staticmethod
    def table_from_context(table_name, ctx):
        """Convert an evaluation Context into a Table, dropping the table
        component of each (table, column) key."""
        return Table(table_name, ctx.num_rows, collections.OrderedDict(
            (col_name, column)
            for (_, col_name), column in ctx.columns.iteritems()
        ))
    def run_copy_job(self, project_id, src_dataset, src_table_name,
                     dest_dataset, dest_table_name, create_disposition,
                     write_disposition):
        """Copy one table to another synchronously and return the job info."""
        # TODO: Handle errors in the same way as BigQuery.
        src_full_table_name = src_dataset + '.' + src_table_name
        dest_full_table_name = dest_dataset + '.' + dest_table_name
        src_table = self.tables_by_name[src_full_table_name]
        self.copy_table(src_table, dest_full_table_name, create_disposition,
                        write_disposition)
        return self.create_job(project_id, CopyJob({
            'status': {
                'state': 'DONE'
            },
        }))
    def copy_table(self, src_table, dest_table_name, create_disposition,
                   write_disposition):
        """Write the given Table object to the destination table name."""
        if dest_table_name not in self.tables_by_name:
            if create_disposition == 'CREATE_NEVER':
                raise TinyQueryError('CREATE_NEVER specified, but table did '
                                     'not exist: {}'.format(dest_table_name))
            self.load_empty_table_from_template(dest_table_name, src_table)
        # TODO: Handle schema differences and raise errors with illegal schema
        # updates.
        dest_table = self.tables_by_name[dest_table_name]
        if dest_table.num_rows > 0:
            if write_disposition == 'WRITE_EMPTY':
                raise TinyQueryError(
                    'WRITE_EMPTY was specified, but the table {} was not '
                    'empty.'.format(dest_table_name))
            if write_disposition == 'WRITE_TRUNCATE':
                self.clear_table(dest_table)
        self.append_to_table(src_table, dest_table)
    def load_empty_table_from_template(self, table_name, template_table):
        """Register an empty table whose schema matches *template_table*."""
        columns = collections.OrderedDict(
            (col_name, context.Column(col.type, []))
            for col_name, col in template_table.columns.iteritems()
        )
        table = Table(table_name, 0, columns)
        self.load_table_or_view(table)
    @staticmethod
    def clear_table(table):
        """Remove all rows from *table* in place."""
        table.num_rows = 0
        for column in table.columns.itervalues():
            column.values[:] = []
    @staticmethod
    def append_to_table(src_table, dest_table):
        """Append src_table's rows to dest_table; columns missing from the
        source are padded with None."""
        dest_table.num_rows += src_table.num_rows
        for col_name, column in dest_table.columns.iteritems():
            if col_name in src_table.columns:
                column.values.extend(src_table.columns[col_name].values)
            else:
                column.values.extend([None] * src_table.num_rows)
    def get_job_info(self, job_id):
        """Return the job_info dict for *job_id* (KeyError if unknown)."""
        # Raise a KeyError if the table doesn't exist.
        return self.job_map[job_id].job_info
    def get_query_result_table(self, job_id):
        """Return the result Table of a finished query job."""
        # TODO: Return an appropriate error if not a query job.
        return self.job_map[job_id].query_results
class Table(object):
    """Information containing metadata and contents of a table.
    Fields:
        name: The name of the table.
        num_rows: The number of rows in the table.
        columns: An OrderedDict mapping column name to Column. Note that unlike
            in Context objects, the column name is just a string and does not
            include a table component.
    """
    def __init__(self, name, num_rows, columns):
        # Validate that every column has exactly num_rows values before
        # accepting the data.
        assert isinstance(columns, collections.OrderedDict)
        for col_name, column in columns.iteritems():
            assert isinstance(col_name, basestring)
            assert len(column.values) == num_rows, (
                'Column %s had %s rows, expected %s.' % (
                    col_name, len(column.values), num_rows))
        self.name = name
        self.num_rows = num_rows
        self.columns = columns
    def __repr__(self):
        return 'Table({}, {}, {})'.format(self.name, self.num_rows,
                                          self.columns)
class View(object):
    """A virtual table: a name bound to the query string that defines it.

    Attributes:
        name: Name under which the view is registered.
        query: The defining SQL query text.
    """
    def __init__(self, name, query):
        self.query = query
        self.name = name
class QueryJob(collections.namedtuple('QueryJob', ['job_info',
                                                   'query_results'])):
    """A finished query job: its job_info metadata dict plus the result Table."""
    pass
class CopyJob(collections.namedtuple('CopyJob', ['job_info'])):
    """A finished table-copy job; carries only its job_info metadata dict."""
    pass
|
{
"content_hash": "5db7499289e10b699cec5ebad7377e8e",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 79,
"avg_line_length": 39.071174377224196,
"alnum_prop": 0.5665361144002186,
"repo_name": "burnhamup/tinyquery",
"id": "7f545f85ba7c83094d5d933e90e5bf55053097e8",
"size": "10979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinyquery/tinyquery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "209784"
}
],
"symlink_target": ""
}
|
"""
Contains concepts related to requests, tracing them and the work they generate.
"""
import collections
import simpy
from .base import NamedObject
# One boundary event in a request's trace: `when` (simulation time), `who`
# (the component involved) and `direction` ('enter' or 'exit'); appended by
# Request.do_trace.
TraceItem = collections.namedtuple('TraceItem', 'when who direction')
def trace_request(instance_method):
    """
    Decorator for SimPy-style generator methods that records when a request
    enters and exits the decorated component.

    Assumes the bound instance exposes an `_env` simulation environment and
    that the method's first positional argument is the request to trace.
    """
    # pylint: disable=protected-access
    def traced(instance, request, *args, **kwargs):
        """
        Generator wrapper that marks request entry and exit around the
        wrapped process.
        """
        request.do_trace(who=instance, when=instance._env.now,
                         direction='enter')
        yield from instance_method(instance, request, *args, **kwargs)
        request.do_trace(who=instance, when=instance._env.now,
                         direction='exit')
    return traced
class Cancelled(Exception):
    """
    Represents the fact that a request was cancelled, as requested by the user.
    Raised out of Work.consume() when the work item was cancelled before or
    during consumption.
    """
    pass
class RequestTiePair(object):
    """
    Holds the two start events needed to tie a pair of requests together
    (the "Tail at Scale" technique): whichever tied request starts first
    triggers its start event, which cancels the sibling. Symmetry is broken
    by priority — the high-priority request cannot be cancelled once started,
    while the low-priority one can.
    """
    def __init__(self, env):
        self._high_start = simpy.events.Event(env)
        self._low_start = simpy.events.Event(env)

    @property
    def high_prio(self):
        """
        Tuple for the high-priority request:
        (cancel_after_start, start_event, cancel_event). The work triggers
        `start_event` when it starts and cancels itself on `cancel_event`.
        """
        return False, self._high_start, self._low_start

    @property
    def low_prio(self):
        """
        Tuple for the low-priority request:
        (cancel_after_start, start_event, cancel_event).
        """
        return True, self._low_start, self._high_start
class Work(object):
    """
    Simulates work that has to be performed. Work must only be created by
    microservices and consumed by lowest-level executors.
    """
    def __init__(self, env, work, request, tie=None):
        # `tie`, when given, is a (cancel_after_start, start_event,
        # cancel_event) tuple as produced by RequestTiePair.
        assert work >= 0
        self._env = env
        self._initial = work
        self._remaining = work
        self._process = None  # SimPy process currently consuming this work
        self._cancelled = False
        self._request = request
        self._tie = tie
        if self._tie:
            # Cancel this work as soon as the tied sibling's start event fires.
            cancel_event = self._tie[2]
            cancel_event.callbacks.append(self.cancel)
    def consume(self, max_work_to_consume, inverse_rate=1):
        """
        Consumes work, i.e., sleeps for a given maximum amount of time. Work is
        currently equal to time, but may in future be modulated, e.g., due to
        frequency scaling. The method returns as soon as either all work is
        consumed or the maximum amount, given as parameter, is reached.
        """
        assert max_work_to_consume > 0
        assert self._process is None
        cancel_after_start, start_event, cancel_event = \
            self._tie if self._tie else (False, None, None)
        if self._cancelled:
            # cancelled before startal
            raise Cancelled()
        self._process = self._env.active_process
        work_to_consume = min(self._remaining, max_work_to_consume)
        assert work_to_consume > 0
        try:
            started_at = self._env.now
            # Once a no-cancel-after-start (high-priority) work item starts,
            # detach the cancel callback so it can no longer be cancelled.
            if not cancel_after_start and cancel_event and \
                    cancel_event.callbacks:
                cancel_event.callbacks.remove(self.cancel)
            if start_event and not start_event.triggered:
                start_event.succeed()
            yield \
                self._env.timeout(self._env.to_time(work_to_consume*inverse_rate))
        except simpy.Interrupt as interrupt:
            # Translate a 'cancelled' interrupt into the Cancelled exception;
            # re-raise anything else untouched.
            if interrupt.cause == 'cancelled':
                assert self._cancelled
                raise Cancelled() from interrupt
            raise
        finally:
            # Account for the work actually performed (also on interrupt),
            # then release the process slot and the cancel callback.
            ended_at = self._env.now
            self._remaining = max(
                self._remaining - self._env.to_time((ended_at-started_at)/inverse_rate),
                0)
            self._process = None
            if cancel_event and cancel_event.callbacks:
                cancel_event.callbacks.remove(self.cancel)
    def cancel(self, _=None):
        """
        Cancel all outstanding work, if any is left.
        """
        if self.consumed or self._cancelled:
            return
        self._cancelled = True
        if self._process:
            self._process.interrupt(cause='cancelled')
    @property
    def cancelled(self):
        """
        True if the request was cancelled before completion.
        """
        return self._cancelled
    @property
    def amount_consumed(self):
        """
        Returns the amount of work consumed so far. We try not to return the
        amount of work remaining, to hide this information from the scheduler.
        """
        return self._initial-self._remaining
    @property
    def consumed(self):
        """
        Returns True if this work item was fully consumed, False otherwise.
        """
        return self._remaining == 0
class Request(NamedObject):
    """
    A single user request travelling horizontally and vertically through the
    simulated system. Requests are created exclusively by the client.
    """

    def __init__(self, start_time):
        super().__init__(prefix='r')
        self._start_time = start_time
        self._end_time = None
        self._trace = []
        self._attained_time = 0

    @property
    def start_time(self):
        """Simulation time at which the request entered the application."""
        return self._start_time

    @property
    def end_time(self):
        """Simulation time at which the request left the application."""
        return self._end_time

    @end_time.setter
    def end_time(self, new_value):
        # May only be set once; afterwards it is immutable.
        assert self._end_time is None
        self._end_time = new_value

    def do_trace(self, when, who, direction):
        """
        Record a boundary event in the life of the request, such as entering
        or exiting a microservice, a VCPU or a CPU.
        """
        self._trace.append(TraceItem(when=when, who=who, direction=direction))

    @property
    def trace(self):
        """The list of TraceItem events recorded so far."""
        return self._trace

    @property
    def attained_time(self):
        """Amount of processing time this request has received so far."""
        return self._attained_time

    def add_attained_time(self, time):
        """Accumulate attained service time (used by the LAS scheduler)."""
        self._attained_time += time
|
{
"content_hash": "6b7141edbfe44f6058336396e4636352",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 88,
"avg_line_length": 34.61643835616438,
"alnum_prop": 0.6189157103284527,
"repo_name": "cristiklein/tailtamer",
"id": "568f50a0741403d2892618c2f3fbba9617a99344",
"size": "7581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tailtamer/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "677"
},
{
"name": "Python",
"bytes": "53834"
},
{
"name": "R",
"bytes": "6795"
}
],
"symlink_target": ""
}
|
from __future__ import division
import htpc, cherrypy, logging, xmlrpclib, base64
from cherrypy.lib.auth2 import require, member_of
from htpc.helpers import serve_template, fix_basepath, striphttp
class RTorrent(object):
    """HTPC-Manager module exposing an rTorrent client over XML-RPC.

    Registers itself in htpc.MODULES on construction and serves JSON
    endpoints for listing, controlling and adding torrents.
    """
    def __init__(self):
        self.logger = logging.getLogger('modules.rtorrent')
        # Register the module and its settings form with the HTPC core.
        htpc.MODULES.append({
            'name': 'rTorrent',
            'id': 'rtorrent',
            'test': htpc.WEBDIR + 'rtorrent/ping',
            'fields': [
                {'type': 'bool', 'label': 'Enable', 'name': 'rtorrent_enable'},
                {'type': 'text', 'label': 'Menu name', 'name': 'rtorrent_menuname'},
                {'type': 'bool', 'label': 'Use SSL', 'name': 'rtorrent_ssl'},
                {'type': 'text', 'label': 'Host *', 'name': 'rtorrent_host', 'placeholder': 'localhost:80',
                 'desc': 'RPC Communication URI. Usually scgi://localhost:5000, httprpc://localhost/rutorrent or localhost:80'},
                {'type': 'text', 'label': 'RPC Path', 'name': 'rtorrent_rpcpath',
                 'placeholder': '/RPC2', 'desc': 'Change if your RPC mount is at a different path'},
                {'type': 'text', 'label': 'Username', 'name': 'rtorrent_username'},
                {'type': 'password', 'label': 'Password',
                 'name': 'rtorrent_password'},
            ]
        })
    @cherrypy.expose()
    @require()
    def index(self):
        """Render the module's HTML page."""
        return htpc.LOOKUP.get_template('rtorrent.html').render(scriptname='rtorrent')
    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def queue(self):
        """Return the torrent list in a Deluge-like JSON structure."""
        server = xmlrpclib.Server(self.stored_rpcurl())
        # Fetch all fields for every torrent in the "main" view in one call.
        torrents = server.d.multicall("main", "d.get_name=",
                "d.get_bytes_done=", "d.get_complete=", "d.get_ratio=",
                "d.get_down_rate=", "d.get_up_rate=", "d.get_size_bytes=",
                "d.get_hash=", "d.get_state=")
        results = []
        for torrent in torrents:
            results.append({
                'name': torrent[0],
                'progress': (torrent[1] / torrent[6]) * 100,
                'is_finished': torrent[2],
                'ratio': torrent[3],
                'download_payload_rate': torrent[4],
                'upload_payload_rate': torrent[5],
                'eta': '-1', # TODO implement eta calculation
                'state': 'Started' if torrent[8] == 1 else 'Paused',
                'hash': torrent[7],
                'total_size': torrent[6]
            })
        return {'result': results}
    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def ping(self, rtorrent_host, rtorrent_rpcpath, rtorrent_username='', rtorrent_password='', rtorrent_ssl=False, **kwargs):
        """Settings-test endpoint: return the client version if reachable."""
        server_url = self.rpc_url(
            rtorrent_host, rtorrent_rpcpath, rtorrent_ssl, rtorrent_username, rtorrent_password)
        self.logger.debug("Trying to contact rtorrent via %s" % server_url)
        server = xmlrpclib.Server(server_url)
        return server.system.client_version()
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def start(self, torrentId=False):
        """Start one torrent, or all torrents when no id is given."""
        self.logger.debug("Starting torrent %s" % (torrentId))
        server = xmlrpclib.Server(self.stored_rpcurl())
        if torrentId is False:
            return server.d.multicall("main", "d.start")
        return server.d.start(torrentId)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def stop(self, torrentId=False):
        """Stop one torrent, or all torrents when no id is given."""
        self.logger.debug("Stopping torrent %s" % (torrentId))
        server = xmlrpclib.Server(self.stored_rpcurl())
        if torrentId is False:
            return server.d.multicall("main", "d.stop")
        return server.d.stop(torrentId)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def remove(self, torrentId):
        """Remove a torrent from the client (does not delete data)."""
        self.logger.debug("Removing torrent %s" % (torrentId))
        server = xmlrpclib.Server(self.stored_rpcurl())
        return server.d.erase(torrentId)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def add(self, filename=None, metainfo=None):
        """Add a torrent, from a base64 metainfo blob or a URL/filename."""
        self.logger.debug("Adding torrent: %s" % filename)
        server = xmlrpclib.Server(self.stored_rpcurl())
        if metainfo:
            data = base64.b64decode(metainfo)
            res = server.load_raw_start(xmlrpclib.Binary(data))
        else:
            res = server.load_start(filename)
        # rTorrent returns 0 on success.
        return {'error': False} if res == 0 else {'error': True}
    # For torrent search
    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def to_client(self, link, torrentname, **kwargs):
        """Torrent-search integration: add *link* to the client."""
        self.logger.debug("Adding torrent from torrentsearch")
        try:
            return self.add(link)
        except Exception as e:
            # NOTE(review): failures are logged and swallowed; the caller
            # receives null — presumably the UI tolerates that.
            self.logger.debug('Failed to add %s to rTorrent %s %s' % (torrentname, link, e))
    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def stats(self):
        """Return global down/up rates and their configured limits."""
        server = xmlrpclib.Server(self.stored_rpcurl())
        # Batch the four throttle queries into a single XML-RPC multicall.
        mc = xmlrpclib.MultiCall(server)
        mc.throttle.global_down.rate()
        mc.throttle.global_up.rate()
        mc.throttle.global_down.max_rate()
        mc.throttle.global_up.max_rate()
        results = mc()
        return {
            'result': {
                'stats': {
                    'download_rate': str(results[0] if results[0] >= 1024 else 0),
                    'upload_rate': str(results[1] if results[1] >= 1024 else 0),
                    'max_download_speed': str(results[2] / 1024 if results[2] >= 1024 else -1),
                    'max_upload_speed': str(results[3] / 1024 if results[3] >= 1024 else -1)
                }
            }
        }
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def set_downspeed(self, speed):
        """Set the global download limit (speed given in KiB/s)."""
        speed = "%sk" % speed
        self.logger.debug('Set download speed to %s' % speed)
        server = xmlrpclib.Server(self.stored_rpcurl())
        # NOTE(review): the RPC result is ignored; the endpoint returns null.
        result = server.set_download_rate(speed)
    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def set_upspeed(self, speed):
        """Set the global upload limit (speed given in KiB/s)."""
        speed = "%sk" % speed
        self.logger.debug('Set upload speed to %s' % speed)
        server = xmlrpclib.Server(self.stored_rpcurl())
        # NOTE(review): the RPC result is ignored; the endpoint returns null.
        result = server.set_upload_rate(speed)
    def stored_rpcurl(self):
        """Build the RPC URL from the persisted module settings."""
        return self.rpc_url(htpc.settings.get('rtorrent_host', ''), htpc.settings.get('rtorrent_rpcpath', ''),
                            htpc.settings.get('rtorrent_ssl'), htpc.settings.get('rtorrent_username', ''), htpc.settings.get('rtorrent_password', ''))
    def rpc_url(self, host, rpc_path, ssl, username, password):
        """Compose an http(s) XML-RPC endpoint URL with optional basic auth."""
        host = striphttp(host)
        rpc_path = fix_basepath(rpc_path).rstrip('/')
        if not rpc_path:
            rpc_path = '/RPC2'
        ssl = 's' if ssl else ''
        auth_string = ""
        if username or password:
            auth_string = "%s:%s@" % (username, password)
        server_url = 'http%s://%s%s%s' % (ssl, auth_string, host, rpc_path)
        return server_url
|
{
"content_hash": "8ac01562592d890b4ca9bdc75d2a789b",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 150,
"avg_line_length": 41.764367816091955,
"alnum_prop": 0.564744736479978,
"repo_name": "Hellowlol/HTPC-Manager",
"id": "5c124e80e55cfaa4cab174333c4a7c3415452ea3",
"size": "7289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master2",
"path": "modules/rtorrent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "352"
},
{
"name": "CSS",
"bytes": "55957"
},
{
"name": "HTML",
"bytes": "193526"
},
{
"name": "JavaScript",
"bytes": "596435"
},
{
"name": "Python",
"bytes": "4737828"
},
{
"name": "Shell",
"bytes": "5255"
}
],
"symlink_target": ""
}
|
from flask.ext.login import current_user
from flask.ext.restless import *
from models import (Warrior, Queue)
from app import db, bcrypt
from wqueue import schedule_queue
# Create the REST API
def check_auth(instance_id=None, **kw):
    """Preprocessor: reject unauthenticated requests with HTTP 401."""
    if current_user.is_authenticated():
        return
    raise ProcessingException(message='Not Authorized', status_code=401)
def check_admin(instance_id=None, **kw):
    """Preprocessor: reject the request unless an administrator is logged in."""
    if not current_user.is_authenticated():
        raise ProcessingException(message='Not Authorized', status_code=401)
    if not current_user.admin:
        raise ProcessingException(message='Not Authorized as Administrator',
                                  status_code=401)
def check_admin_or_user(instance_id=None, **kw):
    """Preprocessor: allow admins, or the user whose own id matches
    *instance_id*; everyone else gets HTTP 401."""
    if not current_user.is_authenticated():
        raise ProcessingException(message='Not Authorized', status_code=401)
    if current_user.admin or current_user.id == int(instance_id):
        return
    raise ProcessingException(message='Not Authorized for this Resource',
                              status_code=401)
def check_owner_single(instance_id=None, data=None, **kw):
    """Preprocessor for single-warrior access: admins always pass; otherwise
    the warrior must be public or owned by the current user. Checks the
    stored record when *data* is absent, else the submitted payload."""
    if current_user.admin:
        return
    if not data:
        warrior = Warrior.query.get(instance_id)
        if warrior.public:
            return
        if not any(owner.id == current_user.id for owner in warrior.owners):
            raise ProcessingException(message='Not Authorized for this Resource',
                                      status_code=401)
        return
    if data['public']:
        return
    if not any(owner['id'] == current_user.id for owner in data['owners']):
        raise ProcessingException(message='Not Authorized for this Resource',
                                  status_code=401)
def post_check_owner_many(result=None, **kw):
    """Postprocessor for collection GETs: raise HTTP 401 unless every returned
    object is public or owned by the current user. Admins see everything."""
    # Fix: removed stray debug `print(result)` which dumped response data
    # to stdout on every request.
    if current_user.admin:
        return
    for obj in result['objects']:
        is_owner = any(current_user.id == owner['id'] for owner in obj['owners'])
        if not is_owner and not obj['public']:
            raise ProcessingException(message='Not Authorized for this Resource',
                                      status_code=401)
def deny(**kw):
    """Preprocessor that unconditionally rejects the request with HTTP 401."""
    raise ProcessingException(message='Not Allowed', status_code=401)
def pre_hash(data=None, **kw):
    """Preprocessor: replace a plaintext 'passwdHash' field with its bcrypt
    hash before the record is persisted. No-op for other payloads."""
    # Fix: guard against the default data=None (previously AttributeError),
    # and test membership on the dict directly instead of data.keys().
    if data and "passwdHash" in data:
        data["passwdHash"] = bcrypt.generate_password_hash(data["passwdHash"])
def post_create_testq(result=None, **kw):
    """Postprocessor: after a machine is created, attach a companion
    testing queue to it and commit."""
    test_queue = Queue(
        name=result['name'] + " Testing",
        machineId=result['id'],
        qType=0,
        maxSubsPerWarrior=-1,
        maxSubsPerUser=-1,
    )
    db.session.add(test_queue)
    db.session.commit()
def post_schedule_queue_job(result=None, **kw):
    """Postprocessor: start or stop the scheduler job for a scheduled (qType 2) queue."""
    if result['qType'] != 2:
        return
    queue = Queue.query.get(result['id'])
    if result['active']:
        # activate: schedule a job if none is running yet
        if not queue.job:
            queue.job = schedule_queue(queue)
            db.session.commit()
    else:
        # deactivate: tear down any running job
        if queue.job:
            queue.stop_queue()
            queue.job = None
            db.session.commit()
|
{
"content_hash": "cdaabb5988d5d759ee10181d918284fe",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 81,
"avg_line_length": 31.43243243243243,
"alnum_prop": 0.5663513900831184,
"repo_name": "mahrz/kernkrieg",
"id": "a57c8c1a977deae96634386b31cb8ed0ab3543d9",
"size": "3489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89857"
},
{
"name": "JavaScript",
"bytes": "42205"
},
{
"name": "Python",
"bytes": "155583"
},
{
"name": "Shell",
"bytes": "288"
}
],
"symlink_target": ""
}
|
"""The tests for the Async Media player helper functions."""
import pytest
import homeassistant.components.media_player as mp
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
class ExtendedMediaPlayer(mp.MediaPlayerEntity):
    """Test media player that implements its own step/toggle helpers."""

    def __init__(self, hass):
        """Initialize the test media player."""
        self.hass = hass
        self._volume = 0
        self._state = STATE_OFF

    @property
    def state(self):
        """Return the current playback state."""
        return self._state

    @property
    def volume_level(self):
        """Return the current volume level (0..1)."""
        return self._volume

    @property
    def supported_features(self):
        """Return the feature flags this fake player supports."""
        features = mp.const.MediaPlayerEntityFeature.VOLUME_SET
        features |= mp.const.MediaPlayerEntityFeature.VOLUME_STEP
        features |= mp.const.MediaPlayerEntityFeature.PLAY
        features |= mp.const.MediaPlayerEntityFeature.PAUSE
        features |= mp.const.MediaPlayerEntityFeature.TURN_OFF
        features |= mp.const.MediaPlayerEntityFeature.TURN_ON
        return features

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._volume = volume

    def volume_up(self):
        """Raise the volume one 0.1 step, capped at 1."""
        if self.volume_level < 1:
            self.set_volume_level(min(1, self.volume_level + 0.1))

    def volume_down(self):
        """Lower the volume one 0.1 step, floored at 0."""
        if self.volume_level > 0:
            self.set_volume_level(max(0, self.volume_level - 0.1))

    def media_play(self):
        """Start playback."""
        self._state = STATE_PLAYING

    def media_pause(self):
        """Pause playback."""
        self._state = STATE_PAUSED

    def media_play_pause(self):
        """Toggle between playing and paused."""
        self._state = (
            STATE_PAUSED if self._state == STATE_PLAYING else STATE_PLAYING
        )

    def turn_on(self):
        """Power the player on."""
        self._state = STATE_ON

    def turn_off(self):
        """Power the player off."""
        self._state = STATE_OFF

    def toggle(self):
        """Flip the power state."""
        if self._state in (STATE_OFF, STATE_IDLE):
            self._state = STATE_ON
        else:
            self._state = STATE_OFF
class SimpleMediaPlayer(mp.MediaPlayerEntity):
    """Test media player that relies on the base class for step/toggle helpers."""

    def __init__(self, hass):
        """Initialize the test media player."""
        self.hass = hass
        self._volume = 0
        self._state = STATE_OFF

    @property
    def state(self):
        """Return the current playback state."""
        return self._state

    @property
    def volume_level(self):
        """Return the current volume level (0..1)."""
        return self._volume

    @property
    def supported_features(self):
        """Return the feature flags this fake player supports."""
        features = mp.const.MediaPlayerEntityFeature.VOLUME_SET
        features |= mp.const.MediaPlayerEntityFeature.VOLUME_STEP
        features |= mp.const.MediaPlayerEntityFeature.PLAY
        features |= mp.const.MediaPlayerEntityFeature.PAUSE
        features |= mp.const.MediaPlayerEntityFeature.TURN_OFF
        features |= mp.const.MediaPlayerEntityFeature.TURN_ON
        return features

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._volume = volume

    def media_play(self):
        """Start playback."""
        self._state = STATE_PLAYING

    def media_pause(self):
        """Pause playback."""
        self._state = STATE_PAUSED

    def turn_on(self):
        """Power the player on."""
        self._state = STATE_ON

    def turn_off(self):
        """Power the player off."""
        self._state = STATE_OFF
@pytest.fixture(params=[ExtendedMediaPlayer, SimpleMediaPlayer])
def player(hass, request):
    """Instantiate each media player implementation in turn."""
    player_cls = request.param
    return player_cls(hass)
async def test_volume_up(player):
    """Test the volume_up and set volume methods."""
    # Players start muted at volume 0.
    assert player.volume_level == 0
    await player.async_set_volume_level(0.5)
    assert player.volume_level == 0.5
    # One volume step is 0.1.
    await player.async_volume_up()
    assert player.volume_level == 0.6
async def test_volume_down(player):
    """Test the volume_down and set volume methods."""
    assert player.volume_level == 0
    await player.async_set_volume_level(0.5)
    assert player.volume_level == 0.5
    # One volume step is 0.1.
    await player.async_volume_down()
    assert player.volume_level == 0.4
async def test_media_play_pause(player):
    """Test the media_play_pause method."""
    assert player.state == STATE_OFF
    # First toggle starts playback ...
    await player.async_media_play_pause()
    assert player.state == STATE_PLAYING
    # ... second toggle pauses it.
    await player.async_media_play_pause()
    assert player.state == STATE_PAUSED
async def test_turn_on_off(player):
    """Test the turn on and turn off methods."""
    assert player.state == STATE_OFF
    await player.async_turn_on()
    assert player.state == STATE_ON
    await player.async_turn_off()
    assert player.state == STATE_OFF
async def test_toggle(player):
    """Test the toggle method."""
    assert player.state == STATE_OFF
    # Toggle flips power: off -> on -> off.
    await player.async_toggle()
    assert player.state == STATE_ON
    await player.async_toggle()
    assert player.state == STATE_OFF
|
{
"content_hash": "3b1d11d9c7d421249e2d609e75e85abb",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 66,
"avg_line_length": 28.44736842105263,
"alnum_prop": 0.6103607770582794,
"repo_name": "toddeye/home-assistant",
"id": "53c80bfc8deb1cd531b8bfcd1b5e874b92743a77",
"size": "5405",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/media_player/test_async_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Solvers for linear equations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import linalg_ops
def conjugate_gradient(operator,
                       rhs,
                       preconditioner=None,
                       x=None,
                       tol=1e-4,
                       max_iter=20,
                       name="conjugate_gradient"):
  r"""Conjugate gradient solver.
  Solves a linear system of equations `A*x = rhs` for selfadjoint, positive
  definite matrix `A` and right-hand side vector `rhs`, using an iterative,
  matrix-free algorithm where the action of the matrix A is represented by
  `operator`. The iteration terminates when either the number of iterations
  exceeds `max_iter` or when the residual norm has been reduced to `tol`
  times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\).
  Args:
    operator: An object representing a linear operator with attributes:
      - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
        length 2. `shape[0]` is the dimension on the domain of the operator,
        `shape[1]` is the dimension of the co-domain of the operator. On other
        words, if operator represents an N x N matrix A, `shape` must contain
        `[N, N]`.
      - dtype: The datatype of input to and output from `apply`.
      - apply: Callable object taking a vector `x` as input and returning a
        vector with the result of applying the operator to `x`, i.e. if
        `operator` represents matrix `A`, `apply` should return `A * x`.
    rhs: A rank-1 `Tensor` of shape `[N]` containing the right-hand size vector.
    preconditioner: An object representing a linear operator, see `operator`
      for detail. The preconditioner should approximate the inverse of `A`.
      An efficient preconditioner could dramatically improve the rate of
      convergence. If `preconditioner` represents matrix `M`(`M` approximates
      `A^{-1}`), the algorithm uses `preconditioner.apply(x)` to estimate
      `A^{-1}x`. For this to be useful, the cost of applying `M` should be
      much lower than computing `A^{-1}` directly.
    x: A rank-1 `Tensor` of shape `[N]` containing the initial guess for the
      solution.
    tol: A float scalar convergence tolerance.
    max_iter: An integer giving the maximum number of iterations.
    name: A name scope for the operation.
  Returns:
    output: A namedtuple representing the final state with fields:
      - i: A scalar `int32` `Tensor`. Number of iterations executed.
      - x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
      - r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
      - p: A rank-1 `Tensor` of shape `[N]`. `A`-conjugate basis vector.
      - gamma: \\(r \dot M \dot r\\), equivalent to \\(||r||_2^2\\) when
        `preconditioner=None`.
  """
  # ephemeral class holding CG state.
  cg_state = collections.namedtuple("CGState", ["i", "x", "r", "p", "gamma"])
  # Loop while iterations remain and the residual norm is still above tol
  # (tol is converted to an absolute threshold below).
  def stopping_criterion(i, state):
    return math_ops.logical_and(i < max_iter, linalg_ops.norm(state.r) > tol)
  def cg_step(i, state):  # pylint: disable=missing-docstring
    z = operator.apply(state.p)
    alpha = state.gamma / util.dot(state.p, z)
    x = state.x + alpha * state.p
    r = state.r - alpha * z
    # gamma = <r, M r>; with no preconditioner, M is the identity.
    if preconditioner is None:
      gamma = util.dot(r, r)
      beta = gamma / state.gamma
      p = r + beta * state.p
    else:
      q = preconditioner.apply(r)
      gamma = util.dot(r, q)
      beta = gamma / state.gamma
      p = q + beta * state.p
    return i + 1, cg_state(i + 1, x, r, p, gamma)
  with ops.name_scope(name):
    # Work with column vectors internally; squeeze back to rank 1 on return.
    n = operator.shape[1:]
    rhs = array_ops.expand_dims(rhs, -1)
    if x is None:
      x = array_ops.expand_dims(
          array_ops.zeros(n, dtype=rhs.dtype.base_dtype), -1)
      r0 = rhs
    else:
      x = array_ops.expand_dims(x, -1)
      r0 = rhs - operator.apply(x)
    if preconditioner is None:
      p0 = r0
    else:
      p0 = preconditioner.apply(r0)
    gamma0 = util.dot(r0, p0)
    # Convert the relative tolerance into an absolute bound on ||r||.
    tol *= linalg_ops.norm(r0)
    i = constant_op.constant(0, dtype=dtypes.int32)
    state = cg_state(i=i, x=x, r=r0, p=p0, gamma=gamma0)
    _, state = control_flow_ops.while_loop(stopping_criterion, cg_step,
                                           [i, state])
    return cg_state(
        state.i,
        x=array_ops.squeeze(state.x),
        r=array_ops.squeeze(state.r),
        p=array_ops.squeeze(state.p),
        gamma=state.gamma)
|
{
"content_hash": "11a8f9248ffa2d0eb3cc4e4e6cbed68b",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 80,
"avg_line_length": 42.43589743589744,
"alnum_prop": 0.6418932527693857,
"repo_name": "dendisuhubdy/tensorflow",
"id": "9305c6a11c4ec898c82553773e8e7277a54ab82e",
"size": "5654",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/solvers/python/ops/linear_equations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "304178"
},
{
"name": "C++",
"bytes": "43473103"
},
{
"name": "CMake",
"bytes": "202538"
},
{
"name": "Go",
"bytes": "1148824"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "755551"
},
{
"name": "Jupyter Notebook",
"bytes": "2211560"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48603"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "36820408"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "428510"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
from sqlalchemy import Column, ForeignKey, UniqueConstraint
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref, relationship
from sqlalchemy.types import Boolean, Integer, String
from pycroft.model.base import IntegerIdModel
class Site(IntegerIdModel):
    """A named site grouping one or more buildings."""
    name = Column(String(), nullable=False)
class Building(IntegerIdModel):
    """A building belonging to a Site, addressed by street + number."""
    site_id = Column(Integer, ForeignKey(Site.id), nullable=False, index=True)
    site = relationship(Site, backref=backref("buildings"))
    # house number within the street (stored as text)
    number = Column(String(), nullable=False)
    short_name = Column(String(), unique=True, nullable=False)
    street = Column(String(), nullable=False)
    # a street/number pair identifies a building uniquely
    __table_args__ = (UniqueConstraint("street", "number", name="address"),)
class Room(IntegerIdModel):
    """A room on a given level of a Building."""
    number = Column(String(), nullable=False)
    level = Column(Integer, nullable=False)
    inhabitable = Column(Boolean, nullable=False)
    # many to one from Room to Building
    building_id = Column(
        Integer, ForeignKey(Building.id, onupdate='CASCADE'), nullable=False,
        index=True,
    )
    building = relationship(Building, backref=backref("rooms", order_by=(level, number)))
    # patch ports located in this room that are wired to a switch port
    connected_patch_ports = relationship(
        'PatchPort',
        primaryjoin='and_(PatchPort.room_id == Room.id, PatchPort.switch_port_id != None)',
    )
    def __str__(self):
        return "{} {} {}".format(self.building.short_name, self.level,
                                 self.number)
    # NOTE(review): __unicode__ is a Python 2 protocol; presumably kept for
    # legacy callers — confirm whether it can be dropped.
    def __unicode__(self):
        return u"{} {} {}".format(self.building.short_name, self.level,
                                  self.number)
    @hybrid_property
    def short_name(self):
        # e.g. "HSS46 2-15": building short name, level, room number
        return "{} {}-{}".format(self.building.short_name, self.level, self.number)
    @hybrid_property
    def is_switch_room(self):
        # local import avoids a circular dependency with pycroft.model.host
        from pycroft.model.host import Host
        from pycroft.model.host import Switch
        # True iff at least one Switch host is located in this room
        return Host.q.join(Switch, Host.id == Switch.host_id).filter(Host.room_id==self.id).first() is not None
|
{
"content_hash": "0fbad10efd65f538a6f8579bfbbbb195",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 111,
"avg_line_length": 35.839285714285715,
"alnum_prop": 0.6626806178375685,
"repo_name": "lukasjuhrich/pycroft",
"id": "d62ceae807cdedfeac46d21ae8fc91765e0457da",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pycroft/model/facilities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9709"
},
{
"name": "Dockerfile",
"bytes": "2877"
},
{
"name": "HTML",
"bytes": "98163"
},
{
"name": "JavaScript",
"bytes": "66723"
},
{
"name": "Mako",
"bytes": "509"
},
{
"name": "Python",
"bytes": "907170"
},
{
"name": "Shell",
"bytes": "12435"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
''' unit test suite for SQL parsing features of rdbhdb.'''
import unittest
import time
import sys, os
import accounts
import asyncio
# Make the bundled driver importable. os.path.join replaces the original
# Windows-only literal '..\lib', whose '\l' is an invalid escape sequence
# (SyntaxWarning on modern Python) and a broken path on POSIX.
sys.path.insert(0, os.path.join('..', 'lib'))
from rdbhdb import rdbhdb
def asyncio_meth_ruc(f):
    """Decorator: run the wrapped coroutine method to completion synchronously."""
    def runner(self, *args):
        event_loop = asyncio.get_event_loop()
        event_loop.run_until_complete(f(self, *args))
    return runner
def asyncio_ruc(f):
    """Run the no-argument coroutine function *f* to completion on the event loop."""
    asyncio.get_event_loop().run_until_complete(f())
# Minimum rdbhdb driver version this test suite requires.
need_version = '0.11.0'
class test_Rdbhdb_sql(unittest.TestCase):
    """SQL-parsing tests (LIMIT/OFFSET and comment handling) for rdbhdb.

    Runs against a live rdbhost server; test methods are generator-based
    coroutines driven synchronously by the asyncio_meth_ruc decorator.
    """
    driver = rdbhdb
    # get choice of server from environment
    HOST = os.environ.get('RDBHOST_TEST', "dev.rdbhost.com").strip("'")
    #print >> sys.stderr, 'Using SERVER', HOST
    connect_args = ()
    connect_kw_args = {
        'asyncio': True,
        'role': accounts.demo['role'],
        'authcode': accounts.demo['authcode'],
        'host': HOST}
    table_prefix = 'extras_' # If you need to specify a prefix for tables
    # ddl1 creates a 510-row table; individual tests select from it with
    # various LIMIT / comment combinations.
    ddl1 = '''CREATE TABLE %sbig (value) AS SELECT * FROM generate_series(0, 509);''' % table_prefix
    xddl1 = 'drop table %sbig' % table_prefix
    lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
    # Some drivers may need to override these helpers, for example adding
    # a 'commit' after the execute.
    def executeDDL1(self, cursor):
        yield from cursor.execute(self.ddl1)
    def setUp(self):
        # Call superclass setUp In case this does something in the
        # future
        try:
            con = self._connect()
            con.close()
        except Exception as e:
            print('connection not made. %s db must be created online.'%e.args[0])
            sys.exit(2)
    @asyncio_meth_ruc
    def tearDown(self):
        ''' self.drivers should override this method to perform required cleanup
        if any is necessary, such as deleting the test database.
        The default drops the tables that may be created.
        '''
        con = self._connect()
        try:
            cur = con.cursor()
            for ddl in (self.xddl1, ):
                try:
                    yield from cur.execute(ddl)
                    con.commit()
                except self.driver.Error:
                    # Assume table didn't exist. Other tests will check if
                    # execute is busted.
                    pass
        finally:
            con.close()
    def _connect(self):
        try:
            return self.driver.connect(*self.connect_args, **self.connect_kw_args)
        except AttributeError:
            self.fail("No connect method found in self.driver module")
    def _fetch(self, q, ct=50):
        # Create the fixture table, run q, and assert exactly ct rows return.
        con = self._connect()
        #con.autorefill = True
        try:
            cur = con.cursor()
            yield from self.executeDDL1(cur)
            yield from cur.execute(q, ())
            #results = cur.fetchmany(ct)
            results = cur.fetchall()
            self.assertEqual(len(results), ct, 'fetchmany wanted %s records, got %s' % (ct, len(results)))
        finally:
            con.close()
    def test000_host(self):
        """Announce which server we are using. """
        print('using server', self.HOST, file=sys.stderr, end=" ")
    def test001_version(self):
        """Verify correct version of DB API module. """
        self.assertTrue(rdbhdb.__version__ >= need_version, rdbhdb.__version__)
    @asyncio_meth_ruc
    def test01_limit5(self):
        """Tests small limit. """
        q = 'SELECT * FROM %sbig LIMIT 5' % self.table_prefix
        yield from self._fetch(q, 5)
    @asyncio_meth_ruc
    def test02_limit250(self):
        """Tests high 250 limit"""
        q = 'SELECT * FROM %sbig LIMIT 250' % self.table_prefix
        yield from self._fetch(q, 250)
    @asyncio_meth_ruc
    def test03_commented25_lim(self):
        """Test dblhyphen comment. """
        q = """SELECT * FROM %sbig
            -- LIMIT 25
            WHERE value < 100
            """ % self.table_prefix
        yield from self._fetch(q, 100)
    @asyncio_meth_ruc
    def test04_commented250_lim(self):
        """Test dblhyphen comment with limit. """
        q = """SELECT * FROM %sbig
            -- LIMIT 25
            WHERE value < 300
            LIMIT 250
            """ % self.table_prefix
        yield from self._fetch(q, 250)
    @asyncio_meth_ruc
    def test05_nestedcomment250_lim(self):
        """Tests nested comments with high limit"""
        q = """SELECT * FROM %sbig
            /* beginning of comment that
            /* nests once */ OFFSET 100
            how about that. Limit 200 */
            WHERE value < 300
            LIMIT 250
            """ % self.table_prefix
        yield from self._fetch(q, 250)
    @asyncio_meth_ruc
    def test06_nestedcomment150_limoff(self):
        """Tests nested comments with limit and offset. """
        q = """SELECT * FROM %sbig
            /* beginning of comment that
            /* nests once */ OFFSET 100
            how about that. Limit 200 */
            WHERE value < 300
            LIMIT 250
            OFFSET 150
            """ % self.table_prefix
        yield from self._fetch(q, 150)
    @asyncio_meth_ruc
    def test07_subsel150_lim(self):
        """Tests subselect with limit 150"""
        q = """SELECT * FROM %sbig
            WHERE value IN (SELECT * FROM %sbig LIMIT 150)
            LIMIT 250
            """ % (self.table_prefix, self.table_prefix)
        yield from self._fetch(q, 150)
    @asyncio_meth_ruc
    def test08_postcomment_lim(self):
        """Test -- commenting w/o newline"""
        q = """SELECT 1+1;
            --CREATE table %sdummy ();
            """ % self.table_prefix
        yield from self._fetch(q, 1)
class test_Rdbhdb_sql_ws(test_Rdbhdb_sql):
    """Repeat the SQL-parsing suite over the websocket transport."""
    # Same connection settings as the base class, plus useWebsocket.
    connect_kw_args = dict(
        test_Rdbhdb_sql.connect_kw_args,
        useWebsocket=True,
    )
if __name__ == '__main__':
    # Run the whole suite when the file is executed directly.
    unittest.main()
|
{
"content_hash": "37ea979ec28dd1a535b72d36d2a1905a",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 106,
"avg_line_length": 31.540816326530614,
"alnum_prop": 0.5469103849886768,
"repo_name": "rdbhost/Rdbhdb",
"id": "57b0d926d24fc589a49f328b3e71178e5edfa304",
"size": "6182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unittests-py3aio/test_rdbhdb_sql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16403"
},
{
"name": "JavaScript",
"bytes": "54513"
},
{
"name": "Makefile",
"bytes": "6762"
},
{
"name": "Python",
"bytes": "458232"
},
{
"name": "Shell",
"bytes": "6479"
}
],
"symlink_target": ""
}
|
import os
from numpy import array, concatenate
import scipy
from scipy import ndimage
import numpy as np
def convNd_sparse(src, kernel, centroids):
    """ND convolution at sparse sampling centroids.

    For input of K N-dimensional centroids which are locations
    within N-dimensional src image, K scalars are produced as if
    the convolution src*kernel was sampled at those centroids.

    The coordinates of each centroid MUST lie within the valid
    sub-region of the src image grid, i.e. at least kernel-radius
    distance from image edges in each dimension.

    Parameters:
       src: N-dimensional source array
       kernel: N-dimensional kernel; every dimension must have odd width
       centroids: iterable of K N-dimensional integer coordinates

    Returns a rank-1 array of K sampled values with src's dtype.
    """
    # an even kernel width has no center voxel; the symmetric slice below
    # would silently cover the wrong extent, so reject it up front
    # (same convention as convNx1d/maxNx1d)
    if any((w % 2) != 1 for w in kernel.shape):
        raise NotImplementedError('convNd_sparse on even-length kernel')
    results = []
    kernel_radii = [w//2 for w in kernel.shape]
    for centroid in centroids:
        slc = tuple(
            slice(int(centroid[d] - kernel_radii[d]), int(centroid[d] + kernel_radii[d] + 1))
            for d in range(len(src.shape))
        )
        box = src[slc]
        results.append((box * kernel).sum())
    return array(results, dtype=src.dtype)
def convNx1d(src, kernels):
    """ND convolution using separable 1D kernels.

    Convolves src along each axis with the matching 1D kernel and trims
    the invalid border (one kernel radius) from every filtered axis.
    Uses ndimage.convolve1d and produces float32 intermediate and final
    results regardless of the input dtype.
    """
    for axis, kernel in enumerate(kernels):
        length = src.shape[axis]
        width = len(kernel)
        if (width % 2) != 1:
            raise NotImplementedError('convNx1d on even-length kernel')
        radius = width // 2
        if radius < 1:
            print("warning: dimension %d kernel %d is too small, has no effect" % (axis, width))
            continue
        elif radius > length:
            raise ValueError("dimension %d length %d too small for kernel %d" % (axis, length, width))
        src = ndimage.convolve1d(
            src.astype('float32', copy=False),
            array(kernel, dtype='float32'),
            mode='constant',
            axis=axis
        )
        # discard border samples contaminated by the zero padding
        trim = [slice(None)] * axis + [slice(radius, -radius)] + [Ellipsis]
        src = src[tuple(trim)]
    return src
def maxNx1d(src, lengths):
    """ND maximum filter using separable 1D windows, trimming borders."""
    for axis, width in enumerate(lengths):
        length = src.shape[axis]
        if (width % 2) != 1:
            raise NotImplementedError('maxNx1d on even-length %d kernel' % width)
        radius = width // 2
        if radius < 1:
            print("warning: dimension %d kernel %d is too small, has no effect" % (axis, width))
            continue
        elif width > length:
            raise ValueError("dimension %d length %d too small for kernel %d" % (axis, length, width))
        src = ndimage.maximum_filter1d(
            src,
            width,
            mode='constant',
            axis=axis
        )
        # drop border positions whose window extended past the data
        trim = [slice(None)] * axis + [slice(radius, -radius)] + [Ellipsis]
        src = src[tuple(trim)]
    return src
def equitrim(arrays):
    """Yield each array trimmed symmetrically to the smallest common shape.

    None entries are passed through unchanged. All non-None arrays must
    have the same rank. Odd excess is split floor/ceil between the two
    edges of a dimension.

    Bug fix: the original built index tuples containing ``None`` for
    dimensions already at the minimum size; indexing a numpy array with
    ``None`` inserts a new axis (np.newaxis) instead of taking a full
    slice, so already-minimal arrays came back with spurious leading
    axes. ``slice(0, None)`` is used instead.
    """
    minshape = None
    for a in arrays:
        if a is None:
            continue
        if minshape is None:
            minshape = a.shape
        else:
            assert len(minshape) == len(a.shape)
            minshape = list(map(min, minshape, a.shape))
    for a in arrays:
        if a is None:
            yield a
        else:
            slices = []
            for d in range(len(minshape)):
                excess = a.shape[d] - minshape[d]
                # floor(excess/2) off the front, ceil(excess/2) off the back;
                # `or None` turns -0 into a full-length slice when excess == 0
                slices.append(slice(excess // 2, (-excess) // 2 or None))
            yield a[tuple(slices)]
def assign_voxels(syn_values, centroids, valid_shape, syn_kernel_3d, gridsize=None):
    """Assign voxels to features and fill with segment ID.

    Parameters:
       syn_values: measured synapse core intensity as from analyze
       centroids: synapse locations as from analyze
       valid_shape: the size of the processed volume
       syn_kernel_3d: the 3D kernel representing synapse cores
       gridsize: unused; retained for interface compatibility

    Results:
       a Numpy array with voxels filled with segment IDs

    The length N of syn_values and centroids is mapped to segment
    ID range (1...N) while non-synapse voxels are labeled zero.
    The array has shape matching valid_shape and integer type large
    enough to hold all segment IDs.
    """
    # compute mutually-exclusive gaussian segments
    # in overlaps, voxel assigned by max kernel-weighted segment core value
    assert len(centroids) == len(syn_values)
    N = len(syn_values)
    # narrowest unsigned dtype that can represent labels 1..N
    if N < 2**8:
        dtype = np.uint8
    elif N < 2**16:
        dtype = np.uint16
    elif N < 2**32:
        dtype = np.uint32
    elif N < 2**64:
        dtype = np.uint64
    else:
        raise NotImplementedError("Absurdly large segment count %s" % N)
    gaussian_map = np.zeros( valid_shape, dtype=np.float32 ) # voxel -> weighted core values
    segment_map = np.zeros( valid_shape, dtype=dtype ) # voxel -> label
    # use a slight subset as the splatting body
    body_shape = syn_kernel_3d.shape
    D, H, W = body_shape
    # SYNSPY_SPLAT_SIZE in [0, 2] scales how much of the kernel is splatted
    radial_fraction = np.clip(float(os.getenv('SYNSPY_SPLAT_SIZE', '1.0')), 0, 2)
    limits = (
        syn_kernel_3d[D//2-1,0,W//2-1],
        syn_kernel_3d[D//2,H//2,W//2]
    )
    limit = limits[1] - (limits[1] - limits[0]) * radial_fraction
    mask_3d = syn_kernel_3d >= limit
    mask_3d[tuple([w//2 for w in mask_3d.shape])] = 1 # fill at least central voxel
    print("SPLAT BOX SHAPE %s USER COEFFICIENT %f MASK VOXELS %d" % (
        body_shape,
        radial_fraction,
        mask_3d.sum()
    ))
    weights = syn_kernel_3d * mask_3d
    def splat_segment(label):
        # paint one segment's weighted kernel into the maps, claiming each
        # voxel only where its weighted value beats the current occupant
        weighted = weights * syn_values[label]
        centroid = centroids[label]
        def map_slice(centroid):
            # splats are confined to boundaries of valid_shape map
            def helper(d):
                lower = centroid[d] - body_shape[d]//2
                upper = centroid[d] + body_shape[d]//2 + body_shape[d]%2
                if lower < 0:
                    lower = 0
                if upper > valid_shape[d]:
                    upper = valid_shape[d]
                return slice(lower,upper)
            return tuple(map(helper, list(range(3))))
        def body_slice(centroid):
            # splats are cropped by boundaries of map
            def helper(d):
                lower = 0
                upper = body_shape[d]
                if centroid[d] < body_shape[d]//2:
                    lower = body_shape[d]//2 - centroid[d]
                if (centroid[d] + body_shape[d]//2 + body_shape[d]%2) > valid_shape[d]:
                    upper -= (centroid[d] + body_shape[d]//2 + body_shape[d]%2) - valid_shape[d]
                return slice(lower,upper)
            return tuple(map(helper, list(range(3))))
        mslc = map_slice(centroid)
        bslc = body_slice(centroid)
        # update maps for assigned voxels
        try:
            segvoxels = gaussian_map[mslc] < weighted[bslc]
            segment_map[mslc] = segment_map[mslc] * (~segvoxels) + (label+1) * segvoxels
            gaussian_map[mslc] = gaussian_map[mslc] * (~segvoxels) + weighted[bslc] * segvoxels
        except Exception as err:
            # was a bare `except:`; keep the best-effort diagnostic behavior
            # but don't swallow KeyboardInterrupt/SystemExit, and report the
            # actual error alongside the slice geometry
            print(label, centroid, mslc, bslc, valid_shape, body_shape, err)
    for label in range(len(syn_values)):
        splat_segment(label)
    # release the float accumulation map before returning the label map
    gaussian_map = None
    return segment_map
|
{
"content_hash": "4d96ff5f066d1304d65914993146d7d7",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 140,
"avg_line_length": 33.46551724137931,
"alnum_prop": 0.5633693972179289,
"repo_name": "informatics-isi-edu/synspy",
"id": "1f9c26a3764bd0f79b5037ce82334afeb5147940",
"size": "7900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synspy/analyze/np.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "MATLAB",
"bytes": "12425"
},
{
"name": "Python",
"bytes": "4283044"
},
{
"name": "Shell",
"bytes": "2422"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, render_template, session, redirect, url_for, request, abort, flash
from flask import current_app
from itsdangerous import URLSafeSerializer, BadSignature
from flask.ext.login import current_user, login_user, logout_user
from flask.ext.mail import Message
from app.helpers import login_required
from app import db, login_manager, mail
from .models import User
from .forms import LoginForm, RegisterForm, AccountSettingsForm
# Blueprint holding all authentication views, mounted under /users.
auth = Blueprint(
    'auth',
    __name__,
    template_folder='templates',
    url_prefix='/users'
)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook to load a User instance from ID"""
    # Primary-key lookup; a None result is treated by Flask-Login
    # as "no such session user".
    return db.session.query(User).get(user_id)
def get_serializer(secret_key=None):
    """Return a URLSafeSerializer, defaulting to the app's SECRET_KEY."""
    key = secret_key if secret_key is not None else current_app.config['SECRET_KEY']
    return URLSafeSerializer(key)
def get_activation_link(user):
    """Build the external, signed account-activation URL for *user*."""
    payload = get_serializer().dumps(user.id)
    return url_for('auth.activate_user', payload=payload, _external=True)
@auth.route('/login/', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate submitted credentials."""
    # Already signed in: go straight to the app.
    if current_user.is_authenticated():
        return redirect(url_for('donors.index'))
    form = LoginForm()
    error = None
    if request.method == 'POST' and form.validate_on_submit():
        user, authenticated = User.authenticate(
            db.session.query,
            form.username.data.lower().strip(),
            form.password.data,
        )
        if authenticated:
            login_user(user)
            return redirect(url_for('donors.index'))
        error = 'Incorrect username or password'
    return render_template('auth/login.html', form=form, error=error)
@auth.route('/logout/', methods=['GET', 'POST'])
def logout():
    """Log the current user out and return to the login page."""
    logout_user()
    return redirect(url_for('auth.login'))
@auth.route('/settings/<int:user_id>/', methods=['GET', 'POST'])
@login_required
def settings(user_id):
    """Show and update the account settings of the given user.

    NOTE(review): any authenticated user can reach any user_id here —
    confirm whether this should be restricted to current_user or admins.
    """
    user = db.session.query(User).get(user_id)
    error = None
    if request.method == 'GET':
        form = AccountSettingsForm(obj=user)
    else:
        form = AccountSettingsForm()
        if form.validate_on_submit():
            # Persist the edited profile fields.
            user.name = form.name.data
            user.email = form.email.data
            user.phone = form.phone.data
            user.shop_realtime = form.shop_realtime.data
            # Only overwrite the password when a new one was supplied.
            if form.password.data:
                user.password = form.password.data
            db.session.commit()
            return redirect(url_for('auth.settings', user_id=user.id))
    # GET, or POST with validation errors: render the form. (A redundant
    # second User query on this path was removed; `user` is already loaded.)
    return render_template('auth/settings.html', user=user, form=form, error=error)
@auth.route('/register/', methods=['GET', 'POST'])
def register():
    """Create a new, inactive account and mail out its activation link."""
    form = RegisterForm()
    error = None
    if request.method == 'POST' and form.validate_on_submit():
        new_user = User()
        # Copy the submitted profile fields onto the fresh user.
        for field in ('username', 'name', 'password', 'email', 'phone'):
            setattr(new_user, field, getattr(form, field).data)
        # Accounts stay inactive until the e-mail link is confirmed.
        new_user.active = False
        new_user.status = 'awaiting_confirm'
        db.session.add(new_user)
        db.session.commit()
        activation_url = get_activation_link(new_user)
        send_awaiting_confirm_mail(new_user, message_url=activation_url)
        flash("Email Verification sent. Please confirm your account activation via e-mail", 'info')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form, error=error)
@auth.route('/activate/<payload>/')
def activate_user(payload):
    """Activate the account identified by the signed *payload*, or 404."""
    serializer = get_serializer()
    try:
        user_id = serializer.loads(payload)
    except BadSignature:
        # Tampered or malformed token: pretend the page doesn't exist.
        abort(404)
    user = db.session.query(User).get(user_id)
    if user is None:
        return abort(404)
    user.activate()
    db.session.commit()
    flash('Your Account is now Active. Please reach us teamgivn@gmail.com if you have any questions')
    flash('We offer free setup and help you take control of your store displays.')
    return redirect(url_for('auth.login'))
def send_awaiting_confirm_mail(user, message_url):
    """
    Send the awaiting for confirmation mail to the user.

    :param user: the newly registered User; must have name and email set
    :param message_url: the signed activation link from get_activation_link()
    """
    subject = "Welcome to Givn"
    msg = Message(subject=subject, sender=current_app.config['MAIL_SENDER'], recipients=[user.email])
    msg.body = """
    Dear %s,
    Thank you for signing up for Givn to give you greater control over your website display and measure what works. There's one last thing before you can upload images and start running your carousel sliders. Please confirm your account by clicking on this link: %s
    Thanks,
    Givn Team
    teamgivn@gmail.com
    """ %(user.name, message_url)
    # msg.html = '<b>HTML</b> body'
    with current_app.app_context():
        mail.send(msg)
|
{
"content_hash": "99f5dcdccdf365f4aaad64e58c1cfb17",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 267,
"avg_line_length": 36.74264705882353,
"alnum_prop": 0.6577946768060836,
"repo_name": "teamgivn/givnapp",
"id": "88f8783b5355478d39c031e6aa6fcb01ac54bb6c",
"size": "4997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/blueprints/auth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "210090"
},
{
"name": "JavaScript",
"bytes": "560673"
},
{
"name": "Python",
"bytes": "18897"
},
{
"name": "Shell",
"bytes": "2557"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    """Attach a second model manager named 'submission' to QueryJob so the
    migration state matches the managers declared on the Python model."""

    dependencies = [
        ('daiquiri_query', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='queryjob',
            managers=[
                ('submission', django.db.models.manager.Manager()),
            ],
        ),
    ]
|
{
"content_hash": "b9a10b9c952b3baa052365adf1c2f031",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 67,
"avg_line_length": 21.5,
"alnum_prop": 0.5837209302325581,
"repo_name": "aipescience/django-daiquiri",
"id": "330e143001745d4a54b94c3f32baf7f413c5de06",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daiquiri/query/migrations/0002_meta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28598"
},
{
"name": "HTML",
"bytes": "236579"
},
{
"name": "JavaScript",
"bytes": "97087"
},
{
"name": "Python",
"bytes": "602159"
}
],
"symlink_target": ""
}
|
from traitlets import directional_link, HasTraits, TraitError, observe, validate, Int, Instance
from .tree import Tree
from .traittypes import TreeType
class HasTree(HasTraits):
    """Base that exposes a ``tree`` trait, optionally mirrored from a source.

    ``src`` may be a Tree itself, an object that owns a ``tree`` trait (in
    which case a one-way traitlets link keeps ``self.tree`` in sync), a plain
    object with a ``tree`` attribute (copied once, not linked), or None.
    """

    tree = Instance(klass=Tree, allow_none=True)
    src = TreeType(allow_none=True)

    def __init__(self, src=None):
        # Holds the directional_link created when src is a HasTraits tree
        # owner; None otherwise. Must exist before validate_src can run.
        self._link = None
        super().__init__()
        if src is not None:
            self.src = src

    @validate('src')
    def validate_src(self, proposal):
        """Accept a Tree, a tree owner, or None; set/link ``self.tree``.

        Any previously established link is torn down before the new one (if
        any) is recorded, so at most one src->tree link is live at a time.
        """
        # print(f'>> HasTree {id(self)} validate src\n\t {proposal}')
        src = proposal['value']
        link = None
        if isinstance(src, Tree):
            self.tree = src
        elif isinstance(src, HasTraits) and src.has_trait('tree'):
            # Live mirror: updates to src.tree propagate to self.tree.
            link = directional_link((src, 'tree'), (self, 'tree'))
        elif hasattr(src, 'tree'):
            # One-shot copy; later changes on src are NOT tracked.
            self.tree = getattr(src, 'tree')
        elif src is None:
            self.tree = None
        else:
            raise TraitError('must be a tree or an owner of a tree')
        if self._link is not None:
            self._link.unlink()
        self._link = link
        # print(f'<< HasTree {id(self)} validate src')
        return src

    # @observe('tree')
    # def act(self, change):
    #     print(f'HasTree {id(self)}received a new tree {change["new"]}')
|
{
"content_hash": "baaa995935e3a608be3a74968c5a8d92",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 95,
"avg_line_length": 30.976190476190474,
"alnum_prop": 0.5703305149884704,
"repo_name": "yarden-livnat/regulus",
"id": "65b971fbec7cb02b6bc2a6ed4626ab750cbf1c22",
"size": "1301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regulus/tree/hastree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70587"
}
],
"symlink_target": ""
}
|
"""
:synopsis: Define the basic script class that will generate the script code.
"""
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from importlib import import_module
from .url import get_url, check_valid_url
class AbstractScript(object):
    """Abstract representation of a script.

    Subclasses set ``__language__``; load_attributes() then copies every
    ``code_*`` snippet from the matching template module onto the class, and
    generate_script() stitches those snippets together around the request
    details.
    """

    __language__ = ''

    # Snippet placeholders; overwritten from the language template module by
    # load_attributes() at construction time.
    code_begin = ''
    code_header = ''
    code_proxy = ''
    code_post = ''
    code_https = ''
    code_search = ''
    code_nosearch = ''

    def __init__(self, headers=None, details=None, search=None):
        """Initialize the script generation.

        :param list headers: Headers list containing fields like 'Host', 'User-Agent', etc.
        :param dict details: Request specific details dictionary like body and method of the request.
        :param str search: String to search for in the response to the request.

        :raises ValueError: When url is invalid.
        """
        self.load_attributes(self.__class__)
        self._script = ''
        self.headers = headers
        self.details = details
        self.search = search
        self.url = ''
        if self.details:
            self.url = self.encode_url(self.create_url())

    def generate_script(self, headers=None, details=None, search=None):
        """Generate script code.

        :param list headers: Headers list containing fields like 'Host', 'User-Agent', etc.
        :param dict details: Request specific details dictionary like body and method of the request.
        :param str search: String to search for in the response to the request.

        :raises ValueError: when unsupported HTTP method, invalid `headers` or `details` values.

        :return: Generated script code.
        :rtype: str
        """
        # NOTE(review): snippets are appended to self._script, which is only
        # reset in __init__ — calling generate_script() twice on one instance
        # appears to duplicate output. Confirm whether that is intended.
        self.headers = headers or self.headers
        self.details = details or self.details
        self.search = search or self.search
        if not self.headers:
            raise ValueError("'headers' cannot be equal to '%s'" % self.headers)
        elif not self.details:
            raise ValueError("'details' cannot be equal to '%s'" % self.details)
        if not self.url and self.details:
            self.url = self.encode_url(self.create_url())
        if self.code_begin:
            self._script += self._generate_begin()
        if self.code_proxy:
            self._script += self._generate_proxy()
        method = self.details.get('method', '').strip().lower()
        if method == 'get':
            pass
        elif method == 'post':
            if self.code_post:
                self._script += self._generate_post()
        else:
            raise ValueError("'%s' is not supported! Only GET and POST are supported for now." % self.details['method'])
        if self.code_https:
            self._script += self._generate_https()
        self._script += self._generate_request()
        return self._script

    def _generate_begin(self):
        """Default generation of the beginning of the code.

        :return: Beginning of the code.
        :rtype: str
        """
        return self.code_begin

    def _generate_headers(self):
        """Default generation of request headers.

        :return: Code snippet with HTTP requests headers.
        :rtype: str
        """
        code = ''
        for item in self.headers:
            # Split only on the first ':' so header values containing ':'
            # (e.g. URLs) survive intact; quotes are escaped for embedding.
            header, value = item.split(':', 1)
            code += self.code_header.format(header=header.replace('"', '\\"'), value=value.replace('"', '\\"'))
        return code

    def _generate_proxy(self):
        """Default generation of the proxy specific code.

        :return: Code snippet with the proxy information, or '' when no
            proxy host/port was supplied in details.
        :rtype: str
        """
        if 'proxy_host' in self.details and 'proxy_port' in self.details:
            return self.code_proxy.format(proxy='%s:%s' % (self.details['proxy_host'], self.details['proxy_port']))
        return ''

    def _generate_post(self):
        """Default generation of the post body code.

        :return: Code snippet containing body to be sent in request.
        :rtype: str
        """
        return self.code_post.format(data=self.details.get('data', '').replace('"', '\\"'))

    def _generate_https(self):
        """Default generation of the HTTPS specific code.

        :return: Code snippet with HTTPS setup.
        :rtype: str
        """
        return self.code_https

    def _generate_request(self):
        """Default generation of the request code.

        :return: Code snippet for the request to send (search or no-search
            variant depending on whether ``self.search`` is set).
        :rtype: str
        """
        code = ''
        if self.search:
            if self.code_search:
                code += self._generate_search(self.search)
        else:
            if self.code_nosearch:
                code += self._generate_nosearch()
        return code

    def _generate_search(self, search_string=''):
        """Default generation of the code having search functionality.

        :param str search_string: String to search for in the response to the request.

        :return: Code snippet with the HTTP response search feature.
        :rtype: str
        """
        return self.code_search.format(search_string=search_string.replace('"', '\\"'))

    def _generate_nosearch(self):
        """Default generation of the code having no search functionality.

        :return: Code snippet absent of HTTP response search feature.
        :rtype: str
        """
        return self.code_nosearch

    def create_url(self):
        """Create valid URL from the Host, scheme and path in ``details``.

        :raises ValueError: When URL is invalid.

        :return: Created URL.
        :rtype: str
        """
        url = get_url(self.details.get('Host', ''), self.details.get('pre_scheme', '')) + self.details.get('path', '')
        if not check_valid_url(url):
            raise ValueError("Invalid URL '%s'." % url)
        return url

    def encode_url(self, url):
        """Check if the URL of the HTTP request needs encoding.

        For body-less verbs (HEAD/OPTIONS/GET) any 'data' is percent-encoded
        and appended to the URL instead of being sent as a request body.

        :param str url: URL to encode if needed.

        :return: Encoded URL if encoding is needed.
        :rtype: str
        """
        http_verb_with_encoding = ['head', 'options', 'get']
        encoded_url = url
        if self.details.get('data') and (self.details.get('method', '').lower() in http_verb_with_encoding):
            encoded_url += quote(self.details['data'], '')
        return encoded_url

    @staticmethod
    def load_attributes(cls):
        """Loads attributes to Script class from a given script's template

        Imports the template file/module, assigns all the attributes defined in the template file to the given class.

        :param class cls: Script class to which template is to be loaded.

        :raises AttributeError: When __language__ attribute is not present.
        """
        # Templates live in <package>.templates.<language>; every module-level
        # name starting with 'code_' is copied onto the script class.
        templates_path = "{}.templates".format(__name__.split('.', 1)[0])
        if not hasattr(cls, '__language__'):
            raise AttributeError("__language__ not found in class: {}, attributes cannot be loaded".format(cls.__name__))
        template = import_module("{templates_path}.{class_template}".format(
            templates_path=templates_path,
            class_template=cls.__language__))
        attributes = (var for var in vars(template) if var.startswith('code_'))
        for attr in attributes:
            setattr(cls, attr, getattr(template, attr))
|
{
"content_hash": "53c46af8aebc1efe2671dc784a29cb4c",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 121,
"avg_line_length": 34.96666666666667,
"alnum_prop": 0.5974397385264878,
"repo_name": "dhruvagarwal/http-request-translator",
"id": "3fdd30667bfa000462e8d54550b3f1a3ce2c8248",
"size": "7343",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "hrt/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "70046"
}
],
"symlink_target": ""
}
|
from unimodel.model import ModelRegistry
import datetime
class Serializer(object):
    """Abstract base for object serializers.

    Concrete subclasses implement :meth:`serialize` and :meth:`deserialize`;
    this base only stores shared configuration.
    """

    def __init__(self, validate_before_write=True, model_registry=None):
        # Fall back to a fresh registry when the caller supplies none.
        self.validate_before_write = validate_before_write
        self.model_registry = model_registry or ModelRegistry()

    def serialize(self, obj):
        raise NotImplementedError()

    def deserialize(self, cls, stream):
        raise NotImplementedError()
class SchemaWriter(object):
    """Turn a SchemaAST into an external schema.

    A schemawriter receives a SchemaAST object and produces a schema
    (jsonschema, thrift, python code) via :meth:`get_schema`.
    """

    def __init__(self, ast, model_registry=None):
        self.ast = ast
        # Fall back to a fresh registry when the caller supplies none.
        self.model_registry = model_registry or ModelRegistry()

    def get_schema(self):
        raise NotImplementedError()
class SchemaReader(object):
    """Parse an external schema into a SchemaAST.

    A schemareader receives an external schema (thrift, jsonschema or python
    code) and outputs the corresponding SchemaAST datastructure via
    :meth:`get_ast`.
    """

    def get_ast(self):
        raise NotImplementedError()
|
{
"content_hash": "5ef927e500ec3cdcea12e772ed03188e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 66,
"avg_line_length": 26.51219512195122,
"alnum_prop": 0.6421343146274149,
"repo_name": "neumark/unimodel",
"id": "ec05f00acd87defee938399b7c7439d9ed5913d5",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unimodel/backends/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "165709"
},
{
"name": "Shell",
"bytes": "440"
}
],
"symlink_target": ""
}
|
# NOTE(review): bare string expressions naming the .png assets of this
# .sikuli bundle; presumably listed so the Sikuli IDE keeps/ships these
# images with the script — confirm against Sikuli image-handling docs.
"SikuliLogo.png"
"btnnightly.png"
"netblogo.png"
"nightly.png"
"quickstart.png"
"raimanlogo.png"
"sxpower.png"
"netbicons.png"
"firefox.png"
|
{
"content_hash": "3d0319a976bfaff9e3da6afd0a077a50",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 16,
"avg_line_length": 15.555555555555555,
"alnum_prop": 0.75,
"repo_name": "henriqueguchi/SikuliServer",
"id": "cfe0b7ef12802e199932ef9111c4592a58d6aa2a",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "new/ImagesAPI.sikuli/ImagesAPI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1672"
},
{
"name": "C",
"bytes": "3771"
},
{
"name": "CSS",
"bytes": "21689"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "HTML",
"bytes": "73016"
},
{
"name": "Java",
"bytes": "1185124"
},
{
"name": "JavaScript",
"bytes": "45235"
},
{
"name": "Python",
"bytes": "2372565"
},
{
"name": "Ruby",
"bytes": "8679"
},
{
"name": "Shell",
"bytes": "1990"
}
],
"symlink_target": ""
}
|
"""
Utilities for generating random numbers, random sequences, and
random selections.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import random
import sys
import networkx as nx
__author__ = '\n'.join(['Aric Hagberg (hagberg@lanl.gov)',
'Dan Schult(dschult@colgate.edu)',
'Ben Edwards(bedwards@cs.unm.edu)'])
import warnings as _warnings
def create_degree_sequence(n, sfunction=None, max_tries=50, **kwds):
    """ Attempt to create a valid degree sequence of length n using
    specified function sfunction(n,**kwds).

    .. deprecated::
       create_degree_sequence() is deprecated.

    Parameters
    ----------
    n : int
        Length of degree sequence = number of nodes
    sfunction: function
        Function which returns a list of n real or integer values.
        Called as "sfunction(n,**kwds)".
    max_tries: int
        Max number of attempts at creating valid degree sequence.

    Raises
    ------
    NetworkXError
        If no valid sequence is found within max_tries attempts.

    Notes
    -----
    Repeatedly create a degree sequence by calling sfunction(n,**kwds)
    until achieving a valid degree sequence. If unsuccessful after
    max_tries attempts, raise an exception.

    For examples of sfunctions that return sequences of random numbers,
    see networkx.Utils.

    Examples
    --------
    >>> from networkx.utils import uniform_sequence, create_degree_sequence
    >>> seq=create_degree_sequence(10,uniform_sequence)
    """
    # Bug fix: in the original, warnings.warn() preceded the string literal
    # above, so the "docstring" was a dead expression and __doc__ was None.
    # The warning now fires after the (real) docstring.
    _warnings.warn("create_degree_sequence() is deprecated",
                   DeprecationWarning)
    tries = 0
    max_deg = n
    while tries < max_tries:
        trialseq = sfunction(n, **kwds)
        # Round to integer values clamped to the range [0, max_deg].
        seq = [min(max_deg, max(int(round(s)), 0)) for s in trialseq]
        # If the sequence is graphical return it, else throw away and retry.
        if nx.is_valid_degree_sequence(seq):
            return seq
        tries += 1
    raise nx.NetworkXError(
        "Exceeded max (%d) attempts at a valid sequence." % max_tries)
# The same helpers for choosing random sequences from distributions
# uses Python's random module
# http://www.python.org/doc/current/lib/module-random.html
def pareto_sequence(n, exponent=1.0):
    """Return a sample sequence of length n drawn from a Pareto
    distribution with shape parameter ``exponent``.
    """
    draw = random.paretovariate
    return [draw(exponent) for _ in range(n)]
def powerlaw_sequence(n, exponent=2.0):
    """Return a sample sequence of length n drawn from a power-law
    distribution with the given ``exponent``.
    """
    # A power law with exponent g is sampled as a Pareto variate with
    # shape parameter g - 1.
    shape = exponent - 1
    return [random.paretovariate(shape) for _ in range(n)]
def zipf_rv(alpha, xmin=1, seed=None):
    r"""Return a random value chosen from the Zipf distribution.

    The return value is an integer drawn from the probability distribution
    ::math::

        p(x)=\frac{x^{-\alpha}}{\zeta(\alpha,x_{min})},

    where `\zeta(\alpha,x_{min})` is the Hurwitz zeta function.

    Parameters
    ----------
    alpha : float
        Exponent value of the distribution
    xmin : int
        Minimum value
    seed : int
        Seed value for random number generator

    Returns
    -------
    x : int
        Random value from Zipf distribution

    Raises
    ------
    ValueError:
        If xmin < 1 or
        If alpha <= 1

    Notes
    -----
    The rejection algorithm generates random values for a the power-law
    distribution in uniformly bounded expected time dependent on
    parameters. See [1] for details on its operation.

    Examples
    --------
    >>> nx.zipf_rv(alpha=2, xmin=3, seed=42) # doctest: +SKIP

    References
    ----------
    ..[1] Luc Devroye, Non-Uniform Random Variate Generation,
       Springer-Verlag, New York, 1986.
    """
    if xmin < 1:
        raise ValueError("xmin < 1")
    if alpha <= 1:
        raise ValueError("a <= 1.0")
    # Idiom fix: was "if not seed is None" (PEP 8 prescribes "is not None").
    if seed is not None:
        random.seed(seed)
    a1 = alpha - 1.0
    b = 2**a1
    # Rejection sampling (Devroye): draw from a continuous Pareto envelope,
    # then accept/reject to match the discrete Zipf law.
    while True:
        u = 1.0 - random.random()  # u in (0,1]
        v = random.random()        # v in [0,1)
        x = int(xmin*u**-(1.0/a1))
        t = (1.0+(1.0/x))**a1
        if v*x*(t-1.0)/(b-1.0) <= t/b:
            break
    return x


def zipf_sequence(n, alpha=2.0, xmin=1):
    """Return a sample sequence of length n from a Zipf distribution with
    exponent parameter alpha and minimum value xmin.

    See Also
    --------
    zipf_rv
    """
    return [zipf_rv(alpha, xmin) for _ in range(n)]
def uniform_sequence(n):
    """Return a sample sequence of length n drawn uniformly from [0, n]."""
    draw = random.uniform
    return [draw(0, n) for _ in range(n)]
def cumulative_distribution(distribution):
    """Return normalized cumulative distribution from discrete distribution."""
    total = float(sum(distribution))
    cdf = [0.0]
    for weight in distribution:
        # Each entry is the running probability mass up to this bin.
        cdf.append(cdf[-1] + weight / total)
    return cdf
def discrete_sequence(n, distribution=None, cdistribution=None):
    """
    Return sample sequence of length n from a given discrete distribution
    or discrete cumulative distribution.

    One of the following must be specified.

    distribution = histogram of values, will be normalized

    cdistribution = normalized discrete cumulative distribution

    """
    import bisect

    if cdistribution is not None:
        cdf = cdistribution
    elif distribution is not None:
        cdf = cumulative_distribution(distribution)
    else:
        raise nx.NetworkXError(
            "discrete_sequence: distribution or cdistribution missing")

    # Draw n uniform variates, then map each into a bin of the CDF via
    # binary search.
    uniform_draws = [random.random() for _ in range(n)]
    return [bisect.bisect_left(cdf, u) - 1 for u in uniform_draws]
def random_weighted_sample(mapping, k):
    """Return k items without replacement from a weighted sample.

    The input is a dictionary of items with weights as values.
    """
    if k > len(mapping):
        raise ValueError("sample larger than population")
    # Draw repeatedly, discarding duplicates, until k distinct items remain.
    chosen = set()
    while len(chosen) < k:
        chosen.add(weighted_choice(mapping))
    return list(chosen)


def weighted_choice(mapping):
    """Return a single element from a weighted sample.

    The input is a dictionary of items with weights as values.
    """
    # Roulette-wheel selection: spin a pointer across the total weight and
    # return the item whose weight segment it lands in.
    pointer = random.random() * sum(mapping.values())
    for item, weight in mapping.items():
        pointer -= weight
        if pointer < 0:
            return item
|
{
"content_hash": "a30443b3c1bea89db6fbad21286a79d8",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 79,
"avg_line_length": 28.56888888888889,
"alnum_prop": 0.6316116988176727,
"repo_name": "destijl/forensicartifacts",
"id": "229479105143b94f443c7ad689bedda1c1ec27e1",
"size": "6428",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "frontend/thirdparty/networkx-1.9/networkx/utils/random_sequence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "467"
},
{
"name": "Python",
"bytes": "11655"
}
],
"symlink_target": ""
}
|
import os
import grp
import pwd
import stat
import sys
from angel.locks import get_lock_filename
from angel.util.pidfile import *
def is_lock_available(config, lockname):
    ''' Return True when no live process currently owns the named lock. '''
    if None == who_has_lock(config, lockname):
        return True
    return False
def who_has_lock(config, lockname):
    ''' Return the pid owner for a given lock, or None if no pid owns it.

    Side effect: stale lockfiles (empty, or owned by a dead pid) are
    removed here so a subsequent get_lock() can succeed. '''
    lock_filename = get_lock_filename(config, lockname)
    if lock_filename is None:
        return None
    if not os.path.exists(lock_filename):
        return None
    pid_in_lockfile = get_pid_from_pidfile(lock_filename)
    if pid_in_lockfile is None:
        # Lockfile exists but holds no pid: clean it up as stale.
        print >>sys.stderr, 'No pid in lockfile; removing %s.' % lock_filename
        os.remove(lock_filename)
        return None
    if not is_pid_running(pid_in_lockfile):
        # Owner died without releasing: clean it up as stale.
        print >>sys.stderr, 'Removing stale lockfile %s' % lock_filename
        os.remove(lock_filename)
        return None
    return pid_in_lockfile
def get_lock(config, lockname, lock_timeout=15, print_errors=True, waiting_message=None):
    ''' Acquire the named lock for the current process.

    Polls every 0.5s until the lock frees or lock_timeout seconds elapse.
    waiting_message, if given, is printed to stderr once while waiting.
    Returns the result of writing our pid into the lockfile on success,
    -1 on failure or timeout. '''
    lock_filename = get_lock_filename(config, lockname)
    if lock_filename is None:
        return -1
    other_pid = None
    waiting_message_has_been_shown = False
    if not is_lock_available(config, lockname):
        other_pid = get_pid_from_pidfile(lock_filename)
        if print_errors and lock_timeout > 0: print >>sys.stderr, 'Waiting on process %s for lock %s (%s second timeout)' % (other_pid, lock_filename, lock_timeout)
    try:
        while (not is_lock_available(config, lockname)) and lock_timeout >= 0:
            time.sleep(0.5)
            lock_timeout -= 0.5
            if not waiting_message_has_been_shown and waiting_message is not None:
                waiting_message_has_been_shown = True
                print >>sys.stderr, waiting_message
    except:
        if print_errors: print >>sys.stderr, "Error: exception when getting lock %s from process %s" % (lock_filename, other_pid)
        return -1
    if not is_lock_available(config, lockname):
        if print_errors: print >>sys.stderr, 'Error: unable to get lock %s from process %s' % (lock_filename, other_pid)
        return -1
    # Lock is free: claim it by writing our own pid into the lockfile.
    return write_pidfile(lock_filename, os.getpid())
def release_lock(config, lockname):
    ''' Return 0 if the next call to get_lock will work; non-zero otherwise.

    Only removes the lockfile when it is owned by this process, or when the
    recorded owner is dead (with a warning either way). '''
    lock_filename = get_lock_filename(config, lockname)
    if lock_filename is None:
        return -1
    if os.path.exists(lock_filename):
        locker_pid = get_pid_from_pidfile(lock_filename)
        if locker_pid == os.getpid():
            #print >>sys.stderr, "Releasing lock %s (pid %s/%s)" % (lock_filename, locker_pid, os.getpid())
            os.remove(lock_filename)
            return 0
        else:
            if is_pid_running(locker_pid):
                # Someone else legitimately holds it; refuse to release.
                print >> sys.stderr, "Warning: went to release lock %s, but lockfile %s is owned by different process (%s). Did a forked child fail to exit somewhere?" % (lockname, lock_filename, locker_pid)
                return -1
            else:
                # Owner is dead: safe to clear the stale lock.
                print >> sys.stderr, "Warning: went to release lock %s, but lockfile %s is owned by different process (%s). That process is dead, so removing lock anyway." % (lockname, lock_filename, locker_pid)
                os.remove(lock_filename)
                return 0
    print >> sys.stderr, 'Warning: went to release lock %s, but lockfile %s already missing. THIS SHOULD NEVER HAPPEN; WAS THE LOCKDIR DELETED DURING OUR RUN?' % (lockname, lock_filename)
    return 0
def set_file_owner(absolute_path, owner_user=None, owner_group=None, recursive=False, ignore_ownership_errors=False):
    ''' Set the file at the given path to the give user/group. Return 0 on success; non-zero otherwise (unless ignore_ownership_errors is true). '''
    # ignore_ownership_errors: We might need to ignore errors in one case: lock files for some server processes get created as root and
    # if status is called (non-root run), then the permission set will fail.
    if owner_user is None and owner_group is None:
        return 0
    current_euid = os.getuid()
    if 0 != current_euid and owner_group is None:
        return 0 # If non-root, we could still have a required chgrp call on OS X...
    try:
        absolute_path_stat = os.stat(absolute_path)
        if owner_user is not None:
            target_uid = pwd.getpwnam(owner_user).pw_uid
            if absolute_path_stat.st_uid != target_uid:
                # chown(user) requires root unless we already are the target.
                if current_euid != 0 and target_uid != current_euid: # Non-root, and not the target owner; can't fix.
                    if ignore_ownership_errors: return 0
                    print >>sys.stderr, "Error: can't set owner for %s (not running as root). Re-run your command as root?" % (absolute_path)
                    return 1
                if 0 == current_euid:
                    try:
                        os.chown(absolute_path, target_uid, -1)
                    except Exception as e:
                        print >>sys.stderr, "Error: can't set owner for %s (%s)." % (absolute_path, e)
                        return 1
                else:
                    if not ignore_ownership_errors:
                        print >>sys.stderr, "Error: can't set owner for %s (user should be %s). Re-run your command as root?" % (absolute_path, owner_user)
                        return 1
        if owner_group is not None:
            target_gid = grp.getgrnam(owner_group).gr_gid
            if absolute_path_stat.st_gid != target_gid:
                # Attempt the chgrp regardless of euid, then re-stat to see
                # whether it actually took effect.
                try: # Skip uid==0 check -- a non-root user might have permission to chgrp something to a different group that they are a member of.
                    os.chown(absolute_path, -1, target_gid)
                except:
                    pass
                absolute_path_stat = os.stat(absolute_path)
                if absolute_path_stat.st_gid != target_gid:
                    if not ignore_ownership_errors:
                        print >>sys.stderr, "Error: can't set owner for %s (group should be %s). Re-run your command as root?" % (absolute_path, owner_group)
                        return 1
    except:
        # getpwnam/getgrnam raise KeyError for unknown names; os.stat can
        # also fail here. All are reported as one lookup error.
        if ignore_ownership_errors: return 0
        print >>sys.stderr, "Error: can't find user/group %s/%s as needed for setting permissions on %s." % (owner_user, owner_group, absolute_path)
        return 1
    if recursive and os.path.isdir(absolute_path):
        try:
            # Depth-first: apply the same ownership rules to every entry.
            for file in os.listdir(absolute_path):
                if 0 != set_file_owner('%s/%s' % (absolute_path,file), owner_user=owner_user, owner_group=owner_group, recursive=recursive, ignore_ownership_errors=ignore_ownership_errors):
                    return 1
        except:
            if not ignore_ownership_errors:
                print >>sys.stderr, "Error: can't read %s. Either run your command as root, change the config path, or change RUN_AS_USER/RUN_AS_GROUP to your current user." % absolute_path
                return 1
    return 0
def create_dirs_if_needed(absolute_path, name="service", owner_user=None, owner_group=None, mode=0755, recursive_fix_owner=False, ignore_ownership_errors=False):
    ''' Given a directory path, make sure that it exists and that the dir has the correct ownership.
        If recursive_fix_owner is True, check that all its contents are owned by the given user with the top level dir having the given read/write permissions.
        Returns 0 on success, non-zero otherwise. '''
    if absolute_path.startswith('~'):
        absolute_path = os.path.expanduser(absolute_path)
    def _mkdir(path, owner_user, owner_group, mode):
        # Recursively create missing parents (like `mkdir -p`), setting
        # ownership on each directory as it is created.
        if not os.path.exists(path):
            try:
                parent_dir = os.path.dirname(path)
                if not os.path.exists(parent_dir):
                    _mkdir(parent_dir, owner_user, owner_group, mode)
                os.mkdir(path, mode)
                set_file_owner(path, owner_user=owner_user, owner_group=owner_group)
            except:
                print >>sys.stderr, "Error: can't create %s directory '%s'; do you have write permission?" % (name, path)
                return 1
    _mkdir(absolute_path, owner_user, owner_group, mode)
    if 0 != set_file_owner(absolute_path, owner_user, owner_group, recursive=recursive_fix_owner, ignore_ownership_errors=ignore_ownership_errors):
        return 1
    try:
        # The dir might have existed before but with old permissions, so reset it:
        if stat.S_IMODE(os.stat(absolute_path).st_mode) != mode:
            os.chmod(absolute_path, mode)
    except Exception as e:
        print >>sys.stderr, "Error: can't update permission mode on %s to %s (%s)." % (absolute_path, oct(mode), e)
        return 1 # Even if ignore_ownership_errors is True, fail if top-level dir is wrong
    return 0
|
{
"content_hash": "dd2dfe432dc107c9ff350182a45ae11b",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 211,
"avg_line_length": 47.74731182795699,
"alnum_prop": 0.6152460308523815,
"repo_name": "chillinc/angel",
"id": "27ddbb523c45eda671d1d7453ee95a658bc7804e",
"size": "8881",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/devops/file_and_dir_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9703"
},
{
"name": "Makefile",
"bytes": "4795"
},
{
"name": "Python",
"bytes": "655620"
},
{
"name": "Ruby",
"bytes": "6756"
},
{
"name": "Shell",
"bytes": "15859"
}
],
"symlink_target": ""
}
|
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class ProjectsTestJSON(base.BaseIdentityV3AdminTest):
    """Admin tests for Keystone v3 project CRUD and user association.

    Each test creates its own project via ``projects_client`` and registers
    it on ``self.data.projects`` for cleanup by the base class.
    """

    @test.idempotent_id('0ecf465c-0dc4-4532-ab53-91ffeb74d12d')
    def test_project_create_with_description(self):
        """Create a project with a description; verify it in POST and GET."""
        # Create project with a description
        project_name = data_utils.rand_name('project')
        project_desc = data_utils.rand_name('desc')
        project = self.projects_client.create_project(
            project_name, description=project_desc)['project']
        self.data.projects.append(project)
        project_id = project['id']
        desc1 = project['description']
        self.assertEqual(desc1, project_desc, 'Description should have '
                         'been sent in response for create')
        body = self.projects_client.show_project(project_id)['project']
        desc2 = body['description']
        self.assertEqual(desc2, project_desc, 'Description does not appear'
                         'to be set')

    @test.idempotent_id('5f50fe07-8166-430b-a882-3b2ee0abe26f')
    def test_project_create_with_domain(self):
        """Create a project inside a test domain; verify its domain_id."""
        # Create project with a domain
        self.data.setup_test_domain()
        project_name = data_utils.rand_name('project')
        project = self.projects_client.create_project(
            project_name, domain_id=self.data.domain['id'])['project']
        self.data.projects.append(project)
        project_id = project['id']
        self.assertEqual(project_name, project['name'])
        self.assertEqual(self.data.domain['id'], project['domain_id'])
        body = self.projects_client.show_project(project_id)['project']
        self.assertEqual(project_name, body['name'])
        self.assertEqual(self.data.domain['id'], body['domain_id'])

    @test.idempotent_id('1f66dc76-50cc-4741-a200-af984509e480')
    def test_project_create_enabled(self):
        """Create an enabled project; verify the flag in POST and GET."""
        # Create a project that is enabled
        project_name = data_utils.rand_name('project')
        project = self.projects_client.create_project(
            project_name, enabled=True)['project']
        self.data.projects.append(project)
        project_id = project['id']
        en1 = project['enabled']
        self.assertTrue(en1, 'Enable should be True in response')
        body = self.projects_client.show_project(project_id)['project']
        en2 = body['enabled']
        self.assertTrue(en2, 'Enable should be True in lookup')

    @test.idempotent_id('78f96a9c-e0e0-4ee6-a3ba-fbf6dfd03207')
    def test_project_create_not_enabled(self):
        """Create a disabled project; verify the flag in POST and GET."""
        # Create a project that is not enabled
        project_name = data_utils.rand_name('project')
        project = self.projects_client.create_project(
            project_name, enabled=False)['project']
        self.data.projects.append(project)
        en1 = project['enabled']
        # Compare as lower-cased strings to tolerate bool vs 'false' payloads.
        self.assertEqual('false', str(en1).lower(),
                         'Enable should be False in response')
        body = self.projects_client.show_project(project['id'])['project']
        en2 = body['enabled']
        self.assertEqual('false', str(en2).lower(),
                         'Enable should be False in lookup')

    @test.idempotent_id('f608f368-048c-496b-ad63-d286c26dab6b')
    def test_project_update_name(self):
        """Rename a project; verify update response and subsequent GET."""
        # Update name attribute of a project
        p_name1 = data_utils.rand_name('project')
        project = self.projects_client.create_project(p_name1)['project']
        self.data.projects.append(project)
        resp1_name = project['name']
        p_name2 = data_utils.rand_name('project2')
        body = self.projects_client.update_project(project['id'],
                                                   name=p_name2)['project']
        resp2_name = body['name']
        self.assertNotEqual(resp1_name, resp2_name)
        body = self.projects_client.show_project(project['id'])['project']
        resp3_name = body['name']
        self.assertNotEqual(resp1_name, resp3_name)
        self.assertEqual(p_name1, resp1_name)
        self.assertEqual(resp2_name, resp3_name)

    @test.idempotent_id('f138b715-255e-4a7d-871d-351e1ef2e153')
    def test_project_update_desc(self):
        """Update a project's description; verify response and GET agree."""
        # Update description attribute of a project
        p_name = data_utils.rand_name('project')
        p_desc = data_utils.rand_name('desc')
        project = self.projects_client.create_project(
            p_name, description=p_desc)['project']
        self.data.projects.append(project)
        resp1_desc = project['description']
        p_desc2 = data_utils.rand_name('desc2')
        body = self.projects_client.update_project(
            project['id'], description=p_desc2)['project']
        resp2_desc = body['description']
        self.assertNotEqual(resp1_desc, resp2_desc)
        body = self.projects_client.show_project(project['id'])['project']
        resp3_desc = body['description']
        self.assertNotEqual(resp1_desc, resp3_desc)
        self.assertEqual(p_desc, resp1_desc)
        self.assertEqual(resp2_desc, resp3_desc)

    @test.idempotent_id('b6b25683-c97f-474d-a595-55d410b68100')
    def test_project_update_enable(self):
        """Flip a project from disabled to enabled; verify both reads."""
        # Update the enabled attribute of a project
        p_name = data_utils.rand_name('project')
        p_en = False
        project = self.projects_client.create_project(p_name,
                                                      enabled=p_en)['project']
        self.data.projects.append(project)
        resp1_en = project['enabled']
        p_en2 = True
        body = self.projects_client.update_project(project['id'],
                                                   enabled=p_en2)['project']
        resp2_en = body['enabled']
        self.assertNotEqual(resp1_en, resp2_en)
        body = self.projects_client.show_project(project['id'])['project']
        resp3_en = body['enabled']
        self.assertNotEqual(resp1_en, resp3_en)
        self.assertEqual('false', str(resp1_en).lower())
        self.assertEqual(resp2_en, resp3_en)

    @test.idempotent_id('59398d4a-5dc5-4f86-9a4c-c26cc804d6c6')
    def test_associate_user_to_project(self):
        """Create a user bound to a project; verify the user's fields."""
        # Associate a user to a project
        # Create a Project
        p_name = data_utils.rand_name('project')
        project = self.projects_client.create_project(p_name)['project']
        self.data.projects.append(project)
        # Create a User
        u_name = data_utils.rand_name('user')
        u_desc = u_name + 'description'
        u_email = u_name + '@testmail.tm'
        u_password = data_utils.rand_password()
        user = self.users_client.create_user(
            u_name, description=u_desc, password=u_password,
            email=u_email, project_id=project['id'])['user']
        # Delete the User at the end of this method
        self.addCleanup(self.users_client.delete_user, user['id'])
        # Get User To validate the user details
        new_user_get = self.users_client.show_user(user['id'])['user']
        # Assert response body of GET
        self.assertEqual(u_name, new_user_get['name'])
        self.assertEqual(u_desc, new_user_get['description'])
        self.assertEqual(project['id'],
                         new_user_get['project_id'])
        self.assertEqual(u_email, new_user_get['email'])
|
{
"content_hash": "f1c1d0bccda84dc68ccd19dd931a7fa6",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 78,
"avg_line_length": 44.19512195121951,
"alnum_prop": 0.6226545253863135,
"repo_name": "nuagenetworks/tempest",
"id": "607bebe3e654a9a7bda05fb75cd7d608f9bbdd67",
"size": "7878",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tempest/api/identity/admin/v3/test_projects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""Test the Soundtouch component."""
from unittest.mock import call, patch
from libsoundtouch.device import (
Config,
Preset,
SoundTouchDevice as STD,
Status,
Volume,
ZoneSlave,
ZoneStatus,
)
import pytest
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
)
from homeassistant.components.soundtouch import media_player as soundtouch
from homeassistant.components.soundtouch.const import DOMAIN
from homeassistant.components.soundtouch.media_player import (
ATTR_SOUNDTOUCH_GROUP,
ATTR_SOUNDTOUCH_ZONE,
DATA_SOUNDTOUCH,
)
from homeassistant.const import STATE_OFF, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.setup import async_setup_component
# pylint: disable=super-init-not-called

# Hosts and device ids used by the mocked SoundTouch speakers below.
DEVICE_1_IP = "192.168.0.1"
DEVICE_2_IP = "192.168.0.2"
DEVICE_1_ID = 1
DEVICE_2_ID = 2
def get_config(host=DEVICE_1_IP, port=8090, name="soundtouch"):
    """Build a media_player platform config dict for the soundtouch platform."""
    return dict(platform=DOMAIN, host=host, port=port, name=name)
# Static platform configs for the two test devices (device 2 overrides host).
DEVICE_1_CONFIG = {**get_config(), "name": "soundtouch_1"}
DEVICE_2_CONFIG = {**get_config(), "host": DEVICE_2_IP, "name": "soundtouch_2"}
@pytest.fixture(name="one_device")
def one_device_fixture():
    """Mock one master device."""
    device_1 = MockDevice()
    # Patch the factory the platform calls during setup so it receives the
    # mock instead of probing the network for a real speaker.
    device_patch = patch(
        "homeassistant.components.soundtouch.media_player.soundtouch_device",
        return_value=device_1,
    )
    with device_patch as device:
        yield device
@pytest.fixture(name="two_zones")
def two_zones_fixture():
    """Mock one master and one slave."""
    # Device 1 reports itself as zone master; device 2 as its slave.  Both
    # zone statuses list device 2's IP as the single slave.
    device_1 = MockDevice(
        DEVICE_1_ID,
        MockZoneStatus(
            is_master=True,
            master_id=DEVICE_1_ID,
            master_ip=DEVICE_1_IP,
            slaves=[MockZoneSlave(DEVICE_2_IP)],
        ),
    )
    device_2 = MockDevice(
        DEVICE_2_ID,
        MockZoneStatus(
            is_master=False,
            master_id=DEVICE_1_ID,
            master_ip=DEVICE_1_IP,
            slaves=[MockZoneSlave(DEVICE_2_IP)],
        ),
    )
    devices = {DEVICE_1_IP: device_1, DEVICE_2_IP: device_2}
    # Dispatch the factory by host so each configured entity gets its own mock.
    device_patch = patch(
        "homeassistant.components.soundtouch.media_player.soundtouch_device",
        side_effect=lambda host, _: devices[host],
    )
    with device_patch as device:
        yield device
@pytest.fixture(name="mocked_status")
def status_fixture():
    """Mock the device status (defaults to a playing track)."""
    status_patch = patch(
        "libsoundtouch.device.SoundTouchDevice.status", side_effect=MockStatusPlaying
    )
    with status_patch as status:
        yield status
@pytest.fixture(name="mocked_volume")
def volume_fixture():
    """Mock the device volume."""
    volume_patch = patch("libsoundtouch.device.SoundTouchDevice.volume")
    with volume_patch as volume:
        yield volume
async def setup_soundtouch(hass, config):
    """Set up the soundtouch media_player platform from *config*."""
    assert await async_setup_component(hass, "media_player", {"media_player": config})
    await hass.async_block_till_done()
    # Fire the start event so platform setup fully completes.
    await hass.async_start()
class MockDevice(STD):
    """Mock device."""

    def __init__(self, id=None, zone_status=None):
        """Init the class.

        Intentionally does not call STD.__init__ (see the pylint disable at
        module top) so no network request is made.
        """
        self._config = MockConfig(id)
        self._zone_status = zone_status or MockZoneStatus()

    def zone_status(self, refresh=True):
        """Return the canned zone status object; *refresh* is ignored."""
        return self._zone_status
class MockConfig(Config):
    """Mock config."""

    def __init__(self, id=None):
        """Init class with a fixed name and the given (or default) device id."""
        self._name = "name"
        self._id = id or DEVICE_1_ID
class MockZoneStatus(ZoneStatus):
    """Mock zone status."""

    def __init__(self, is_master=True, master_id=None, master_ip=None, slaves=None):
        """Init the class with the given zone topology values."""
        self._is_master = is_master
        self._master_id = master_id
        self._master_ip = master_ip
        self._slaves = slaves or []
class MockZoneSlave(ZoneSlave):
    """Mock zone slave."""

    def __init__(self, device_ip=None, role=None):
        """Init the class with the slave's IP and role."""
        self._ip = device_ip
        self._role = role
def _mocked_presets(*args, **kwargs):
    """Return a list containing a single mocked preset; arguments are ignored."""
    preset = MockPreset("1")
    return [preset]
class MockPreset(Preset):
    """Mock preset."""

    def __init__(self, id_):
        """Init the class with the preset id and a fixed name."""
        self._id = id_
        self._name = "preset"
class MockVolume(Volume):
    """Mock volume with value 12, unmuted."""

    def __init__(self):
        """Init class."""
        self._actual = 12
        self._muted = False
class MockVolumeMuted(Volume):
    """Mock volume with value 12, muted."""

    def __init__(self):
        """Init the class."""
        self._actual = 12
        self._muted = True
class MockStatusStandby(Status):
    """Mock status standby (device powered off)."""

    def __init__(self):
        """Init the class."""
        self._source = "STANDBY"
class MockStatusPlaying(Status):
    """Mock status: playing a track with full metadata."""

    def __init__(self):
        """Init the class."""
        self._source = ""
        self._play_status = "PLAY_STATE"
        self._image = "image.url"
        self._artist = "artist"
        self._track = "track"
        self._album = "album"
        self._duration = 1
        self._station_name = None
class MockStatusPlayingRadio(Status):
    """Mock status: playing a radio station (station name only)."""

    def __init__(self):
        """Init the class."""
        self._source = ""
        self._play_status = "PLAY_STATE"
        self._image = "image.url"
        self._artist = None
        self._track = None
        self._album = None
        self._duration = None
        self._station_name = "station"
class MockStatusUnknown(Status):
    """Mock status: playing with no track/station metadata at all."""

    def __init__(self):
        """Init the class."""
        self._source = ""
        self._play_status = "PLAY_STATE"
        self._image = "image.url"
        self._artist = None
        self._track = None
        self._album = None
        self._duration = None
        self._station_name = None
class MockStatusPause(Status):
    """Mock status: paused."""

    def __init__(self):
        """Init the class."""
        self._source = ""
        self._play_status = "PAUSE_STATE"
        self._image = "image.url"
        self._artist = None
        self._track = None
        self._album = None
        self._duration = None
        self._station_name = None
class MockStatusPlayingAux(Status):
    """Mock status: playing from the AUX source."""

    def __init__(self):
        """Init the class."""
        self._source = "AUX"
        self._play_status = "PLAY_STATE"
        self._image = "image.url"
        self._artist = None
        self._track = None
        self._album = None
        self._duration = None
        self._station_name = None
class MockStatusPlayingBluetooth(Status):
    """Mock status: playing from Bluetooth with track metadata."""

    def __init__(self):
        """Init the class."""
        self._source = "BLUETOOTH"
        self._play_status = "PLAY_STATE"
        self._image = "image.url"
        self._artist = "artist"
        self._track = "track"
        self._album = "album"
        self._duration = None
        self._station_name = None
async def test_ensure_setup_config(mocked_status, mocked_volume, hass, one_device):
    """Test setup OK with custom host/port/name config."""
    await setup_soundtouch(
        hass, get_config(host="192.168.1.44", port=8888, name="custom_sound")
    )

    # The device factory must be called once with the configured host/port.
    assert one_device.call_count == 1
    assert one_device.call_args == call("192.168.1.44", 8888)
    assert len(hass.states.async_all()) == 1
    state = hass.states.get("media_player.custom_sound")
    assert state.name == "custom_sound"
async def test_ensure_setup_discovery(mocked_status, mocked_volume, hass, one_device):
    """Test setup of a device found via discovery."""
    new_device = {
        "port": "8090",
        "host": "192.168.1.1",
        "properties": {},
        "hostname": "hostname.local",
    }
    await async_load_platform(
        hass, "media_player", DOMAIN, new_device, {"media_player": {}}
    )
    await hass.async_block_till_done()

    # The discovered host/port must be used to create exactly one entity.
    assert one_device.call_count == 1
    assert one_device.call_args == call("192.168.1.1", 8090)
    assert len(hass.states.async_all()) == 1
async def test_ensure_setup_discovery_no_duplicate(
    mocked_status, mocked_volume, hass, one_device
):
    """Test that discovery does not re-add a device that already exists."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)
    assert one_device.call_count == 1
    assert len(hass.states.async_all()) == 1

    # A discovery for an unknown host adds a second entity.
    new_device = {
        "port": "8090",
        "host": "192.168.1.1",
        "properties": {},
        "hostname": "hostname.local",
    }
    await async_load_platform(
        hass, "media_player", DOMAIN, new_device, {"media_player": DEVICE_1_CONFIG}
    )
    await hass.async_block_till_done()
    assert one_device.call_count == 2
    assert len(hass.states.async_all()) == 2

    # A discovery for the already-configured host must be ignored.
    existing_device = {
        "port": "8090",
        "host": "192.168.0.1",
        "properties": {},
        "hostname": "hostname.local",
    }
    await async_load_platform(
        hass, "media_player", DOMAIN, existing_device, {"media_player": DEVICE_1_CONFIG}
    )
    await hass.async_block_till_done()
    assert one_device.call_count == 2
    assert len(hass.states.async_all()) == 2
async def test_playing_media(mocked_status, mocked_volume, hass, one_device):
    """Test media attributes while a track with full metadata is playing."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.state == STATE_PLAYING
    # Title is synthesized from artist and track.
    assert entity_1_state.attributes["media_title"] == "artist - track"
    assert entity_1_state.attributes["media_track"] == "track"
    assert entity_1_state.attributes["media_artist"] == "artist"
    assert entity_1_state.attributes["media_album_name"] == "album"
    assert entity_1_state.attributes["media_duration"] == 1
async def test_playing_unknown_media(mocked_status, mocked_volume, hass, one_device):
    """Test state while playing media with no metadata at all."""
    mocked_status.side_effect = MockStatusUnknown
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.state == STATE_PLAYING
async def test_playing_radio(mocked_status, mocked_volume, hass, one_device):
    """Test media attributes while a radio station is playing."""
    mocked_status.side_effect = MockStatusPlayingRadio
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.state == STATE_PLAYING
    # For radio the station name is used as the title.
    assert entity_1_state.attributes["media_title"] == "station"
async def test_playing_aux(mocked_status, mocked_volume, hass, one_device):
    """Test source attribute while playing from AUX."""
    mocked_status.side_effect = MockStatusPlayingAux
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.state == STATE_PLAYING
    assert entity_1_state.attributes["source"] == "AUX"
async def test_playing_bluetooth(mocked_status, mocked_volume, hass, one_device):
    """Test source and media attributes while playing from Bluetooth."""
    mocked_status.side_effect = MockStatusPlayingBluetooth
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.state == STATE_PLAYING
    assert entity_1_state.attributes["source"] == "BLUETOOTH"
    assert entity_1_state.attributes["media_track"] == "track"
    assert entity_1_state.attributes["media_artist"] == "artist"
    assert entity_1_state.attributes["media_album_name"] == "album"
async def test_get_volume_level(mocked_status, mocked_volume, hass, one_device):
    """Test volume level attribute (device 0-100 scale mapped to 0.0-1.0)."""
    mocked_volume.side_effect = MockVolume
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.attributes["volume_level"] == 0.12
async def test_get_state_off(mocked_status, mocked_volume, hass, one_device):
    """Test entity state when the device reports STANDBY."""
    mocked_status.side_effect = MockStatusStandby
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.state == STATE_OFF
async def test_get_state_pause(mocked_status, mocked_volume, hass, one_device):
    """Test entity state when the device reports PAUSE_STATE."""
    mocked_status.side_effect = MockStatusPause
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.state == STATE_PAUSED
async def test_is_muted(mocked_status, mocked_volume, hass, one_device):
    """Test the muted attribute when device volume is muted."""
    mocked_volume.side_effect = MockVolumeMuted
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.attributes["is_volume_muted"]
async def test_media_commands(mocked_status, mocked_volume, hass, one_device):
    """Test the supported_features bitmask exposed by the entity."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    entity_1_state = hass.states.get("media_player.soundtouch_1")
    # 151485 is the expected combination of media_player feature flags.
    assert entity_1_state.attributes["supported_features"] == 151485
@patch("libsoundtouch.device.SoundTouchDevice.power_off")
async def test_should_turn_off(
    mocked_power_off, mocked_status, mocked_volume, hass, one_device
):
    """Test that the turn_off service powers the device off."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "turn_off",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    # The service triggers one extra status refresh plus the power_off call.
    assert mocked_status.call_count == 3
    assert mocked_power_off.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.power_on")
async def test_should_turn_on(
    mocked_power_on, mocked_status, mocked_volume, hass, one_device
):
    """Test that the turn_on service powers on a device in standby."""
    mocked_status.side_effect = MockStatusStandby
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "turn_on",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_status.call_count == 3
    assert mocked_power_on.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.volume_up")
async def test_volume_up(
    mocked_volume_up, mocked_status, mocked_volume, hass, one_device
):
    """Test that the volume_up service calls the device's volume_up."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "volume_up",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    # Volume services trigger a volume refresh rather than a status refresh.
    assert mocked_volume.call_count == 3
    assert mocked_volume_up.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.volume_down")
async def test_volume_down(
    mocked_volume_down, mocked_status, mocked_volume, hass, one_device
):
    """Test that the volume_down service calls the device's volume_down."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "volume_down",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_volume.call_count == 3
    assert mocked_volume_down.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.set_volume")
async def test_set_volume_level(
    mocked_set_volume, mocked_status, mocked_volume, hass, one_device
):
    """Test that volume_set converts the 0.0-1.0 level to the 0-100 scale."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "volume_set",
        {"entity_id": "media_player.soundtouch_1", "volume_level": 0.17},
        True,
    )
    assert mocked_volume.call_count == 3
    # 0.17 must be converted to the device's integer percentage.
    mocked_set_volume.assert_called_with(17)
@patch("libsoundtouch.device.SoundTouchDevice.mute")
async def test_mute(mocked_mute, mocked_status, mocked_volume, hass, one_device):
    """Test that the volume_mute service calls the device's mute."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "volume_mute",
        {"entity_id": "media_player.soundtouch_1", "is_volume_muted": True},
        True,
    )
    assert mocked_volume.call_count == 3
    assert mocked_mute.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.play")
async def test_play(mocked_play, mocked_status, mocked_volume, hass, one_device):
    """Test that the media_play service calls the device's play."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "media_play",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_status.call_count == 3
    assert mocked_play.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.pause")
async def test_pause(mocked_pause, mocked_status, mocked_volume, hass, one_device):
    """Test that the media_pause service calls the device's pause."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "media_pause",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_status.call_count == 3
    assert mocked_pause.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.play_pause")
async def test_play_pause(
    mocked_play_pause, mocked_status, mocked_volume, hass, one_device
):
    """Test that the media_play_pause service calls the device's play_pause."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "media_play_pause",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_status.call_count == 3
    assert mocked_play_pause.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.previous_track")
@patch("libsoundtouch.device.SoundTouchDevice.next_track")
async def test_next_previous_track(
    mocked_next_track,
    mocked_previous_track,
    mocked_status,
    mocked_volume,
    hass,
    one_device,
):
    """Test next/previous track services.

    Note the decorator order: the bottom-most @patch (next_track) supplies
    the first mock argument.
    """
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "media_next_track",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_status.call_count == 3
    assert mocked_next_track.call_count == 1

    await hass.services.async_call(
        "media_player",
        "media_previous_track",
        {"entity_id": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_status.call_count == 4
    assert mocked_previous_track.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.select_preset")
@patch("libsoundtouch.device.SoundTouchDevice.presets", side_effect=_mocked_presets)
async def test_play_media(
    mocked_presets, mocked_select_preset, mocked_status, mocked_volume, hass, one_device
):
    """Test playing a preset: preset 1 exists, preset 2 does not."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    # Preset 1 is in the mocked list -> selected.
    await hass.services.async_call(
        "media_player",
        "play_media",
        {
            "entity_id": "media_player.soundtouch_1",
            ATTR_MEDIA_CONTENT_TYPE: "PLAYLIST",
            ATTR_MEDIA_CONTENT_ID: 1,
        },
        True,
    )
    assert mocked_presets.call_count == 1
    assert mocked_select_preset.call_count == 1

    # Preset 2 is not in the mocked list -> presets queried but nothing selected.
    await hass.services.async_call(
        "media_player",
        "play_media",
        {
            "entity_id": "media_player.soundtouch_1",
            ATTR_MEDIA_CONTENT_TYPE: "PLAYLIST",
            ATTR_MEDIA_CONTENT_ID: 2,
        },
        True,
    )
    assert mocked_presets.call_count == 2
    assert mocked_select_preset.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.play_url")
async def test_play_media_url(
    mocked_play_url, mocked_status, mocked_volume, hass, one_device
):
    """Test playing a media URL via the play_media service."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert one_device.call_count == 1
    assert mocked_status.call_count == 2
    assert mocked_volume.call_count == 2

    await hass.services.async_call(
        "media_player",
        "play_media",
        {
            "entity_id": "media_player.soundtouch_1",
            ATTR_MEDIA_CONTENT_TYPE: "MUSIC",
            ATTR_MEDIA_CONTENT_ID: "http://fqdn/file.mp3",
        },
        True,
    )
    mocked_play_url.assert_called_with("http://fqdn/file.mp3")
@patch("libsoundtouch.device.SoundTouchDevice.select_source_aux")
async def test_select_source_aux(
    mocked_select_source_aux, mocked_status, mocked_volume, hass, one_device
):
    """Test selecting the AUX source."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert mocked_select_source_aux.call_count == 0
    await hass.services.async_call(
        "media_player",
        "select_source",
        {"entity_id": "media_player.soundtouch_1", ATTR_INPUT_SOURCE: "AUX"},
        True,
    )
    assert mocked_select_source_aux.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.select_source_bluetooth")
async def test_select_source_bluetooth(
    mocked_select_source_bluetooth, mocked_status, mocked_volume, hass, one_device
):
    """Test selecting the Bluetooth source."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert mocked_select_source_bluetooth.call_count == 0
    await hass.services.async_call(
        "media_player",
        "select_source",
        {"entity_id": "media_player.soundtouch_1", ATTR_INPUT_SOURCE: "BLUETOOTH"},
        True,
    )
    assert mocked_select_source_bluetooth.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.select_source_bluetooth")
@patch("libsoundtouch.device.SoundTouchDevice.select_source_aux")
async def test_select_source_invalid_source(
    mocked_select_source_aux,
    mocked_select_source_bluetooth,
    mocked_status,
    mocked_volume,
    hass,
    one_device,
):
    """Test that selecting an unsupported source calls neither device method."""
    await setup_soundtouch(hass, DEVICE_1_CONFIG)

    assert mocked_select_source_aux.call_count == 0
    assert mocked_select_source_bluetooth.call_count == 0
    await hass.services.async_call(
        "media_player",
        "select_source",
        {
            "entity_id": "media_player.soundtouch_1",
            ATTR_INPUT_SOURCE: "SOMETHING_UNSUPPORTED",
        },
        True,
    )
    assert mocked_select_source_aux.call_count == 0
    assert mocked_select_source_bluetooth.call_count == 0
@patch("libsoundtouch.device.SoundTouchDevice.create_zone")
async def test_play_everywhere(
    mocked_create_zone, mocked_status, mocked_volume, hass, two_zones
):
    """Test the play_everywhere custom service."""
    mocked_device = two_zones
    await setup_soundtouch(hass, [DEVICE_1_CONFIG, DEVICE_2_CONFIG])

    assert mocked_device.call_count == 2
    assert mocked_status.call_count == 4
    assert mocked_volume.call_count == 4

    # one master, one slave => create zone
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_PLAY_EVERYWHERE,
        {"master": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_create_zone.call_count == 1

    # unknown master, create zone must not be called
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_PLAY_EVERYWHERE,
        {"master": "media_player.entity_X"},
        True,
    )
    assert mocked_create_zone.call_count == 1

    # no slaves, create zone must not be called
    # (remove every entity except the master; iterate over a copy since the
    # list is mutated in the loop)
    for entity in list(hass.data[DATA_SOUNDTOUCH]):
        if entity.entity_id == "media_player.soundtouch_1":
            continue
        hass.data[DATA_SOUNDTOUCH].remove(entity)
        await entity.async_remove()
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_PLAY_EVERYWHERE,
        {"master": "media_player.soundtouch_1"},
        True,
    )
    assert mocked_create_zone.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.create_zone")
async def test_create_zone(
    mocked_create_zone, mocked_status, mocked_volume, hass, two_zones
):
    """Test the create_zone custom service."""
    mocked_device = two_zones
    await setup_soundtouch(hass, [DEVICE_1_CONFIG, DEVICE_2_CONFIG])

    assert mocked_device.call_count == 2
    assert mocked_status.call_count == 4
    assert mocked_volume.call_count == 4

    # one master, one slave => create zone
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_CREATE_ZONE,
        {
            "master": "media_player.soundtouch_1",
            "slaves": ["media_player.soundtouch_2"],
        },
        True,
    )
    assert mocked_create_zone.call_count == 1

    # unknown master, create zone must not be called
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_CREATE_ZONE,
        {"master": "media_player.entity_X", "slaves": ["media_player.soundtouch_2"]},
        True,
    )
    assert mocked_create_zone.call_count == 1

    # no slaves, create zone must not be called
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_CREATE_ZONE,
        {"master": "media_player.soundtouch_1", "slaves": []},
        True,
    )
    assert mocked_create_zone.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.remove_zone_slave")
async def test_remove_zone_slave(
    mocked_remove_zone_slave, mocked_status, mocked_volume, hass, two_zones
):
    """Test removing a slave from an existing zone."""
    mocked_device = two_zones
    await setup_soundtouch(hass, [DEVICE_1_CONFIG, DEVICE_2_CONFIG])

    assert mocked_device.call_count == 2
    assert mocked_status.call_count == 4
    assert mocked_volume.call_count == 4

    # remove one slave
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_REMOVE_ZONE_SLAVE,
        {
            "master": "media_player.soundtouch_1",
            "slaves": ["media_player.soundtouch_2"],
        },
        True,
    )
    assert mocked_remove_zone_slave.call_count == 1

    # unknown master: remove zone slave is not called
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_REMOVE_ZONE_SLAVE,
        {"master": "media_player.entity_X", "slaves": ["media_player.soundtouch_2"]},
        True,
    )
    assert mocked_remove_zone_slave.call_count == 1

    # no slave to remove: remove zone slave is not called
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_REMOVE_ZONE_SLAVE,
        {"master": "media_player.soundtouch_1", "slaves": []},
        True,
    )
    assert mocked_remove_zone_slave.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.add_zone_slave")
async def test_add_zone_slave(
    mocked_add_zone_slave,
    mocked_status,
    mocked_volume,
    hass,
    two_zones,
):
    """Test adding a slave to an existing zone."""
    mocked_device = two_zones
    await setup_soundtouch(hass, [DEVICE_1_CONFIG, DEVICE_2_CONFIG])

    assert mocked_device.call_count == 2
    assert mocked_status.call_count == 4
    assert mocked_volume.call_count == 4

    # add one slave
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_ADD_ZONE_SLAVE,
        {
            "master": "media_player.soundtouch_1",
            "slaves": ["media_player.soundtouch_2"],
        },
        True,
    )
    assert mocked_add_zone_slave.call_count == 1

    # unknown master, add zone slave is not called
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_ADD_ZONE_SLAVE,
        {"master": "media_player.entity_X", "slaves": ["media_player.soundtouch_2"]},
        True,
    )
    assert mocked_add_zone_slave.call_count == 1

    # unknown slave, add zone slave is not called
    await hass.services.async_call(
        soundtouch.DOMAIN,
        soundtouch.SERVICE_ADD_ZONE_SLAVE,
        {"master": "media_player.soundtouch_1", "slaves": ["media_player.entity_X"]},
        True,
    )
    assert mocked_add_zone_slave.call_count == 1
@patch("libsoundtouch.device.SoundTouchDevice.create_zone")
async def test_zone_attributes(
    mocked_create_zone,
    mocked_status,
    mocked_volume,
    hass,
    two_zones,
):
    """Test zone/group attributes exposed by master and slave entities."""
    mocked_device = two_zones
    await setup_soundtouch(hass, [DEVICE_1_CONFIG, DEVICE_2_CONFIG])

    assert mocked_device.call_count == 2
    assert mocked_status.call_count == 4
    assert mocked_volume.call_count == 4

    # Master entity reports itself as master with device 2 as its slave.
    entity_1_state = hass.states.get("media_player.soundtouch_1")
    assert entity_1_state.attributes[ATTR_SOUNDTOUCH_ZONE]["is_master"]
    assert (
        entity_1_state.attributes[ATTR_SOUNDTOUCH_ZONE]["master"]
        == "media_player.soundtouch_1"
    )
    assert entity_1_state.attributes[ATTR_SOUNDTOUCH_ZONE]["slaves"] == [
        "media_player.soundtouch_2"
    ]
    assert entity_1_state.attributes[ATTR_SOUNDTOUCH_GROUP] == [
        "media_player.soundtouch_1",
        "media_player.soundtouch_2",
    ]

    # Slave entity reports the same topology but is_master is False.
    entity_2_state = hass.states.get("media_player.soundtouch_2")
    assert not entity_2_state.attributes[ATTR_SOUNDTOUCH_ZONE]["is_master"]
    assert (
        entity_2_state.attributes[ATTR_SOUNDTOUCH_ZONE]["master"]
        == "media_player.soundtouch_1"
    )
    assert entity_2_state.attributes[ATTR_SOUNDTOUCH_ZONE]["slaves"] == [
        "media_player.soundtouch_2"
    ]
    assert entity_2_state.attributes[ATTR_SOUNDTOUCH_GROUP] == [
        "media_player.soundtouch_1",
        "media_player.soundtouch_2",
    ]
|
{
"content_hash": "f74d11cfc7c70e7154cb69078f68d566",
"timestamp": "",
"source": "github",
"line_count": 1062,
"max_line_length": 88,
"avg_line_length": 30.130885122410547,
"alnum_prop": 0.6421763180099378,
"repo_name": "toddeye/home-assistant",
"id": "797b5b440d166e5d687131690969dbc380862d89",
"size": "31999",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/soundtouch/test_media_player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
DMLC submission script, local machine version
"""
import argparse
import sys
import os
import subprocess
from threading import Thread
import tracker
import signal
import logging
# Bash wrapper that relaunches the command while it exits with code 254
# (the DMLC "restart me" convention), exporting the retry count as
# DMLC_NUM_ATTEMPT each time.
keepalive = """
nrep=0
rc=254
while [ $rc -eq 254 ];
do
export DMLC_NUM_ATTEMPT=$nrep
%s
rc=$?;
nrep=$((nrep+1));
done
"""
class LocalLauncher(object):
    """Launch DMLC worker and server processes on the local machine."""

    def __init__(self, args, unknown):
        """Store parsed arguments and build the command line to execute.

        args    -- argparse namespace (num_workers, num_servers, command, ...)
        unknown -- extra arguments passed through to the launched program
        """
        self.args = args
        self.cmd = ' '.join(args.command) + ' ' + ' '.join(unknown)

    def exec_cmd(self, cmd, role, pass_env):
        """Run *cmd* as the given DMLC role, retrying while it exits with 254.

        Exit code 254 means "restart me"; the attempt number is exported via
        DMLC_NUM_ATTEMPT.  On POSIX the retry loop is delegated to the
        `keepalive` bash wrapper; on Windows it is done here in Python.
        """
        env = os.environ.copy()
        for k, v in pass_env.items():
            env[k] = str(v)
        env['DMLC_ROLE'] = role
        ntrial = 0
        while True:
            if os.name == 'nt':
                env['DMLC_NUM_ATTEMPT'] = str(ntrial)
                ret = subprocess.call(cmd, shell=True, env=env)
                if ret == 254:
                    ntrial += 1
                    continue
            else:
                bash = keepalive % (cmd)
                ret = subprocess.call(bash, shell=True, executable='bash',
                                      env=env)
            if ret == 0:
                # BUG FIX: message had an unfilled '%d' placeholder.
                logging.debug('Thread exited with return code 0')
                return
            # Nonzero, non-254 exit code: abort this launcher thread.
            if os.name == 'nt':
                # BUG FIX: os.exit() does not exist (AttributeError);
                # use sys.exit as in the other DMLC trackers.
                sys.exit(-1)
            else:
                raise Exception('Get nonzero return code=%d' % ret)

    def submit(self):
        """Return a closure matching the fun_submit signature of tracker.submit."""
        def mthread_submit(nworker, nserver, envs):
            """Start nworker worker threads and nserver server threads."""
            procs = {}
            for i in range(nworker + nserver):
                role = 'worker' if i < nworker else 'server'
                procs[i] = Thread(target=self.exec_cmd,
                                  args=(self.cmd, role, envs))
                # Daemon threads so a dying tracker does not hang exit.
                # (attribute form replaces the deprecated setDaemon())
                procs[i].daemon = True
                procs[i].start()
        return mthread_submit

    def run(self):
        """Configure logging and hand control to the DMLC tracker."""
        tracker.config_logger(self.args)
        tracker.submit(self.args.num_workers,
                       self.args.num_servers,
                       fun_submit=self.submit(),
                       pscmd=self.cmd)
def main():
    """Parse command line arguments and launch the local DMLC job."""
    parser = argparse.ArgumentParser(
        description='DMLC script to submit dmlc jobs as local process')
    parser.add_argument('-n', '--num-workers', required=True, type=int,
                        help='number of worker nodes to be launched')
    # BUG FIX: default to 0 servers. Previously num_servers was None when
    # -s was omitted, which breaks the worker+server arithmetic downstream.
    parser.add_argument('-s', '--num-servers', default=0, type=int,
                        help='number of server nodes to be launched')
    parser.add_argument('--log-level', default='INFO', type=str,
                        choices=['INFO', 'DEBUG'],
                        help='logging level')
    parser.add_argument('--log-file', type=str,
                        help='output log to the specific log file')
    parser.add_argument('command', nargs='+',
                        help='command for launching the program')
    args, unknown = parser.parse_known_args()
    launcher = LocalLauncher(args, unknown)
    launcher.run()


if __name__ == '__main__':
    main()
|
{
"content_hash": "b7207a9491bdaabbc3eebcc10132c101",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 88,
"avg_line_length": 30.595959595959595,
"alnum_prop": 0.5123803235391218,
"repo_name": "xcgoner/ps-lite-new",
"id": "c9cef4dbd813fe051ffc407e239e0d7b58c883ac",
"size": "3051",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tracker/dmlc_local.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1505"
},
{
"name": "C++",
"bytes": "119966"
},
{
"name": "CMake",
"bytes": "10052"
},
{
"name": "Makefile",
"bytes": "10180"
},
{
"name": "Protocol Buffer",
"bytes": "1442"
},
{
"name": "Python",
"bytes": "34198"
},
{
"name": "Shell",
"bytes": "1756"
}
],
"symlink_target": ""
}
|
import os, re, sys, subprocess, plistlib
import eclim
from util import caret_position
def call_eclim(project, file, line, offset, applied_correction=None):
    """Ask eclim for correction proposals at the given source position.

    When applied_correction is None the raw proposal listing is returned;
    otherwise the correction with that index is applied and eclim returns
    the resulting source code.
    """
    # Sync the buffer to eclipse before asking for corrections.
    eclim.update_java_src(project, file)
    correct_cmd = "$ECLIM -command java_correct \
-p %s \
-f %s \
-l %i \
-o %i \
-e utf-8 " % (project, file, line, offset)
    if applied_correction is not None:  # idiom fix: was `!= None`
        correct_cmd += " -a %i" % (applied_correction)
    out = eclim.call_eclim(correct_cmd)
    return out
def show_corrections_window(corrections):
    """Show a TextMate dialog listing corrections; return the chosen index.

    Returns None when the user dismissed the dialog without choosing.
    """
    entries = [{"message": message, "number": index + 1}
               for index, message in enumerate(corrections)]
    options = {"corrections": entries}
    nib_path = os.path.join(os.path.dirname(sys.argv[0]), "corrections.nib")
    cmd = eclim.DIALOG + ' -cm "' + nib_path + '"'
    dialog = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    # The dialog tool speaks plist on stdin/stdout (Python 2 plistlib API).
    stdout_data, _ = dialog.communicate(plistlib.writePlistToString(options))
    reply = plistlib.readPlistFromString(stdout_data)
    if "result" in reply:
        # Dialog numbers are 1-based; callers expect a 0-based index.
        return int(reply["result"]["returnArgument"]) - 1
    return None
def to_list(corrections):
    """Parse eclim's correction listing into a list of message strings.

    Lines look like ``<index>.<offset>: <message>``; anything else is
    ignored.
    """
    line_pattern = re.compile("^(\d+)\.\d+:(.*)")
    messages = []
    for line in corrections.splitlines():
        match = line_pattern.match(line)
        if match:
            messages.append(match.group(2).strip())
    return messages
def correction_command():
    """Offer eclim corrections for the caret position; return the new code.

    Returns the corrected source when the user picked a correction and
    eclim produced output, otherwise the unchanged source.
    """
    project, source_path = eclim.get_context()
    # we cannot read the code from TM via stdin, as it will not have
    # the correct line endings when editing windows files (it will just have \n)
    # so we read from disk
    with open(os.environ["TM_FILEPATH"]) as source_file:
        code = source_file.read()
    offset = caret_position(code)
    line = int(os.environ['TM_LINE_NUMBER'])
    available = to_list(call_eclim(project, source_path, line, offset))
    chosen = show_corrections_window(available) if available else None
    if chosen is not None:
        new_code = call_eclim(project, source_path, line, offset, chosen)
        if new_code:
            return new_code
    return code
if __name__ == '__main__':
    out = correction_command()
    # print() with a single argument behaves identically under Python 2
    # (prints the parenthesized value) and Python 3, unlike `print out`.
    print(out)
|
{
"content_hash": "d8752fdd77f5cebcd6cdb26c162b84d7",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 32.971830985915496,
"alnum_prop": 0.6048697137975224,
"repo_name": "JulianEberius/Eclim.tmbundle",
"id": "9393cce0d9a13556cddceae89980ae4e934cf510",
"size": "2363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Support/bin/correction.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18666"
}
],
"symlink_target": ""
}
|
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
import os
import shutil
import sys
from tempfile import NamedTemporaryFile
import time
import unittest
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.java_gateway import SPARK_HOME
from pyspark.serializers import read_int
class PySparkTestCase(unittest.TestCase):
    """Base TestCase providing a fresh local SparkContext per test."""
    def setUp(self):
        # Snapshot sys.path so tests that add entries can be rolled back.
        self._old_sys_path = list(sys.path)
        self.sc = SparkContext('local[4]', type(self).__name__, batchSize=2)
    def tearDown(self):
        self.sc.stop()
        sys.path = self._old_sys_path
        # To avoid Akka rebinding to the same port, since it doesn't unbind
        # immediately on shutdown
        self.sc._jvm.System.clearProperty("spark.driver.port")
class TestCheckpoint(PySparkTestCase):
    """Tests for RDD checkpointing against a throwaway checkpoint directory."""
    def setUp(self):
        PySparkTestCase.setUp(self)
        # NamedTemporaryFile only reserves a unique path; the file itself is
        # unlinked so Spark can create a directory of the same name.
        self.checkpointDir = NamedTemporaryFile(delete=False)
        os.unlink(self.checkpointDir.name)
        self.sc.setCheckpointDir(self.checkpointDir.name)
    def tearDown(self):
        PySparkTestCase.tearDown(self)
        shutil.rmtree(self.checkpointDir.name)
    def test_basic_checkpointing(self):
        """checkpoint() marks the RDD and collect() still yields the same data."""
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertIsNone(flatMappedRDD.getCheckpointFile())
        flatMappedRDD.checkpoint()
        result = flatMappedRDD.collect()
        time.sleep(1)  # 1 second
        self.assertTrue(flatMappedRDD.isCheckpointed())
        self.assertEqual(flatMappedRDD.collect(), result)
        self.assertEqual(self.checkpointDir.name,
                         os.path.dirname(flatMappedRDD.getCheckpointFile()))
    def test_checkpoint_and_restore(self):
        """A checkpointed RDD can be reloaded from its checkpoint file."""
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: [x])
        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertIsNone(flatMappedRDD.getCheckpointFile())
        flatMappedRDD.checkpoint()
        flatMappedRDD.count()  # forces a checkpoint to be computed
        time.sleep(1)  # 1 second
        self.assertIsNotNone(flatMappedRDD.getCheckpointFile())
        recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile())
        self.assertEquals([1, 2, 3, 4], recovered.collect())
class TestAddFile(PySparkTestCase):
    """Tests for shipping files/modules to workers via addFile/addPyFile."""
    def test_add_py_file(self):
        """addPyFile makes a module importable inside worker tasks."""
        # To ensure that we're actually testing addPyFile's effects, check that
        # this job fails due to `userlibrary` not being on the Python path:
        def func(x):
            from userlibrary import UserClass
            return UserClass().hello()
        self.assertRaises(Exception,
                          self.sc.parallelize(range(2)).map(func).first)
        # Add the file, so the job should now succeed:
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        res = self.sc.parallelize(range(2)).map(func).first()
        self.assertEqual("Hello World!", res)
    def test_add_file_locally(self):
        """addFile downloads a copy accessible via SparkFiles.get()."""
        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
        self.sc.addFile(path)
        download_path = SparkFiles.get("hello.txt")
        # The downloaded copy lives at a different path than the source.
        self.assertNotEqual(path, download_path)
        with open(download_path) as test_file:
            self.assertEquals("Hello World!\n", test_file.readline())
    def test_add_py_file_locally(self):
        """addFile also makes a .py file importable in the driver process."""
        # To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlibrary import UserClass
        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addFile(path)
        from userlibrary import UserClass
        self.assertEqual("Hello World!", UserClass().hello())
class TestIO(PySparkTestCase):
    """Check that worker subprocess output does not break a job."""
    def test_stdout_redirection(self):
        import subprocess
        def run_shell_ls(_):
            subprocess.check_call('ls', shell=True)
        self.sc.parallelize([1]).foreach(run_shell_ls)
class TestDaemon(unittest.TestCase):
    """Tests that the worker daemon terminates on stdin close and SIGTERM."""
    def connect(self, port):
        """Connect to the daemon on localhost and ask one worker to exit."""
        from socket import socket, AF_INET, SOCK_STREAM
        sock = socket(AF_INET, SOCK_STREAM)
        sock.connect(('127.0.0.1', port))
        # send a split index of -1 to shutdown the worker
        sock.send("\xFF\xFF\xFF\xFF")
        sock.close()
        return True
    def do_termination_test(self, terminator):
        """Start a daemon, terminate it via `terminator`, verify it is gone."""
        from subprocess import Popen, PIPE
        from errno import ECONNREFUSED
        # start daemon
        daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
        daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
        # read the port number
        port = read_int(daemon.stdout)
        # daemon should accept connections
        self.assertTrue(self.connect(port))
        # request shutdown
        terminator(daemon)
        time.sleep(1)
        # daemon should no longer accept connections
        with self.assertRaises(EnvironmentError) as trap:
            self.connect(port)
        self.assertEqual(trap.exception.errno, ECONNREFUSED)
    def test_termination_stdin(self):
        """Ensure that daemon and workers terminate when stdin is closed."""
        self.do_termination_test(lambda daemon: daemon.stdin.close())
    def test_termination_sigterm(self):
        """Ensure that daemon and workers terminate on SIGTERM."""
        from signal import SIGTERM
        self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
if __name__ == "__main__":
    # Allow running this test module directly: `python tests.py`.
    unittest.main()
|
{
"content_hash": "8ca692a1d9cf5257c408244118be7768",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 79,
"avg_line_length": 35.676829268292686,
"alnum_prop": 0.6602290206802256,
"repo_name": "vax11780/spark",
"id": "dfd841b10a0dfa31b0c32c08adf690a9e7405538",
"size": "6636",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/pyspark/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "143194"
},
{
"name": "JavaScript",
"bytes": "16945"
},
{
"name": "Python",
"bytes": "154728"
},
{
"name": "Ruby",
"bytes": "2033"
},
{
"name": "Scala",
"bytes": "2278591"
},
{
"name": "Shell",
"bytes": "54259"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional ``user`` foreign key to ``part.PartAttachment``."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('part', '0035_auto_20200406_0045'),
    ]
    operations = [
        migrations.AddField(
            model_name='partattachment',
            name='user',
            # SET_NULL keeps the attachment when the owning user is deleted.
            field=models.ForeignKey(blank=True, help_text='User', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{
"content_hash": "46e23c5f9dde1579a02c1a7bb5d20745",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 152,
"avg_line_length": 30.157894736842106,
"alnum_prop": 0.6596858638743456,
"repo_name": "inventree/InvenTree",
"id": "d59bc7ffb21dac8a16b2a439eb0ae7344fb42c10",
"size": "622",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "InvenTree/part/migrations/0036_partattachment_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246444"
},
{
"name": "Dockerfile",
"bytes": "7169"
},
{
"name": "HTML",
"bytes": "586821"
},
{
"name": "JavaScript",
"bytes": "1970070"
},
{
"name": "Procfile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2606104"
},
{
"name": "Shell",
"bytes": "27115"
}
],
"symlink_target": ""
}
|
"""A very simple MNIST classifier.
See extensive documentation at
http://tensorflow.org/tutorials/mnist/beginners/index.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import data
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data')
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
sess = tf.InteractiveSession()
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Train
tf.initialize_all_variables().run()
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
train_step.run({x: batch_xs, y_: batch_ys})
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
|
{
"content_hash": "55f8cb00e28bca3ace611137234392af",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 85,
"avg_line_length": 31.25581395348837,
"alnum_prop": 0.7247023809523809,
"repo_name": "dhalleine/tensorflow",
"id": "6621d7bb3972f62d3ece240e42824f5f37568a6b",
"size": "2034",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tensorflow/examples/tutorials/mnist/mnist_softmax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "152404"
},
{
"name": "C++",
"bytes": "7305808"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "677843"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16098"
},
{
"name": "Jupyter Notebook",
"bytes": "777976"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "101759"
},
{
"name": "Python",
"bytes": "4184469"
},
{
"name": "Shell",
"bytes": "77957"
},
{
"name": "TypeScript",
"bytes": "328956"
}
],
"symlink_target": ""
}
|
import argparse
from datetime import datetime
from dateutil import tz
import logging
import logging.handlers
import requests
import sys
from xml.etree import ElementTree
logger = logging.getLogger(__name__)
# Python 2.6 does not support total_seconds()
def timedelta_total_seconds(timedelta):
    """Return the given timedelta expressed in seconds (total_seconds backport)."""
    micros = timedelta.microseconds
    micros += (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6
    return micros / 10 ** 6
class WeatherObject:
    """A single weather reading for one zip code.

    Instances are value-like containers; two instances compare equal when
    they share a zip code (temperature and timestamps are ignored).
    """
    def __init__(self, zip_code, temperature, city=None, state=None, source="MockData",
                 last_updated=None):
        """Create a reading.

        zip_code -- five-digit US zip code string
        temperature -- temperature in Fahrenheit (string or number)
        city, state -- optional human-readable location
        source -- name of the data provider
        last_updated -- UTC epoch seconds; defaults to the current time.
            Bug fix: the default used to be evaluated once at class
            definition time, so every instance created without an explicit
            timestamp shared the module-import time.  A None sentinel now
            computes a fresh timestamp per instantiation.
        """
        self.zip_code = zip_code
        self.temperature = temperature
        self.city = city
        self.state = state
        self.source = source
        if last_updated is None:
            last_updated = timedelta_total_seconds(
                datetime.utcnow() - datetime.utcfromtimestamp(0))
        self.last_updated = last_updated  # UTC epoch seconds
    # Two readings are considered equal if they have the same zip code.
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.zip_code == other.zip_code
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__; define it explicitly so
        # the two operators stay consistent.
        return not self.__eq__(other)
    def __str__(self):
        return "WeatherObject(Zip Code: %s, City: %s, State: %s, Temperature: %s, Last Updated: %s])" % (
            self.zip_code, self.city, self.state, self.temperature, self.last_updated)
    def print_weather(self):
        """Print the human-readable summary from return_weather()."""
        print(self.return_weather())
    def return_weather(self):
        """Return a one-line human-readable summary of this reading."""
        if self.last_updated:
            return "The temperature is %sF (%sC) in %s [last updated: %s by %s)" % (
                self.temperature, self.temperature_in_celsius(), self.city, self.time_as_local(), self.source)
        else:
            return "The temperature is %sF (%sC) in %s [%s]" % (
                self.temperature, self.temperature_in_celsius(), self.city, self.source)
    def temperature_in_celsius(self):
        """Return the Fahrenheit temperature as Celsius, rounded to 2 places."""
        return round((float(self.temperature) - 32) * 5/9, 2)
    def is_valid(self):
        """Return True when every field needed for display is populated."""
        if not self.temperature:
            return False
        if not self.zip_code:
            return False
        if not self.city:
            return False
        if not self.last_updated:
            return False
        # All conditions met
        return True
    def time_as_local(self):
        """Render last_updated (UTC epoch seconds) in the local timezone."""
        utc = datetime.utcfromtimestamp(self.last_updated)
        # Assert the current time is in UTC
        utc = utc.replace(tzinfo=tz.tzutc())
        # Convert to local time
        return utc.astimezone(tz.tzlocal()).strftime('%b %d %Y %I:%M:%S%p')
def create_weather_from_openweathermap_for_zip_code(zip_code, api_key_openweathermap):
    """Fetch OpenWeatherMap data for zip_code; return a WeatherObject or None."""
    source = "OpenWeatherMap"
    xml_data = get_openweathermap_xml_for_zip_code(zip_code, api_key_openweathermap)
    if xml_data is None or not is_valid_openweathermap_data(xml_data):
        logger.error("Invalid weather data for zip code %s" % zip_code)
        return None
    temperature = xml_data.find('temperature').get('value')
    city = xml_data.find('city').get('name')
    raw_timestamp = xml_data.find('lastupdate').get('value')
    # Convert from 2015-08-27T05:21:06 to seconds since epoch
    parsed_time = datetime.strptime(raw_timestamp, '%Y-%m-%dT%H:%M:%S')
    epoch_seconds = timedelta_total_seconds(parsed_time - datetime.utcfromtimestamp(0))
    return WeatherObject(zip_code=zip_code, temperature=temperature, city=city,
                         source=source, last_updated=float(epoch_seconds))
def create_weather_from_weatherunderground_for_zip_code(zip_code, api_key_weatherunderground):
    """Fetch WeatherUnderground data for zip_code; return a WeatherObject or None."""
    source = "WeatherUnderground"
    payload = get_weather_underground_data_for_zip_code(zip_code, api_key_weatherunderground)
    if payload is None or not is_valid_wu_weather(payload):
        logger.error("Invalid weather data for zip code %s" % zip_code)
        return None
    observation = payload['current_observation']
    return WeatherObject(zip_code=zip_code, temperature=observation['temp_f'],
                         city=observation['display_location']['city'],
                         state=observation['display_location']['state'],
                         source=source,
                         last_updated=float(observation['observation_epoch']))
def get_openweathermap_xml_for_zip_code(zip_code, api_key_openweathermap):
    """Return the parsed OpenWeatherMap XML root for zip_code, or None.

    None is returned on a non-200 HTTP status or unparseable XML.
    """
    logger.info("Fetching OpenWeatherMap data for zip code %s..." % zip_code)
    http_response = requests.get(
        'http://api.openweathermap.org/data/2.5/weather?zip=%s,us&mode=xml&units=imperial&appid=%s' % (
            zip_code, api_key_openweathermap))
    if http_response.status_code != 200:
        return None
    try:
        return ElementTree.fromstring(http_response.content)
    except ElementTree.ParseError:
        return None
def get_weather_underground_data_for_zip_code(zip_code, api_key_weatherunderground):
    """Return parsed WeatherUnderground JSON for zip_code, or None on HTTP error."""
    logger.info("Fetching WeatherUnderground data for zip code %s..." % zip_code)
    http_response = requests.get('http://api.wunderground.com/api/%s/conditions/q/%s.json' % (api_key_weatherunderground, zip_code))
    if http_response.status_code != 200:
        return None
    return http_response.json()
# Checks to see if the json response contains certain required elements to print
def is_valid_wu_weather(json):
    """Return True when the WU response has every field needed for display.

    Note: matching the historical behavior, a missing 'display_location'
    key raises KeyError rather than returning False.
    """
    if json is None:
        return False
    if 'current_observation' not in json:
        return False
    observation = json['current_observation']
    if 'temp_f' not in observation:
        return False
    location = observation['display_location']
    for location_key in ('full', 'city', 'state'):
        if location_key not in location:
            return False
    if 'observation_epoch' not in observation:
        return False
    if 'zip' not in location:
        return False
    # All conditions met
    return True
def is_valid_openweathermap_data(xml):
    """Return True when the OWM XML has non-empty temperature, city and timestamp.

    Note: matching the historical behavior, a missing element raises
    AttributeError (find() returns None) rather than returning False.
    """
    temperature = xml.find('temperature').get('value')
    city = xml.find('city').get('name')
    last_update = xml.find('lastupdate').get('value')
    # Every field must be present and non-empty.
    return bool(temperature and city and last_update)
def is_valid_zip_code(zip_code):
    """Return True for a five-character, all-digit zip code string."""
    return len(str(zip_code)) == 5 and zip_code.isdigit()
def print_warmest_weather_from_list(weather_objects):
    """Print the location with the highest temperature, or a fallback message."""
    warmest = return_warmest_weather_object_from_list(weather_objects)
    if warmest is None:
        print("No locations found")
    else:
        print("\n%s, %s (%s) has the warmest weather of %sF" % (
            warmest.city, warmest.state, warmest.zip_code, warmest.temperature))
def print_weather_list(weather_objects):
    """Ask each weather object in the list to print itself."""
    for weather in weather_objects:
        weather.print_weather()
def return_warmest_weather_object_from_list(weather_objects):
    """Return the object with the highest temperature, or None for an empty list.

    Ties keep the earliest object (strict > comparison).
    """
    warmest = None
    for candidate in weather_objects:
        if warmest is None or candidate.temperature > warmest.temperature:
            warmest = candidate
    return warmest
def return_most_recent_weather_object_from_list(weather_objects):
    """Return the object with the newest last_updated timestamp, or None."""
    most_recent = None
    for candidate in weather_objects:
        if most_recent:
            logger.debug("Comparing %s (%s) against %s (%s)" % (
                candidate.last_updated, candidate.source,
                most_recent.last_updated, most_recent.source))
        if most_recent is None or candidate.last_updated > most_recent.last_updated:
            most_recent = candidate
    # Mostly just informational
    if most_recent:
        logger.info("%s has the most recent updated temperature for %s" % (
            most_recent.source, most_recent.zip_code))
        return most_recent
    return None
def return_zip_code_string_from_file(filepath):
    """Read filepath and return its contents with newlines stripped.

    Returns None when the file is missing/unreadable or its content is empty.
    """
    contents = None
    try:
        with open(filepath, "r") as zip_file:
            contents = zip_file.read().replace('\n', '')
    except IOError:
        logger.error("Unable to find file")
    return contents or None
def parse_args_and_return():
    """Parse command-line options, configure logging, and return the args dict.

    Exits with status 1 when both --zip-code and --file are supplied.
    """
    parser = argparse.ArgumentParser(description='Fetch weather for one or many zip codes')
    parser.add_argument('-z', '--zip-code', help='comma separated list of zip codes', required=False)
    parser.add_argument('-f', '--file', help='file containing comma separated list of zip codes', required=False)
    parser.add_argument('-v', '--verbose', help='enable verbose logging', required=False, action='store_true')
    parsed = vars(parser.parse_args())
    if parsed['verbose']:
        logging.basicConfig(level=getattr(logging, 'DEBUG', None),
                            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    # Deal with both zip_code and file args being provided. Exit
    if parsed['zip_code'] and parsed['file']:
        print("Ambiguous input. Cannot use both --zip-code and --file options")
        sys.exit(1)
    return parsed
def return_zip_code_list_from_input(args):
    """Resolve the zip code list from --zip-code, --file, or an interactive prompt.

    Exits with status 1 when no zip codes could be obtained.
    """
    if args['zip_code'] and len(args['zip_code']) >= 5:
        raw_zip_codes = args['zip_code']
    elif args['file']:
        raw_zip_codes = return_zip_code_string_from_file(args['file'])
    else:
        raw_zip_codes = raw_input('Enter a comma separated list of zip codes (ex. 94110, 80123): ')
    # Exit if no values provided by user
    if not raw_zip_codes:
        print("No values provided. Exiting")
        sys.exit(1)
    # Split the comma separated string into a list (whitespace is kept).
    return raw_zip_codes.split(',')
def add_most_recent_weather_in_list_to_master_list(new_list, master_list):
    """Append the freshest valid weather object from new_list onto master_list."""
    freshest = return_most_recent_weather_object_from_list(new_list)
    if freshest is not None and freshest.is_valid():
        # Add the result
        master_list.append(freshest)
    else:
        logger.debug("No recent weather objects found")
def return_temperatures_list(zip_code_list, api_key_wu, api_key_owm, max_zip_codes_per_query):
    """Return the freshest WeatherObject per valid zip code.

    Queries WeatherUnderground and/or OpenWeatherMap for each of the first
    max_zip_codes_per_query entries (a provider is skipped when its API key
    is missing or empty) and keeps the most recently updated reading.
    """
    temperatures = []
    if len(zip_code_list) > max_zip_codes_per_query:
        logger.info("Too many zip codes requested. Fetching only the first %d elements" % max_zip_codes_per_query)
    for raw_zip in zip_code_list[:max_zip_codes_per_query]:
        # Strip off spaces if the user provided them during input
        zip_code = raw_zip.strip()
        if not is_valid_zip_code(zip_code):
            logger.info("'%s' is not a valid zip code" % zip_code)
            continue
        sources = []
        # Get data from WeatherUnderground
        if api_key_wu is not None and len(api_key_wu) > 0:
            wu_weather = create_weather_from_weatherunderground_for_zip_code(zip_code, api_key_wu)
            if wu_weather:
                sources.append(wu_weather)
        # Get data from OpenWeatherMap
        if api_key_owm is not None and len(api_key_owm) > 0:
            owm_weather = create_weather_from_openweathermap_for_zip_code(zip_code, api_key_owm)
            if owm_weather:
                sources.append(owm_weather)
        add_most_recent_weather_in_list_to_master_list(sources, temperatures)
    return temperatures
def main():
    """Entry point: gather zip codes, fetch weather, and report results."""
    # Deal with command line arguments
    cli_args = parse_args_and_return()
    # Create a list of zip codes from input
    zip_codes = return_zip_code_list_from_input(cli_args)
    # Fetch at most five zip codes' worth of data (no API keys configured).
    temperatures = return_temperatures_list(zip_codes, None, None, 5)
    print_weather_list(temperatures)
    # Print the warmest place I should go to
    print_warmest_weather_from_list(temperatures)
if __name__ == "__main__":
    main()
|
{
"content_hash": "af7ddb5b96981adf44decddca784ea08",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 120,
"avg_line_length": 35.32102272727273,
"alnum_prop": 0.6379795704978686,
"repo_name": "markperdue/havocbot",
"id": "0e8a975b58817ee1870e1d7951099ab4d905a966",
"size": "12433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/havocbot/plugins/weather.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "273183"
}
],
"symlink_target": ""
}
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nose.tools import assert_equal
from streamalert.classifier.payload.sns import SnsPayload
class TestSnsPayload:
    """SnsPayload tests"""
    # pylint: disable=no-self-use,protected-access
    def test_pre_parse(self):
        """SnsPayload - Pre Parse"""
        expected_result = ['foobarbaz']
        sns_record = {
            'Sns': {
                'MessageId': 'db42ca0e-215c-5f63-9e92-9e2e953c4e6c',
                'Message': expected_result[0]
            },
            'EventSubscriptionArn': (
                'arn:aws:sns:us-east-1:123456789012:foobar:44dbbe73-3aca-4bb1-863b-b82f058c0b19'
            )
        }
        payload = SnsPayload(None, sns_record)
        # pre_parse() yields records whose _record_data is the SNS Message.
        parsed = [record._record_data for record in payload.pre_parse()]
        assert_equal(parsed, expected_result)
|
{
"content_hash": "83946f13681123c7281be92da0396f72",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 96,
"avg_line_length": 34.34146341463415,
"alnum_prop": 0.6725852272727273,
"repo_name": "airbnb/streamalert",
"id": "0ab99202352c667c5fd1225efb1ad14100df9997",
"size": "1408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/streamalert/classifier/payload/test_payload_sns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "142275"
},
{
"name": "Python",
"bytes": "2209853"
},
{
"name": "Shell",
"bytes": "2975"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from tastypie.api import Api
from validation.api.resources import NoteResource, UserResource, AnnotatedNoteResource
# Build the v1 API and register every resource as canonical.
api = Api(api_name='v1')
for resource in (NoteResource(), UserResource(), AnnotatedNoteResource()):
    api.register(resource, canonical=True)
urlpatterns = patterns('',
    url(r'^api/', include(api.urls)),
)
|
{
"content_hash": "1474d1c3042889f185c0aa6f442a52e5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.7741116751269036,
"repo_name": "rbraley/django-tastypie",
"id": "8a54422a786af8f61d200505fcd1638bc518a17e",
"size": "394",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "tests/validation/api/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "528375"
},
{
"name": "Shell",
"bytes": "842"
}
],
"symlink_target": ""
}
|
"""
This is the Ultra-faint Galaxy Likelihood (UGaLi) software package.
"""
__author__ = "Keith Bechtol & Alex Drlica-Wagner"
__email__ = "bechtol@kicp.uchicago.edu, kadrlica@fnal.gov"
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# ADW: Is this a good idea?
#import ugali.analysis.isochrone as isochrone
#from ugali import isochrone
#import ugali.analysis.kernel as kernel
#import ugali.analysis.source as source
# Hack for backward compatibitility with: ugali.analysis.isochrone
#sys.modules['ugali.analysis.isochrone'] = __import__('ugali.isochrone')
|
{
"content_hash": "823b007b561724b219d360e12bfb34a1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 72,
"avg_line_length": 31.789473684210527,
"alnum_prop": 0.75,
"repo_name": "DarkEnergySurvey/ugali",
"id": "070fe3a9a79fd978c01f714a54f7d131bd1a9087",
"size": "604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ugali/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "355304"
},
{
"name": "Python",
"bytes": "949638"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class GeoFilter(Model):
"""Rules defining user's geo access within a CDN endpoint.
:param relative_path: Relative path applicable to geo filter. (e.g.
'/mypictures', '/mypicture/kitty.jpg', and etc.)
:type relative_path: str
:param action: Action of the geo filter, i.e. allow or block access.
Possible values include: 'Block', 'Allow'
:type action: str or ~azure.mgmt.cdn.models.GeoFilterActions
:param country_codes: Two letter country codes defining user country
access in a geo filter, e.g. AU, MX, US.
:type country_codes: list[str]
"""
_validation = {
'relative_path': {'required': True},
'action': {'required': True},
'country_codes': {'required': True},
}
_attribute_map = {
'relative_path': {'key': 'relativePath', 'type': 'str'},
'action': {'key': 'action', 'type': 'GeoFilterActions'},
'country_codes': {'key': 'countryCodes', 'type': '[str]'},
}
def __init__(self, relative_path, action, country_codes):
self.relative_path = relative_path
self.action = action
self.country_codes = country_codes
|
{
"content_hash": "01d8cb427242e55553d4988fbe47d2cb",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 36.09090909090909,
"alnum_prop": 0.6238455079764903,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "17c73205995f27112cdf73ffc1a77d012c1a4f92",
"size": "1665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-cdn/azure/mgmt/cdn/models/geo_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from provglish import transform, prov
from provglish.lexicalisation import urn_from_uri as lex
from provglish.lexicalisation import plural_p
from provglish.prov import PROV
from provglish.nl.tools import SETTINGS, realise_sentence
import rdflib
from rdflib.plugins import sparql
from rdflib import RDF
import urllib2
_assoc_query = sparql.prepareQuery(
"""SELECT DISTINCT ?activity ?agent ?assoc WHERE {
GRAPH <prov_graph> {
{
?activity a prov:Activity .
?activity prov:wasAssociatedWith ?agent .
?agent a prov:Agent
} UNION {
?activity a prov:Activity .
?activity prov:qualifiedAssociation ?assoc .
?assoc a prov:Association .
?assoc prov:agent ?agent .
?agent a prov:Agent
}
}
}""",
initNs={"prov":PROV}
)
def _assoc_binding(graph):
    """Return the SPARQL result bindings for activity/agent associations."""
    return graph.query(_assoc_query).bindings
def _assoc_coverage(bindings, graph):
    """Return the triples this template covers for one set of bindings."""
    activity = bindings["?activity"]
    agent = bindings["?agent"]
    if "?assoc" in bindings:
        # Qualified association: also cover the intermediate Association node.
        assoc = bindings["?assoc"]
        return [(activity, RDF.type, PROV.Activity),
                (activity, PROV.qualifiedAssociation, assoc),
                (assoc, RDF.type, PROV.Association),
                (assoc, PROV.agent, agent),
                (agent, RDF.type, PROV.Agent)]
    return [(activity, RDF.type, PROV.Activity),
            (activity, PROV.wasAssociatedWith, agent),
            (agent, RDF.type, PROV.Agent)]
def _assoc_string(bindings, history):
    """Realise '<activity> was associated with <agent>' for one binding set."""
    sentence = {
        "object": {"type": "noun_phrase", "head": lex(bindings["?activity"])},
        "verb": "associate",
        "modifiers": [{"type": "preposition_phrase",
                       "preposition": "with",
                       "noun": lex(bindings["?agent"])}],
        "features": {"tense": "past", "passive": "true"},
    }
    return realise_sentence({"sentence": sentence})
# Register the template: name, binding extractor, coverage fn, and realiser.
association = transform.Template("Association", _assoc_binding, _assoc_coverage, _assoc_string)
|
{
"content_hash": "26649a5a68b047aae7e8aa2e21f00163",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 95,
"avg_line_length": 34.98461538461538,
"alnum_prop": 0.5765171503957783,
"repo_name": "mnestis/provglish",
"id": "bc42a73f9f0d275a2433226bf7b2405013ef9310",
"size": "2274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "provglish/nl/templates/association_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100690"
},
{
"name": "Web Ontology Language",
"bytes": "170917"
}
],
"symlink_target": ""
}
|
"""
Загрузка конфига.
"""
from openre.agent.decorators import action
from openre.agent.domain.decorators import state
from openre.domain import create_domain_factory
from openre.domain.packets import TransmitterVector, TransmitterMetadata, \
ReceiverVector, ReceiverMetadata, SpikesVector, SpikesMetadata
from openre.domain.remote import RemoteDomainBase
from openre.agent.helpers import RPCBrokerProxy
import types
import time
from openre.vector import StandaloneVector
def remote_domain_factory(agent):
    """Build and return a RemoteDomain proxy class bound to *agent*.

    The returned class forwards domain operations to a remote OpenRE domain
    through the agent's broker connection, batching synapse, receiver-index
    and spike data into vectors so they can be shipped in large packets
    instead of one RPC call per record.
    """
    class RemoteDomain(RemoteDomainBase):
        """
        Proxy to a remote domain.
        """
        def __init__(self, config, net, domain_index):
            super(RemoteDomain, self).__init__(config, net, domain_index)
            def lazy_socket():
                """Deferred socket creation (connect on first use)."""
                self.server_socket = agent.connect(
                    config.get('server', {}).get('host', '127.0.0.1'),
                    config.get('server', {}).get('port', 8932))
                return self.server_socket
            # Stored as a callable; replaced by the real socket when invoked.
            self.server_socket = lazy_socket
            # RPC proxy addressed to one specific domain behind the broker.
            self.transport = RPCBrokerProxy(
                self.server_socket,
                'broker_domain_proxy',
                config['id'],
                domain_index
            )
            # RPC proxy addressed to the broker process itself.
            self.broker = RPCBrokerProxy(
                self.server_socket,
                'broker_proxy',
                config['id'],
            )
            self.is_subscribed = False
            self.subscribe_next_try = 0
            # Batch buffer for outgoing synapse records (pos == -1 -> empty).
            self.transmitter_pos = -1
            self.transmitter_vector = TransmitterVector()
            self.transmitter_metadata = TransmitterMetadata(0)
            self.transmitter_vector.add(self.transmitter_metadata)
            # Batch buffer for outgoing receiver-index records.
            self.receiver_pos = -1
            self.receiver_vector = ReceiverVector()
            self.receiver_metadata = ReceiverMetadata(0)
            self.receiver_vector.add(self.receiver_metadata)
            # Batch buffer for outgoing spikes.
            self.spikes_pos = -1
            self.spikes_vector = SpikesVector()
            self.spikes_metadata = SpikesMetadata(0)
            self.spikes_vector.add(self.spikes_metadata)

        def send_synapse(self,
                         pre_domain_index, pre_layer_index, pre_neuron_address,
                         post_layer_index, post_x, post_y):
            """
            Process synapse information coming from another domain.
            Here ``self`` plays the role of the post-domain.

            Records are accumulated locally and flushed in packs of
            100000 by send_synapse_pack().
            """
            # FIXME: optimize send_synapse (collect portion of data and send
            # it in one request)
            self.transmitter_pos += 1
            pos = self.transmitter_pos
            self.transmitter_metadata.pre_domain_index[pos] = pre_domain_index
            self.transmitter_metadata.pre_layer_index[pos] = pre_layer_index
            self.transmitter_metadata.pre_neuron_address[pos] \
                    = pre_neuron_address
            self.transmitter_metadata.post_layer_index[pos] = post_layer_index
            self.transmitter_metadata.post_x[pos] = post_x
            self.transmitter_metadata.post_y[pos] = post_y
            # Flush once the batch reaches 100000 records.
            if self.transmitter_pos >= 99999:
                self.send_synapse_pack()
            # return self.__getattr__('send_synapse').no_reply(
            #    pre_domain_index, pre_layer_index, pre_neuron_address,
            #    post_layer_index, post_x, post_y)

        def send_synapse_pack(self):
            """Flush the accumulated synapse batch to the remote domain."""
            if self.transmitter_pos == -1:
                return
            self.transmitter_vector.shrink()
            pack = self.transmitter_vector.bytes()
            # Reset the buffer before sending so a new batch can start.
            self.transmitter_metadata.resize(length=0)
            self.transmitter_pos = -1
            return self.__getattr__('send_synapse_pack') \
                    .set_bytes(pack) \
                    .inc_priority \
                    .no_reply()

        def send_receiver_index(self, post_domain_index, pre_neuron_address,
                                remote_pre_neuron_address,
                                remote_pre_neuron_receiver_index):
            """
            Remember the remote neuron address (IS_RECEIVER) for
            pre_neuron_address (IS_TRANSMITTER).
            Here ``self`` plays the role of the pre-domain.
            """
            self.receiver_pos += 1
            pos = self.receiver_pos
            self.receiver_metadata.post_domain_index[pos] = post_domain_index
            self.receiver_metadata.pre_neuron_address[pos] = pre_neuron_address
            self.receiver_metadata.remote_pre_neuron_address[pos] \
                    = remote_pre_neuron_address
            self.receiver_metadata.remote_pre_neuron_receiver_index[pos] \
                    = remote_pre_neuron_receiver_index
            # Flush once the batch reaches 100000 records.
            if self.receiver_pos >= 99999:
                self.send_receiver_index_pack()
            # return self.__getattr__('send_receiver_index').no_reply(
            #    post_domain_index, pre_neuron_address,
            #    remote_pre_neuron_address,
            #    remote_pre_neuron_receiver_index)

        def send_receiver_index_pack(self):
            """Flush the accumulated receiver-index batch to the remote domain."""
            if self.receiver_pos == -1:
                return
            self.receiver_vector.shrink()
            pack = self.receiver_vector.bytes()
            self.receiver_metadata.resize(length=0)
            self.receiver_pos = -1
            return self.__getattr__('send_receiver_index_pack') \
                    .set_bytes(pack) \
                    .inc_priority \
                    .no_reply()

        def register_spike(self, receiver_neuron_index):
            """
            Accumulate spike information to be forwarded to the other
            domain by self.register_spike_pack.
            """
            self.spikes_pos += 1
            pos = self.spikes_pos
            self.spikes_metadata.receiver_neuron_index[pos] \
                    = receiver_neuron_index

        def register_spike_pack(self, bytes=None):
            """
            Send the accumulated spike data to the remote domain.

            NOTE: the ``bytes`` parameter shadows the builtin and is unused
            here; kept for signature compatibility with callers.
            """
            if self.spikes_pos == -1:
                return
            self.spikes_vector.shrink()
            pack_length = len(self.spikes_vector)
            pack = self.spikes_vector.receiver_neuron_index.bytes()
            self.spikes_metadata.resize(length=0)
            self.spikes_pos = -1
            # 'S' marks a spikes packet on the pub channel; update local
            # statistics only when the publish actually went through.
            if self.pub_data('S', pack):
                local = agent.context['local_domain']
                local.stat_inc('spikes_sent', pack_length)
                local.stat_inc('spikes_packets_sent')
                local.stat_inc(['spikes_sent_to', self.name], pack_length)

        def subscribe(self):
            """
            Ask remote domain to subscribe on pub data from this domain.
            Retries at most once per second; returns True once subscribed.
            """
            if self.is_subscribed:
                return True
            if time.time() > self.subscribe_next_try:
                self.broker.subscribe.inc_priority \
                        .no_reply(agent.context['local_domain'].index)
                self.subscribe_next_try = time.time() + 1
            return False

        def pub_data(self, *args):
            """
            Pub data for remote domain, where self domain is local.
            Returns False (dropping the message) until the remote side
            has subscribed.
            """
            # ask base domain to subscribe this domain
            if not self.subscribe():
                return False
            # a few messages at the begining will be discarded because we
            # asynchronously ask to subscribe
            params = [self.config['id'].bytes]
            params.extend(args)
            agent.pub.send_multipart(params)
            return True

        def register_input_layer_data(self, layer_index, data):
            """
            Send input-layer data over the network ('NP' packet).
            """
            vector = StandaloneVector().set_data(data)
            self.pub_data('NP', str(layer_index), vector.bytes())

        def __getattr__(self, name):
            # Any unknown attribute becomes a remote RPC call.
            return getattr(self.transport, name)
    return RemoteDomain
@action(namespace='domain')
@state('deploy_domains')
def deploy_domains(event, local_domains=None):
    """
    Declare which domains are local and create them.

    local_domains -- list of config domain names that will be simulated
    locally on this agent.
    """
    agent = event.pool.context['agent']
    net = agent.context['net']
    remote_cls = remote_domain_factory(agent)
    factory = create_domain_factory(
        remote_domain_class=remote_cls,
        local_domains=local_domains
    )
    net.deploy_domains(factory)
    # With exactly one local domain, remember it in the agent context so
    # other actions (e.g. spike statistics) can reach it directly.
    if local_domains and len(local_domains) == 1:
        for candidate in net.domains:
            if candidate.name in local_domains:
                agent.context['local_domain'] = candidate
        assert agent.context['local_domain']
@action(namespace='domain')
@state('deploy_layers')
def deploy_layers(event):
    """Create the layers of the network stored in the agent context."""
    net = event.pool.context['agent'].context['net']
    net.deploy_layers()
@action(namespace='domain')
@state('deploy_neurons')
def deploy_neurons(event):
    """Create the (initially empty) neuron vectors."""
    net = event.pool.context['agent'].context['net']
    net.deploy_neurons()
@action(namespace='domain')
@state('pre_deploy_synapses')
def pre_deploy_synapses(event):
    """Create the empty synapse vector prior to synapse deployment."""
    net = event.pool.context['agent'].context['net']
    net.pre_deploy_synapses()
@action(namespace='domain')
@state('deploy_synapses')
def deploy_synapses(event):
    """
    Create neurons and synapses.

    Deployment may be asynchronous: if the net returns a generator it is
    stored in the event context and advanced one step per invocation,
    with event.prevent_done() keeping the event alive until exhausted.
    """
    def tick():
        # Advance the deploy generator one step; returns False once done.
        try:
            event.context['generator'].next()
            event.prevent_done()
        except StopIteration:
            return False
        return True
    # Subsequent calls: just advance the already-running generator.
    if 'generator' in event.context:
        tick()
        return
    agent = event.pool.context['agent']
    net = agent.context['net']
    ret = net.deploy_synapses_async()
    if isinstance(ret, types.GeneratorType):
        # Incremental deployment: remember the generator and start it now.
        event.context['generator'] = ret
        tick()
        return
    else:
        # Synchronous deployment finished in one shot.
        return ret
@action(namespace='domain')
@state('post_deploy_synapses')
def post_deploy_synapses(event):
    """Trim unused trailing space from the synapse vector."""
    net = event.pool.context['agent'].context['net']
    net.post_deploy_synapses()
@action(namespace='domain')
@state('post_deploy')
def post_deploy(event):
    """Build auxiliary indexes and upload the data to the device."""
    net = event.pool.context['agent'].context['net']
    net.post_deploy()
|
{
"content_hash": "3151e5928a102c7221bd75164446a9e3",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 79,
"avg_line_length": 35.96907216494845,
"alnum_prop": 0.5750453807203593,
"repo_name": "openre/openre",
"id": "9662fbc99195a28ad80e051f364673249be41973",
"size": "11011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openre/agent/domain/action/deploy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17920"
},
{
"name": "Python",
"bytes": "389791"
}
],
"symlink_target": ""
}
|
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db import migrations
from analytics.lib.counts import do_delete_count_stat
def delete_messages_sent_to_stream_stat(apps, schema_editor):
    # type: (StateApps, DatabaseSchemaEditor) -> None
    """Purge all analytics rows collected for the removed
    'messages_sent_to_stream:is_bot' count stat."""
    do_delete_count_stat('messages_sent_to_stream:is_bot')
class Migration(migrations.Migration):
    """Data-only migration removing the obsolete
    messages_sent_to_stream:is_bot count stat rows."""

    dependencies = [
        ('analytics', '0008_add_count_indexes'),
    ]

    operations = [
        # No schema change; only deletes rows for the removed stat.
        migrations.RunPython(delete_messages_sent_to_stream_stat),
    ]
|
{
"content_hash": "3c757b682cd4b6292f832ea1abcf0853",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 31.68421052631579,
"alnum_prop": 0.739202657807309,
"repo_name": "samatdav/zulip",
"id": "7d3da10e76aae1f279009c0a1dcc0ab6d25c37b4",
"size": "626",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "analytics/migrations/0009_remove_messages_to_stream_stat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "285788"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "528895"
},
{
"name": "JavaScript",
"bytes": "1590082"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86946"
},
{
"name": "Python",
"bytes": "3441106"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
}
|
import sys
import propertyWidget as pw
import listPropertyWidget as lpw
import actionWidget as aw
import labelWidget as lw
import commonWidget as common
class Container (common.Widget):
    """Code generator for a control-panel Container widget.

    Emits C++ initialization code for the container itself and recursively
    generates all child widgets declared under its <elements> node.
    """
    def __init__(self, generated, element, parentName, languageSetName, isRoot = 0) :
        common.Widget.__init__(self, generated, element, parentName, languageSetName)
        self.isRoot = isRoot
        self.widgetName = "Container"
        # The root container is attached via setRootWidget instead of the
        # usual child-add function.
        if isRoot :
            self.parentAddFunc = "setRootWidget"

    def generate(self) :
        """Generate this container's code, then its children's."""
        common.Widget.generate(self)
        # Only the root container may carry the dismissable flag.
        if self.isRoot:
            self.setDismissable()
        self.generateChildElements()

    def setDismissable(self) :
        """Emit setIsDismissable(...) if the XML declares 'dismissable'."""
        if not hasattr(self.element, "dismissable") :
            return
        self.generated.initCode += "    {0}->setIsDismissable({1});\n".format(self.name, self.element.dismissable)

    def generateChildElements (self) :
        """Dispatch each child XML element to the matching widget generator.

        Unknown element types abort generation with exit code 2.
        """
        elements = self.element.elements.sub_nodes
        for element in elements:
            elementType = element._name
            if elementType == "action" :
                action = aw.Action(self.generated, element, self.name, self.languageSetName)
                action.generate()
            elif elementType == "container" :
                # Nested (non-root) container: recurse.
                container = Container(self.generated, element, self.name, self.languageSetName)
                container.generate()
            elif elementType == "scalarProperty" or elementType == "stringProperty" or elementType == "booleanProperty" :
                propertyW = pw.Property(self.generated, element, self.name, self.languageSetName)
                propertyW.generate()
            elif elementType == "dateProperty" or elementType == "timeProperty" :
                # Date/time properties use the same generator as scalars.
                propertyW = pw.Property(self.generated, element, self.name, self.languageSetName)
                propertyW.generate()
            elif elementType == "labelProperty" :
                label = lw.Label(self.generated, element, self.name, self.languageSetName)
                label.generate()
#            elif elementType == "listProperty" :
#                listProp = lpw.ListProperty(self.generated, element, (self.parentObjectPath + self.objectPathSuffix), self.languageSetName)
#                listProp.generate()
            else :
                print >> sys.stderr, "ERROR - This type is not supported. Exiting " + elementType
                sys.exit(2)
|
{
"content_hash": "ffa6f1dea8720e78a2cb1265514cdb83",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 140,
"avg_line_length": 41.58620689655172,
"alnum_prop": 0.6252072968490879,
"repo_name": "ADVANTECH-Corp/node-alljoyn",
"id": "f20d2cd5e758caa42d89cc293f5b4c3b3604c378",
"size": "3215",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "alljoyn/services/controlpanel/cpp/tools/CPSAppGenerator/GeneratorUtils/containerWidget.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4995"
},
{
"name": "C",
"bytes": "638481"
},
{
"name": "C#",
"bytes": "775008"
},
{
"name": "C++",
"bytes": "12965271"
},
{
"name": "CSS",
"bytes": "17461"
},
{
"name": "Groff",
"bytes": "3068"
},
{
"name": "HTML",
"bytes": "45149"
},
{
"name": "Java",
"bytes": "4802386"
},
{
"name": "JavaScript",
"bytes": "606220"
},
{
"name": "Makefile",
"bytes": "42536"
},
{
"name": "Objective-C",
"bytes": "1829239"
},
{
"name": "Objective-C++",
"bytes": "856772"
},
{
"name": "Python",
"bytes": "559767"
},
{
"name": "Shell",
"bytes": "40697"
},
{
"name": "TeX",
"bytes": "817"
},
{
"name": "Visual Basic",
"bytes": "1285"
},
{
"name": "XSLT",
"bytes": "100471"
}
],
"symlink_target": ""
}
|
from data_migrator.emitters.base import BaseEmitter
from data_migrator.models.fields import HiddenField
from data_migrator.utils import sql_escape, default_logger
log = default_logger()
class MySQLEmitter(BaseEmitter):
    '''Emitter that renders records as MySQL ``INSERT`` statements.

    Attributes:
        base_template: skeleton used to build the per-model INSERT statement
        extension (str): output file extension for this emitter (``.sql``)
    '''
    base_template = '''INSERT %sINTO `%s` (%s) VALUES (%s);'''
    extension = '.sql'

    def __init__(self, *args, **kwargs):
        super(MySQLEmitter, self).__init__(*args, **kwargs)
        self._prepare()

    def emit(self, l):
        '''Render one object as a MySQL INSERT statement (plus remark).'''
        lines = []
        # Prepend the record's remark (if any) as a SQL comment line.
        if hasattr(l, self.meta.remark):
            lines.append("# %s" % getattr(l, self.meta.remark))
        lines.append(self._template % l.emit(escaper=sql_escape))
        return lines

    def preamble(self, headers):
        '''Build the MySQL-specific header emitted before the data rows.'''
        _meta = self.meta
        stats = ",".join(["%s=%d" % (k, v) for k, v in self.manager.stats().items()])
        banner = [
            "transformation for %s to table %s" % (_meta.model_name, _meta.table_name),
            "input headers: %s" % ",".join(headers),
            'stats: %s' % stats,
        ]
        out = ['# %s' % line for line in banner]
        out.append("")
        if isinstance(_meta.prefix, list):
            # Caller supplied an explicit prefix: emit it verbatim.
            out.extend(["%s" % line for line in _meta.prefix])
        else:
            # Default prefix: wipe the target table and reset autoincrement.
            out.extend([
                "SET SQL_SAFE_UPDATES = 0; -- you need this to delete without WHERE clause",
                "DELETE FROM `%s`;" % _meta.table_name,  #nosec
                "ALTER TABLE `%s` AUTO_INCREMENT = 1;" % _meta.table_name,
            ])
        out.append("")
        return out

    def _prepare(self):
        '''Precompute the INSERT template from the model's visible fields.'''
        visible = [f.name for k, f in self.meta.fields.items()
                   if not isinstance(f, HiddenField)]
        columns = ", ".join(["`" + name + "`" for name in visible])
        placeholders = ", ".join(["%(" + name + ")s" for name in visible])
        _ignore = 'IGNORE ' if self.meta.drop_non_unique else ''
        template = self.base_template % (
            _ignore, self.meta.table_name, columns, placeholders)
        log.debug('emit template: %s', template)
        self._template = template
|
{
"content_hash": "64b582974dcce2f670b96397fdd2bd51",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 96,
"avg_line_length": 39.03225806451613,
"alnum_prop": 0.5566115702479338,
"repo_name": "schubergphilis/data-migrator",
"id": "514c5049a749a31df531b292d7feed5f428f2051",
"size": "2467",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/data_migrator/emitters/mysql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "107709"
}
],
"symlink_target": ""
}
|
import logging
from fabric.api import env, task, require, prompt
from neckbeard.actions.utils import _get_gen_target
from neckbeard.environment_manager import Deployment
logger = logging.getLogger('actions.override')
@task
def override():
    """
    Manually fix the generational config for the given generation.

    This is required for initial setup of the generational system. We are only
    modifying the simpledb records of the instances, not the instances
    themselves.

    Interactively walks every (aws_type, node_name) pair in the deployment
    configuration and lets the operator Alter (create/overwrite the record)
    or Retire each node.
    """
    require('_deployment_name')
    require('_deployment_confs')

    generation_target = _get_gen_target()
    deployment = Deployment(
        env._deployment_name,
        env._deployment_confs['ec2'],
        env._deployment_confs['rds'],
        env._deployment_confs['elb'],
    )
    deployment.verify_deployment_state()
    # Only the ACTIVE and PENDING generations can be overridden.
    if generation_target not in ['ACTIVE', 'PENDING']:
        exit(1)

    opts = ['Y', 'N']
    for aws_type, confs in env._deployment_confs.items():
        for node_name, node_confs in confs.items():
            if generation_target == 'ACTIVE':
                node = deployment.get_active_node(aws_type, node_name)
            else:
                node = deployment.get_pending_node(aws_type, node_name)
            if node:
                print "Existing node found for %s: %s\n" % (node_name, node)
                replace_node = ''
                # Re-prompt until a valid Y/N answer is given.
                while replace_node not in opts:
                    replace_node = prompt("Change this node? (Y/N)")
                if replace_node == 'N':
                    continue
            else:
                print "No node for %s: %s\n" % (aws_type, node_name)

            retire_alter_opts = ['Retire', 'Alter']
            retire_alter_response = ''
            should_alter_node = False
            should_retire_node = False
            while retire_alter_response not in retire_alter_opts:
                retire_alter_response = prompt(
                    "Retire or Alter node? (Retire/Alter)")
            if retire_alter_response == 'Retire':
                should_retire_node = True
            else:
                should_alter_node = True

            if should_alter_node:
                # Prompt if the node doesn't already exist
                if not node:
                    add_node = ''
                    while add_node not in opts:
                        add_node = prompt(
                            'No node record found for <%s>-%s. Add one? '
                            '(Y/N)' % (aws_type, node_name)
                        )
                    if add_node == 'N':
                        should_alter_node = False
            # Retiring requires an existing record; skip otherwise.
            if should_retire_node and not node:
                logger.critical(
                    "No node record found. Can't retire a non-existent node.")
                continue

            if should_alter_node:
                _override_node(
                    node, deployment, aws_type, node_name)
            elif should_retire_node:
                logger.info("Retiring: %s", node)
                confirm = ''
                # Retirement is destructive: ask for explicit confirmation.
                while confirm not in opts:
                    confirm = prompt(
                        "Are you sure you want to RETIRE this node? (Y/N)")
                if confirm == 'Y':
                    node.make_fully_inoperative()
                    node.retire()
def _override_node(node, deployment, aws_type, node_name):
    """Prompt for an AWS id and (re)write the simpledb record for a node.

    Creates a blank record when *node* is None, stamps it with the active
    (or pending) generation, and saves it. Exits the process if the AWS
    instance is not actually running.
    """
    aws_id = prompt(
        "Enter the %s id for <%s>-%s:" % (aws_type, aws_type, node_name))
    if not node:
        node = deployment.get_blank_node(aws_type)

    node.aws_id = aws_id
    # Make sure this node actually exists on aws
    node.refresh_boto_instance()
    assert node.boto_instance

    # Prefer the active generation; fall back to pending when no active
    # generation id exists yet.
    node.generation_id = deployment.active_gen_id
    node.is_active_generation = 1
    if not node.generation_id:
        node.generation_id = deployment.pending_gen_id
        node.is_active_generation = 0
    node.deployment_name = deployment.deployment_name
    node.name = node_name
    node.creation_date = node.launch_time
    node.is_running = 1
    node.initial_deploy_complete = 1
    if not node.is_actually_running():
        print "ERROR: %s isn't actually running" % aws_id
        exit(1)

    node.save()
    logger.info("Node %s altered", node)
|
{
"content_hash": "6923dedf043bde3fdba5bd116ae46f7a",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 34.37903225806452,
"alnum_prop": 0.5446868402533427,
"repo_name": "winhamwr/neckbeard",
"id": "9f6048a0cc13d7bd270f41d6a66ebf4e125a288e",
"size": "4263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neckbeard/actions/override.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "338178"
}
],
"symlink_target": ""
}
|
import threading
from utils import list_contains
class TagCache:
    """Thread-safe in-memory collection of tag objects, keyed by MAC.

    All mutating and reading operations take an internal lock. The lock is
    acquired with ``with`` so it is always released, even if the guarded
    operation raises (the original acquire()/release() pairs leaked the
    lock on exceptions such as ``list.remove`` on a missing tag).
    """

    def __init__(self):
        # List of tag objects; each is expected to have a ``mac`` attribute.
        self.data = []
        self.threadLock = threading.Lock()

    def append(self, tag):
        """Add *tag* to the cache."""
        with self.threadLock:
            self.data.append(tag)

    def remove(self, tag):
        """Remove *tag* from the cache.

        Raises ValueError (from list.remove) if the tag is not present;
        the lock is still released in that case.
        """
        with self.threadLock:
            self.data.remove(tag)

    def hasTagByMac(self, mac):
        """Return truthy if any cached tag has the given MAC address."""
        with self.threadLock:
            return list_contains(self.data, lambda t: t.mac == mac)

    def findByMac(self, mac):
        """Return the first tag with the given MAC, or None."""
        with self.threadLock:
            for tag in self.data:
                if tag.mac == mac:
                    return tag
            return None

    def getData(self):
        """Return a shallow copy of the current tag list.

        NOTE: unsynchronized in the original; kept lock-free here to
        preserve behavior (the copy itself is atomic enough in CPython).
        """
        return self.data[:]
|
{
"content_hash": "afd4c4e5997ce2c34b3490efc7f8ee6c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 65,
"avg_line_length": 22.7,
"alnum_prop": 0.5704845814977973,
"repo_name": "fablab-ka/labtags",
"id": "5c99ff6d1cf5d9be2d0f9b39eb82b751eb460d68",
"size": "908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blot-gateway/tagcache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40738"
},
{
"name": "Makefile",
"bytes": "1331"
},
{
"name": "Python",
"bytes": "124106"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from indico.core.db.sqlalchemy import PyIntEnum, db
from indico.modules.events.models.persons import PersonLinkBase
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
from indico.util.struct.enum import IndicoEnum
class AuthorType(int, IndicoEnum):
    """Author role of a person on a contribution, ordered by precedence."""
    none = 0
    primary = 1
    secondary = 2

    @classmethod
    def get_highest(cls, *types):
        """Return the highest-precedence type among *types* (primary >
        secondary > none)."""
        for candidate in (cls.primary, cls.secondary):
            if candidate in types:
                return candidate
        return cls.none
class ContributionPersonLink(PersonLinkBase):
    """Association between EventPerson and Contribution."""

    __tablename__ = 'contribution_person_links'
    __auto_table_args = {'schema': 'events'}
    # Configuration consumed by PersonLinkBase's declarative machinery.
    person_link_backref_name = 'contribution_links'
    person_link_unique_columns = ('contribution_id',)
    object_relationship_name = 'contribution'

    contribution_id = db.Column(
        db.Integer,
        db.ForeignKey('events.contributions.id'),
        index=True,
        nullable=False
    )
    # Whether the person is listed as a speaker of the contribution.
    is_speaker = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    # Author role (none/primary/secondary), stored as an int enum.
    author_type = db.Column(
        PyIntEnum(AuthorType),
        nullable=False,
        default=AuthorType.none
    )

    # relationship backrefs:
    # - contribution (Contribution.person_links)

    @property
    def is_submitter(self):
        # Requires the contribution relationship to be populated.
        if not self.contribution:
            raise Exception("No contribution to check submission rights against")
        return self.person.has_role('submit', self.contribution)

    @property
    def is_author(self):
        # True for both primary and secondary authors.
        return self.author_type != AuthorType.none

    @locator_property
    def locator(self):
        return dict(self.contribution.locator, person_id=self.id)

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'id', 'person_id', 'contribution_id', is_speaker=False, author_type=AuthorType.none,
                           _text=self.full_name)
class SubContributionPersonLink(PersonLinkBase):
    """Association between EventPerson and SubContribution."""

    __tablename__ = 'subcontribution_person_links'
    __auto_table_args = {'schema': 'events'}
    # Configuration consumed by PersonLinkBase's declarative machinery.
    person_link_backref_name = 'subcontribution_links'
    person_link_unique_columns = ('subcontribution_id',)
    object_relationship_name = 'subcontribution'

    # subcontribution persons are always speakers and never authors
    # we provide these attributes to make subcontribution links
    # compatible with contribution links
    is_speaker = True
    author_type = AuthorType.none

    subcontribution_id = db.Column(
        db.Integer,
        db.ForeignKey('events.subcontributions.id'),
        index=True,
        nullable=False
    )

    # relationship backrefs:
    # - subcontribution (SubContribution.person_links)

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'id', 'person_id', 'subcontribution_id', _text=self.full_name)
|
{
"content_hash": "2c9dd6f4a429bc6eab55317d455a0ee0",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 117,
"avg_line_length": 30.544554455445546,
"alnum_prop": 0.6638573743922204,
"repo_name": "mic4ael/indico",
"id": "a544c97b31e94d7bc76e10f4ea06414fd459c788",
"size": "3299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/contributions/models/persons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
from testix import *
import pytest
import line_monitor
@pytest.fixture
def override_imports(patch_module):
    """Patch line_monitor's external collaborators for isolated testing."""
    for attribute in ('subprocess', 'pty', 'open'):
        patch_module(line_monitor, attribute)
def launch_scenario(s):
    """Record the expected interactions for LineMonitor.launch_subprocess:
    open a pty pair, wrap the read end in a latin-1 reader, and spawn the
    subprocess with stdout attached to the write end."""
    s.pty.openpty() >> ('write_to_fd', 'read_from_fd')
    s.open('read_from_fd', encoding='latin-1') >> Fake('reader')
    s.subprocess.Popen(['my', 'command', 'line'], stdout='write_to_fd', close_fds=True)
def test_lauch_subprocess_with_pseudoterminal(override_imports):
    """Launching must open a pty and spawn the subprocess on its write end."""
    monitor = line_monitor.LineMonitor()
    with Scenario() as s:
        launch_scenario(s)
        monitor.launch_subprocess(['my', 'command', 'line'])
def test_receive_output_lines_via_callback(override_imports):
    """Each line read from the pty must be forwarded to the callback."""
    monitor = line_monitor.LineMonitor()
    with Scenario() as s:
        launch_scenario(s)
        monitor.launch_subprocess(['my', 'command', 'line'])
        for payload in ('line 1', 'line 2', 'line 3'):
            s.reader.readline() >> payload
            s.my_callback(payload)

        monitor.register_callback(Fake('my_callback'))
        monitor.monitor()
|
{
"content_hash": "d51af3f0e516bb6722018df35516a437",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 87,
"avg_line_length": 33.19444444444444,
"alnum_prop": 0.6376569037656904,
"repo_name": "haarcuba/testix",
"id": "e885adedf0f6dce557cce2cd3b67ed6b1110ffcd",
"size": "1195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/line_monitor/tests/unit/4/test_line_monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46054"
},
{
"name": "Ruby",
"bytes": "2247"
},
{
"name": "Shell",
"bytes": "450"
},
{
"name": "Vim Script",
"bytes": "76189"
}
],
"symlink_target": ""
}
|
"""Mechanisms for ION services and service management infrastructure"""
__author__ = 'Adam R. Smith, Michael Meisinger'
from zope.interface import implementedBy
from mi.core.exceptions import BadRequest, ServerError
from mi.core.log import log
from mi.core.containers import named_any, itersubclasses
from mi.core.context import LocalContextMixin
class BaseClients(object):
    """
    Basic object to hold clients for a service. Derived in implementations.
    Placeholder, may not need any functionality.

    Subclasses attach concrete service client instances as attributes.
    """
    pass
class BaseService(LocalContextMixin):
    """
    Base class providing a 'service'. Pure Python class. Not dependent on messaging.
    Such services can be executed by ION processes.

    Lifecycle: init() -> start() -> stop() -> quit(). Each public phase
    pairs a framework hook (_on_*) with a subclass hook (on_*).
    """

    # The following are set one per implementation (class)
    name = None
    running = False
    dependencies = []
    process_type = "service"

    def __init__(self, *args, **kwargs):
        # Runtime identity/metadata, filled in by the container framework.
        self.id = None
        self._proc_name = None
        self._proc_type = None
        self._proc_res_id = None
        self._proc_start_time = None
        self.errcause = None
        self.org_governance_name = None
        self.container = None
        self.CFG = None
        self._process = None  # reference to IonProcess, internal
        super(BaseService, self).__init__()

    def init(self):
        """Run framework init, then the subclass on_init() hook."""
        self._on_init()
        return self.on_init()

    def _on_init(self):
        """Framework hook to initialize"""

    def on_init(self):
        """
        Method to be overridden as necessary by implementing service classes to perform
        initialization actions prior to service start. Configuration parameters are
        accessible via the self.CFG dict.
        """

    def start(self):
        """Run framework start (sets running), then on_start()."""
        self._on_start()
        return self.on_start()

    def _on_start(self):
        """Framework hook to start"""
        self.running = True

    def on_start(self):
        """
        Method called at service startup.
        """

    def stop(self):
        # NOTE: subclass hook runs BEFORE the framework hook on stop,
        # mirroring start() in reverse.
        res = self.on_stop()
        self._on_stop()
        return res

    def _on_stop(self):
        """Framework hook to stop"""
        self.running = False

    def on_stop(self):
        """
        Method called at service stop. (May not be called if service is terminated immediately).
        """

    def quit(self):
        """Run on_quit(), logging (not propagating) any error, then the
        framework quit hook. Returns whatever on_quit() returned."""
        res = None
        try:
            res = self.on_quit()
        except Exception:
            log.exception("Error while service %s, id: %s quitting" % (self.name, self.id))

        self._on_quit()
        return res

    def _on_quit(self):
        """Framework hook to quit"""
        self.running = False

    def on_quit(self):
        """
        Method called just before service termination.
        """

    def assert_condition(self, condition, errorstr):
        """Raise BadRequest(errorstr) unless *condition* holds."""
        if not condition:
            raise BadRequest(errorstr)

    def __str__(self):
        proc_name = 'Unknown proc_name' if self._proc_name is None else self._proc_name
        proc_type = 'Unknown proc_type' if self._proc_type is None else self._proc_type
        return "".join((self.__class__.__name__, "(",
                        "name=", proc_name,
                        ",id=", self.id,
                        ",type=", proc_type,
                        ")"))

    def add_endpoint(self, endpoint):
        """
        Adds a managed listening endpoint to this service/process.

        The service/process must be running inside of an IonProcessThread, or this
        method will raise an error.

        A managed listening endpoint will report failures up to the process, then to
        the container's process manager.
        """
        if self._process is None:
            raise ServerError("No attached IonProcessThread")

        self._process.add_endpoint(endpoint)

    def remove_endpoint(self, endpoint):
        """
        Removes an endpoint from being managed by this service/process.

        The service/process must be running inside of an IonProcessThread, or this
        method will raise an error. It will also raise an error if the endpoint is
        not currently managed.

        Errors raised in the endpoint will no longer be reported to the process or
        process manager.
        """
        if self._process is None:
            raise ServerError("No attached IonProcessThread")

        self._process.remove_endpoint(endpoint)
# -----------------------------------------------------------------------------------------------
# Service management infrastructure
class IonServiceDefinition(object):
    """
    Provides a walkable structure for ION service metadata and object definitions.
    """
    def __init__(self, name, dependencies=(), version=''):
        """Create a service definition.

        Args:
            name: service name.
            dependencies: iterable of dependency service names; copied into
                a fresh list. Default changed from the mutable-default
                anti-pattern ``[]`` to the immutable ``()`` — behavior is
                identical since the value was always copied via list().
            version: version string.
        """
        self.name = name
        self.dependencies = list(dependencies)
        self.version = version
        self.operations = []

        # Points to service (Zope) interface
        self.interface = None
        # Points to abstract base class
        self.base = None
        # Points to implementation class
        self.impl = []
        # Points to process client class
        self.client = None
        # Points to non-process client class
        self.simple_client = None

    def __str__(self):
        return "IonServiceDefinition(name=%s):%s" % (self.name, self.__dict__)

    def __repr__(self):
        return str(self)
class IonServiceOperation(object):
    """Metadata describing a single operation of an ION service."""

    def __init__(self, name):
        self.name = name
        self.docstring = ''
        # Input/output message object types, filled in by the loader.
        self.in_object_type = None
        self.out_object_type = None
        # Exception types this operation may raise.
        self.throws = []

    def __str__(self):
        return "IonServiceOperation(name=%s):%s" % (self.name, self.__dict__)

    def __repr__(self):
        return self.__str__()
class IonServiceRegistry(object):
    """Registry of ION service definitions, discovered from loaded modules.

    Populated via build_service_map() (walks BaseService subclasses) and
    discover_service_classes() (imports implementation packages).
    """

    def __init__(self):
        # name -> IonServiceDefinition
        self.services = {}
        # name -> service base class
        self.services_by_name = {}
        self.classes_loaded = False
        self.operations = None

    def add_servicedef_entry(self, name, key, value, append=False):
        """Set (or append to) attribute *key* on the definition for *name*,
        creating the definition on first use. Logs a warning when an
        existing scalar value is overwritten."""
        if not name:
            #log.warning("No name for key=%s, value=%s" % (key, value))
            return

        if not name in self.services:
            svc_def = IonServiceDefinition(name)
            self.services[name] = svc_def
        else:
            svc_def = self.services[name]

        oldvalue = getattr(svc_def, key, None)
        if oldvalue is not None:
            if append:
                assert type(oldvalue) is list, "Cannot append to non-list: %s" % oldvalue
                oldvalue.append(value)
            else:
                log.warning("Service %s, key=%s exists. Old=%s, new=%s" % (name, key, getattr(svc_def, key), value))
        if not append:
            setattr(svc_def, key, value)

    @classmethod
    def load_service_mods(cls, path):
        """Recursively import every module under *path* so service classes
        get registered as BaseService subclasses. Import failures are
        logged and skipped (Python 2 except syntax preserved)."""
        import pkgutil
        import string
        mod_prefix = string.replace(path, "/", ".")

        for mod_imp, mod_name, is_pkg in pkgutil.iter_modules([path]):
            if is_pkg:
                cls.load_service_mods(path + "/" + mod_name)
            else:
                mod_qual = "%s.%s" % (mod_prefix, mod_name)
                #print "Import", mod_qual
                try:
                    named_any(mod_qual)
                except Exception, ex:
                    log.warning("Import module '%s' failed: %s" % (mod_qual, ex))

    def build_service_map(self):
        """
        Adds all known service definitions to service registry.
        @todo: May be a bit fragile due to using BaseService.__subclasses__
        """
        for cls in BaseService.__subclasses__():
            assert hasattr(cls, 'name'), 'Service class must define name value. Service class in error: %s' % cls
            if cls.name:
                self.services_by_name[cls.name] = cls
                self.add_servicedef_entry(cls.name, "base", cls)
                # First declared Zope interface becomes the service interface.
                interfaces = list(implementedBy(cls))
                if interfaces:
                    self.add_servicedef_entry(cls.name, "interface", interfaces[0])
                # By convention, BaseFoo has sibling FooProcessClient/FooClient.
                if cls.__name__.startswith("Base"):
                    try:
                        client = "%s.%sProcessClient" % (cls.__module__, cls.__name__[4:])
                        self.add_servicedef_entry(cls.name, "client", named_any(client))
                        sclient = "%s.%sClient" % (cls.__module__, cls.__name__[4:])
                        self.add_servicedef_entry(cls.name, "simple_client", named_any(sclient))
                    except Exception, ex:
                        log.warning("Cannot find client for service %s" % (cls.name))

    def discover_service_classes(self):
        """
        Walk implementation directories and find service implementation classes.
        @todo Only works for ion packages and submodules
        """
        IonServiceRegistry.load_service_mods("ion")

        # Leaf subclasses (no further subclasses) are the implementations.
        sclasses = [s for s in itersubclasses(BaseService) if not s.__subclasses__()]
        for scls in sclasses:
            self.add_servicedef_entry(scls.name, "impl", scls, append=True)

        self.classes_loaded = True

    def get_service_base(self, name):
        """
        Returns the service base class with interface for the given service name or None.
        """
        if name in self.services:
            return getattr(self.services[name], 'base', None)
        else:
            return None

    def get_service_by_name(self, name):
        """
        Returns the service definition for the given service name or None.
        """
        if name in self.services:
            return self.services[name]
        else:
            return None

    def is_service_available(self, service_name, local_rr_only=False):
        """Check the resource registry for a Service resource in a usable
        state (READY/STEADY/PENDING). Any error yields False."""

        try:
            service_resource = None
            #from pyon.core.bootstrap import container_instance
            from mi.core.bootstrap import container_instance
            from interface.objects import ServiceStateEnum
            # Use container direct RR connection if available, otherwise use messaging to the RR service
            if hasattr(container_instance, 'has_capability') and container_instance.has_capability('RESOURCE_REGISTRY'):
                service_resource, _ = container_instance.resource_registry.find_resources(restype='Service', name=service_name)
            elif not local_rr_only:
                from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
                rr_client = ResourceRegistryServiceClient(container_instance.node)
                service_resource, _ = rr_client.find_resources(restype='Service', name=service_name)
            else:
                log.warn("is_service_available(%s) - No RR connection" % service_name)

            # The service is available only of there is a single RR object for it and it is in one of these states:
            if service_resource and len(service_resource) > 1:
                log.warn("is_service_available(%s) - Found multiple service instances: %s", service_name, service_resource)

            # MM 2013-08-17: Added PENDING, because this means service will be there shortly
            if service_resource and service_resource[0].state in (ServiceStateEnum.READY, ServiceStateEnum.STEADY, ServiceStateEnum.PENDING):
                return True
            elif service_resource:
                log.warn("is_service_available(%s) - Service resource in invalid state", service_resource)

            return False

        except Exception as ex:
            return False
|
{
"content_hash": "86fadfa952c312570b47e37f921ac5c2",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 141,
"avg_line_length": 34.96666666666667,
"alnum_prop": 0.5811595458878586,
"repo_name": "rmanoni/mi-instrument",
"id": "196cdef29c82155bd5611650a03a198d887bb23c",
"size": "11562",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mi/core/service.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6778456"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.mixedreality import MixedRealityClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-mixedreality
# USAGE
python update_object_anchors_account.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Patch (update) an Object Anchors account and print the service response."""
    subscription_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    client = MixedRealityClient(
        credential=DefaultAzureCredential(),
        subscription_id=subscription_id,
    )
    # Fields to patch on the existing account.
    account_patch = {
        "identity": {"type": "SystemAssigned"},
        "location": "eastus2euap",
        "tags": {"hero": "romeo", "heroine": "juliet"},
    }
    response = client.object_anchors_accounts.update(
        resource_group_name="MyResourceGroup",
        account_name="MyAccount",
        object_anchors_account=account_patch,
    )
    print(response)


# x-ms-original-file: specification/mixedreality/resource-manager/Microsoft.MixedReality/preview/2021-03-01-preview/examples/object-anchors/Patch.json
if __name__ == "__main__":
    main()
|
{
"content_hash": "f8827b21bf1266f345f5a7398999617e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 150,
"avg_line_length": 34.86842105263158,
"alnum_prop": 0.6988679245283019,
"repo_name": "Azure/azure-sdk-for-python",
"id": "99fe799d25480c3993a4819cba13faca1a985ff2",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/mixedreality/azure-mgmt-mixedreality/generated_samples/update_object_anchors_account.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import datetime
from django.core import exceptions
from django.utils import translation
from django.utils.timezone import activate
from django.conf import settings
from django import http
from lino.core import constants
from .utils import AnonymousUser
from lino.utils import get_client_ip_address
from lino.core import auth
from lino.core.auth import load_backend
from .backends import RemoteUserBackend
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import MiddlewareMixin
from django.utils.functional import SimpleLazyObject
ACTIVITY_SLOT = datetime.timedelta(seconds=10)
# Minimum time between updates of the session's last_activity timestamp;
# throttles per-request session writes (see WithUserMiddleware).
def get_user(request):
    """Return the user for *request*, resolving it through the auth
    machinery at most once and memoizing the result on the request."""
    try:
        return request._cached_user
    except AttributeError:
        request._cached_user = auth.get_user(request)
        return request._cached_user
class AuthenticationMiddleware(MiddlewareMixin):
    """Attach a lazily-resolved ``request.user``; requires the Django
    session middleware to run earlier in the middleware chain."""

    def process_request(self, request):
        setting_suffix = "_CLASSES" if settings.MIDDLEWARE is None else ""
        assert hasattr(request, 'session'), (
            "Requires session middleware "
            "to be installed. Edit your MIDDLEWARE%s setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware' before "
            "'django.contrib.auth.middleware.AuthenticationMiddleware'."
        ) % setting_suffix
        # The user is only fetched from the session when first accessed.
        request.user = SimpleLazyObject(lambda: get_user(request))
class RemoteUserMiddleware(MiddlewareMixin):
    """
    Middleware for utilizing Web-server-provided authentication.
    If request.user is not authenticated, then this middleware attempts to
    authenticate the username passed in the ``REMOTE_USER`` request header.
    If authentication is successful, the user is automatically logged in to
    persist the user in the session.
    The header used is configurable and defaults to ``REMOTE_USER``. Subclass
    this class and change the ``header`` attribute if you need to use a
    different header.
    """
    # Name of request header to grab username from. This will be the key as
    # used in the request.META dictionary, i.e. the normalization of headers to
    # all uppercase and the addition of "HTTP_" prefix apply.
    header = "REMOTE_USER"
    # When True, a previously authenticated user is logged out as soon as the
    # header disappears from the request.
    force_logout_if_no_header = True
    def process_request(self, request):
        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'):
            raise ImproperlyConfigured(
                "The Django remote user auth middleware requires the"
                " authentication middleware to be installed.  Edit your"
                " MIDDLEWARE setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the RemoteUserMiddleware class.")
        try:
            username = request.META[self.header]
        except KeyError:
            # If specified header doesn't exist then remove any existing
            # authenticated remote-user, or return (leaving request.user set to
            # AnonymousUser by the AuthenticationMiddleware).
            if self.force_logout_if_no_header and request.user.is_authenticated:
                self._remove_invalid_user(request)
            return
        # If the user is already authenticated and that user is the user we are
        # getting passed in the headers, then the correct user is already
        # persisted in the session and we don't need to continue.
        if request.user.is_authenticated:
            if request.user.get_username() == self.clean_username(username, request):
                return
            else:
                # An authenticated user is associated with the request, but
                # it does not match the authorized user in the header.
                self._remove_invalid_user(request)
        # We are seeing this user for the first time in this session, attempt
        # to authenticate the user.
        user = auth.authenticate(request, remote_user=username)
        if user:
            # User is valid.  Set request.user and persist user in the session
            # by logging the user in.
            request.user = user
            auth.login(request, user)
    def clean_username(self, username, request):
        """
        Allows the backend to clean the username, if the backend defines a
        clean_username method.
        """
        # LS 20171221 : support remote auth without session
        # (an empty/missing backend key simply skips the cleaning step
        # instead of failing).
        backend_str = request.session.get(auth.BACKEND_SESSION_KEY, '')
        if backend_str:
            backend = auth.load_backend(backend_str)
            try:
                username = backend.clean_username(username)
            except AttributeError:  # Backend has no clean_username method.
                pass
        return username
    def _remove_invalid_user(self, request):
        """
        Removes the current authenticated user in the request which is invalid
        but only if the user is authenticated via the RemoteUserBackend.
        """
        try:
            stored_backend = load_backend(request.session.get(auth.BACKEND_SESSION_KEY, ''))
        except ImportError:
            # backend failed to load
            auth.logout(request)
        else:
            if isinstance(stored_backend, RemoteUserBackend):
                auth.logout(request)
def request2data(request, user_language=None):
    """Return the query-data mapping (GET/POST/parsed body) for *request*.

    Side effects: sets ``request.requesting_panel`` and
    ``request.device_type``; may activate a user language and set
    ``request.LANGUAGE_CODE``.  For methods without query data
    (e.g. OPTIONS, HEAD) it additionally clears ``request.subst_user``
    and returns None.
    """
    if request.method == 'GET':
        rqdata = request.GET
    elif request.method in ('PUT', 'DELETE'):
        # raw_post_data before Django 1.4
        # PUT/DELETE bodies are not parsed by Django, so parse them here.
        rqdata = http.QueryDict(request.body)
    elif request.method == 'POST':
        rqdata = request.POST
    else:
        # e.g. OPTIONS, HEAD
        if user_language and len(settings.SITE.languages) > 1:
            translation.activate(user_language)
            request.LANGUAGE_CODE = translation.get_language()
        # ~ logger.info("20121205 on_login %r",translation.get_language())
        request.requesting_panel = None
        request.subst_user = None
        return
    # ~ else:  # DELETE
    # ~ request.subst_user = None
    # ~ request.requesting_panel = None
    # ~ return
    request.requesting_panel = rqdata.get(
        constants.URL_PARAM_REQUESTING_PANEL, None)
    request.device_type = rqdata.get(
        constants.URL_PARAM_DEVICE_TYPE, settings.SITE.device_type)
    if len(settings.SITE.languages) > 1:
        # A per-request language parameter overrides the caller-supplied one.
        user_language = rqdata.get(
            constants.URL_PARAM_USER_LANGUAGE, user_language)
        if user_language:
            translation.activate(user_language)
            request.LANGUAGE_CODE = translation.get_language()
    return rqdata
class NoUserMiddleware(MiddlewareMixin):
    """For sites without user authentication: every request runs as
    AnonymousUser with no substitute user and no device type."""

    def process_request(self, request):
        if settings.USE_TZ:
            default_tz = settings.SITE.models.about.TimeZones.default.tzinfo
            activate(default_tz)
        request.user = AnonymousUser()
        request.device_type = None
        request.subst_user = None
        request2data(request)
class WithUserMiddleware(MiddlewareMixin):
    """Set up time zone, session activity bookkeeping and the optional
    substitute user (``su`` URL parameter) for an authenticated request."""

    def process_request(self, request):
        user = request.user
        user_language = user.language  # or settings.SITE.get_default_language()
        if settings.USE_TZ:
            if user.time_zone:
                activate(user.time_zone.tzinfo)
            else:
                activate(settings.SITE.models.about.TimeZones.default.tzinfo)
        if user.is_anonymous:
            request.subst_user = None
            request.requesting_panel = None
            return
        update = False
        now = datetime.datetime.now()
        last = request.session.get('last_activity', None)
        if last is None:
            update = True
        else:
            try:
                last = datetime.datetime.strptime(last, "%Y-%m-%dT%H:%M:%S.%f")
            except ValueError:
                # BUGFIX: datetime.isoformat() omits the ".%f" part when
                # microsecond == 0, so the stored value occasionally has no
                # fractional seconds and the first format would raise,
                # breaking the request.  Fall back to parsing without it.
                last = datetime.datetime.strptime(last, "%Y-%m-%dT%H:%M:%S")
            if now - last > ACTIVITY_SLOT:
                # print("20210116 update last activity")
                update = True
        if update:
            # information shown in users.ActiveSessions
            request.session['last_activity'] = now.isoformat()
            request.session['last_ip_addr'] = get_client_ip_address(request)
        rqdata = request2data(request, user_language)
        if rqdata is None:
            return
        su = rqdata.get(constants.URL_PARAM_SUBST_USER, None)
        if su is not None:
            if su:
                try:
                    su = settings.SITE.user_model.objects.get(id=int(su))
                    # ~ logger.info("20120714 su is %s",su.username)
                except settings.SITE.user_model.DoesNotExist:
                    su = None
            else:
                su = None  # e.g. when it was an empty string "su="
        request.subst_user = su
# class DeviceTypeMiddleware(MiddlewareMixin):
# """Sets the `device_type` attribute on every incoming request.
# """
# def process_request(self, request):
# user = request.user
# user_language = user.language # or settings.SITE.get_default_language()
# rqdata = request2data(request, user_language)
# if rqdata is None:
# return
#
# dt = rqdata.get(
# constants.URL_PARAM_DEVICE_TYPE, settings.SITE.device_type)
# request.device_type = dt
|
{
"content_hash": "4fcbc2ab4decfd5141236759d0b714fa",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 112,
"avg_line_length": 39.774468085106385,
"alnum_prop": 0.6231946078955815,
"repo_name": "lino-framework/lino",
"id": "ce45f4e5555908e5aa91b4311a56d7f5dc21eede",
"size": "9560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino/core/auth/middleware.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "704"
},
{
"name": "CSS",
"bytes": "1281825"
},
{
"name": "Emacs Lisp",
"bytes": "277895"
},
{
"name": "HTML",
"bytes": "928037"
},
{
"name": "Hack",
"bytes": "3416"
},
{
"name": "JavaScript",
"bytes": "1128493"
},
{
"name": "PHP",
"bytes": "53997"
},
{
"name": "Python",
"bytes": "2601694"
},
{
"name": "Shell",
"bytes": "4469"
},
{
"name": "TSQL",
"bytes": "2427"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: sets the default of
    # UserProfile.profile_pic to 'volunteer.jpg' (CharField, max 128 chars).

    dependencies = [
        ('ja_social', '0008_userprofile_profile_pic'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='profile_pic',
            field=models.CharField(default='volunteer.jpg', max_length=128),
        ),
    ]
|
{
"content_hash": "1784a3d9963742e31cf0609a4eb52043",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 76,
"avg_line_length": 23.38888888888889,
"alnum_prop": 0.6128266033254157,
"repo_name": "Bryconc/JA-Social",
"id": "bb7f7d226bfc174cfddbc4ab09b2c37b50da0e88",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ja_social/migrations/0009_auto_20161023_0506.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4889"
},
{
"name": "HTML",
"bytes": "19675"
},
{
"name": "Python",
"bytes": "30852"
}
],
"symlink_target": ""
}
|
"""Class for probabilistic distance QSR
:Author: Christian Dondrup <cdondrup@lincoln.ac.uk>
:Organization: University of Lincoln
"""
from __future__ import print_function, division
import numpy as np
from qsr_arg_relations_distance import QSR_Arg_Relations_Distance
from random import uniform
class QSR_Arg_Prob_Relations_Distance(QSR_Arg_Relations_Distance):
    """Probabilistic argument-relations distance QSR: each relation's
    (mean, sigma) parameters define a Gaussian over distance and a relation
    is sampled stochastically from those likelihoods."""

    def __init__(self):
        super(QSR_Arg_Prob_Relations_Distance, self).__init__()
        self._unique_id = "argprobd"
        # Relation parameter values must be (mean, sigma) pairs.
        self.allowed_value_types = (tuple, list)
        self.value_sort_key = lambda x: x[1][0]  # Sort by first element in value tuple, i.e. mean

    def __normpdf(self, x, mu, sigma):
        """Gaussian pdf value at *x* for N(mu, sigma), rounded to 3 decimals."""
        u = (x-mu)/np.abs(sigma)
        y = (1/(np.sqrt(2*np.pi)*np.abs(sigma)))*np.exp(-u*u/2)
        return np.around(y, decimals=3)

    def _compute_qsr(self, data1, data2, qsr_params, **kwargs):
        """Pick a relation for the two objects' Euclidean (x, y) distance.

        For each candidate relation a probability is sampled uniformly from
        [0, pdf(distance)]; the relation with the highest sample wins.
        NOTE(review): output is therefore non-deterministic by design.
        Falls back to the last possible relation when no sample exceeds 0.
        """
        d = np.sqrt(np.square(data1.x - data2.x) + np.square(data1.y - data2.y))
        r = (None, 0.0)
        for values, relation in zip(self.all_possible_values, self._all_possible_relations):
            prob = uniform(0.0, self.__normpdf(d, mu=values[0], sigma=values[1]))
            r = (relation, prob) if prob > r[1] else r
        return r[0] if r[0] else self._all_possible_relations[-1]
|
{
"content_hash": "6ee3908af68ebce0989c707d2b5a0942",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 97,
"avg_line_length": 41.25806451612903,
"alnum_prop": 0.6411258795934324,
"repo_name": "yianni/rtd-dbg",
"id": "c444cf8c823d0201a55724b5946f8de009d2e310",
"size": "1303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qsr_lib/src/qsrlib_qsrs/qsr_arg_prob_relations_distance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "2610"
},
{
"name": "Python",
"bytes": "269838"
}
],
"symlink_target": ""
}
|
"""A set of utility functions."""
class cached_property:
    """Descriptor that computes a value once per instance, then caches it
    as an ordinary instance attribute that shadows the descriptor; later
    reads are plain attribute lookups. Deleting the attribute re-arms the
    property.
    """

    def __init__(self, function):
        self.__doc__ = getattr(function, '__doc__')
        self.function = function

    def __get__(self, obj, cls):
        # Accessed on the class itself: expose the descriptor object.
        if obj is None:
            return self
        result = self.function(obj)
        # Store under the wrapped function's name so the instance attribute
        # shadows this (non-data) descriptor on subsequent lookups.
        obj.__dict__[self.function.__name__] = result
        return result
|
{
"content_hash": "f5c49f8538ff0831d03a6b6b0b85a807",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 31,
"alnum_prop": 0.6053130929791272,
"repo_name": "AlanCristhian/symbolic_old",
"id": "6ffb2d00c05904d8914fe110024d60578401fd5a",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symbolic/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45335"
},
{
"name": "Shell",
"bytes": "59"
}
],
"symlink_target": ""
}
|
import re
# CHARACTER_SET pragma line; the remainder of the line is the charset name.
charset_re = re.compile('CHARACTER_SET[ ]+(?P<charset>.*)')
# A "key : value" header item, or the end of a comment block ("*/").
header_item_or_end_re = re.compile('(((?P<key>[^ ]+)(?P<space>[ ]*:[ ]*)(?P<value>.*))|(?P<end_comment>[*]/))')
# A "key : value" header item only.
header_item_re = re.compile('(?P<key>[^ ]+)(?P<space>[ ]*:[ ]*)(?P<value>.*)')
# An "rls_string <id> <text>" localisation entry.
string_entry_re = re.compile('(?P<start>rls_string[ ]+)(?P<id>[^ ]+)(?P<space>[ ]+)(?P<str>.*)')
def identity(x):
    """Return *x* unchanged (used as a no-op transform/read hook)."""
    return x
class ParseState(object):
    """Cursor over the lines of file-like *f*, decoding each with *charset*.

    ``current_line`` always holds the line under the cursor.  ``read_line``
    hands the previous line to *read_hook*, advances the cursor and
    returns that previous line.
    """

    def __init__(self, f, charset, read_hook=identity):
        self.f = f
        self.charset = charset
        self.read_hook = read_hook
        self.current_line = u''
        self.read_line()

    def read_line(self):
        previous = self.current_line
        self.read_hook(previous)
        # NOTE: uses the Python 2 iterator protocol (f.next()).
        self.current_line = self.f.next().decode(self.charset)
        return previous
def read_while(ps, f, test):
    """Apply *f* to the current line and keep consuming lines while *test*
    accepts the result; return the first rejected result (no line is
    consumed for it)."""
    while True:
        outcome = f(ps.current_line)
        if not test(outcome):
            return outcome
        ps.read_line()
def eat_whitespace(ps):
    """Advance *ps* past blank (whitespace-only) lines."""
    read_while(ps, lambda line: line, lambda line: not line.strip())
def skip_no_translate(ps):
    """When positioned on a '// DO NOT TRANSLATE' marker, skip everything
    up to and including the matching closing marker, plus any trailing
    blank lines."""
    if not ps.current_line.startswith('// DO NOT TRANSLATE'):
        return
    ps.read_line()
    read_while(ps, identity,
               lambda line: not line.startswith('// DO NOT TRANSLATE'))
    ps.read_line()
    eat_whitespace(ps)
def read_charset(lines):
    """Return the charset declared by a CHARACTER_SET pragma in *lines*,
    falling back to 'UTF-8' when no such pragma is present."""
    pragma = re.compile('CHARACTER_SET[ ]+(?P<charset>.*)')
    for line in lines:
        found = pragma.match(line)
        if found is not None:
            return found.group('charset')
    return 'UTF-8'
|
{
"content_hash": "4dad2ab0ff10377777fa51f2a1c5347c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 111,
"avg_line_length": 31.78723404255319,
"alnum_prop": 0.5823293172690763,
"repo_name": "dbbhattacharya/kitsune",
"id": "df06a0c4f64357ed7b06e016d6bc3440f725c8e6",
"size": "2317",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/storage/symbian.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
}
|
# Copyright 2012-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
import sys
import stat
import time
import platform, subprocess, operator, os, shutil, re
import collections
from mesonbuild import mlog
# Feature flags for platform-specific file-locking modules (set below).
have_fcntl = False
have_msvcrt = False
# {subproject: project_meson_version}
project_meson_versions = {}
try:
    import fcntl
    have_fcntl = True
except Exception:
    pass
try:
    import msvcrt
    have_msvcrt = True
except Exception:
    pass
from glob import glob
if os.path.basename(sys.executable) == 'meson.exe':
    # In Windows and using the MSI installed executable.
    python_command = [sys.executable, 'runpython']
else:
    python_command = [sys.executable]
# Filled in later by the entry point; None until then.
meson_command = None
def is_ascii_string(astring):
    """Return True when *astring* (str or bytes) contains only ASCII.

    Objects that are neither str nor bytes are reported as True (no check
    is performed for them).
    """
    try:
        if isinstance(astring, str):
            astring.encode('ascii')
        if isinstance(astring, bytes):
            astring.decode('ascii')
    except UnicodeError:
        # BUGFIX: str.encode raises UnicodeEncodeError, which the previous
        # `except UnicodeDecodeError` did not catch, so non-ASCII str input
        # crashed instead of returning False. UnicodeError is the common
        # base of both encode and decode errors.
        return False
    return True
def check_direntry_issues(direntry_array):
    """Warn when the preferred locale is not UTF-8 and *direntry_array*
    (a single entry or a list of entries) contains non-ASCII names.

    Windows is exempt; on POSIX a non-UTF-8 locale can make filenames with
    unicode in them undecodable.
    """
    import locale
    # Warn if the locale is not UTF-8. This can cause various unfixable issues
    # such as os.stat not being able to decode filenames with unicode in them.
    # There is no way to reset both the preferred encoding and the filesystem
    # encoding, so we can just warn about it.
    e = locale.getpreferredencoding()
    if e.upper() != 'UTF-8' and not is_windows():
        if not isinstance(direntry_array, list):
            direntry_array = [direntry_array]
        for de in direntry_array:
            if is_ascii_string(de):
                continue
            # BUGFIX: the message previously contained a stray apostrophe
            # after "Unicode-compatible".
            mlog.warning('''You are using {!r} which is not a Unicode-compatible
locale but you are trying to access a file system entry called {!r} which is
not pure ASCII. This may cause problems.
'''.format(e, de), file=sys.stderr)
# Put this in objects that should not get dumped to pickle files
# by accident: a threading.Lock cannot be pickled, so any attempt to
# pickle a holder of this object fails loudly instead of silently
# serializing state.
import threading
an_unpicklable_object = threading.Lock()
class MesonException(Exception):
    '''Exceptions thrown by Meson'''

    def get_msg_with_context(self):
        """Return the message, prefixed with "file:line" context when both
        a ``file`` and a ``lineno`` attribute have been attached."""
        prefix = ''
        if hasattr(self, 'lineno') and hasattr(self, 'file'):
            prefix = get_error_location_string(self.file, self.lineno) + ' '
        return prefix + str(self)
# Subclass of MesonException; adds no behavior, only a distinct type.
class EnvironmentException(MesonException):
    '''Exceptions thrown while processing and creating the build environment'''
class FileMode:
    # A symbolic permission string is exactly nine characters: three triads
    # for owner, group and others.
    #
    # Triad position 1: 'r' = readable, '-' = not.
    # Triad position 2: 'w' = writable, '-' = not.
    # Triad position 3:
    #   'x' = executable
    #   's' = executable with setuid/setgid set (owner/group triads only)
    #   'S' = not executable, setuid/setgid set (owner/group triads only)
    #   't' = executable with the sticky bit set ("others" triad only)
    #   'T' = not executable, sticky bit set ("others" triad only)
    #   '-' = none of these
    #
    # The meanings of 'rwx' perms is not obvious for directories; see:
    # https://www.hackinglinuxexposed.com/articles/20030424.html
    #
    # For information on this notation such as setuid/setgid/sticky bits, see:
    # https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
    symbolic_perms_regex = re.compile('[r-][w-][xsS-]'  # Owner perms
                                      '[r-][w-][xsS-]'  # Group perms
                                      '[r-][w-][xtT-]')  # Others perms

    def __init__(self, perms=None, owner=None, group=None):
        """Capture a symbolic perms string (or None) plus optional
        owner/group; the numeric mode is derived immediately."""
        self.perms_s = perms
        self.perms = self.perms_s_to_bits(perms)
        self.owner = owner
        self.group = group

    def __repr__(self):
        template = '<FileMode: {!r} owner={} group={}'
        return template.format(self.perms_s, self.owner, self.group)

    @classmethod
    def perms_s_to_bits(cls, perms_s):
        '''
        Does the opposite of stat.filemode(): converts strings of the form
        'rwxr-xr-x' to st_mode enums which can be passed to os.chmod().
        Returns -1 when perms_s is None (meaning: leave permissions alone).
        '''
        if perms_s is None:
            # No perms specified, we will not touch the permissions
            return -1
        eg = 'rwxr-xr-x'
        if not isinstance(perms_s, str):
            msg = 'Install perms must be a string. For example, {!r}'
            raise MesonException(msg.format(eg))
        if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
            msg = 'File perms {!r} must be exactly 9 chars. For example, {!r}'
            raise MesonException(msg.format(perms_s, eg))
        # Bits contributed by each accepted character at each of the nine
        # positions; '-' is absent from every mapping and contributes 0.
        position_bits = (
            {'r': stat.S_IRUSR},
            {'w': stat.S_IWUSR},
            {'x': stat.S_IXUSR, 'S': stat.S_ISUID,
             's': stat.S_IXUSR | stat.S_ISUID},
            {'r': stat.S_IRGRP},
            {'w': stat.S_IWGRP},
            {'x': stat.S_IXGRP, 'S': stat.S_ISGID,
             's': stat.S_IXGRP | stat.S_ISGID},
            {'r': stat.S_IROTH},
            {'w': stat.S_IWOTH},
            {'x': stat.S_IXOTH, 'T': stat.S_ISVTX,
             't': stat.S_IXOTH | stat.S_ISVTX},
        )
        perms = 0
        for ch, bits in zip(perms_s, position_bits):
            perms |= bits.get(ch, 0)
        return perms
class File:
    """A source or build-output file, identified by the triple
    (is_built, subdir, fname)."""

    def __init__(self, is_built, subdir, fname):
        assert(isinstance(subdir, str))
        assert(isinstance(fname, str))
        self.is_built = is_built
        self.subdir = subdir
        self.fname = fname

    def __str__(self):
        return self.relative_name()

    def __repr__(self):
        template = '<File: {0}'
        if not self.is_built:
            template += ' (not built)'
        template += '>'
        return template.format(self.relative_name())

    @staticmethod
    def from_source_file(source_root, subdir, fname):
        # Source files must already exist on disk.
        if not os.path.isfile(os.path.join(source_root, subdir, fname)):
            raise MesonException('File %s does not exist.' % fname)
        return File(False, subdir, fname)

    @staticmethod
    def from_built_file(subdir, fname):
        return File(True, subdir, fname)

    @staticmethod
    def from_absolute_file(fname):
        return File(False, '', fname)

    def rel_to_builddir(self, build_to_src):
        """Path of this file relative to the build directory; built files
        are already relative to it."""
        if self.is_built:
            return self.relative_name()
        return os.path.join(build_to_src, self.subdir, self.fname)

    def absolute_path(self, srcdir, builddir):
        base = builddir if self.is_built else srcdir
        return os.path.join(base, self.relative_name())

    def endswith(self, ending):
        return self.fname.endswith(ending)

    def split(self, s):
        return self.fname.split(s)

    def __eq__(self, other):
        mine = (self.fname, self.subdir, self.is_built)
        theirs = (other.fname, other.subdir, other.is_built)
        return mine == theirs

    def __hash__(self):
        return hash((self.fname, self.subdir, self.is_built))

    def relative_name(self):
        return os.path.join(self.subdir, self.fname)
def get_compiler_for_source(compilers, src):
    """Return the first compiler in *compilers* whose can_compile() accepts
    *src*; raise MesonException when none does."""
    chosen = next((c for c in compilers if c.can_compile(src)), None)
    if chosen is None:
        raise MesonException('No specified compiler can handle file {!s}'.format(src))
    return chosen

def classify_unity_sources(compilers, sources):
    """Group *sources* by the compiler that can build each of them;
    returns a dict {compiler: [sources]}."""
    by_compiler = {}
    for src in sources:
        comp = get_compiler_for_source(compilers, src)
        by_compiler.setdefault(comp, []).append(src)
    return by_compiler
def is_osx():
    """True when running on macOS (Darwin)."""
    return platform.system().lower() == 'darwin'

def is_linux():
    """True when running on Linux."""
    return platform.system().lower() == 'linux'

def is_android():
    """True when running on Android."""
    return platform.system().lower() == 'android'

def is_haiku():
    """True when running on Haiku."""
    return platform.system().lower() == 'haiku'

def is_openbsd():
    """True when running on OpenBSD."""
    return platform.system().lower() == 'openbsd'

def is_windows():
    """True on native Windows and on MinGW environments."""
    sysname = platform.system().lower()
    return sysname == 'windows' or 'mingw' in sysname

def is_cygwin():
    """True when running under Cygwin."""
    return platform.system().lower().startswith('cygwin')

def is_debianlike():
    """True on Debian and its derivatives (detected via /etc/debian_version)."""
    return os.path.isfile('/etc/debian_version')

def is_dragonflybsd():
    """True when running on DragonFly BSD."""
    return platform.system().lower() == 'dragonfly'

def is_freebsd():
    """True when running on FreeBSD."""
    return platform.system().lower() == 'freebsd'
def for_windows(is_cross, env):
    """Is the host machine (where compiled binaries will run) Windows?"""
    if not is_cross:
        return is_windows()
    return env.cross_info.get_host_system() == 'windows'

def for_cygwin(is_cross, env):
    """Is the host machine (where compiled binaries will run) Cygwin?"""
    if not is_cross:
        return is_cygwin()
    return env.cross_info.get_host_system() == 'cygwin'

def for_linux(is_cross, env):
    """Is the host machine (where compiled binaries will run) Linux?"""
    if not is_cross:
        return is_linux()
    return env.cross_info.get_host_system() == 'linux'

def for_darwin(is_cross, env):
    """Is the host machine (where compiled binaries will run) Darwin,
    i.e. macOS or iOS?"""
    if not is_cross:
        return is_osx()
    return env.cross_info.get_host_system() in ('darwin', 'ios')

def for_android(is_cross, env):
    """Is the host machine (where compiled binaries will run) Android?"""
    if not is_cross:
        return is_android()
    return env.cross_info.get_host_system() == 'android'

def for_haiku(is_cross, env):
    """Is the host machine (where compiled binaries will run) Haiku?"""
    if not is_cross:
        return is_haiku()
    return env.cross_info.get_host_system() == 'haiku'

def for_openbsd(is_cross, env):
    """Is the host machine (where compiled binaries will run) OpenBSD?"""
    if not is_cross:
        return is_openbsd()
    # NOTE: unlike its siblings this consults the cross config directly.
    if env.cross_info.has_host():
        return env.cross_info.config['host_machine']['system'] == 'openbsd'
    return False
def exe_exists(arglist):
    """Return True when running *arglist* succeeds (exit status 0);
    False when it exits non-zero or the executable is missing."""
    try:
        proc = subprocess.Popen(arglist, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        return proc.returncode == 0
    except FileNotFoundError:
        return False
def detect_vcs(source_dir):
    """Walk from *source_dir* toward the filesystem root looking for a VCS
    checkout (git, mercurial, subversion or bazaar).

    Returns the matching descriptor dict (with 'wc_dir' set to the working
    copy root) or None.  Detection requires both the repo directory and
    the VCS command on PATH.
    """
    vcs_systems = [
        dict(name = 'git',        cmd = 'git', repo_dir = '.git', get_rev = 'git describe --dirty=+', rev_regex = '(.*)', dep = '.git/logs/HEAD'),
        dict(name = 'mercurial',  cmd = 'hg',  repo_dir = '.hg',  get_rev = 'hg id -i',               rev_regex = '(.*)', dep = '.hg/dirstate'),
        dict(name = 'subversion', cmd = 'svn', repo_dir = '.svn', get_rev = 'svn info',               rev_regex = 'Revision: (.*)', dep = '.svn/wc.db'),
        dict(name = 'bazaar',     cmd = 'bzr', repo_dir = '.bzr', get_rev = 'bzr revno',              rev_regex = '(.*)', dep = '.bzr'),
    ]
    # Check source_dir itself first, then each parent up to the root.
    segs = source_dir.replace('\\', '/').split('/')
    for i in range(len(segs), -1, -1):
        curdir = '/'.join(segs[:i])
        for vcs in vcs_systems:
            if os.path.isdir(os.path.join(curdir, vcs['repo_dir'])) and shutil.which(vcs['cmd']):
                vcs['wc_dir'] = curdir
                return vcs
    return None
def grab_leading_numbers(vstr, strict=False):
    """Return the leading dot-separated integer components of *vstr*.

    Parsing stops at the first component that is not an integer; with
    strict=True such a component raises MesonException instead.
    """
    components = []
    for part in vstr.rstrip('.').split('.'):
        try:
            components.append(int(part))
        except ValueError as e:
            if strict:
                msg = 'Invalid version to compare against: {!r}; only ' \
                      'numeric digits separated by "." are allowed: ' + str(e)
                raise MesonException(msg.format(vstr))
            break
    return components
def make_same_len(listA, listB):
    """Pad the shorter of the two lists with zeros, in place, until both
    have the same length."""
    target = max(len(listA), len(listB))
    for lst in (listA, listB):
        lst.extend([0] * (target - len(lst)))
# Leading numeric part of a version string: digits and dots only.
numpart = re.compile('[0-9.]+')
def version_compare(vstr1, vstr2, strict=False):
    """Compare version *vstr1* against condition string *vstr2*.

    *vstr2* may be prefixed with one of >=, <=, !=, ==, =, >, < (no prefix
    means equality).  Raises MesonException when *vstr1* has no leading
    numeric part.
    """
    match = numpart.match(vstr1.strip())
    if match is None:
        msg = 'Uncomparable version string {!r}.'
        raise MesonException(msg.format(vstr1))
    vstr1 = match.group(0)
    # Longer prefixes first so that '>=' is not mistaken for '>'.
    prefix_ops = (('>=', operator.ge), ('<=', operator.le),
                  ('!=', operator.ne), ('==', operator.eq),
                  ('=', operator.eq), ('>', operator.gt),
                  ('<', operator.lt))
    cmpop = operator.eq
    for prefix, op in prefix_ops:
        if vstr2.startswith(prefix):
            cmpop = op
            vstr2 = vstr2[len(prefix):]
            break
    varr1 = grab_leading_numbers(vstr1, strict)
    varr2 = grab_leading_numbers(vstr2, strict)
    make_same_len(varr1, varr2)
    return cmpop(varr1, varr2)
def version_compare_many(vstr1, conditions):
    """Check *vstr1* against one or several condition strings.

    Returns a tuple (all_satisfied, failed_conditions, passed_conditions).
    """
    if not isinstance(conditions, (list, tuple, frozenset)):
        conditions = [conditions]
    passed = []
    failed = []
    for req in conditions:
        bucket = passed if version_compare(vstr1, req, strict=True) else failed
        bucket.append(req)
    return not failed, failed, passed
def version_compare_condition_with_min(condition, minimum):
    """Can some version satisfying *condition* be at least *minimum*?

    Upper-bound-ish conditions ('<=', '!=', '<') immediately return True;
    the rest compare the numeric parts of *minimum* against the condition's
    version.  Raises MesonException when *minimum* has no leading numeric
    part.
    """
    match = numpart.match(minimum.strip())
    if match is None:
        msg = 'Uncomparable version string {!r}.'
        raise MesonException(msg.format(minimum))
    minimum = match.group(0)
    # BUGFIX: the branches that return True used to be followed by
    # unreachable `condition = condition[2:]` statements; removed.
    if condition.startswith('>='):
        cmpop = operator.le
        condition = condition[2:]
    elif condition.startswith('<='):
        return True
    elif condition.startswith('!='):
        return True
    elif condition.startswith('=='):
        cmpop = operator.le
        condition = condition[2:]
    elif condition.startswith('='):
        cmpop = operator.le
        condition = condition[1:]
    elif condition.startswith('>'):
        cmpop = operator.lt
        condition = condition[1:]
    elif condition.startswith('<'):
        return True
    else:
        cmpop = operator.le
    varr1 = grab_leading_numbers(minimum, True)
    varr2 = grab_leading_numbers(condition, True)
    make_same_len(varr1, varr2)
    return cmpop(varr1, varr2)
def version_compare_condition_with_max(condition, maximum):
    """Mirror of version_compare_condition_with_min with the comparison
    reversed against an upper bound *maximum*.

    Lower-bound-ish conditions ('>=', '!=', '>') immediately return False;
    the rest compare the numeric parts of *maximum* against the condition's
    version.  Raises MesonException when *maximum* has no leading numeric
    part.
    """
    match = numpart.match(maximum.strip())
    if match is None:
        msg = 'Uncomparable version string {!r}.'
        raise MesonException(msg.format(maximum))
    maximum = match.group(0)
    # BUGFIX: the branches that return False used to be followed by
    # unreachable `condition = condition[...]` statements; removed.
    if condition.startswith('>='):
        return False
    elif condition.startswith('<='):
        cmpop = operator.ge
        condition = condition[2:]
    elif condition.startswith('!='):
        return False
    elif condition.startswith('=='):
        cmpop = operator.ge
        condition = condition[2:]
    elif condition.startswith('='):
        cmpop = operator.ge
        condition = condition[1:]
    elif condition.startswith('>'):
        return False
    elif condition.startswith('<'):
        cmpop = operator.gt
        condition = condition[2:]
    else:
        cmpop = operator.ge
    varr1 = grab_leading_numbers(maximum, True)
    varr2 = grab_leading_numbers(condition, True)
    make_same_len(varr1, varr2)
    return cmpop(varr1, varr2)
def default_libdir():
    """Guess the default library install dir for the current system."""
    if is_debianlike():
        # Debian multiarch: ask dpkg for the triplet, e.g. lib/x86_64-linux-gnu.
        try:
            proc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.DEVNULL)
            stdo = proc.communicate()[0]
            if proc.returncode == 0:
                return 'lib/' + stdo.decode().strip()
        except Exception:
            pass
    # Red Hat style distros use a real /usr/lib64 directory (not a symlink).
    if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
        return 'lib64'
    return 'lib'
def default_libexecdir():
    """Return the default libexec dir (not auto-detectable; fixed at build time)."""
    return 'libexec'
def default_prefix():
    """Return the platform's conventional default install prefix."""
    if is_windows():
        return 'c:/'
    return '/usr/local'
def get_library_dirs():
    """Return the list of directories searched for system libraries.

    Platform-specific; on Unix the list is assembled from well-known
    prefixes plus per-architecture multiarch directories.
    """
    if is_windows():
        return ['C:/mingw/lib'] # Fixme
    if is_osx():
        return ['/usr/lib'] # Fix me as well.
    # The following is probably Debian/Ubuntu specific.
    # /usr/local/lib is first because it contains stuff
    # installed by the sysadmin and is probably more up-to-date
    # than /usr/lib. If you feel that this search order is
    # problematic, please raise the issue on the mailing list.
    unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
    plat = subprocess.check_output(['uname', '-m']).decode().strip()
    # This is a terrible hack. I admit it and I'm really sorry.
    # I just don't know what the correct solution is.
    if plat == 'i686':
        plat = 'i386'
    if plat.startswith('arm'):
        plat = 'arm'
    unixdirs += glob('/usr/lib/' + plat + '*')
    if os.path.exists('/usr/lib64'):
        unixdirs.append('/usr/lib64')
    # Scan /lib only once; the original repeated this glob and produced
    # duplicate entries in the returned search path.
    unixdirs += glob('/lib/' + plat + '*')
    if os.path.exists('/lib64'):
        unixdirs.append('/lib64')
    return unixdirs
def has_path_sep(name, sep='/\\'):
    'Checks if any of the specified @sep path separators are in @name'
    return any(separator in name for separator in sep)
def do_replacement(regex, line, format, confdata):
    """Expand configuration templates in *line* using *regex*.

    Supports meson-style ``@var@`` and cmake-style ``${var}`` templates
    (chosen by *format*); ``\\@`` / ``\\${`` escape a literal tag.
    Returns (new_line, missing_variable_names).
    """
    missing = set()
    escaped_tag = '\\${' if format == 'cmake' else '\\@'
    open_tag = '${' if format == 'cmake' else '@'

    def expand(match):
        token = match.group(0)
        # A run of escape characters before the tag: emit half of them.
        if token.endswith('\\'):
            return '\\' * ((match.end(0) - match.start(0)) // 2)
        # A single escaped tag: emit the literal tag character(s).
        if token == escaped_tag:
            return open_tag
        # An actual template variable.
        name = match.group(1)
        if name not in confdata:
            missing.add(name)
            return ''
        value, _ = confdata.get(name)
        if isinstance(value, str):
            return value
        if isinstance(value, int):
            return str(value)
        msg = 'Tried to replace variable {!r} value with ' \
              'something other than a string or int: {!r}'
        raise MesonException(msg.format(name, value))

    return re.sub(regex, expand, line), missing
def do_mesondefine(line, confdata):
    """Translate a ``#mesondefine VAR`` template line into a C preprocessor line.

    Booleans become ``#define``/``#undef``, ints and strings become
    ``#define VAR value``; an unknown variable becomes a commented-out
    ``#undef``.  Raises MesonException on malformed input or value types.
    """
    tokens = line.split()
    if len(tokens) != 2:
        raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())
    name = tokens[1]
    try:
        # confdata.get is expected to raise KeyError for unknown names and
        # to return a (value, description) pair otherwise.
        value, _ = confdata.get(name)
    except KeyError:
        return '/* #undef %s */\n' % name
    if isinstance(value, bool):
        return ('#define %s\n' if value else '#undef %s\n') % name
    if isinstance(value, int):
        return '#define %s %d\n' % (name, value)
    if isinstance(value, str):
        return '#define %s %s\n' % (name, value)
    raise MesonException('#mesondefine argument "%s" is of unknown type.' % name)
def do_conf_file(src, dst, confdata, format, encoding='utf-8'):
    """Generate *dst* from the template *src*, substituting values from *confdata*.

    *format* is 'meson', 'cmake' or 'cmake@' and selects both the variable
    syntax (@var@ vs ${var}) and the define token (#mesondefine vs
    #cmakedefine).  Returns (missing_variables, confdata_useless); the
    latter is True when *confdata* is empty and no substitution token was
    found, so the caller can suggest using the `copy:` kwarg instead.
    Raises MesonException on I/O errors or an unknown *format*.
    """
    try:
        with open(src, encoding=encoding) as f:
            data = f.readlines()
    except Exception as e:
        raise MesonException('Could not read input file %s: %s' % (src, str(e)))
    # Only allow (a-z, A-Z, 0-9, _, -) as valid characters for a define
    # Also allow escaping '@' with '\@'
    if format in ['meson', 'cmake@']:
        regex = re.compile(r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@')
    elif format == 'cmake':
        regex = re.compile(r'(?:\\\\)+(?=\\?\$)|\\\${|\${([-a-zA-Z0-9_]+)}')
    else:
        raise MesonException('Format "{}" not handled'.format(format))
    search_token = '#mesondefine'
    if format != 'meson':
        search_token = '#cmakedefine'
    result = []
    missing_variables = set()
    # Detect when the configuration data is empty and no tokens were found
    # during substitution so we can warn the user to use the `copy:` kwarg.
    confdata_useless = not confdata.keys()
    for line in data:
        if line.startswith(search_token):
            confdata_useless = False
            line = do_mesondefine(line, confdata)
        else:
            line, missing = do_replacement(regex, line, format, confdata)
            missing_variables.update(missing)
            if missing:
                confdata_useless = False
        result.append(line)
    # Write to a sibling temp file, copy the source's permission bits, and
    # only swap it in when the content changed (avoids spurious rebuilds).
    dst_tmp = dst + '~'
    try:
        with open(dst_tmp, 'w', encoding=encoding) as f:
            f.writelines(result)
    except Exception as e:
        raise MesonException('Could not write output file %s: %s' % (dst, str(e)))
    shutil.copymode(src, dst_tmp)
    replace_if_different(dst, dst_tmp)
    return missing_variables, confdata_useless
CONF_C_PRELUDE = '''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
'''
CONF_NASM_PRELUDE = '''; Autogenerated by the Meson build system.
; Do not edit, your changes will be lost.
'''
def dump_conf_header(ofilename, cdata, output_format):
    """Write configuration data to *ofilename* as a C or NASM header.

    :param ofilename: destination path
    :param cdata: mapping whose .get(key) yields (value, description)
    :param output_format: 'c' or 'nasm'
    :raises MesonException: on an unknown output format or value type
    """
    if output_format == 'c':
        prelude = CONF_C_PRELUDE
        prefix = '#'
    elif output_format == 'nasm':
        prelude = CONF_NASM_PRELUDE
        prefix = '%'
    else:
        # Previously an unknown format fell through and crashed later with
        # a NameError; fail early with a clear message instead.
        raise MesonException('Unknown configuration header format "{}".'.format(output_format))
    # Write to a temp file and only swap it in when content changed, so an
    # unchanged header does not trigger rebuilds.
    ofilename_tmp = ofilename + '~'
    with open(ofilename_tmp, 'w', encoding='utf-8') as ofile:
        ofile.write(prelude)
        for k in sorted(cdata.keys()):
            (v, desc) = cdata.get(k)
            if desc:
                if output_format == 'c':
                    ofile.write('/* %s */\n' % desc)
                elif output_format == 'nasm':
                    # NASM has no block comments; emit one ';' line per row.
                    for line in desc.split('\n'):
                        ofile.write('; %s\n' % line)
            if isinstance(v, bool):
                if v:
                    ofile.write('%sdefine %s\n\n' % (prefix, k))
                else:
                    ofile.write('%sundef %s\n\n' % (prefix, k))
            elif isinstance(v, (int, str)):
                ofile.write('%sdefine %s %s\n\n' % (prefix, k, v))
            else:
                raise MesonException('Unknown data type in configuration file entry: ' + k)
    replace_if_different(ofilename, ofilename_tmp)
def replace_if_different(dst, dst_tmp):
    """Move *dst_tmp* over *dst* only when the contents differ.

    Leaving an identical destination untouched keeps its mtime stable and
    prevents unnecessary rebuilds.  *dst_tmp* is consumed either way.
    """
    try:
        with open(dst, 'rb') as f1, open(dst_tmp, 'rb') as f2:
            identical = f1.read() == f2.read()
    except FileNotFoundError:
        # No destination yet: definitely "different".
        identical = False
    if identical:
        os.unlink(dst_tmp)
    else:
        os.replace(dst_tmp, dst)
def listify(item, flatten=True, unholder=False):
    '''
    Returns a list with all args embedded in a list if they are not a list.
    This function preserves order.
    @flatten: Convert lists of lists to a flat list
    @unholder: Replace each item with the object it holds, if required
    Note: unholding only works recursively when flattening
    '''
    if not isinstance(item, list):
        if unholder and hasattr(item, 'held_object'):
            return [item.held_object]
        return [item]
    out = []
    for element in item:
        if unholder and hasattr(element, 'held_object'):
            element = element.held_object
        if flatten and isinstance(element, list):
            out.extend(listify(element, flatten=True, unholder=unholder))
        else:
            out.append(element)
    return out


def extract_as_list(dict_object, *keys, pop=False, **kwargs):
    '''
    Extracts all values from given dict_object and listifies them.
    Extra keyword arguments are forwarded to listify().
    '''
    fetch = dict_object.pop if pop else dict_object.get
    # If there's only one key, we don't return a list with one element
    if len(keys) == 1:
        return listify(fetch(keys[0], []), **kwargs)
    # Return a list of values corresponding to *keys
    return [listify(fetch(key, []), **kwargs) for key in keys]
def typeslistify(item, types):
    '''
    Ensure that type(@item) is one of @types or a
    list of items all of which are of type @types
    '''
    if isinstance(item, types):
        return [item]
    if not isinstance(item, list):
        raise MesonException('Item must be a list or one of {!r}'.format(types))
    for element in item:
        # None entries are tolerated inside lists.
        if element is not None and not isinstance(element, types):
            raise MesonException('List item must be one of {!r}'.format(types))
    return item


def stringlistify(item):
    '''Ensure *item* is a string or a list of strings; return it as a list.'''
    return typeslistify(item, str)
def expand_arguments(args):
    """Expand '@file' response-file arguments into their whitespace-split
    contents; other arguments pass through unchanged.

    Returns the expanded list, or None (after printing an error) when a
    response file cannot be read.
    """
    expanded = []
    for arg in args:
        if not arg.startswith('@'):
            expanded.append(arg)
            continue
        args_file = arg[1:]
        try:
            with open(args_file) as f:
                expanded += f.read().split()
        except Exception as e:
            print('Error expanding command line arguments, %s not found' % args_file)
            print(e)
            return None
    return expanded
def Popen_safe(args, write=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):
    """Run *args* via subprocess.Popen and return (process, stdout, stderr)
    with the output streams decoded to text.

    *write* is optional text sent to the child's stdin.
    """
    import locale
    encoding = locale.getpreferredencoding()
    # On old Pythons, or when the console is not UTF-8 (e.g. Windows code
    # pages), universal_newlines would decode with the wrong charset, so
    # fall back to the variant that decodes the bytes manually.
    if sys.version_info < (3, 6) or not sys.stdout.encoding or encoding.upper() != 'UTF-8':
        return Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)
    p = subprocess.Popen(args, universal_newlines=True, close_fds=False,
                         stdout=stdout, stderr=stderr, **kwargs)
    o, e = p.communicate(write)
    return p, o, e
def Popen_safe_legacy(args, write=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):
    """Popen_safe() fallback: run the child in binary mode and decode the
    output manually (with replacement for undecodable bytes), normalizing
    Windows line endings.  Returns (process, stdout_text, stderr_text).
    """
    proc = subprocess.Popen(args, universal_newlines=False,
                            stdout=stdout, stderr=stderr, **kwargs)
    stdin_data = write.encode('utf-8') if write is not None else None
    out, err = proc.communicate(stdin_data)

    def decode(raw, encoding):
        # Fall back to the platform default when the stream has no encoding.
        if encoding:
            text = raw.decode(encoding=encoding, errors='replace')
        else:
            text = raw.decode(errors='replace')
        return text.replace('\r\n', '\n')

    if out is not None:
        out = decode(out, sys.stdout.encoding)
    if err is not None:
        err = decode(err, sys.stderr.encoding)
    return proc, out, err
def iter_regexin_iter(regexiter, initer):
    '''
    Takes each regular expression in @regexiter and tries to search for it in
    every item in @initer. If there is a match, returns that match.
    Else returns False.
    '''
    for pattern in regexiter:
        for candidate in initer:
            # Non-string entries (e.g. File objects) are skipped.
            if not isinstance(candidate, str):
                continue
            found = re.search(pattern, candidate)
            if found:
                return found.group()
    return False
def _substitute_values_check_errors(command, values):
    """Validate that the templates used in *command* are consistent with *values*.

    Helper for substitute_values(); raises MesonException when the command
    references input/output-derived templates that were not provided, when
    @PLAINNAME@/@BASENAME@ are used with multiple inputs, or when an
    indexed @INPUTnn@/@OUTPUTnn@ template is out of range.  Does not
    modify its arguments; returns None on success.
    """
    # Error checking
    inregex = ('@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@')
    outregex = ('@OUTPUT([0-9]+)?@', '@OUTDIR@')
    if '@INPUT@' not in values:
        # Error out if any input-derived templates are present in the command
        match = iter_regexin_iter(inregex, command)
        if match:
            m = 'Command cannot have {!r}, since no input files were specified'
            raise MesonException(m.format(match))
    else:
        if len(values['@INPUT@']) > 1:
            # Error out if @PLAINNAME@ or @BASENAME@ is present in the command
            match = iter_regexin_iter(inregex[1:], command)
            if match:
                raise MesonException('Command cannot have {!r} when there is '
                                     'more than one input file'.format(match))
        # Error out if an invalid @INPUTnn@ template was specified
        for each in command:
            if not isinstance(each, str):
                continue
            match = re.search(inregex[0], each)
            if match and match.group() not in values:
                m = 'Command cannot have {!r} since there are only {!r} inputs'
                raise MesonException(m.format(match.group(), len(values['@INPUT@'])))
    if '@OUTPUT@' not in values:
        # Error out if any output-derived templates are present in the command
        match = iter_regexin_iter(outregex, command)
        if match:
            m = 'Command cannot have {!r} since there are no outputs'
            raise MesonException(m.format(match))
    else:
        # Error out if an invalid @OUTPUTnn@ template was specified
        for each in command:
            if not isinstance(each, str):
                continue
            match = re.search(outregex[0], each)
            if match and match.group() not in values:
                m = 'Command cannot have {!r} since there are only {!r} outputs'
                raise MesonException(m.format(match.group(), len(values['@OUTPUT@'])))
def substitute_values(command, values):
    '''
    Substitute the template strings in the @values dict into the list of
    strings @command and return a new list. For a full list of the templates,
    see get_filenames_templates_dict()
    If multiple inputs/outputs are given in the @values dictionary, we
    substitute @INPUT@ and @OUTPUT@ only if they are the entire string, not
    just a part of it, and in that case we substitute *all* of them.
    '''
    # Validate templates against the provided values first.
    _substitute_values_check_errors(command, values)
    # Regex matching every template key except the multi-valued ones.
    scalar_keys = [re.escape(key) for key in values if key not in ('@INPUT@', '@OUTPUT@')]
    scalar_rx = re.compile('|'.join(scalar_keys)) if scalar_keys else None
    result = []
    for arg in command:
        if not isinstance(arg, str):
            result.append(arg)
            continue
        if '@INPUT@' in arg:
            inputs = values['@INPUT@']
            if arg == '@INPUT@':
                # Whole-string template: splice in every input.
                result.extend(inputs)
            elif len(inputs) == 1:
                result.append(arg.replace('@INPUT@', inputs[0]))
            else:
                raise MesonException("Command has '@INPUT@' as part of a "
                                     "string and more than one input file")
            continue
        if '@OUTPUT@' in arg:
            outputs = values['@OUTPUT@']
            if arg == '@OUTPUT@':
                result.extend(outputs)
            elif len(outputs) == 1:
                result.append(arg.replace('@OUTPUT@', outputs[0]))
            else:
                raise MesonException("Command has '@OUTPUT@' as part of a "
                                     "string and more than one output file")
            continue
        # Exact template-string match: direct lookup beats a regex replace.
        if arg in values:
            result.append(values[arg])
        elif scalar_rx:
            result.append(scalar_rx.sub(lambda m: values[m.group(0)], arg))
        else:
            result.append(arg)
    return result
def get_filenames_templates_dict(inputs, outputs):
    '''
    Create a dictionary with template strings as keys and values as values for
    the following templates:
    @INPUT@  - the full path to one or more input files, from @inputs
    @OUTPUT@ - the full path to one or more output files, from @outputs
    @OUTDIR@ - the full path to the directory containing the output files
    If there is only one input file, the following keys are also created:
    @PLAINNAME@ - the filename of the input file
    @BASENAME@ - the filename of the input file with the extension removed
    If there is more than one input file, the following keys are also created:
    @INPUT0@, @INPUT1@, ... one for each input file
    If there is more than one output file, the following keys are also created:
    @OUTPUT0@, @OUTPUT1@, ... one for each output file
    '''
    values = {}
    if inputs:
        values['@INPUT@'] = inputs
        # Indexed templates: @INPUT0@, @INPUT1@, ...
        values.update({'@INPUT{}@'.format(idx): path for idx, path in enumerate(inputs)})
        if len(inputs) == 1:
            plain = os.path.basename(inputs[0])
            values['@PLAINNAME@'] = plain
            values['@BASENAME@'] = os.path.splitext(plain)[0]
    if outputs:
        values['@OUTPUT@'] = outputs
        values.update({'@OUTPUT{}@'.format(idx): path for idx, path in enumerate(outputs)})
        # Outdir is shared by all outputs; many external programs fail on
        # empty arguments, so substitute '.' for an empty dirname.
        values['@OUTDIR@'] = os.path.dirname(outputs[0]) or '.'
    return values
def _make_tree_writable(topdir):
    """Make every directory and regular file under *topdir* readable and
    writable by the owner, so a subsequent removal cannot fail on
    read-only entries."""
    for dirpath, _, filenames in os.walk(topdir):
        os.chmod(dirpath, os.stat(dirpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
        for filename in filenames:
            full = os.path.join(dirpath, filename)
            if os.path.isfile(full):
                os.chmod(full, os.stat(full).st_mode | stat.S_IWRITE | stat.S_IREAD)


def windows_proof_rmtree(f):
    """Remove the tree *f*, retrying with increasing delays.

    On Windows a file held open by another process (e.g. an anti-virus
    scanner) cannot be deleted; retrying is the only workaround.
    """
    _make_tree_writable(f)
    for delay in (0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2):
        try:
            shutil.rmtree(f)
            return
        except FileNotFoundError:
            # Someone else already removed it; job done.
            return
        except (OSError, PermissionError):
            time.sleep(delay)
    # Try one last time and throw if it fails.
    shutil.rmtree(f)
def detect_subprojects(spdir_name, current_dir='', result=None):
    """Recursively collect subproject candidates under *spdir_name*.

    Scans current_dir/spdir_name for subdirectories and .wrap files and
    maps each candidate name to the list of paths found for it (nested
    subproject dirs are searched recursively).  The wrap download cache
    directory 'packagecache' is skipped.
    """
    if result is None:
        result = {}
    spdir = os.path.join(current_dir, spdir_name)
    if not os.path.exists(spdir):
        return result
    for trial in glob(os.path.join(spdir, '*')):
        basename = os.path.basename(trial)
        # Skip the wrap download cache.  Compare the basename: 'trial' is a
        # full path and could never equal the bare directory name, so the
        # original comparison never matched.
        if basename == 'packagecache':
            continue
        append_this = True
        if os.path.isdir(trial):
            detect_subprojects(spdir_name, trial, result)
        elif trial.endswith('.wrap') and os.path.isfile(trial):
            basename = os.path.splitext(basename)[0]
        else:
            append_this = False
        if append_this:
            result.setdefault(basename, []).append(trial)
    return result
def get_error_location_string(fname, lineno):
    """Format a 'file:line:' prefix for error messages."""
    return '%s:%s:' % (fname, lineno)
def substring_is_in_list(substr, strlist):
    """Return True when any string in *strlist* contains *substr*."""
    return any(substr in entry for entry in strlist)
# MutableSet must come from collections.abc: the deprecated aliases in the
# 'collections' namespace were removed in Python 3.10.
import collections.abc


class OrderedSet(collections.abc.MutableSet):
    """A set that preserves the order in which items are added, by first
    insertion.
    """
    def __init__(self, iterable=None):
        # Insertion-ordered backing store; only the keys are meaningful.
        self.__container = collections.OrderedDict()
        if iterable:
            self.update(iterable)

    def __contains__(self, value):
        return value in self.__container

    def __iter__(self):
        return iter(self.__container.keys())

    def __len__(self):
        return len(self.__container)

    def __repr__(self):
        # Don't print 'OrderedSet("")' for an empty set.
        if self.__container:
            return 'OrderedSet("{}")'.format(
                '", "'.join(repr(e) for e in self.__container.keys()))
        return 'OrderedSet()'

    def __reversed__(self):
        return reversed(self.__container)

    def add(self, value):
        self.__container[value] = None

    def discard(self, value):
        if value in self.__container:
            del self.__container[value]

    def update(self, iterable):
        """Add every item of *iterable*, preserving first-seen order."""
        for item in iterable:
            self.__container[item] = None

    def difference(self, set_):
        """Return a new OrderedSet with the elements not present in *set_*."""
        return type(self)(e for e in self if e not in set_)
class BuildDirLock:
    """Exclusive inter-process lock on a Meson build directory.

    Used as a context manager.  Uses fcntl on POSIX and msvcrt on Windows;
    the have_fcntl/have_msvcrt flags are assumed to be set at import time
    elsewhere in this module.  Raises MesonException when another Meson
    process already holds the lock.
    """
    def __init__(self, builddir):
        # The lock file lives in meson-private so it disappears with the
        # build directory.
        self.lockfilename = os.path.join(builddir, 'meson-private/meson.lock')
    def __enter__(self):
        self.lockfile = open(self.lockfilename, 'w')
        try:
            # Non-blocking: fail immediately instead of waiting for the
            # other process to finish.
            if have_fcntl:
                fcntl.flock(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            elif have_msvcrt:
                msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
        except (BlockingIOError, PermissionError):
            self.lockfile.close()
            raise MesonException('Some other Meson process is already using this build directory. Exiting.')
    def __exit__(self, *args):
        if have_fcntl:
            fcntl.flock(self.lockfile, fcntl.LOCK_UN)
        elif have_msvcrt:
            msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
        self.lockfile.close()
|
{
"content_hash": "3503bb489b6f3d389e0b7756900e50d6",
"timestamp": "",
"source": "github",
"line_count": 1129,
"max_line_length": 152,
"avg_line_length": 34.425155004428696,
"alnum_prop": 0.5853445170586117,
"repo_name": "jeandet/meson",
"id": "1b9cb42b8d61c859756c7d93b56c1cac016ca6ed",
"size": "38866",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mesonbuild/mesonlib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "868"
},
{
"name": "C",
"bytes": "143772"
},
{
"name": "C#",
"bytes": "949"
},
{
"name": "C++",
"bytes": "27136"
},
{
"name": "CMake",
"bytes": "1780"
},
{
"name": "D",
"bytes": "4573"
},
{
"name": "Dockerfile",
"bytes": "754"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "4590"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "Inno Setup",
"bytes": "372"
},
{
"name": "Java",
"bytes": "2125"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "135"
},
{
"name": "Meson",
"bytes": "321893"
},
{
"name": "Objective-C",
"bytes": "1092"
},
{
"name": "Objective-C++",
"bytes": "332"
},
{
"name": "Python",
"bytes": "1873182"
},
{
"name": "Roff",
"bytes": "301"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "2083"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9480"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
}
|
from threading import Thread
from flask import current_app, render_template
from flask.ext.mail import Message
from . import mail
def send_async_email(app, msg):
    """Send *msg* from a worker thread.

    An explicit application context is pushed because Flask context locals
    are not available outside the thread that handled the request.
    """
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render *template* ('.txt' and '.html' variants) with **kwargs and
    send it to *to* on a background thread.

    Returns the started Thread so callers can join() it if needed.
    """
    # Grab the real app object: the worker thread cannot use the proxy.
    app = current_app._get_current_object()
    message = Message(app.config['FARMSTAND_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
                      sender=app.config['FARMSTAND_MAIL_SENDER'], recipients=[to])
    message.body = render_template(template + '.txt', **kwargs)
    message.html = render_template(template + '.html', **kwargs)
    worker = Thread(target=send_async_email, args=[app, message])
    worker.start()
    return worker
|
{
"content_hash": "6c23af01ac1ecfced7e9741d94fc81b9",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 33.95,
"alnum_prop": 0.6671575846833578,
"repo_name": "codeforamerica/westsac-farm-stand",
"id": "05d06450f99b5aaab9e1ee5b22de31d42007cca5",
"size": "679",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/email.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5406"
},
{
"name": "HTML",
"bytes": "20060"
},
{
"name": "Python",
"bytes": "32312"
}
],
"symlink_target": ""
}
|
import gzip
import os
from collections import defaultdict
from unittest.mock import patch
from parameterized import parameterized
from torchtext.datasets.conll2000chunking import CoNLL2000Chunking
from ..common.case_utils import TempDirMixin, zip_equal, get_random_unicode
from ..common.torchtext_test_case import TorchtextTestCase
def _get_mock_dataset(root_dir):
    """Create a gzipped mock of the CoNLL-2000 chunking dataset on disk.

    root_dir: directory to the mocked dataset

    Returns a dict mapping split name ('train'/'test') to the list of
    expected (tokens, pos_labels, chunk_labels) samples, for comparison
    against what the dataset loader yields.
    """
    base_dir = os.path.join(root_dir, "CoNLL2000Chunking")
    temp_dataset_dir = os.path.join(base_dir, "temp_dataset_dir")
    os.makedirs(temp_dataset_dir, exist_ok=True)
    # Seed is bumped per sample so each sample gets distinct random text.
    seed = 1
    mocked_data = defaultdict(list)
    for file_name in ("train.txt", "test.txt"):
        txt_file = os.path.join(temp_dataset_dir, file_name)
        mocked_lines = mocked_data[os.path.splitext(file_name)[0]]
        with open(txt_file, "w", encoding="utf-8") as f:
            for i in range(5):
                rand_strings = [get_random_unicode(seed)]
                rand_label_1 = [get_random_unicode(seed)]
                rand_label_2 = [get_random_unicode(seed)]
                # one token per line (each sample ends with an extra \n)
                for rand_string, label_1, label_2 in zip(rand_strings, rand_label_1, rand_label_2):
                    f.write(f"{rand_string} {label_1} {label_2}\n")
                f.write("\n")
                dataset_line = (rand_strings, rand_label_1, rand_label_2)
                # append line to correct dataset split
                mocked_lines.append(dataset_line)
                seed += 1
        # create gz file from dataset folder
        compressed_dataset_path = os.path.join(base_dir, f"{file_name}.gz")
        with gzip.open(compressed_dataset_path, "wb") as gz_file, open(txt_file, "rb") as file_in:
            gz_file.writelines(file_in)
    return mocked_data
class TestCoNLL2000Chunking(TempDirMixin, TorchtextTestCase):
    """Tests the CoNLL2000Chunking dataset loader against a mocked on-disk copy."""
    # Shared across all tests; populated once in setUpClass.
    root_dir = None
    samples = []
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.root_dir = cls.get_base_temp_dir()
        cls.samples = _get_mock_dataset(os.path.join(cls.root_dir, "datasets"))
        # Bypass the download-cache hash verification so the mocked archives
        # are accepted as if they were the real downloads.
        cls.patcher = patch("torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True)
        cls.patcher.start()
    @classmethod
    def tearDownClass(cls):
        cls.patcher.stop()
        super().tearDownClass()
    @parameterized.expand(["train", "test"])
    def test_conll2000chunking(self, split):
        # The samples yielded by the dataset must match the mocked data, in order.
        dataset = CoNLL2000Chunking(root=self.root_dir, split=split)
        samples = list(dataset)
        expected_samples = self.samples[split]
        for sample, expected_sample in zip_equal(samples, expected_samples):
            self.assertEqual(sample, expected_sample)
    @parameterized.expand(["train", "test"])
    def test_conll2000chunking_split_argument(self, split):
        # Passing a 1-tuple for `split` must behave like passing the bare string.
        dataset1 = CoNLL2000Chunking(root=self.root_dir, split=split)
        (dataset2,) = CoNLL2000Chunking(root=self.root_dir, split=(split,))
        for d1, d2 in zip_equal(dataset1, dataset2):
            self.assertEqual(d1, d2)
|
{
"content_hash": "d06b81ef575e121085a9d31dca9b1eef",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 103,
"avg_line_length": 39.29113924050633,
"alnum_prop": 0.6398195876288659,
"repo_name": "pytorch/text",
"id": "5f6e85180fd0e744a8473dae0a66c722629ff517",
"size": "3104",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/torchtext_unittest/datasets/test_conll2000chunking.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5989"
},
{
"name": "C",
"bytes": "1165"
},
{
"name": "C++",
"bytes": "103773"
},
{
"name": "CMake",
"bytes": "6607"
},
{
"name": "Dockerfile",
"bytes": "1632"
},
{
"name": "Python",
"bytes": "761434"
},
{
"name": "Shell",
"bytes": "19559"
}
],
"symlink_target": ""
}
|
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import Comparator, hybrid_method, hybrid_property
from sqlalchemy.orm import joinedload, noload
from indico.core.db.sqlalchemy import PyIntEnum, db
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.core.permissions import get_available_permissions
from indico.util.decorators import classproperty, strict_classproperty
from indico.util.enum import IndicoEnum
from indico.util.string import format_repr
class PrincipalType(int, IndicoEnum):
    """Discriminator for the kind of entity an ACL entry refers to.

    The int values are stored in the database `type` column; do not
    renumber existing members.
    """
    user = 1
    local_group = 2
    multipass_group = 3
    email = 4
    network = 5
    event_role = 6
    category_role = 7
    registration_form = 8
def _make_check(type_, allow_emails, allow_networks, allow_event_roles, allow_category_roles,
                allow_registration_forms, *cols):
    """Build a CHECK constraint ensuring that, for rows of *type_*, exactly
    the columns in *cols* are non-NULL and every other principal column is
    NULL.  The allow_* flags decide which optional columns participate.
    """
    all_cols = {'user_id', 'local_group_id', 'mp_group_provider', 'mp_group_name'}
    for enabled, col in ((allow_emails, 'email'),
                         (allow_networks, 'ip_network_group_id'),
                         (allow_event_roles, 'event_role_id'),
                         (allow_category_roles, 'category_role_id'),
                         (allow_registration_forms, 'registration_form_id')):
        if enabled:
            all_cols.add(col)
    required_cols = all_cols & set(cols)
    forbidden_cols = all_cols - required_cols
    # Sorted order keeps the generated SQL stable across runs.
    criteria = ['{} IS NULL'.format(col) for col in sorted(forbidden_cols)]
    criteria += ['{} IS NOT NULL'.format(col) for col in sorted(required_cols)]
    condition = 'type != {} OR ({})'.format(type_, ' AND '.join(criteria))
    return db.CheckConstraint(condition, 'valid_{}'.format(type_.name))
def serialize_email_principal(email):
    """Serialize email principal to a simple dict."""
    return dict(_type='Email',
                email=email.email,
                id=email.name,
                name=email.name,
                identifier=email.identifier)
class EmailPrincipal:
    """Principal wrapping a bare e-mail address (no Indico account needed).

    :param email: The email address; stored lowercased.
    """

    principal_type = PrincipalType.email
    principal_order = 0

    def __init__(self, email):
        self.email = email.lower()

    @property
    def name(self):
        return self.email

    @property
    def user(self):
        # Imported lazily to avoid a circular import at module load time.
        from indico.modules.users import User
        return User.query.filter(~User.is_deleted, User.all_emails == self.email).first()

    @property
    def identifier(self):
        return 'Email:{}'.format(self.email)

    def __eq__(self, other):
        return isinstance(other, EmailPrincipal) and self.email == other.email

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.email)

    def __contains__(self, user):
        # A falsy user can never match; otherwise check all of their emails.
        return bool(user) and self.email in user.all_emails

    def __repr__(self):
        return format_repr(self, 'email')
class PrincipalMixin:
#: The name of the backref added to `User` and `LocalGroup`.
#: For consistency, it is recommended to name the backref
#: ``in_foo_acl`` with *foo* describing the ACL where this
#: mixin is used.
principal_backref_name = None
#: The columns which should be included in the unique constraints.
#: If set to ``None``, no unique constraints will be added.
unique_columns = None
#: Whether it should be allowed to add a user by email address.
#: This is useful in places where no Indico user exists yet.
#: Usually adding an email address to an ACL should result in
#: an email being sent to the user, inviting him to create an
#: account with that email address.
allow_emails = False
#: Whether it should be allowed to add an IP network.
allow_networks = False
#: Whether it should be allowed to add an event role.
allow_event_roles = False
#: Whether it should be allowed to add a category role
allow_category_roles = False
#: Whether it should be allowed to add registrants
allow_registration_forms = False
@strict_classproperty
@classmethod
def __auto_table_args(cls):
uniques = ()
if cls.unique_columns:
uniques = [db.Index(f'ix_uq_{cls.__tablename__}_user', 'user_id', *cls.unique_columns, unique=True,
postgresql_where=db.text(f'type = {PrincipalType.user}')),
db.Index(f'ix_uq_{cls.__tablename__}_local_group', 'local_group_id', *cls.unique_columns,
unique=True, postgresql_where=db.text(f'type = {PrincipalType.local_group}')),
db.Index(f'ix_uq_{cls.__tablename__}_mp_group', 'mp_group_provider', 'mp_group_name',
*cls.unique_columns, unique=True,
postgresql_where=db.text(f'type = {PrincipalType.multipass_group}'))]
if cls.allow_emails:
uniques.append(db.Index(f'ix_uq_{cls.__tablename__}_email', 'email', *cls.unique_columns,
unique=True, postgresql_where=db.text(f'type = {PrincipalType.email}')))
indexes = [db.Index(None, 'mp_group_provider', 'mp_group_name')]
checks = [_make_check(PrincipalType.user, cls.allow_emails, cls.allow_networks, cls.allow_event_roles,
cls.allow_category_roles, cls.allow_registration_forms, 'user_id'),
_make_check(PrincipalType.local_group, cls.allow_emails, cls.allow_networks, cls.allow_event_roles,
cls.allow_category_roles, cls.allow_registration_forms, 'local_group_id'),
_make_check(PrincipalType.multipass_group, cls.allow_emails, cls.allow_networks,
cls.allow_event_roles, cls.allow_category_roles, cls.allow_registration_forms,
'mp_group_provider', 'mp_group_name')]
if cls.allow_emails:
checks.append(_make_check(PrincipalType.email, cls.allow_emails, cls.allow_networks, cls.allow_event_roles,
cls.allow_category_roles, cls.allow_registration_forms, 'email'))
checks.append(db.CheckConstraint('email IS NULL OR email = lower(email)', 'lowercase_email'))
if cls.allow_networks:
checks.append(_make_check(PrincipalType.network, cls.allow_emails, cls.allow_networks,
cls.allow_event_roles, cls.allow_category_roles, cls.allow_registration_forms,
'ip_network_group_id'))
if cls.allow_event_roles:
checks.append(_make_check(PrincipalType.event_role, cls.allow_emails, cls.allow_networks,
cls.allow_event_roles, cls.allow_category_roles, cls.allow_registration_forms,
'event_role_id'))
if cls.allow_category_roles:
checks.append(_make_check(PrincipalType.category_role, cls.allow_emails, cls.allow_networks,
cls.allow_event_roles, cls.allow_category_roles, cls.allow_registration_forms,
'category_role_id'))
if cls.allow_registration_forms:
checks.append(_make_check(PrincipalType.registration_form, cls.allow_emails, cls.allow_networks,
cls.allow_event_roles, cls.allow_category_roles, cls.allow_registration_forms,
'registration_form_id'))
return tuple(uniques + indexes + checks)
@declared_attr
def type(cls):
exclude_values = set()
if not cls.allow_emails:
exclude_values.add(PrincipalType.email)
if not cls.allow_networks:
exclude_values.add(PrincipalType.network)
if not cls.allow_event_roles:
exclude_values.add(PrincipalType.event_role)
if not cls.allow_category_roles:
exclude_values.add(PrincipalType.category_role)
if not cls.allow_registration_forms:
exclude_values.add(PrincipalType.registration_form)
return db.Column(
PyIntEnum(PrincipalType, exclude_values=(exclude_values or None)),
nullable=False
)
@declared_attr
def user_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
nullable=True,
index=True
)
@declared_attr
def local_group_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('users.groups.id'),
nullable=True,
index=True
)
@declared_attr
def multipass_group_provider(cls):
return db.Column(
'mp_group_provider', # otherwise the index name doesn't fit in 60 chars
db.String,
nullable=True
)
@declared_attr
def multipass_group_name(cls):
return db.Column(
'mp_group_name', # otherwise the index name doesn't fit in 60 chars
db.String,
nullable=True
)
@declared_attr
def email(cls):
if not cls.allow_emails:
return
return db.Column(
db.String,
nullable=True,
index=True
)
@declared_attr
def ip_network_group_id(cls):
if not cls.allow_networks:
return
return db.Column(
db.Integer,
db.ForeignKey('indico.ip_network_groups.id'),
nullable=True,
index=True
)
@declared_attr
def event_role_id(cls):
if not cls.allow_event_roles:
return
return db.Column(
db.Integer,
db.ForeignKey('events.roles.id'),
nullable=True,
index=True
)
@declared_attr
def category_role_id(cls):
if not cls.allow_category_roles:
return
return db.Column(
db.Integer,
db.ForeignKey('categories.roles.id'),
nullable=True,
index=True
)
    @declared_attr
    def registration_form_id(cls):
        # FK to a registration form; only created when the subclass sets
        # allow_registration_forms.
        if not cls.allow_registration_forms:
            return
        return db.Column(
            db.Integer,
            db.ForeignKey('event_registration.forms.id'),
            nullable=True,
            index=True
        )
    @declared_attr
    def user(cls):
        assert cls.principal_backref_name
        # Eagerly-loaded user relationship; the backref gives the user a
        # dynamic query of their ACL entries, deleted together with the user.
        return db.relationship(
            'User',
            lazy=False,
            backref=db.backref(
                cls.principal_backref_name,
                cascade='all, delete',
                lazy='dynamic'
            )
        )
    @declared_attr
    def local_group(cls):
        assert cls.principal_backref_name
        # Eagerly-loaded local group relationship with a dynamic backref.
        return db.relationship(
            'LocalGroup',
            lazy=False,
            backref=db.backref(
                cls.principal_backref_name,
                cascade='all, delete',
                lazy='dynamic'
            )
        )
    @declared_attr
    def ip_network_group(cls):
        # Relationship only exists when the subclass sets allow_networks.
        if not cls.allow_networks:
            return
        assert cls.principal_backref_name
        return db.relationship(
            'IPNetworkGroup',
            lazy=False,
            backref=db.backref(
                cls.principal_backref_name,
                cascade='all, delete',
                lazy='dynamic'
            )
        )
    @declared_attr
    def event_role(cls):
        # Relationship only exists when the subclass sets allow_event_roles.
        if not cls.allow_event_roles:
            return
        assert cls.principal_backref_name
        return db.relationship(
            'EventRole',
            lazy=False,
            backref=db.backref(
                cls.principal_backref_name,
                cascade='all, delete',
                lazy='dynamic'
            )
        )
    @declared_attr
    def category_role(cls):
        # Relationship only exists when the subclass sets allow_category_roles.
        if not cls.allow_category_roles:
            return
        assert cls.principal_backref_name
        return db.relationship(
            'CategoryRole',
            lazy=False,
            backref=db.backref(
                cls.principal_backref_name,
                cascade='all, delete',
                lazy='dynamic'
            )
        )
    @declared_attr
    def registration_form(cls):
        # Relationship only exists when the subclass sets allow_registration_forms.
        if not cls.allow_registration_forms:
            return
        assert cls.principal_backref_name
        return db.relationship(
            'RegistrationForm',
            lazy=False,
            backref=db.backref(
                cls.principal_backref_name,
                cascade='all, delete',
                lazy='dynamic'
            )
        )
    @hybrid_property
    def principal(self):
        """Return the high-level principal object for this ACL entry.

        The concrete return type depends on ``self.type``: a ``User``, a
        group proxy, an ``EmailPrincipal``, an IP network group, an
        event/category role or a registration form.
        """
        from indico.modules.groups import GroupProxy
        if self.type == PrincipalType.user:
            return self.user
        elif self.type == PrincipalType.local_group:
            return self.local_group.proxy
        elif self.type == PrincipalType.multipass_group:
            return GroupProxy(self.multipass_group_name, self.multipass_group_provider)
        elif self.type == PrincipalType.email:
            return EmailPrincipal(self.email)
        elif self.type == PrincipalType.network:
            return self.ip_network_group
        elif self.type == PrincipalType.event_role:
            return self.event_role
        elif self.type == PrincipalType.category_role:
            return self.category_role
        elif self.type == PrincipalType.registration_form:
            return self.registration_form
        # NOTE: implicitly returns None for any type not handled above
    @principal.setter
    def principal(self, value):
        """Set the entry from a principal object, clearing all other columns.

        Every type-specific column is reset first so an entry never
        references more than one kind of principal at a time.
        """
        self.type = value.principal_type
        self.email = None
        self.user = None
        self.local_group = None
        self.multipass_group_provider = self.multipass_group_name = None
        self.ip_network_group = None
        self.event_role = None
        self.category_role = None
        self.registration_form = None
        # The asserts guard against assigning a principal kind the subclass
        # did not opt into via its allow_* flags.
        if self.type == PrincipalType.email:
            assert self.allow_emails
            self.email = value.email
        elif self.type == PrincipalType.network:
            assert self.allow_networks
            self.ip_network_group = value
        elif self.type == PrincipalType.event_role:
            assert self.allow_event_roles
            self.event_role = value
        elif self.type == PrincipalType.category_role:
            assert self.allow_category_roles
            self.category_role = value
        elif self.type == PrincipalType.registration_form:
            assert self.allow_registration_forms
            self.registration_form = value
        elif self.type == PrincipalType.local_group:
            self.local_group = value.group
        elif self.type == PrincipalType.multipass_group:
            self.multipass_group_provider = value.provider
            self.multipass_group_name = value.name
        elif self.type == PrincipalType.user:
            self.user = value
        else:
            raise ValueError(f'Unexpected principal type: {self.type}')
    @principal.comparator
    def principal(cls):
        # Custom comparator so queries can filter with ``cls.principal == obj``.
        return PrincipalComparator(cls)
def get_emails(self):
"""Get a set of all unique emails associated with this principal.
For users, this is just the primary email (or nothing for the system user).
For anything group-like it is the primary email address of each group member
who has an Indico account.
"""
if self.type == PrincipalType.user and not self.user.is_system:
return {self.user.email}
elif self.type in (PrincipalType.local_group, PrincipalType.multipass_group):
return {x.email for x in self.principal.get_members() if not x.is_system}
elif self.type in (PrincipalType.event_role, PrincipalType.category_role):
return {x.email for x in self.principal.members if not x.is_system}
return set()
def get_users(self):
"""Get a set of all users associated with this principal.
For users this is just the user itself. For anything group-like this
returns all members.
"""
if self.type == PrincipalType.user:
return {self.user}
elif self.type in (PrincipalType.local_group, PrincipalType.multipass_group):
return {x for x in self.principal.get_members() if not x.is_system}
elif self.type in (PrincipalType.event_role, PrincipalType.category_role):
return {x for x in self.principal.members if not x.is_system}
return set()
    def merge_privs(self, other):
        """Merge the privileges of another principal.
        :param other: Another principal object.
        """
        # nothing to do here
        # (base entries carry no privilege flags; subclasses override this)
    def current_data(self):
        # Base entries have no privilege data to snapshot.
        # NOTE(review): the permissions subclass exposes this as a @property;
        # callers must cope with both forms -- confirm intended.
        return None
    @classmethod
    def merge_users(cls, target, source, relationship_attr):
        """Merge two users in the ACL.
        :param target: The target user of the merge.
        :param source: The user that is being merged into `target`.
        :param relationship_attr: The name of the relationship pointing
                                  to the object associated with the ACL
                                  entry.
        """
        relationship = getattr(cls, relationship_attr)
        # All ACL entries owned by the source user (associated object eager-loaded).
        source_principals = set(getattr(source, cls.principal_backref_name).options(joinedload(relationship)))
        # Map of associated object -> target user's ACL entry, for duplicate detection.
        target_objects = {getattr(x, relationship_attr): x
                          for x in getattr(target, cls.principal_backref_name).options(joinedload(relationship))}
        for principal in source_principals:
            existing = target_objects.get(getattr(principal, relationship_attr))
            if existing is None:
                # Target has no entry for this object: simply re-own the row.
                principal.user_id = target.id
            else:
                # Both users have an entry: combine privileges, drop the duplicate.
                existing.merge_privs(principal)
                db.session.delete(principal)
        db.session.flush()
    @classmethod
    def replace_email_with_user(cls, user, relationship_attr):
        """
        Replace all email-based entries matching the user's email
        addresses with user-based entries.
        If the user is already in the ACL, the two entries are merged.
        :param user: A User object.
        :param relationship_attr: The name of the relationship pointing
                                  to the object associated with the ACL
                                  entry.
        :return: The set of objects where the user has been added to
                 the ACL.
        """
        assert cls.allow_emails
        updated = set()
        # Email entries matching any of the user's addresses; noload() avoids
        # pointless eager loads since we only care about the parent object.
        query = (cls.query
                 .filter(cls.email.in_(user.all_emails))
                 .options(noload('user'), noload('local_group'), joinedload(relationship_attr).load_only('id')))
        for entry in query:
            parent = getattr(entry, relationship_attr)
            # Is the user already present in this parent's ACL?
            existing = (cls.query
                        .with_parent(parent, 'acl_entries')
                        .options(noload('user'), noload('local_group'))
                        .filter_by(principal=user)
                        .first())
            if existing is None:
                # Convert the email entry into a user entry in place.
                entry.principal = user
            else:
                # Merge privileges into the existing user entry and drop the
                # now-redundant email entry.
                existing.merge_privs(entry)
                parent.acl_entries.remove(entry)
            updated.add(parent)
        db.session.flush()
        return updated
class PrincipalPermissionsMixin(PrincipalMixin):
    """ACL entry mixin adding read/full access flags and a permission list."""
    #: The model for which we are a principal. May also be a string
    #: containing the model's class name.
    principal_for = None
    @strict_classproperty
    @classmethod
    def __auto_table_args(cls):
        # DB-level invariant: an ACL row must grant *something*.
        checks = [db.CheckConstraint('read_access OR full_access OR array_length(permissions, 1) IS NOT NULL',
                                     'has_privs')]
        if cls.allow_networks:
            # you can match a network acl entry without being logged in.
            # we never want that for anything but simple read access
            checks.append(db.CheckConstraint('type != {} OR (NOT full_access AND array_length(permissions, 1) IS NULL)'
                                             .format(PrincipalType.network),
                                             'networks_read_only'))
        if cls.allow_registration_forms:
            # many events allow everyone to register, letting people give themselves
            # management access by registering would be bad so we only allow read access
            checks.append(db.CheckConstraint('type != {} OR (NOT full_access AND array_length(permissions, 1) IS NULL)'
                                             .format(PrincipalType.registration_form),
                                             'registration_form_read_only'))
        return tuple(checks)
    # Simple read access to the guarded object.
    read_access = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    # Full management access; implies all permissions unless checked explicitly.
    full_access = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    # Names of individual management permissions granted to the principal.
    permissions = db.Column(
        ARRAY(db.String),
        nullable=False,
        default=[]
    )
    @classproperty
    @classmethod
    def principal_for_obj(cls):
        # Resolve `principal_for` lazily when it is given as a class-name
        # string (avoids circular imports between models).
        if isinstance(cls.principal_for, str):
            return db.Model.registry._class_registry[cls.principal_for]
        else:
            return cls.principal_for
    @hybrid_method
    def has_management_permission(self, permission=None, explicit=False):
        """Check whether a principal has a certain management permission.
        The check always succeeds if the user is a full manager; in
        that case the list of permissions is ignored.
        :param permission: The permission to check for or 'ANY' to check for any
                           management permission.
        :param explicit: Whether to check for the permission itself even if
                         the user has full management privileges.
        """
        if permission is None:
            if explicit:
                raise ValueError('permission must be specified if explicit=True')
            return self.full_access
        elif not explicit and self.full_access:
            return True
        valid_permissions = get_available_permissions(self.principal_for_obj).keys()
        # Ignore stale permission names that are no longer registered.
        current_permissions = set(self.permissions) & valid_permissions
        if permission == 'ANY':
            return bool(current_permissions)
        assert permission in valid_permissions, "invalid permission '{}' for object '{}'".format(permission,
                                                                                                 self.principal_for_obj)
        return permission in current_permissions
    @has_management_permission.expression
    def has_management_permission(cls, permission=None, explicit=False):
        # SQL-side counterpart of the Python check above; `&&` is the
        # PostgreSQL array-overlap operator.
        if permission is None:
            if explicit:
                raise ValueError('permission must be specified if explicit=True')
            return cls.full_access
        valid_permissions = get_available_permissions(cls.principal_for_obj).keys()
        if permission == 'ANY':
            crit = (cls.permissions.op('&&')(db.func.cast(valid_permissions, ARRAY(db.String))))
        else:
            assert permission in valid_permissions, \
                f"invalid permission '{permission}' for object '{cls.principal_for_obj}'"
            crit = (cls.permissions.op('&&')(db.func.cast([permission], ARRAY(db.String))))
        if explicit:
            return crit
        else:
            return cls.full_access | crit
    def merge_privs(self, other):
        # Union of both entries' privileges (used when merging duplicate entries).
        self.read_access = self.read_access or other.read_access
        self.full_access = self.full_access or other.full_access
        self.permissions = sorted(set(self.permissions) | set(other.permissions))
    @property
    def current_data(self):
        # Snapshot of the entry's privileges, e.g. for change logging.
        return {'permissions': set(self.permissions),
                'read_access': self.read_access,
                'full_access': self.full_access}
class PrincipalComparator(Comparator):
    """Comparator allowing queries to filter with ``cls.principal == obj``."""

    def __init__(self, cls):
        self.cls = cls

    def __clause_element__(self):
        # comparisons must go through __eq__; a bare clause element is meaningless
        raise NotImplementedError

    def __eq__(self, other):
        kind = other.principal_type
        if kind == PrincipalType.email:
            criteria = [self.cls.email == other.email]
        elif kind == PrincipalType.user:
            criteria = [self.cls.user_id == other.id]
        elif kind == PrincipalType.multipass_group:
            # multipass groups are identified by the (provider, name) pair
            criteria = [self.cls.multipass_group_provider == other.provider,
                        self.cls.multipass_group_name == other.name]
        else:
            id_columns = {
                PrincipalType.network: 'ip_network_group_id',
                PrincipalType.event_role: 'event_role_id',
                PrincipalType.category_role: 'category_role_id',
                PrincipalType.registration_form: 'registration_form_id',
                PrincipalType.local_group: 'local_group_id',
            }
            if kind not in id_columns:
                raise ValueError(f'Unexpected object type {type(other)}: {other}')
            criteria = [getattr(self.cls, id_columns[kind]) == other.id]
        return db.and_(self.cls.type == kind, *criteria)
def clone_principals(cls, principals, event_role_map=None, regform_map=None):
    """Clone a list of principals.
    :param cls: the principal type to use (a `PrincipalMixin` subclass)
    :param principals: a collection of these principals
    :param event_role_map: the mapping from old to new event roles.
                           if omitted, event roles are skipped
    :param regform_map: if omitted, registration forms are skipped
    :return: A new set of principals that can be added to an object
    """
    rv = set()
    assert all(isinstance(x, cls) for x in principals)
    # Attributes copied verbatim; event roles and registration forms instead
    # need remapping to their cloned counterparts below.
    attrs = get_simple_column_attrs(cls) | {'user', 'local_group', 'ip_network_group', 'category_role'}
    for old_principal in principals:
        event_role = None
        registration_form = None
        if old_principal.type == PrincipalType.event_role:
            if event_role_map is None:
                # no mapping available -> cannot clone this entry
                continue
            event_role = event_role_map[old_principal.event_role]
        elif old_principal.type == PrincipalType.registration_form:
            if regform_map is None:
                continue
            registration_form = regform_map[old_principal.registration_form]
        principal = cls()
        principal.populate_from_dict({attr: getattr(old_principal, attr) for attr in attrs})
        if event_role:
            principal.event_role = event_role
        elif registration_form:
            principal.registration_form = registration_form
        rv.add(principal)
    return rv
|
{
"content_hash": "3c9766b42ac79e3d9ae083927ad60457",
"timestamp": "",
"source": "github",
"line_count": 686,
"max_line_length": 120,
"avg_line_length": 39.20553935860058,
"alnum_prop": 0.5921918572225321,
"repo_name": "pferreir/indico",
"id": "980196abadab0c67e67c690afc4329165769daca",
"size": "27109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/core/db/sqlalchemy/principals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
import json
import pymongo
import sys
def connect_to_db_collection(db_name, collection_name):
    '''
    Return collection of a given database name and collection name
    '''
    # NOTE(review): pymongo.Connection was removed in pymongo 3.x (replaced
    # by MongoClient) -- confirm the pinned pymongo version supports this.
    connection = pymongo.Connection('localhost', 27017)
    db = connection[db_name]
    collection = db[collection_name]
    return collection
def main():
    """Build the ``problem_ids`` collection from tracking events.

    Reads server-side ``problem_check`` events from the ``tracking``
    collection of the database named on the command line, resolves each
    username through ``user_id_map`` and inserts one flattened document
    per event into ``problem_ids`` (dropped and rebuilt on every run).
    """
    if len(sys.argv) != 2:
        usage_message = """
        usage: %s db_name
        Create problem_ids collection\n
        """
        sys.stderr.write(usage_message % sys.argv[0])
        sys.exit(1)
    # Fix: source_db was never assigned, causing a NameError below.
    source_db = sys.argv[1]
    problem_ids_collection = connect_to_db_collection(source_db, 'problem_ids')
    problem_ids_collection.drop()
    tracking_collection = connect_to_db_collection(source_db, 'tracking')
    user_id_map_collection = connect_to_db_collection(source_db, 'user_id_map')
    cursor = tracking_collection.find({'event_type': 'problem_check',
                                       'event_source': 'server'})
    for document in cursor:
        doc_result = {}
        username = document['username']
        if username.isdigit():
            # numeric usernames are stored as ints in user_id_map
            username = int(username)
        doc_result['username'] = username
        user_id_map = user_id_map_collection.find_one({'username': username})
        if not user_id_map:
            print("Username {0} not found in collection user_id_map".format(username))
            continue
        doc_result['user_id'] = user_id_map['id']
        doc_result['hash_id'] = user_id_map['hash_id']
        doc_result['problem_id'] = document['event']['problem_id']
        doc_result['course_id'] = document['context']['course_id']
        doc_result['module'] = document['context']['module']
        doc_result['time'] = document['time']
        doc_result['event'] = document['event']
        # Fix: the original wrote to an undefined name `collection`.
        problem_ids_collection.insert(doc_result)
# Script entry point: build the problem_ids collection for the given db.
if __name__ == '__main__':
    main()
|
{
"content_hash": "6a78c534e55fb8712b94afa531b469f8",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 85,
"avg_line_length": 35.76923076923077,
"alnum_prop": 0.6021505376344086,
"repo_name": "McGillX/edx_data_research",
"id": "7305b973e15fdf95ba48b635df39f8b4eb7dae73",
"size": "1860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edx_data_research/parsing/problem_ids/reference_problem_ids_collection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "23281"
},
{
"name": "Python",
"bytes": "208187"
},
{
"name": "Shell",
"bytes": "295"
}
],
"symlink_target": ""
}
|
from config_manager.baseconfig import BaseConfig
class ClusterController(BaseConfig):
    """Configuration block describing a cluster controller host.

    Exposes ``hostname``, ``network_type``, ``bridge_interface`` and
    ``log_level`` as managed properties and forwards the remaining
    metadata to :class:`BaseConfig`.
    """

    def __init__(self,
                 hostname,
                 name=None,
                 description=None,
                 read_file_path=None,
                 write_file_path=None,
                 property_type=None,
                 version=None,
                 network_type=None):
        # The config name defaults to the hostname when not given explicitly.
        name = name or hostname
        self.hostname = self.create_property('hostname', value=hostname)
        self.network_type = self.create_property(json_name='network_type',
                                                 value=network_type)
        self.bridge_interface = self.create_property(json_name='bridge_interface',
                                                     value=None)
        self.log_level = self.create_property(json_name='log_level',
                                              value=None)
        # Fix: forward the caller-supplied values; the original passed
        # hard-coded None for description/read/write paths and version,
        # silently discarding those arguments.
        super(ClusterController, self).__init__(name=name,
                                                description=description,
                                                read_file_path=read_file_path,
                                                write_file_path=write_file_path,
                                                property_type=property_type,
                                                version=version)
|
{
"content_hash": "fb2a62e8e2a5c065b277a2930e347b9e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 82,
"avg_line_length": 46.82142857142857,
"alnum_prop": 0.43783371472158655,
"repo_name": "tbeckham/DeploymentManager",
"id": "a1bee2024e3af6da242bf492dcc9a75a8b61b6c3",
"size": "1926",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "config_manager/eucalyptus/topology/cluster/clustercontroller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15666"
},
{
"name": "JavaScript",
"bytes": "9683"
},
{
"name": "Python",
"bytes": "132178"
}
],
"symlink_target": ""
}
|
import pygame
"""
Splits the text in such a way that none of the line rendered by the pygame.font.Font object will exceed expected width.
:returns: a generator that yields string for each line whose rendered surface's length won't exceed expected width
"""
def truncate_to_width(text, font, max_width):
done = 0
while not done:
real, done, line = truncate_line(text, font, max_width)
text = text[real:]
yield line.strip()
"""
Finds how many letters could be rendered in one line (on pygame.Surface) not to exceed maximum width.
:returns: data used by the algorithm to truncate lines
"""
def truncate_line(text, font, max_width):
real = len(text)
line = text
width = font.size(text)[0]
cut = 0
a = 0
done = 1
while width > max_width:
a += 1
n = text.rsplit(None, a)[0]
if line == n:
cut += 1
line = n[:-cut]
else:
line = n
width = font.size(line)[0]
real = len(line)
done = 0
return real, done, line
"""
Renders string lines as text blitted into pygame.Surface object.
:returns: pygame.Surface
"""
def render(lines, font, color):
rendered_lines = []
width = height = 0
for line in lines:
rendered_line = font.render(line, color)
rendered_lines.append(rendered_line)
line_width, line_height = rendered_line.get_size()
height += line_height
if width < line_width:
width = line_width
return join_rendered_lines(width, height, rendered_lines)
"""
Joins rendered surfaces (one under the other) into one, total surface.
:returns: pygame.Surface
"""
def join_rendered_lines(width, height, rendered_lines):
surface = pygame.Surface((width, height), pygame.SRCALPHA)
position = 0
for rendered_line in rendered_lines:
surface.blit(rendered_line, (0, position))
position += rendered_line.get_size()[1]
return surface
|
{
"content_hash": "5134d0c7105bc88998c3389b502d0af2",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 119,
"avg_line_length": 21.547619047619047,
"alnum_prop": 0.6883977900552486,
"repo_name": "r0jsik/rinde",
"id": "8bdc1ab026ad4985c26e7e76eeaa4f920208c7c3",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rinde/script/text_lines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3339"
},
{
"name": "HTML",
"bytes": "1025"
},
{
"name": "Python",
"bytes": "102352"
}
],
"symlink_target": ""
}
|
import contextlib
import time
import mock
from oslo.config import cfg
import testtools
from neutron.agent.linux import ovs_lib
from neutron.extensions import securitygroup as ext_sg
from neutron.plugins.oneconvergence.agent import nvsd_neutron_agent
from neutron.tests import base
DAEMON_LOOP_COUNT = 5
class TestOneConvergenceAgentBase(base.BaseTestCase):
    """Common fixture building an NVSD agent with RPC and looping call mocked."""
    def setUp(self):
        super(TestOneConvergenceAgentBase, self).setUp()
        # Use the no-op firewall driver and the in-process fake RPC backend
        # so no real networking happens in these tests.
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        cfg.CONF.set_override('rpc_backend',
                              'neutron.openstack.common.rpc.impl_fake')
        with contextlib.nested(
            mock.patch('neutron.openstack.common.loopingcall.'
                       'FixedIntervalLoopingCall'),
        ) as (loopingcall):
            # NOTE(review): ``as (loopingcall)`` binds the 1-tuple itself,
            # not its element -- likely meant ``(loopingcall,)``; verify.
            kwargs = {'integ_br': 'integration_bridge',
                      'root_helper': 'dummy_wrapper',
                      'polling_interval': 5}
            context = mock.Mock()
            self.agent = nvsd_neutron_agent.NVSDNeutronAgent(**kwargs)
            self.sg_agent = nvsd_neutron_agent.SecurityGroupAgentRpc(
                context, 'dummy_wrapper')
            self.callback_nvsd = nvsd_neutron_agent.NVSDAgentRpcCallback(
                context, self.agent, self.sg_agent)
            self.loopingcall = loopingcall
class TestOneConvergenceAgentCallback(TestOneConvergenceAgentBase):
    """Checks that port_update only refreshes the firewall when needed."""
    def test_port_update(self):
        with contextlib.nested(
            mock.patch.object(ovs_lib.OVSBridge, 'get_vif_port_by_id'),
            mock.patch.object(self.sg_agent, 'refresh_firewall')
        ) as (get_vif_port_by_id, refresh_firewall):
            context = mock.Mock()
            vifport = ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1',
                                      self.agent.int_br)
            # The OVS port does not exist.
            get_vif_port_by_id.return_value = None
            port = {'id': 'update-port-1'}
            self.callback_nvsd.port_update(context, port=port)
            self.assertEqual(get_vif_port_by_id.call_count, 1)
            self.assertFalse(refresh_firewall.call_count)
            # The OVS port exists but no security group is associated.
            get_vif_port_by_id.return_value = vifport
            port = {'id': 'update-port-1'}
            self.callback_nvsd.port_update(context, port=port)
            self.assertEqual(get_vif_port_by_id.call_count, 2)
            self.assertFalse(refresh_firewall.call_count)
            # The OVS port exists but a security group is associated.
            get_vif_port_by_id.return_value = vifport
            port = {'id': 'update-port-1',
                    ext_sg.SECURITYGROUPS: ['default']}
            self.callback_nvsd.port_update(context, port=port)
            self.assertEqual(get_vif_port_by_id.call_count, 3)
            self.assertEqual(refresh_firewall.call_count, 1)
            # Port gone again: security groups present but no OVS port,
            # so no additional firewall refresh is expected.
            get_vif_port_by_id.return_value = None
            port = {'id': 'update-port-1',
                    ext_sg.SECURITYGROUPS: ['default']}
            self.callback_nvsd.port_update(context, port=port)
            self.assertEqual(get_vif_port_by_id.call_count, 4)
            self.assertEqual(refresh_firewall.call_count, 1)
class TestNVSDAgent(TestOneConvergenceAgentBase):
    """Exercises the agent's daemon_loop against a scripted sequence of ports."""
    def _setup_mock(self):
        # NOTE(review): this helper is not called from the visible test --
        # the test below sets up its own patches via contextlib.nested.
        self.get_vif_ports = mock.patch.object(
            ovs_lib.OVSBridge, 'get_vif_port_set',
            return_value=set(['id-1', 'id-2'])).start()
        self.prepare_devices_filter = mock.patch.object(
            self.agent.sg_agent, 'prepare_devices_filter').start()
        self.remove_devices_filter = mock.patch.object(
            self.agent.sg_agent, 'remove_devices_filter').start()
    def test_daemon_loop(self):
        def state_check(index):
            # the agent's known ports must match the scenario after each iteration
            self.assertEqual(len(self.vif_ports_scenario[index]),
                             len(self.agent.ports))
        # Fake time.sleep to stop the infinite loop in daemon_loop()
        self.sleep_count = 0
        def sleep_mock(*args, **kwargs):
            state_check(self.sleep_count)
            self.sleep_count += 1
            if self.sleep_count >= DAEMON_LOOP_COUNT:
                raise RuntimeError()
        self.vif_ports_scenario = [set(), set(), set(), set(['id-1', 'id-2']),
                                   set(['id-2', 'id-3'])]
        # Ensure vif_ports_scenario is longer than DAEMON_LOOP_COUNT
        if len(self.vif_ports_scenario) < DAEMON_LOOP_COUNT:
            self.vif_ports_scenario.extend(
                [] for _i in xrange(DAEMON_LOOP_COUNT -
                                    len(self.vif_ports_scenario)))
        with contextlib.nested(
            mock.patch.object(time, 'sleep', side_effect=sleep_mock),
            mock.patch.object(ovs_lib.OVSBridge, 'get_vif_port_set'),
            mock.patch.object(self.agent.sg_agent, 'prepare_devices_filter'),
            mock.patch.object(self.agent.sg_agent, 'remove_devices_filter')
        ) as (sleep, get_vif_port_set, prepare_devices_filter,
              remove_devices_filter):
            get_vif_port_set.side_effect = self.vif_ports_scenario
            with testtools.ExpectedException(RuntimeError):
                self.agent.daemon_loop()
            self.assertEqual(sleep.call_count, DAEMON_LOOP_COUNT)
            # added ports trigger prepare_devices_filter with the delta
            expected = [mock.call(set(['id-1', 'id-2'])),
                        mock.call(set(['id-3']))]
            self.assertEqual(prepare_devices_filter.call_count, 2)
            prepare_devices_filter.assert_has_calls(expected)
            # removed ports trigger remove_devices_filter with the delta
            expected = [mock.call(set([])), mock.call(set(['id-1']))]
            self.assertEqual(remove_devices_filter.call_count, 2)
            remove_devices_filter.assert_has_calls(expected)
            sleep.assert_called_with(self.agent.polling_interval)
class TestOneConvergenceAgentMain(base.BaseTestCase):
    """Verifies main() wires config values into the agent and starts the loop."""
    def test_main(self):
        with contextlib.nested(
            mock.patch.object(nvsd_neutron_agent, 'NVSDNeutronAgent'),
            mock.patch.object(nvsd_neutron_agent, 'common_config'),
            mock.patch.object(nvsd_neutron_agent, 'config')
        ) as (agent, common_config, config):
            config.AGENT.integration_bridge = 'br-int-dummy'
            config.AGENT.root_helper = 'root-helper'
            config.AGENT.polling_interval = 5
            nvsd_neutron_agent.main()
            self.assertTrue(common_config.setup_logging.called)
            # agent must be constructed from the config values and its
            # daemon loop entered
            agent.assert_has_calls([
                mock.call('br-int-dummy', 'root-helper', 5),
                mock.call().daemon_loop()
            ])
|
{
"content_hash": "0b5f182093679d243d5467dd2985ce9b",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 78,
"avg_line_length": 41.67701863354037,
"alnum_prop": 0.5912071535022355,
"repo_name": "onecloud/neutron",
"id": "f04d2fecb11bc14a1b30d27f16779aa3c9dc33e4",
"size": "7395",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/oneconvergence/test_nvsd_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import print_function
# from cloudmesh_job.cm_jobdb import JobDB
from cmd3.console import Console
from cmd3.shell import command
import hostlist
from pprint import pprint
from cloudmesh_base.util import banner
from cloudmesh_job.cm_jobdb import JobDB
from cloudmesh_pbs.DbPBS import DbPBS
from cloudmesh_job.command_job import CommandJob
from cloudmesh_base.tables import row_table
from prettytable import PrettyTable
import yaml
import os
def job_table(d, order=None, labels=None):
    """Build a two-column PrettyTable from the dict *d*.

    List values produce one row per element (key shown only on the first
    row); dict values produce one ``key : value`` row per item; anything
    else becomes a single row.

    :param d: A dict to be printed
    :param order: kept for backward compatibility; currently unused
    :param labels: header labels passed to PrettyTable
    :return: a left-aligned PrettyTable instance
    """
    x = PrettyTable(labels)
    x.align = "l"
    # Fix: the original crashed on an empty dict via d[d.keys()[0]]
    # (dead code that also broke on Python 3).
    if not d:
        return x
    for key in d:
        value = d[key]
        if isinstance(value, list):
            # first element beside the key, the rest on continuation rows
            if value:
                x.add_row([key, value[0]])
                for element in value[1:]:
                    x.add_row(["", element])
            else:
                x.add_row([key, ""])
        elif isinstance(value, dict):
            value_keys = list(value.keys())
            if value_keys:
                first_key = value_keys[0]
                x.add_row([key, "{0} : {1}".format(first_key, value[first_key])])
                for element in value_keys[1:]:
                    x.add_row(["", "{0} : {1}".format(element, value[element])])
            else:
                x.add_row([key, ""])
        else:
            x.add_row([key, value])
    return x
def job_table_2(d, order=None, labels=None):
    """Build a PrettyTable with one row per entry of the dict-of-dicts *d*.

    Column headers are taken from the attribute names of the first entry;
    each entry contributes one row with its values (stringified).

    :param d: dict mapping names to attribute dicts
    :param order: kept for backward compatibility; currently unused
    :param labels: header labels used only when *d* is empty
    :return: a left-aligned PrettyTable instance
    """
    # Fix: the original crashed on an empty dict via d[d.keys()[0]]
    # and carried a large commented-out dead-code block.
    if not d:
        x = PrettyTable(labels)
        x.align = "l"
        return x
    # headers come from the first entry's attribute names
    attributes = list(d[next(iter(d))].keys())
    x = PrettyTable(attributes)
    for key in d:
        element = d[key]
        # .get() guards entries missing one of the header attributes
        x.add_row([str(element.get(attribute, "")) for attribute in attributes])
    x.align = "l"
    return x
class cm_shell_job:
db = None
    def activate_cm_shell_job(self):
        # Register the `job` command under the HPC topic with the cmd3 shell.
        self.register_command_topic('HPC', 'job')
@command
def do_job(self, args, arguments):
"""
::
Usage:
job server start
job server stop
job server clean
job server deploy
job server ps
job server info
job server pid
job script add FILENAME [--name=NAME]
job script delete NAMES
job script get NAME FILENAME
job script cat NAME
job script list
job info
job stat
job list [--output=FORMAT]
job add JOBLIST [--host=HOST] [--options=OPTIONS] [--inputs=INPUTS] [--outputs=OUTPUTS]
job load FILE
job write --file=filename
job find --name=NAME
job find --attribute=ATTRIBUTE --value=VALUE
job delete JOBLIST
Arguments:
NAME the name of the job
HOST the host on which the job should run
OPTIONS options passed to the command
INPUTS input files
OUTPUTS output files
ATTRIBUTE an attribute
VALUE a value
JOBLIST the job list
Description:
manages a job catalog to submit them to a computation cloud
or Grid.
Server Management
job server start
starts the job server
job server stop
stops the job server
job server clean
removes all data in the job server and does a graceful clean, e.g deletes all scheduled jobs
job server kill
kills just the job server, but does not delete the jobs from the schedulers.
this command should not be called in normal circumstances.
Job Management
job set GROUP
sets the default job group
job add GROUP TODO
adds a job to a group
job server start
starts the server
job server stop
stops the server
job stat
prints a simple statistics of the jobs
job add NAMES [--host=HOST] [--option=OPTIONS] [--inputs=INPUTS] [--outputs=OUTPUTS]
adds a number of jobs
job add --file=filename
adds the job from the file. The postfix of the file deterimnes which
format it is. The formats supported are .csv, .yaml, .json
job write --file=filename
writes the jobs to a file. The postfix of the file deterimnes which
format it is. Thfe formats supported are .csv, .yaml, .json
job list [--output=OUTPUT]
lists the jobs in the format specified
job find --name=NAME
find the job with the given name
job find --attribute=ATTRIBUTE --value=VALUE
find jobs that match the given attribute.
job delete JOBLIST
delete the job with the specified names in the joblist.
THE FOLLOWING IS NOT YET DEFINED OR MAY CHANGE
job add TODO
... not yet sure what in the rest of the command
adds a job to the job server and returns its id
job last
returns the last job added to the server
job delete ID
deletes the job from the job server and cancels it if it is scheduled for execution.
job info ID
give the info of a job
job submit ID HOST
submits the job with the given ID to the host
job list GROUP
lists the jobs in the group
job status [ID | GROUP]
list the status of a single job or the status of all jobs in the group
job status statistics
list the statistics of the jobs in the job server (e.g. for the states)
"""
# pprint(arguments)
def connect():
try:
db = JobDB()
db.connect()
except Exception, e:
print(e)
raise Exception("connection error")
return db
if arguments["script"]:
if arguments["add"]:
# job script add FILENAME [--name=NAME]
filename = arguments["FILENAME"]
base=os.path.basename(filename)
if "." in base:
name = base.split(".")[0]
else:
name = base
if arguments["--name"] is not None:
name =arguments["--name"]
print ("Adding script {:} <- {:}".format(name, filename))
db = connect()
db.add_script_from_file(name, filename)
return
elif arguments["delete"]:
# job script delete NAMES
if arguments["NAMES"]:
names = hostlist.expand_hostlist(arguments["NAMES"])
db = connect()
for name in names:
print ("Delete Script", name)
db.delete_script(name)
return
elif arguments["get"]:
# job script get NAME FILENAME
Console.ok("get script")
if arguments["NAME"]:
name = arguments["NAME"]
filename = arguments["FILENAME"]
db = connect()
db.write_script(name, filename)
return
elif arguments["cat"]:
# job script cat NAME
if arguments["NAME"]:
name = arguments["NAME"]
db = connect()
script = db.get_script(name)
print (script)
return
elif arguments["list"]:
db = connect()
scripts = db.list_scripts()
if scripts is not None:
print("\n".join(scripts))
return
if arguments["server"]:
if arguments["start"]:
db = JobDB()
db.start()
return
elif arguments["stop"]:
db = JobDB()
db.stop()
return
elif arguments["ps"]:
db = connect()
db.ps()
return
elif arguments["clean"]:
db = connect()
db.delete_jobs()
return
elif arguments["deploy"]:
db = JobDB()
db.deploy()
return
elif arguments["pid"]:
try:
db = connect()
print(db.pid())
except:
print("ERROR: connecting to server")
return
elif arguments["info"]:
try:
db = connect()
db.info()
except Exception, e:
print("ERROR: connecting to server")
print(e)
return
if arguments["info"]:
db = connect()
db.info()
return
elif arguments["stat"]:
db = connect()
db.stat()
return
elif arguments["delete"] and arguments["JOBLIST"]:
joblist = hostlist.expand_hostlist(arguments["JOBLIST"])
db = connect()
for job in joblist:
# if job exists:
Console.ok("delete job {:}".format(job))
db.delete_jobs("job_name", job)
elif arguments["load"] and arguments["FILE"]:
filename = arguments["FILE"]
print("load", filename)
db = connect()
db.add_from_yaml(filename)
return
elif arguments["add"]:
'''
name = arguments["NAME"]
host = arguments["HOST"]
options = arguments["OPTIONS"]
input_files = arguments["INPUT_FILES"]
output_file = arguments["OUTPUT_FILES"]
db = connect()
db.insert(name, input_files, output_file, options, host)
Console.ok("add")
'''
joblist = hostlist.expand_hostlist(arguments["JOBLIST"])
host = [None]
inputs = [None]
outputs = [None]
options = [None]
db = connect()
if arguments.get("--host"):
host = arguments["--host"]
if arguments["--inputs"]:
inputs = hostlist.expand_hostlist(arguments["--inputs"])
if arguments["--outputs"]:
outputs = hostlist.expand_hostlist(arguments["--outputs"])
if arguments["--options"]:
options = hostlist.expand_hostlist(arguments["--options"])
# check if inputs are either 0, 1 or the length of joblist
def expand_parameter(parameter, label):
"""
:param parameter:
:param label:
:return: list of strings
"""
_parameter = parameter
if len(_parameter) == 1:
_parameter = _parameter * len(joblist)
elif len(_parameter) == len(joblist):
pass
else:
Console.error("the number of input files do not match the hostlist")
print("joblist count:", len(joblist))
print(label, "count: ", len(_parameter))
return _parameter
options = expand_parameter(options, "options")
inputs = expand_parameter(inputs, "inputs")
outputs = expand_parameter(outputs, "outputs")
# dependent on if 0, 1, or length of joblist handle that
for i in range(len(joblist)):
banner(str(i))
Console.ok("add job : {:} ".format(joblist[i]))
Console.ok(" input : {:} ".format(inputs[i]))
Console.ok(" output: {:} ".format(outputs[i]))
# Build the dictionary for the job to be added
job = {
"job_name": joblist[i],
"input": inputs[i],
"output": outputs[i]
}
# Add the job
db.add(job)
elif arguments["list"]:
output = arguments["--output"]
db = connect()
jobs = db.find_jobs()
d = {}
for job in jobs:
name = job["job_name"]
d[name] = job
if output is None:
output = 'table'
if 'json' in output:
pprint (d)
elif 'table' in output:
print(
job_table_2(d)
#job_table_2(d,
# order=['job_name','group'],
# labels=['Job','Attributes'])
)
elif 'yaml' in output:
print(yaml.safe_dump(d))
# wrong:
# get a list of all jobs and passes it to PBS class with output parameter
# pbs = DbPBS()
#pbs.list(allJobs, output)
Console.ok("lists the jobs in the format specified")
return
elif arguments["write"]:
'''call list function and then write output to a file'''
filename = arguments["--filename"]
# check if file exists
# if it does ask if you want to overwrite
# -f force
target = open(filename)
db = connect()
allJobs = db.find_jobs()
pbs = DbPBS()
'''review string, determine output format, call PBS class, write to filename'''
if filename.endswith("csv"):
toOutput = pbs.list(allJobs, output="csv")
elif filename.endswith("json"):
toOutput = pbs.list(allJobs, output="json")
elif filename.endswith("yaml"):
toOutput = pbs.list(allJobs, output="yaml")
target.write(toOutput)
target.close()
elif arguments["find"] and arguments["--name"]:
name = arguments["NAME"]
db = connect()
db.find_jobs("job_name", name)
Console.ok("find the job with the given name")
elif arguments["find"] and arguments["--attribute"] and arguments["--value"]:
name = arguments["NAME"]
attribute = arguments["--attribute"]
value = arguments["--value"]
db = connect()
db.find_jobs(attribute, value)
Console.ok("job find --attribute=ATTRIBUTE --value=VALUE")
pass
# Allow running this module directly as a script: build the job-shell
# command handler and dispatch the docopt-parsed "job" command.
if __name__ == '__main__':
    command = cm_shell_job()
    command.do_job()
|
{
"content_hash": "3754267d7821b9e2bfc858cb8551a2e8",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 116,
"avg_line_length": 31.288389513108616,
"alnum_prop": 0.466004309312904,
"repo_name": "rajpushkar83/pbs",
"id": "847e2013a60105f720a15a25e23d9201648e55ce",
"size": "16708",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudmesh_job/plugins/cm_shell_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3936"
},
{
"name": "Makefile",
"bytes": "583"
},
{
"name": "Python",
"bytes": "154133"
},
{
"name": "Shell",
"bytes": "1841"
}
],
"symlink_target": ""
}
|
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
def _swagger_property(attr, doc):
    """Build a plain read/write property backed by the private '_<attr>' slot.

    Every V1Volume field is a trivial get/set pair around a private
    attribute; generating the pairs here removes several hundred lines of
    duplicated accessor code while keeping the public interface identical.

    :param attr: public attribute name (e.g. 'host_path')
    :param doc: docstring attached to the generated property
    :return: a property object delegating to '_<attr>'
    """
    private_name = '_' + attr

    def _get(self):
        return getattr(self, private_name)

    def _set(self, value):
        setattr(self, private_name, value)

    return property(_get, _set, doc=doc)


class V1Volume(object):
    """Kubernetes v1 Volume model (named volume in a pod).

    NOTE: This class was originally auto generated by the swagger code
    generator program. This revision preserves the generated public
    interface (constructor signature, properties, ``swagger_types``,
    ``attribute_map`` and the dict/str/equality helpers) but fixes
    ``__eq__`` so comparison against unrelated types no longer raises,
    and collapses the duplicated accessor boilerplate.
    """

    operations = [
    ]

    # The key is the attribute name and the value is the attribute type.
    swagger_types = {
        'name': 'str',
        'host_path': 'V1HostPathVolumeSource',
        'empty_dir': 'V1EmptyDirVolumeSource',
        'gce_persistent_disk': 'V1GCEPersistentDiskVolumeSource',
        'aws_elastic_block_store': 'V1AWSElasticBlockStoreVolumeSource',
        'git_repo': 'V1GitRepoVolumeSource',
        'secret': 'V1SecretVolumeSource',
        'nfs': 'V1NFSVolumeSource',
        'iscsi': 'V1ISCSIVolumeSource',
        'glusterfs': 'V1GlusterfsVolumeSource',
        'persistent_volume_claim': 'V1PersistentVolumeClaimVolumeSource',
        'rbd': 'V1RBDVolumeSource',
        'flex_volume': 'V1FlexVolumeSource',
        'cinder': 'V1CinderVolumeSource',
        'cephfs': 'V1CephFSVolumeSource',
        'flocker': 'V1FlockerVolumeSource',
        'downward_api': 'V1DownwardAPIVolumeSource',
        'fc': 'V1FCVolumeSource',
        'azure_file': 'V1AzureFileVolumeSource',
        'config_map': 'V1ConfigMapVolumeSource',
        'metadata': 'V1MetadataVolumeSource'
    }

    # The key is the attribute name and the value is the json key in the
    # API definition.
    attribute_map = {
        'name': 'name',
        'host_path': 'hostPath',
        'empty_dir': 'emptyDir',
        'gce_persistent_disk': 'gcePersistentDisk',
        'aws_elastic_block_store': 'awsElasticBlockStore',
        'git_repo': 'gitRepo',
        'secret': 'secret',
        'nfs': 'nfs',
        'iscsi': 'iscsi',
        'glusterfs': 'glusterfs',
        'persistent_volume_claim': 'persistentVolumeClaim',
        'rbd': 'rbd',
        'flex_volume': 'flexVolume',
        'cinder': 'cinder',
        'cephfs': 'cephfs',
        'flocker': 'flocker',
        'downward_api': 'downwardAPI',
        'fc': 'fc',
        'azure_file': 'azureFile',
        'config_map': 'configMap',
        'metadata': 'metadata'
    }

    def __init__(self, name=None, host_path=None, empty_dir=None, gce_persistent_disk=None, aws_elastic_block_store=None, git_repo=None, secret=None, nfs=None, iscsi=None, glusterfs=None, persistent_volume_claim=None, rbd=None, flex_volume=None, cinder=None, cephfs=None, flocker=None, downward_api=None, fc=None, azure_file=None, config_map=None, metadata=None):
        """V1Volume - a model defined in Swagger.

        Each keyword argument corresponds to one volume-source field of
        the API object; all default to None (unset).
        """
        self._name = name
        self._host_path = host_path
        self._empty_dir = empty_dir
        self._gce_persistent_disk = gce_persistent_disk
        self._aws_elastic_block_store = aws_elastic_block_store
        self._git_repo = git_repo
        self._secret = secret
        self._nfs = nfs
        self._iscsi = iscsi
        self._glusterfs = glusterfs
        self._persistent_volume_claim = persistent_volume_claim
        self._rbd = rbd
        self._flex_volume = flex_volume
        self._cinder = cinder
        self._cephfs = cephfs
        self._flocker = flocker
        self._downward_api = downward_api
        self._fc = fc
        self._azure_file = azure_file
        self._config_map = config_map
        self._metadata = metadata

    # Generated accessors: each is a plain property delegating to the
    # matching private attribute set in __init__ above.
    name = _swagger_property(
        'name',
        "Volume's name. Must be a DNS_LABEL and unique within the pod.")
    host_path = _swagger_property(
        'host_path',
        "Pre-existing file or directory on the host machine exposed to the "
        "container; mostly for system agents (V1HostPathVolumeSource).")
    empty_dir = _swagger_property(
        'empty_dir',
        "Temporary directory that shares a pod's lifetime "
        "(V1EmptyDirVolumeSource).")
    gce_persistent_disk = _swagger_property(
        'gce_persistent_disk',
        "GCE Disk attached to the kubelet's host machine and exposed to "
        "the pod (V1GCEPersistentDiskVolumeSource).")
    aws_elastic_block_store = _swagger_property(
        'aws_elastic_block_store',
        "AWS Disk attached to the kubelet's host machine and exposed to "
        "the pod (V1AWSElasticBlockStoreVolumeSource).")
    git_repo = _swagger_property(
        'git_repo',
        "Git repository at a particular revision (V1GitRepoVolumeSource).")
    secret = _swagger_property(
        'secret',
        "Secret that should populate this volume (V1SecretVolumeSource).")
    nfs = _swagger_property(
        'nfs',
        "NFS mount on the host that shares a pod's lifetime "
        "(V1NFSVolumeSource).")
    iscsi = _swagger_property(
        'iscsi',
        "ISCSI Disk attached to the kubelet's host machine and exposed to "
        "the pod (V1ISCSIVolumeSource).")
    glusterfs = _swagger_property(
        'glusterfs',
        "Glusterfs mount on the host that shares a pod's lifetime "
        "(V1GlusterfsVolumeSource).")
    persistent_volume_claim = _swagger_property(
        'persistent_volume_claim',
        "Reference to a PersistentVolumeClaim in the same namespace "
        "(V1PersistentVolumeClaimVolumeSource).")
    rbd = _swagger_property(
        'rbd',
        "Rados Block Device mount on the host that shares a pod's "
        "lifetime (V1RBDVolumeSource).")
    flex_volume = _swagger_property(
        'flex_volume',
        "Generic volume provisioned/attached via an exec based plugin; "
        "alpha feature (V1FlexVolumeSource).")
    cinder = _swagger_property(
        'cinder',
        "Cinder volume attached and mounted on the kubelet's host machine "
        "(V1CinderVolumeSource).")
    cephfs = _swagger_property(
        'cephfs',
        "Ceph FS mount on the host that shares a pod's lifetime "
        "(V1CephFSVolumeSource).")
    flocker = _swagger_property(
        'flocker',
        "Flocker volume attached to a kubelet's host machine; depends on "
        "the Flocker control service running (V1FlockerVolumeSource).")
    downward_api = _swagger_property(
        'downward_api',
        "Downward API data about the pod that should populate this volume "
        "(V1DownwardAPIVolumeSource).")
    fc = _swagger_property(
        'fc',
        "Fibre Channel resource attached to the kubelet's host machine "
        "and exposed to the pod (V1FCVolumeSource).")
    azure_file = _swagger_property(
        'azure_file',
        "Azure File Service mount on the host, bind-mounted into the pod "
        "(V1AzureFileVolumeSource).")
    config_map = _swagger_property(
        'config_map',
        "ConfigMap that should populate this volume "
        "(V1ConfigMapVolumeSource).")
    metadata = _swagger_property(
        'metadata',
        "Metadata about the pod that should populate this volume. "
        "Deprecated: use downward_api instead (V1MetadataVolumeSource).")

    def to_dict(self):
        """Return the model properties as a dict, recursing into any
        nested model objects (anything exposing a to_dict method)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    (key, val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items())
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are V1Volume with equal attributes.

        Bug fix over the generated code: comparing against an object
        without a __dict__ (e.g. ``volume == 5``) previously raised
        AttributeError; now the comparison cleanly evaluates to False.
        """
        if not isinstance(other, V1Volume):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
|
{
"content_hash": "211561fa96a91db090d9b35d5874d3a4",
"timestamp": "",
"source": "github",
"line_count": 648,
"max_line_length": 363,
"avg_line_length": 34.45216049382716,
"alnum_prop": 0.6343113101903696,
"repo_name": "ftl-toolbox/lib_openshift",
"id": "c865f33adfd8a6dbb8ddae923f50a3e7e1f9e633",
"size": "22342",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib_openshift/models/v1_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61160"
},
{
"name": "Python",
"bytes": "6149288"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Packaging metadata for CONCUSS. The repository root itself is installed
# as the 'concuss' package (see package_dir mapping below).
setup(
    name='CONCUSS',
    version='1.0',
    description='Combatting Network Complexity Using Structural Sparsity',
    author='Michael P. O\'Brien, Clayton G. Hobbs, Kevin Jasnik, Felix Reidl, '
           'Nishant G. Rodrigues, and Blair D. Sullivan',
    author_email='blair_sullivan@ncsu.edu',
    url='https://www.github.com/theoryinpractice/CONCUSS',
    license='BSD',
    packages=['concuss'],
    # Map the 'concuss' package onto the repository root directory.
    package_dir={
        'concuss':''
    },
    # Optional feature sets installable as CONCUSS[test], CONCUSS[gexf], ...
    extras_require={
        'test':['networkx'],
        'gexf':['beautifulsoup'],
        'graphml':['beautifulsoup']
    },
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 2.7',
    ]
)
|
{
"content_hash": "75fd54fce95c6c4119ef04599ed1e273",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 32.77777777777778,
"alnum_prop": 0.535593220338983,
"repo_name": "nish10z/CONCUSS",
"id": "29785875f7c0ff848c1c7555f182c725f22fcb2e",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "233824"
},
{
"name": "Shell",
"bytes": "5421"
}
],
"symlink_target": ""
}
|
"""Test script for the imageop module. This has the side
effect of partially testing the imgfile module as well.
Roger E. Masse
"""
from test.test_support import verbose, unlink, import_module, run_unittest
imageop = import_module('imageop', deprecated=True)
import uu, os, unittest
# Pixel sizes (bytes per pixel) accepted by the imageop functions.
SIZES = (1, 2, 3, 4)
# Magnitudes straddling the 2**10, 2**15 and 2**31 boundaries, where
# overflow bugs in the C implementation are most likely.
_VALUES = (1, 2, 2**10, 2**15-1, 2**15, 2**15+1, 2**31-2, 2**31-1)
# Symmetric width/height candidates: negatives, zero and positives.
VALUES = tuple( -x for x in reversed(_VALUES) ) + (0,) + _VALUES
AAAAA = "A" * 1024  # reusable dummy pixel buffer for oversized requests
MAX_LEN = 2**20  # cap on generated buffer size to keep memory use bounded
class InputValidationTests(unittest.TestCase):
    """Feed every imageop function extreme width/height combinations.

    The point is not the return values: the C implementation must either
    succeed or raise ValueError/imageop.error, never crash or overflow.
    """

    def _check(self, name, size=None, *extra):
        """Call imageop.<name> across all width/height pairs from VALUES.

        :param name: imageop function name to look up and invoke
        :param size: bytes per pixel, or None for size-less functions
        :param extra: trailing positional arguments the function needs
        """
        func = getattr(imageop, name)
        for h in VALUES:
            for w in VALUES:
                nbytes = abs(w * h)
                if size:
                    nbytes *= size
                # Build a buffer of the requested length, but fall back to
                # a small dummy buffer when the request is unreasonably big.
                data = "A" * nbytes if nbytes < MAX_LEN else AAAAA
                if size:
                    args = (data, size, w, h) + extra
                else:
                    args = (data, w, h) + extra
                try:
                    func(*args)
                except (ValueError, imageop.error):
                    pass  # rejecting bad geometry is an acceptable outcome

    def check_size(self, name, *extra):
        """Run _check once for every supported pixel size."""
        for pixel_size in SIZES:
            self._check(name, pixel_size, *extra)

    def check(self, name, *extra):
        """Run _check for functions that take no pixel-size argument."""
        self._check(name, None, *extra)

    def test_input_validation(self):
        self.check_size("crop", 0, 0, 0, 0)
        self.check_size("scale", 1, 0)
        self.check_size("scale", -1, -1)
        self.check_size("tovideo")
        self.check("grey2mono", 128)
        self.check("grey2grey4")
        self.check("grey2grey2")
        self.check("dither2mono")
        self.check("dither2grey2")
        self.check("mono2grey", 0, 0)
        self.check("grey22grey")
        self.check("rgb2rgb8") # nlen*4 == len
        self.check("rgb82rgb")
        self.check("rgb2grey")
        self.check("grey2rgb")
self.check("grey2rgb")
def test_main():
    """Run the input-validation unit tests, then (only when the SGI
    'imgfile' module is importable) exercise every imageop conversion
    on a real RGB test image decoded from a uuencoded fixture.
    """
    run_unittest(InputValidationTests)
    try:
        import imgfile
    except ImportError:
        # The validation tests above are platform independent; the image
        # round-trips below need the platform-specific imgfile module.
        return
    # Create binary test files
    uu.decode(get_qualified_path('testrgb'+os.extsep+'uue'), 'test'+os.extsep+'rgb')
    image, width, height = getimage('test'+os.extsep+'rgb')
    # Return the selected part of image, which should by width by height
    # in size and consist of pixels of psize bytes.
    if verbose:
        print 'crop'
    newimage = imageop.crop (image, 4, width, height, 0, 0, 1, 1)
    # Return image scaled to size newwidth by newheight. No interpolation
    # is done, scaling is done by simple-minded pixel duplication or removal.
    # Therefore, computer-generated images or dithered images will
    # not look nice after scaling.
    if verbose:
        print 'scale'
    scaleimage = imageop.scale(image, 4, width, height, 1, 1)
    # Run a vertical low-pass filter over an image. It does so by computing
    # each destination pixel as the average of two vertically-aligned source
    # pixels. The main use of this routine is to forestall excessive flicker
    # if the image is displayed on a video device that uses interlacing,
    # hence the name.
    if verbose:
        print 'tovideo'
    videoimage = imageop.tovideo (image, 4, width, height)
    # Convert an rgb image to an 8 bit rgb
    if verbose:
        print 'rgb2rgb8'
    greyimage = imageop.rgb2rgb8(image, width, height)
    # Convert an 8 bit rgb image to a 24 bit rgb image
    if verbose:
        print 'rgb82rgb'
    image = imageop.rgb82rgb(greyimage, width, height)
    # Convert an rgb image to an 8 bit greyscale image
    if verbose:
        print 'rgb2grey'
    greyimage = imageop.rgb2grey(image, width, height)
    # Convert an 8 bit greyscale image to a 24 bit rgb image
    if verbose:
        print 'grey2rgb'
    image = imageop.grey2rgb(greyimage, width, height)
    # Convert a 8-bit deep greyscale image to a 1-bit deep image by
    # thresholding all the pixels. The resulting image is tightly packed
    # and is probably only useful as an argument to mono2grey.
    if verbose:
        print 'grey2mono'
    monoimage = imageop.grey2mono (greyimage, width, height, 0)
    # monoimage, width, height = getimage('monotest.rgb')
    # Convert a 1-bit monochrome image to an 8 bit greyscale or color image.
    # All pixels that are zero-valued on input get value p0 on output and
    # all one-value input pixels get value p1 on output. To convert a
    # monochrome black-and-white image to greyscale pass the values 0 and
    # 255 respectively.
    if verbose:
        print 'mono2grey'
    greyimage = imageop.mono2grey (monoimage, width, height, 0, 255)
    # Convert an 8-bit greyscale image to a 1-bit monochrome image using a
    # (simple-minded) dithering algorithm.
    if verbose:
        print 'dither2mono'
    monoimage = imageop.dither2mono (greyimage, width, height)
    # Convert an 8-bit greyscale image to a 4-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey4'
    grey4image = imageop.grey2grey4 (greyimage, width, height)
    # Convert an 8-bit greyscale image to a 2-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey2'
    grey2image = imageop.grey2grey2 (greyimage, width, height)
    # Convert an 8-bit greyscale image to a 2-bit greyscale image with
    # dithering. As for dither2mono, the dithering algorithm is currently
    # very simple.
    if verbose:
        print 'dither2grey2'
    grey2image = imageop.dither2grey2 (greyimage, width, height)
    # Convert a 4-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey42grey'
    greyimage = imageop.grey42grey (grey4image, width, height)
    # Convert a 2-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey22grey'
    image = imageop.grey22grey (grey2image, width, height)
    # Cleanup
    unlink('test'+os.extsep+'rgb')
def getimage(name):
"""return a tuple consisting of
image (in 'imgfile' format) width and height
"""
import imgfile
try:
sizes = imgfile.getsizes(name)
except imgfile.error:
name = get_qualified_path(name)
sizes = imgfile.getsizes(name)
if verbose:
print 'imgfile opening test image: %s, sizes: %s' % (name, str(sizes))
image = imgfile.read(name)
return (image, sizes[0], sizes[1])
def get_qualified_path(name):
    """Return a more qualified path to *name*.

    Searches the directory containing this module first, then every
    entry of sys.path; falls back to returning *name* unchanged when no
    existing file is found.
    """
    import os
    import sys
    search_dirs = list(sys.path)
    try:
        search_dirs.insert(0, os.path.dirname(__file__))
    except NameError:
        pass  # __file__ is undefined when run interactively
    for directory in search_dirs:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return name
# Standard test-module entry point: run the suite when executed directly.
if __name__ == '__main__':
    test_main()
|
{
"content_hash": "5029c2330c96312d8946a10264d13558",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 84,
"avg_line_length": 33.642857142857146,
"alnum_prop": 0.6075017692852088,
"repo_name": "MattDevo/edk2",
"id": "666a93c2adb2a25eace7787f757191d1933875a7",
"size": "7089",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_imageop.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "4545237"
},
{
"name": "Batchfile",
"bytes": "93042"
},
{
"name": "C",
"bytes": "94289702"
},
{
"name": "C++",
"bytes": "20170310"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "DIGITAL Command Language",
"bytes": "13695"
},
{
"name": "GAP",
"bytes": "698245"
},
{
"name": "GDB",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "472114"
},
{
"name": "Lua",
"bytes": "249"
},
{
"name": "Makefile",
"bytes": "231845"
},
{
"name": "NSIS",
"bytes": "2229"
},
{
"name": "Objective-C",
"bytes": "4147834"
},
{
"name": "PHP",
"bytes": "674"
},
{
"name": "PLSQL",
"bytes": "24782"
},
{
"name": "Perl",
"bytes": "6218"
},
{
"name": "Python",
"bytes": "27130096"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "Roff",
"bytes": "28192"
},
{
"name": "Shell",
"bytes": "104362"
},
{
"name": "SourcePawn",
"bytes": "29427"
},
{
"name": "Visual Basic",
"bytes": "494"
}
],
"symlink_target": ""
}
|
import socket
class UDPDebugger(object):
    """Fire-and-forget debug logger that sends messages over UDP.

    UDP is connectionless, so Log() never blocks waiting for a peer and
    silently drops messages when nothing is listening.
    """

    def __init__(self, ip_address, port):
        """
        :param ip_address: destination host (IPv4 address or hostname)
        :param port: destination UDP port number
        """
        self.ip_address = ip_address
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Internet + UDP

    def Log(self, message):
        """Send *message* to the configured endpoint.

        Accepts text or bytes. Bug fix: socket.sendto requires a byte
        string on Python 3, so text is UTF-8 encoded before sending.
        """
        if not isinstance(message, bytes):
            message = message.encode('utf-8')
        self.sock.sendto(message, (self.ip_address, self.port))
|
{
"content_hash": "b320de750f09bd8138fef0d3f7a94e82",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 79,
"avg_line_length": 29.6,
"alnum_prop": 0.706081081081081,
"repo_name": "robertjacobs/rpf_python_test",
"id": "50651e7c646dff6c3baed841312fa6e29dba777f",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/core/debugger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4524"
}
],
"symlink_target": ""
}
|
from allauth.socialaccount.providers import registry
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from .provider import AsanaProvider
# The base class is generated at import time: create_oauth2_tests() builds
# a TestCase subclass wired to the Asana provider's OAuth2 login flow.
class AsanaTests(create_oauth2_tests(registry.by_id(AsanaProvider.id))):
    def get_mocked_response(self):
        # Canned Asana /users/me payload so the generated OAuth2 login test
        # can complete without calling the real API. The id/email/name
        # fields are what allauth extracts into the social account.
        return MockedResponse(
            200,
            """
{"data": {"photo": null, "workspaces": [{"id": 31337, "name": "example.com"},
{"id": 3133777, "name": "Personal Projects"}], "email": "test@example.com",
"name": "Test Name", "id": 43748387}}""",
        )
|
{
"content_hash": "14c3f3a915960a4315ad8f5ca880ec17",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 35.6875,
"alnum_prop": 0.6760070052539404,
"repo_name": "pennersr/django-allauth",
"id": "c33655658db852a03021754bb5873b25f068636f",
"size": "571",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/asana/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "104"
},
{
"name": "HTML",
"bytes": "20404"
},
{
"name": "JavaScript",
"bytes": "3360"
},
{
"name": "Makefile",
"bytes": "396"
},
{
"name": "Python",
"bytes": "923713"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import FashionMNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm
from ignite.engine import create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
# torch.set_num_threads(4)
class NNet(nn.Module):
    """Small CNN for 28x28 single-channel images: two conv blocks, two FC layers.

    Forward pass maps a (N, 1, 28, 28) batch to (N, 10) log-probabilities.
    """

    def __init__(self):
        super(NNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Conv block 1: 28x28 -> conv(5) 24x24 -> pool 12x12.
        out = F.max_pool2d(self.conv1(x), 2)
        out = F.relu(out)
        # Conv block 2 with dropout: 12x12 -> conv(5) 8x8 -> pool 4x4.
        out = F.max_pool2d(self.conv2_drop(self.conv2(out)), 2)
        out = F.relu(out)
        # Flatten: 20 channels * 4 * 4 spatial = 320 features.
        out = out.view(-1, 320)
        out = F.relu(self.fc1(out))
        out = F.dropout(out, training=self.training)
        return F.log_softmax(self.fc2(out), dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
    """Build (train, validation) DataLoaders for FashionMNIST.

    Downloads the dataset into the current directory if absent. The training
    loader shuffles; the validation loader does not.
    """
    transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
    loaders = []
    for is_train, batch_size in ((True, train_batch_size), (False, val_batch_size)):
        dataset = FashionMNIST(download=True, root='.', transform=transform, train=is_train)
        loaders.append(DataLoader(dataset, batch_size=batch_size, shuffle=is_train))
    return tuple(loaders)
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
    """Train NNet on FashionMNIST with ignite, reporting progress via tqdm.

    Args:
        train_batch_size: Batch size for the training loader.
        val_batch_size: Batch size for the validation loader.
        epochs: Number of passes over the training set.
        lr: SGD learning rate.
        momentum: SGD momentum.
        log_interval: Update the progress bar every this many iterations.
    """
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = NNet()
    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"
    model.to(device)  # Move model before creating optimizer
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    # NLLLoss pairs with the log_softmax output of NNet.forward.
    criterion = nn.NLLLoss()
    trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
    trainer.logger = setup_logger("trainer")
    val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
    evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
    evaluator.logger = setup_logger("evaluator")
    pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=f"ITERATION - loss: {0:.2f}")
    # Refresh the progress-bar loss readout every `log_interval` iterations.
    @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
    def log_training_loss(engine):
        pbar.desc = f"ITERATION - loss: {engine.state.output:.2f}"
        pbar.update(log_interval)
    # After each epoch, re-evaluate on the *training* set and report metrics.
    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        pbar.refresh()
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        tqdm.write(
            f"Training Results - Epoch: {engine.state.epoch}  Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
        )
    # After each epoch, evaluate on the validation set and reset the bar.
    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        tqdm.write(
            f"Validation Results - Epoch: {engine.state.epoch}  Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
        )
        # Rewind the progress bar so the next epoch starts from zero.
        pbar.n = pbar.last_print_n = 0
    # Report wall-clock duration of each epoch and of the whole run.
    @trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
    def log_time(engine):
        tqdm.write(f"{trainer.last_event_name.name} took { trainer.state.times[trainer.last_event_name.name]} seconds")
    trainer.run(train_loader, max_epochs=epochs)
    pbar.close()
if __name__ == "__main__":
    # Command-line configuration for the training run; defaults mirror the
    # values named in each help string.
    cli = ArgumentParser()
    for flag, kind, default, text in (
        ("--batch_size", int, 64, "input batch size for training (default: 64)"),
        ("--val_batch_size", int, 1000, "input batch size for validation (default: 1000)"),
        ("--epochs", int, 10, "number of epochs to train (default: 10)"),
        ("--lr", float, 0.01, "learning rate (default: 0.01)"),
        ("--momentum", float, 0.5, "SGD momentum (default: 0.5)"),
        ("--log_interval", int, 10, "how many batches to wait before logging training status"),
    ):
        cli.add_argument(flag, type=kind, default=default, help=text)
    opts = cli.parse_args()
    run(opts.batch_size, opts.val_batch_size, opts.epochs, opts.lr, opts.momentum, opts.log_interval)
|
{
"content_hash": "263769070713b5440152d39b7e79362b",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 149,
"avg_line_length": 39.88034188034188,
"alnum_prop": 0.6648092584654951,
"repo_name": "suresh/notebooks",
"id": "54de0eefacb9ad22b653a631171f4a734fe8e35e",
"size": "4666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fashion_mnist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1242"
},
{
"name": "Jupyter Notebook",
"bytes": "4293922"
},
{
"name": "Python",
"bytes": "20140"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
}
|
"""Contains the definition for Inflated 3D Inception V1 (I3D).
The network architecture is proposed by:
Joao Carreira and Andrew Zisserman,
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset.
https://arxiv.org/abs/1705.07750
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import i3d_utils
from nets import s3dg
# pylint: disable=g-long-lambda
# Weight initializer helper: zero-mean truncated normal with the given stddev.
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
    0.0, stddev)
# Local alias for the spatiotemporal 3-D convolution helper.
conv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal
def i3d_arg_scope(weight_decay=1e-7,
                  batch_norm_decay=0.999,
                  batch_norm_epsilon=0.001,
                  use_renorm=False,
                  separable_conv3d=False):
  """Defines default arg_scope for I3D.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    use_renorm: Whether to use batch renormalization or not.
    separable_conv3d: Whether to use separable 3d Convs.

  Returns:
    sc: An arg_scope to use for the models.
  """
  normalizer_params = dict(
      # Decay for the moving averages.
      decay=batch_norm_decay,
      # epsilon to prevent 0s in variance.
      epsilon=batch_norm_epsilon,
      # Turns off fused batch norm.
      fused=False,
      renorm=use_renorm,
      # collection containing the moving mean and moving variance.
      variables_collections={
          'beta': None,
          'gamma': None,
          'moving_mean': ['moving_vars'],
          'moving_variance': ['moving_vars'],
      },
  )
  conv_ops = [slim.conv3d, conv3d_spatiotemporal]
  with slim.arg_scope(
      conv_ops,
      weights_regularizer=slim.l2_regularizer(weight_decay),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=normalizer_params):
    with slim.arg_scope(
        [conv3d_spatiotemporal], separable=separable_conv3d) as sc:
      return sc
def i3d_base(inputs, final_endpoint='Mixed_5c',
             scope='InceptionV1'):
  """Defines the I3D base architecture.

  Note that we use the names as defined in Inception V1 to facilitate checkpoint
  conversion from an image-trained Inception V1 checkpoint to I3D checkpoint.

  Args:
    inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
      channels].
    final_endpoint: Specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
      'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
      'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
    scope: Optional variable_scope.

  Returns:
    A dictionary from components of the network to the corresponding activation.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values.
  """
  # I3D is S3D-G without gating and with an inflated 7x7 stem; delegate to
  # the shared S3D-G graph builder with the appropriate configuration.
  s3dg_kwargs = dict(
      first_temporal_kernel_size=7,
      temporal_conv_startat='Conv2d_2c_3x3',
      gating_startat=None,
      final_endpoint=final_endpoint,
      min_depth=16,
      depth_multiplier=1.0,
      data_format='NDHWC',
      scope=scope)
  return s3dg.s3dg_base(inputs, **s3dg_kwargs)
def i3d(inputs,
        num_classes=1000,
        dropout_keep_prob=0.8,
        is_training=True,
        prediction_fn=slim.softmax,
        spatial_squeeze=True,
        reuse=None,
        scope='InceptionV1'):
  """Defines the I3D architecture.

  The default image size used to train this network is 224x224.

  Args:
    inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,
      channels].
    num_classes: number of predicted classes.
    dropout_keep_prob: the percentage of activation values that are retained.
    is_training: whether is training or not.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
  # Final pooling and prediction
  with tf.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    # Batch norm and dropout switch between train/inference behavior here.
    with slim.arg_scope(
        [slim.batch_norm, slim.dropout], is_training=is_training):
      net, end_points = i3d_base(inputs, scope=scope)
      with tf.variable_scope('Logits'):
        # Shrink the nominal [2, 7, 7] pooling kernel if the feature map
        # is smaller (e.g. for inputs below the default 224x224).
        kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])
        net = slim.avg_pool3d(
            net, kernel_size, stride=1, scope='AvgPool_0a_7x7')
        net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
        # 1x1x1 convolution acts as the fully connected classifier layer.
        logits = slim.conv3d(
            net,
            num_classes, [1, 1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_0c_1x1')
        # Temporal average pooling.
        logits = tf.reduce_mean(input_tensor=logits, axis=1)
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
    end_points['Logits'] = logits
    end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
# Nominal spatial input size the network was designed for.
i3d.default_image_size = 224
|
{
"content_hash": "286e8963f1b002bce177c9f0e1a036ed",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 80,
"avg_line_length": 34.52121212121212,
"alnum_prop": 0.6574789325842697,
"repo_name": "googleinterns/wss",
"id": "c4782d4199007746444a97b024f1e481c052c154",
"size": "6381",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/slim/nets/i3d.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "198395"
}
],
"symlink_target": ""
}
|
import uuid
import http.client
from oslo_serialization import jsonutils
from keystone.common.policies import grant as gp
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
# Module-level shortcuts: the global keystone configuration object and the
# registry of backend manager APIs (identity, resource, assignment, ...).
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class _SystemUserGrantTests(object):
    """Mixin asserting that any system-scoped persona can read role grants.

    Each test creates a user or group plus a project or domain, assigns the
    bootstrap ``reader`` role, then lists or checks the grant through the API.
    The composing test class must provide ``self.test_client``,
    ``self.headers`` (auth for the persona under test) and
    ``self.bootstrapper`` (bootstrap role fixtures).
    """
    def test_can_list_grants_for_user_on_project(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            r = c.get(
                '/v3/projects/%s/users/%s/roles' % (project['id'], user['id']),
                headers=self.headers
            )
            self.assertEqual(1, len(r.json['roles']))
    def test_can_list_grants_for_user_on_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            r = c.get(
                '/v3/domains/%s/users/%s/roles' % (domain['id'], user['id']),
                headers=self.headers
            )
            self.assertEqual(1, len(r.json['roles']))
    def test_can_list_grants_for_group_on_project(self):
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            r = c.get(
                '/v3/projects/%s/groups/%s/roles' % (
                    project['id'], group['id']),
                headers=self.headers
            )
            self.assertEqual(1, len(r.json['roles']))
    def test_can_list_grants_for_group_on_domain(self):
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            r = c.get(
                '/v3/domains/%s/groups/%s/roles' % (domain['id'], group['id']),
                headers=self.headers
            )
            self.assertEqual(1, len(r.json['roles']))
    def test_can_check_grant_for_user_on_project(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.get(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.NO_CONTENT
            )
    def test_can_check_grant_for_user_on_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.get(
                '/v3/domains/%s/users/%s/roles/%s' % (
                    domain['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.NO_CONTENT
            )
    def test_can_check_grant_for_group_on_project(self):
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.get(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.NO_CONTENT
            )
    def test_can_check_grant_for_group_on_domain(self):
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.get(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    domain['id'], group['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.NO_CONTENT
            )
class _SystemMemberAndReaderGrantTests(object):
    """Mixin asserting that system members/readers cannot modify role grants.

    Each test attempts to create or revoke a grant on a project or domain and
    expects an HTTP 403 (Forbidden). The composing test class must provide
    ``self.test_client``, ``self.headers`` (auth for the persona under test)
    and ``self.bootstrapper`` (bootstrap role fixtures).
    """
    def test_cannot_create_grant_for_user_on_project(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        with self.test_client() as c:
            c.put(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
    def test_cannot_create_grant_for_user_on_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        with self.test_client() as c:
            c.put(
                '/v3/domains/%s/users/%s/roles/%s' % (
                    domain['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
    def test_cannot_create_grant_for_group_on_project(self):
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        with self.test_client() as c:
            c.put(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
    def test_cannot_create_grant_for_group_on_domain(self):
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        with self.test_client() as c:
            c.put(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    domain['id'], group['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
    def test_cannot_revoke_grant_from_user_on_project(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
    def test_cannot_revoke_grant_from_user_on_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/domains/%s/users/%s/roles/%s' % (
                    domain['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
    def test_cannot_revoke_grant_from_group_on_project(self):
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
    def test_cannot_revoke_grant_from_group_on_domain(self):
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    domain['id'], group['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
class _DomainUserTests(object):
def test_can_list_grants_for_user_on_project(self):
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=self.domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
project_id=project['id']
)
with self.test_client() as c:
r = c.get(
'/v3/projects/%s/users/%s/roles' % (project['id'], user['id']),
headers=self.headers
)
self.assertEqual(1, len(r.json['roles']))
def test_can_list_grants_for_user_on_domain(self):
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=self.domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
domain_id=self.domain_id
)
with self.test_client() as c:
r = c.get(
'/v3/domains/%s/users/%s/roles' % (self.domain_id, user['id']),
headers=self.headers
)
self.assertEqual(1, len(r.json['roles']))
def test_can_list_grants_for_group_on_project(self):
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=self.domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
project_id=project['id']
)
with self.test_client() as c:
r = c.get(
'/v3/projects/%s/groups/%s/roles' % (
project['id'], group['id']),
headers=self.headers
)
self.assertEqual(1, len(r.json['roles']))
def test_can_list_grants_for_group_on_domain(self):
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=self.domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
domain_id=self.domain_id
)
with self.test_client() as c:
r = c.get(
'/v3/domains/%s/groups/%s/roles' % (
self.domain_id, group['id']
), headers=self.headers
)
self.assertEqual(1, len(r.json['roles']))
def test_can_check_grant_for_user_on_project(self):
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=self.domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=self.domain_id
)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.NO_CONTENT
)
def test_can_check_grant_for_user_on_domain(self):
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=self.domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
domain_id=self.domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/users/%s/roles/%s' % (
self.domain_id, user['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.NO_CONTENT
)
def test_can_check_grant_for_group_on_project(self):
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=self.domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'],
group['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.NO_CONTENT
)
def test_can_check_grant_for_group_on_domain(self):
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=self.domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
domain_id=self.domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/groups/%s/roles/%s' % (
self.domain_id, group['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.NO_CONTENT
)
def test_cannot_list_grants_for_user_other_domain_on_project_own_domain(self): # noqa: E501
user_domain_id = CONF.identity.default_domain_id
project_domain_id = self.domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/users/%s/roles' % (project['id'], user['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_list_grants_for_user_own_domain_on_project_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=project_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/users/%s/roles' % (project['id'], user['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_list_grants_for_user_own_domain_on_other_domain(self):
user_domain_id = self.domain_id
domain_id = CONF.identity.default_domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/users/%s/roles' % (domain_id, user['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_list_grants_for_user_other_domain_on_own_domain(self):
user_domain_id = CONF.identity.default_domain_id
domain_id = self.domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/users/%s/roles' % (domain_id, user['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_list_grants_for_group_other_domain_on_project_own_domain(self): # noqa: E501
group_domain_id = CONF.identity.default_domain_id
project_domain_id = self.domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/groups/%s/roles' % (
project['id'], group['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_list_grants_for_group_own_domain_on_project_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=project_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/groups/%s/roles' % (
project['id'], group['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_list_grants_for_group_own_domain_on_other_domain(self):
group_domain_id = self.domain_id
domain_id = CONF.identity.default_domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/groups/%s/roles' % (
domain_id, group['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_list_grants_for_group_other_domain_on_own_domain(self):
group_domain_id = CONF.identity.default_domain_id
domain_id = self.domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/groups/%s/roles' % (
domain_id, group['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_user_other_domain_on_project_own_domain(self): # noqa: E501
user_domain_id = CONF.identity.default_domain_id
project_domain_id = self.domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'],
self.bootstrapper.reader_role_id),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_user_own_domain_on_project_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=project_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'],
self.bootstrapper.reader_role_id),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_user_own_domain_on_project_own_domain_with_role_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
project_domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id))
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex,
unit.new_project_ref(domain_id=project_domain_id)
)
# NOTE(cmurphy) the grant for a domain-specific role cannot be created
# for a project in a different domain, so we don't try to create it,
# but we still need to test that checking the role results in a 403 and
# not a 404
with self.test_client() as c:
c.get(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'],
role['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_user_own_domain_on_other_domain(self):
user_domain_id = self.domain_id
domain_id = CONF.identity.default_domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_user_other_domain_on_own_domain(self):
user_domain_id = CONF.identity.default_domain_id
domain_id = self.domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_user_own_domain_on_own_domain_with_role_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex,
unit.new_role_ref(domain_id=role_domain_id))
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
# NOTE(cmurphy) the grant for a domain-specific role cannot be created
# for a project in a different domain, so we don't try to create it,
# but we still need to test that checking the role results in a 403 and
# not a 404
with self.test_client() as c:
c.get(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'],
role['id']
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_group_other_domain_on_project_own_domain(self): # noqa: E501
group_domain_id = CONF.identity.default_domain_id
project_domain_id = self.domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'], group['id'],
self.bootstrapper.reader_role_id),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_group_own_domain_on_project_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
project_id=project['id']
)
with self.test_client() as c:
c.get(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'], group['id'],
self.bootstrapper.reader_role_id),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_group_own_domain_on_project_own_domain_with_role_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex,
unit.new_role_ref(domain_id=role_domain_id))
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id)
)
# NOTE(cmurphy) the grant for a domain-specific role cannot be created
# for a project in a different domain, so we don't try to create it,
# but we still need to test that checking the role results in a 403 and
# not a 404
with self.test_client() as c:
c.get(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'], group['id'],
role['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_group_own_domain_on_other_domain(self):
group_domain_id = self.domain_id
domain_id = CONF.identity.default_domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'],
self.bootstrapper.reader_role_id),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_group_other_domain_on_own_domain(self):
group_domain_id = CONF.identity.default_domain_id
domain_id = self.domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.get(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'],
self.bootstrapper.reader_role_id),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_check_grant_for_group_own_domain_on_own_domain_with_role_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id))
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
# NOTE(cmurphy) the grant for a domain-specific role cannot be created
# for a project in a different domain, so we don't try to create it,
# but we still need to test that checking the role results in a 403 and
# not a 404
with self.test_client() as c:
c.get(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'],
role['id']),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_user_other_domain_on_project_own_domain(self): # noqa: E501
user_domain_id = CONF.identity.default_domain_id
project_domain_id = self.domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
with self.test_client() as c:
c.put(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_user_own_domain_on_project_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
with self.test_client() as c:
c.put(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_user_own_domain_on_project_own_domain_with_role_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
project_domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id))
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
with self.test_client() as c:
c.put(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'], role['id']
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_user_other_domain_on_own_domain(self):
user_domain_id = CONF.identity.default_domain_id
domain_id = self.domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
with self.test_client() as c:
c.put(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_user_own_domain_on_other_domain(self):
user_domain_id = self.domain_id
domain_id = CONF.identity.default_domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
with self.test_client() as c:
c.put(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_user_own_domain_on_own_domain_with_role_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id))
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
with self.test_client() as c:
c.put(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'], role['id']
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_group_other_domain_on_project_own_domain(self): # noqa: E501
group_domain_id = CONF.identity.default_domain_id
project_domain_id = self.domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
with self.test_client() as c:
c.put(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'],
group['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_group_own_domain_on_project_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
with self.test_client() as c:
c.put(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'],
group['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_group_own_domain_on_project_own_domain_with_role_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
project_domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id))
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
with self.test_client() as c:
c.put(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'],
group['id'],
role['id']
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_group_other_domain_on_own_domain(self):
group_domain_id = CONF.identity.default_domain_id
domain_id = self.domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
with self.test_client() as c:
c.put(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_group_own_domain_on_other_domain(self):
group_domain_id = self.domain_id
domain_id = CONF.identity.default_domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
with self.test_client() as c:
c.put(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_create_grant_for_group_own_domain_on_own_domain_with_role_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id))
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
with self.test_client() as c:
c.put(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'], role['id']
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_user_other_domain_on_project_own_domain(self): # noqa: E501
user_domain_id = CONF.identity.default_domain_id
project_domain_id = self.domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
project_id=project['id']
)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_user_own_domain_on_project_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
project_id=project['id']
)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/users/%s/roles/%s' % (
project['id'], user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_user_other_domain_on_own_domain(self):
user_domain_id = CONF.identity.default_domain_id
domain_id = self.domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.delete(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_user_own_domain_on_other_domain(self):
user_domain_id = self.domain_id
domain_id = CONF.identity.default_domain_id
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, user_id=user['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.delete(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_user_own_domain_on_own_domain_with_role_other_domain(self): # noqa: E501
user_domain_id = self.domain_id
domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id))
user = PROVIDERS.identity_api.create_user(
unit.new_user_ref(domain_id=user_domain_id)
)
PROVIDERS.assignment_api.create_grant(
role['id'], user_id=user['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.delete(
'/v3/domains/%s/users/%s/roles/%s' % (
domain_id, user['id'], role['id']
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_group_other_domain_on_project_own_domain(self): # noqa: E501
group_domain_id = CONF.identity.default_domain_id
project_domain_id = self.domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
project_id=project['id']
)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'],
group['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_group_own_domain_on_project_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
project_domain_id = CONF.identity.default_domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
project = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(
domain_id=project_domain_id
)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
project_id=project['id']
)
with self.test_client() as c:
c.delete(
'/v3/projects/%s/groups/%s/roles/%s' % (
project['id'],
group['id'],
self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_group_other_domain_on_own_domain(self):
group_domain_id = CONF.identity.default_domain_id
domain_id = self.domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.delete(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_group_own_domain_on_other_domain(self):
group_domain_id = self.domain_id
domain_id = CONF.identity.default_domain_id
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.reader_role_id, group_id=group['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.delete(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'], self.bootstrapper.reader_role_id
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_cannot_revoke_grant_from_group_own_domain_on_own_domain_with_role_other_domain(self): # noqa: E501
group_domain_id = self.domain_id
domain_id = self.domain_id
role_domain_id = CONF.identity.default_domain_id
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex,
unit.new_role_ref(domain_id=role_domain_id))
group = PROVIDERS.identity_api.create_group(
unit.new_group_ref(domain_id=group_domain_id)
)
PROVIDERS.assignment_api.create_grant(
role['id'], group_id=group['id'],
domain_id=domain_id
)
with self.test_client() as c:
c.delete(
'/v3/domains/%s/groups/%s/roles/%s' % (
domain_id, group['id'], role['id']
),
headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _SystemUserGrantTests,
                        _SystemMemberAndReaderGrantTests):
    """Exercise the grant API as a user with a system-scoped reader role."""

    def setUp(self):
        """Create a system reader and authenticate with a system-scoped token."""
        super(SystemReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        # Enforce token scope so the reader persona is actually restricted.
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        system_reader = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(
            system_reader
        )['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.reader_role_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=system_reader['password'],
            system=True
        )
        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemMemberTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _SystemUserGrantTests,
                        _SystemMemberAndReaderGrantTests):
    """Exercise the grant API as a user with a system-scoped member role."""

    def setUp(self):
        """Create a system member and authenticate with a system-scoped token."""
        super(SystemMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        # Enforce token scope so the member persona is actually restricted.
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        system_member = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(
            system_member
        )['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.member_role_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=system_member['password'],
            system=True
        )
        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemAdminTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _SystemUserGrantTests):
    """Exercise the grant API as a system admin.

    System admins can read grants (via _SystemUserGrantTests) and, unlike
    readers and members, can also create and revoke grants on any project
    or domain.
    """

    def setUp(self):
        """Authenticate as the bootstrapped admin with a system-scoped token."""
        super(SystemAdminTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        # Reuse the admin created by keystone-manage bootstrap.
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            system=True
        )
        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}

    def test_can_create_grant_for_user_on_project(self):
        """A system admin can grant a role to a user on any project."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        with self.test_client() as c:
            c.put(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_create_grant_for_user_on_domain(self):
        """A system admin can grant a role to a user on any domain."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        with self.test_client() as c:
            c.put(
                '/v3/domains/%s/users/%s/roles/%s' % (
                    domain['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_create_grant_for_group_on_project(self):
        """A system admin can grant a role to a group on any project."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        with self.test_client() as c:
            c.put(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_create_grant_for_group_on_domain(self):
        """A system admin can grant a role to a group on any domain."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        with self.test_client() as c:
            c.put(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    domain['id'], group['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_revoke_grant_from_user_on_project(self):
        """A system admin can revoke a user's role on any project."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_revoke_grant_from_user_on_domain(self):
        """A system admin can revoke a user's role on any domain."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/domains/%s/users/%s/roles/%s' % (
                    domain['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_revoke_grant_from_group_on_project(self):
        """A system admin can revoke a group's role on any project."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_revoke_grant_from_group_on_domain(self):
        """A system admin can revoke a group's role on any domain."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    domain['id'], group['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )
class _DomainMemberAndReaderTests(object):
    """Negative grant tests shared by domain readers and domain members.

    Mixed into DomainReaderTests and DomainMemberTests; asserts that
    non-admin domain personas can never create or revoke grants, even
    inside their own domain.
    """

    def test_cannot_create_grant_for_user_on_project(self):
        """A domain reader/member gets a 403 creating a project grant."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        with self.test_client() as c:
            c.put(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_cannot_create_grant_for_user_on_domain(self):
        """A domain reader/member gets a 403 creating a domain grant."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        with self.test_client() as c:
            c.put(
                '/v3/domains/%s/users/%s/roles/%s' % (
                    domain['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_cannot_create_grant_for_group_on_project(self):
        """A domain reader/member gets a 403 creating a group project grant."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=self.domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.put(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_cannot_create_grant_for_group_on_domain(self):
        """A domain reader/member gets a 403 creating a group domain grant."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=self.domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        with self.test_client() as c:
            c.put(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    domain['id'], group['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_cannot_revoke_grant_from_user_on_project(self):
        """A domain reader/member gets a 403 revoking a project grant."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_cannot_revoke_grant_from_user_on_domain(self):
        """A domain reader/member gets a 403 revoking a domain grant."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/domains/%s/users/%s/roles/%s' % (
                    domain['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_cannot_revoke_grant_from_group_on_project(self):
        """A domain reader/member gets a 403 revoking a group project grant."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=self.domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(
                domain_id=CONF.identity.default_domain_id
            )
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )

    def test_cannot_revoke_grant_from_group_on_domain(self):
        """A domain reader/member gets a 403 revoking a group domain grant."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=self.domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    domain['id'], group['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
class DomainReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _DomainUserTests,
                        _DomainMemberAndReaderTests):
    """Exercise the grant API as a user with a domain-scoped reader role."""

    def setUp(self):
        """Create a fresh domain and a reader in it; get a domain-scoped token."""
        super(DomainReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        domain_user = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=domain_user['password'],
            domain_id=self.domain_id
        )
        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class DomainMemberTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _DomainUserTests,
                        _DomainMemberAndReaderTests):
    """Exercise the grant API as a user with a domain-scoped member role."""

    def setUp(self):
        """Create a fresh domain and a member in it; get a domain-scoped token."""
        super(DomainMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        domain_user = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.member_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=domain_user['password'],
            domain_id=self.domain_id
        )
        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class DomainAdminTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _DomainUserTests):
    """Run the domain-user grant tests as a user with the admin role.

    Unlike the reader/member variants, this class overrides the grant
    policies (see _override_policy) so the new, scope-aware rules are
    exercised instead of the deprecated ones, and it adds positive tests
    for creating/revoking grants inside the admin's own domain.
    """

    def setUp(self):
        """Create a domain, a domain admin in it, and authenticate as them."""
        super(DomainAdminTests, self).setUp()
        self.loadapp()
        # Write the overridden policies to a temp file and point the policy
        # fixture at it before configuring scope enforcement.
        self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
        self.policy_file_name = self.policy_file.file_name
        self.useFixture(
            ksfixtures.Policy(
                self.config_fixture, policy_file=self.policy_file_name
            )
        )
        self._override_policy()
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        domain_admin = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.admin_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=domain_admin['password'],
            domain_id=self.domain_id
        )
        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}

    def _override_policy(self):
        """Replace the grant policies with their non-deprecated forms."""
        # TODO(lbragstad): Remove this once the deprecated policies in
        # keystone.common.policies.grant have been removed. This is only
        # here to make sure we test the new policies instead of the deprecated
        # ones. Oslo.policy will OR deprecated policies with new policies to
        # maintain compatibility and give operators a chance to update
        # permissions or update policies without breaking users. This will
        # cause these specific tests to fail since we're trying to correct this
        # broken behavior with better scope checking.
        with open(self.policy_file_name, 'w') as f:
            overridden_policies = {
                'identity:list_grants': gp.SYSTEM_READER_OR_DOMAIN_READER_LIST,
                'identity:check_grant': gp.SYSTEM_READER_OR_DOMAIN_READER,
                'identity:create_grant': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:revoke_grant': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN
            }
            f.write(jsonutils.dumps(overridden_policies))

    def test_can_create_grant_for_user_on_project(self):
        """Domain admins may grant roles on projects within their domain."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.put(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_create_grant_for_user_own_domain_on_own_domain(self):
        """Domain admins may grant roles on their own domain."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.put(
                '/v3/domains/%s/users/%s/roles/%s' % (
                    self.domain_id, user['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_create_grant_for_group_on_project(self):
        """Domain admins may grant group roles on in-domain projects."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=self.domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.put(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_create_grant_for_group_own_domain_on_own_domain(self):
        """Domain admins may grant group roles on their own domain."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.put(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    self.domain_id, group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_revoke_grant_from_user_on_project(self):
        """Domain admins may revoke roles on projects within their domain."""
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=user['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/projects/%s/users/%s/roles/%s' % (
                    project['id'], user['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_can_revoke_grant_from_group_on_project(self):
        """Domain admins may revoke group roles on in-domain projects."""
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=self.domain_id)
        )
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id)
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            project_id=project['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/projects/%s/groups/%s/roles/%s' % (
                    project['id'],
                    group['id'],
                    self.bootstrapper.reader_role_id
                ),
                headers=self.headers
            )

    def test_cannot_revoke_grant_from_group_on_domain(self):
        """Domain admins may not revoke grants on a *different* domain."""
        # Note: the group lives in the default domain and the grant targets a
        # brand-new domain, i.e. everything is outside self.domain_id.
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        )
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, group_id=group['id'],
            domain_id=domain['id']
        )
        with self.test_client() as c:
            c.delete(
                '/v3/domains/%s/groups/%s/roles/%s' % (
                    domain['id'], group['id'], self.bootstrapper.reader_role_id
                ),
                headers=self.headers,
                expected_status_code=http.client.FORBIDDEN
            )
|
{
"content_hash": "83b8f00147fb56b8a77c107260393412",
"timestamp": "",
"source": "github",
"line_count": 2265,
"max_line_length": 119,
"avg_line_length": 35.4644591611479,
"alnum_prop": 0.5496159448255257,
"repo_name": "openstack/keystone",
"id": "bb74b090142059ac2d9e8ecf31ae065a5c730b43",
"size": "80900",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/tests/protection/v3/test_grants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "6213900"
},
{
"name": "Shell",
"bytes": "30491"
}
],
"symlink_target": ""
}
|
import jinja2
import jinja2.nodes
from jinja2.ext import Extension
from .templatetags.wagtailcore_tags import pageurl, richtext, slugurl, wagtail_version
class WagtailCoreExtension(Extension):
    """Jinja2 extension exposing Wagtail's core template helpers.

    Registers ``pageurl``/``slugurl``/``wagtail_version`` as globals and
    ``richtext`` as a filter, and implements the ``{% include_block %}`` tag.
    """

    tags = {'include_block'}

    def __init__(self, environment):
        super().__init__(environment)
        # pageurl/slugurl need the template context to resolve the request.
        self.environment.globals.update({
            'pageurl': jinja2.contextfunction(pageurl),
            'slugurl': jinja2.contextfunction(slugurl),
            'wagtail_version': wagtail_version,
        })
        self.environment.filters.update({
            'richtext': richtext,
        })

    def parse(self, parser):
        """Dispatch to parse_<tagname> for the tag currently being parsed."""
        parse_method = getattr(self, 'parse_' + parser.stream.current.value)
        return parse_method(parser)

    def parse_include_block(self, parser):
        """Parse ``{% include_block expr [with|without context] %}``."""
        lineno = next(parser.stream).lineno

        args = [parser.parse_expression()]

        # Optional "with context" / "without context" modifier; defaults to
        # passing the current template context through.
        with_context = True
        if parser.stream.current.test_any('name:with', 'name:without') and parser.stream.look().test('name:context'):
            with_context = next(parser.stream).value == 'with'
            parser.stream.skip()

        if with_context:
            args.append(jinja2.nodes.ContextReference())
        else:
            # Strictly this branch could be omitted, since _include_block's
            # context argument already defaults to None.
            args.append(jinja2.nodes.Const(None))

        node = self.call_method('_include_block', args, lineno=lineno)
        return jinja2.nodes.Output([node], lineno=lineno)

    def _include_block(self, value, context=None):
        """Render *value* via render_as_block() when available, else str it."""
        if hasattr(value, 'render_as_block'):
            if context:
                new_context = context.get_all()
            else:
                new_context = {}

            return jinja2.Markup(value.render_as_block(context=new_context))

        return jinja2.Markup(value)
# Nicer import name, so templates can enable the extension as
# "wagtail.core.jinja2tags.core".
core = WagtailCoreExtension
|
{
"content_hash": "d11637c12febe5468ba1d48b5787231c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 117,
"avg_line_length": 31.216666666666665,
"alnum_prop": 0.6241324079017618,
"repo_name": "timorieber/wagtail",
"id": "e5f6c499450f0e03f4f65cba338ee66430f37b98",
"size": "1873",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "wagtail/core/jinja2tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "185324"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "383475"
},
{
"name": "JavaScript",
"bytes": "267615"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3711005"
},
{
"name": "Shell",
"bytes": "8867"
}
],
"symlink_target": ""
}
|
"""Universal Vision Transformer with Adaptive Computation Time."""
from typing import Any, Optional
import flax.linen as nn
from flax.training import common_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
from scenic.model_lib.base_models import base_model
from scenic.model_lib.base_models import model_utils
from scenic.model_lib.base_models.multilabel_classification_model import MultiLabelClassificationModel
from scenic.model_lib.layers import attention_layers
from scenic.model_lib.layers import nn_layers
from scenic.projects.baselines import vit
from scenic.projects.baselines.pondernet import layers
def ponder_loss_fn(
    all_p: jnp.ndarray,
    p_g: jnp.ndarray,
    weights: Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
  """Ponder loss (KL regularizer) for PonderNet.

  Computes KL(p_g || all_p), summed over examples and halting steps, and
  normalized by the number of (weighted) examples in the batch.

  Args:
    all_p: Per-step halting probabilities, shape [num_steps, batch].
    p_g: Geometric prior over halting steps, shape [num_steps].
    weights: Optional per-example weights (e.g. the batch mask); when given,
      normalization uses their sum instead of the batch size.

  Returns:
    loss: A scalar used to regularize the PonderNet.
  """
  all_p = all_p.transpose((1, 0))  # -> [batch, num_steps]
  if weights is not None:
    normalization = weights.sum()
  else:
    # Batch size; previously written as np.prod(all_p.shape[0]), which is a
    # no-op prod over a scalar.
    normalization = all_p.shape[0]
  # Broadcast the prior across the batch dimension.
  p_g = jnp.expand_dims(p_g, axis=0)
  p_g = p_g.repeat(all_p.shape[0], axis=0)
  # KL divergence between the prior p_g and the predicted halting
  # distribution all_p; the epsilons guard against log(0).
  loss = jnp.sum(p_g * (jnp.log(p_g + 1e-8) - jnp.log(all_p + 1e-8))) / (
      normalization + 1e-8)
  return loss
class UTStochasticDepth(nn.Module):
  """Performs layer-dropout (also known as stochastic depth).

  Described in
  Huang & Sun et al, "Deep Networks with Stochastic Depth", 2016
  https://arxiv.org/abs/1603.09382

  Attributes:
    rate: the layer dropout probability (_not_ the keep rate!).
    deterministic: If false (e.g. in training) the inputs are scaled by `1 / (1
      - rate)` and the layer dropout is applied, whereas if true (e.g. in
      evaluation), no stochastic depth is applied and the inputs are returned as
      is.

  Note: This is a repeated implementation of model_lib.nn_layers.StochasticDepth
  The implementation here is to match the nn.cond in UT
  """
  rate: float = 0.0
  # NOTE(review): this attribute is never read below — only the `deterministic`
  # argument of __call__ is consulted (there is no nn.merge_param). Confirm
  # whether the attribute is intended to act as a default.
  deterministic: Optional[bool] = None

  @nn.compact
  def __call__(self,
               x: jnp.ndarray,
               deterministic: Optional[bool] = None) -> jnp.ndarray:
    """Applies a stochastic depth mask to the inputs.

    Args:
      x: Input tensor.
      deterministic: If false (e.g. in training) the inputs are scaled by `1 /
        (1 - rate)` and the layer dropout is applied, whereas if true (e.g. in
        evaluation), no stochastic depth is applied and the inputs are returned
        as is.

    Returns:
      The masked inputs reweighted to preserve mean.
    """
    if self.rate <= 0.0:
      return x
    if deterministic:
      return x
    else:
      # Per-example mask: one Bernoulli draw per batch element, broadcast over
      # all remaining dimensions.
      shape = (x.shape[0],) + (1,) * (x.ndim - 1)
      rng = self.make_rng('dropout')
      mask = jax.random.bernoulli(rng, self.rate, shape)
      # NOTE(review): survivors are NOT rescaled by 1/(1 - rate) here, despite
      # the docstring's wording — confirm this matches the reference
      # nn_layers.StochasticDepth behavior.
      return x * (1.0 - mask)
class Encoder1DBlock(nn.Module):
  """Transformer encoder layer.

  Pre-norm design: LayerNorm -> self-attention -> dropout/stochastic-depth ->
  residual, then LayerNorm -> MLP -> stochastic-depth -> residual.

  Attributes:
    mlp_dim: Dimension of the mlp on top of attention block.
    num_heads: Number of self-attention heads.
    dtype: The dtype of the computation (default: float32).
    dropout_rate: Dropout rate.
    attention_dropout_rate: Dropout for attention heads.
    stochastic_depth: probability of dropping a layer linearly grows from 0 to
      the provided value.

  Returns:
    output after transformer encoder block.
  """
  mlp_dim: int
  num_heads: int
  dtype: Any = jnp.float32
  dropout_rate: float = 0.1
  attention_dropout_rate: float = 0.1
  stochastic_depth: float = 0.0

  @nn.compact
  def __call__(self, inputs: jnp.ndarray, deterministic: bool) -> jnp.ndarray:
    """Applies Encoder1DBlock module.

    Args:
      inputs: Input data.
      deterministic: Deterministic or not (to apply dropout).

    Returns:
      Output after transformer encoder block.
    """
    # Attention block.
    assert inputs.ndim == 3
    x = nn.LayerNorm(dtype=self.dtype)(inputs)
    x = nn.MultiHeadDotProductAttention(
        num_heads=self.num_heads,
        dtype=self.dtype,
        kernel_init=nn.initializers.xavier_uniform(),
        broadcast_dropout=False,
        deterministic=deterministic,
        dropout_rate=self.attention_dropout_rate)(x, x)
    x = nn.Dropout(rate=self.dropout_rate)(x, deterministic)
    x = UTStochasticDepth(rate=self.stochastic_depth)(x, deterministic)
    # First residual connection.
    x = x + inputs

    # MLP block.
    y = nn.LayerNorm(dtype=self.dtype)(x)
    y = attention_layers.MlpBlock(
        mlp_dim=self.mlp_dim,
        dtype=self.dtype,
        dropout_rate=self.dropout_rate,
        activation_fn=nn.gelu,
        kernel_init=nn.initializers.xavier_uniform(),
        bias_init=nn.initializers.normal(stddev=1e-6))(
            y, deterministic=deterministic)
    y = UTStochasticDepth(rate=self.stochastic_depth)(y, deterministic)
    # Second residual connection.
    return y + x
class PonderNetEncoder(nn.Module):
  """PonderNet Transformer Encoder.

  Attributes:
    num_layers: Number of layers.
    mlp_dim: Dimension of the mlp on top of attention block.
    inputs_positions: Input subsequence positions for packed examples.
    dropout_rate: Dropout rate.
    stochastic_depth: probability of dropping a layer linearly grows from 0 to
      the provided value. Our implementation of stochastic depth follows timm
      library, which does per-example layer dropping and uses independent
      dropping patterns for each skip-connection.
    dtype: Dtype of activations.
  """
  num_layers: int
  mlp_dim: int
  num_heads: int
  dropout_rate: float = 0.1
  attention_dropout_rate: float = 0.1
  # NOTE(review): stochastic_depth is declared but never forwarded to the
  # Encoder1DBlock instances below, so the config value is silently unused —
  # confirm whether that is intended (per-layer rates are impossible with
  # parameter sharing, but a constant rate could still be passed).
  stochastic_depth: float = 0.0
  parameter_sharing: bool = True
  ac_config: Optional[ml_collections.ConfigDict] = None
  dtype: Any = jnp.float32

  @nn.compact
  def __call__(self, inputs: jnp.ndarray, *, train: bool = False):
    """Applies Transformer model on the inputs."""
    assert inputs.ndim == 3  # Shape is `[batch, len, emb]`.
    x = vit.AddPositionEmbs(
        posemb_init=nn.initializers.normal(stddev=0.02),  # from BERT.
        name='posembed_input')(
            inputs)
    x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not train)
    dtype = jax.dtypes.canonicalize_dtype(self.dtype)
    # We use layers.AdaptiveComputationTime only when we are doing ACT.
    if self.ac_config is None:
      # We make the layer first if we are using parameter sharing.
      if not self.parameter_sharing:
        # Independent weights per layer.
        for i in range(self.num_layers):
          x = Encoder1DBlock(
              mlp_dim=self.mlp_dim,
              num_heads=self.num_heads,
              dropout_rate=self.dropout_rate,
              attention_dropout_rate=self.attention_dropout_rate,
              name='encoderblock_' + str(i),
              dtype=dtype)(
                  x, deterministic=not train)
      else:
        # One shared block applied num_layers times.
        encoder_block = Encoder1DBlock(
            mlp_dim=self.mlp_dim,
            num_heads=self.num_heads,
            dropout_rate=self.dropout_rate,
            attention_dropout_rate=self.attention_dropout_rate,
            name='encoderblock',
            dtype=dtype)
        for i in range(self.num_layers):
          x = encoder_block(x, deterministic=not train)
      auxiliary_outputs = None
    else:
      # ACT path: the shared block is driven by AdaptiveComputationTime,
      # which also returns per-step states and halting probabilities.
      encoder_block = Encoder1DBlock(
          mlp_dim=self.mlp_dim,
          num_heads=self.num_heads,
          dropout_rate=self.dropout_rate,
          attention_dropout_rate=self.attention_dropout_rate,
          name='encoderblock',
          dtype=dtype)
      x, auxiliary_outputs = layers.AdaptiveComputationTime(
          self.ac_config, encoder_block, self.parameter_sharing,
          name='act')(x, not train)
      (all_states, all_p, n_updates) = auxiliary_outputs
    # The same LayerNorm is applied to the final state and to every
    # intermediate state, so their statistics are comparable.
    # NOTE(review): when ac_config is None, all_states/all_p/n_updates are
    # never bound, so the two lines below would raise NameError — it appears
    # only the ACT path is exercised in practice; confirm.
    encoded_norm_layer = nn.LayerNorm(name='encoder_norm')
    encoded = encoded_norm_layer(x)
    all_states = encoded_norm_layer(all_states)
    return encoded, (all_states, all_p, n_updates)
class PonderViT(nn.Module):
  """Universal Vision Transformer model.

  Attributes:
    num_classes: Number of output classes.
    mlp_dim: Dimension of the mlp on top of attention block.
    num_layers: Number of layers.
    num_heads: Number of self-attention heads.
    patches: Configuration of the patches extracted in the stem of the model.
    ac_config: Configuration of the adaptive computation.
    hidden_size: Size of the hidden state of the output of model's stem.
    dropout_rate: Dropout rate.
    attention_dropout_rate: Dropout for attention heads.
    classifier: type of the classifier layer. Options are 'gap', 'gmp', 'gsp',
      'token'.
    dtype: JAX data type for activations.
  """

  num_classes: int
  mlp_dim: int
  num_layers: int
  num_heads: int
  patches: ml_collections.ConfigDict
  ac_config: ml_collections.ConfigDict
  hidden_size: int
  dropout_rate: float = 0.1
  attention_dropout_rate: float = 0.1
  stochastic_depth: float = 0.0
  classifier: str = 'gap'
  parameter_sharing: bool = True
  dtype: Any = jnp.float32

  @nn.compact
  def __call__(self, x: jnp.ndarray, *, train: bool, debug: bool = False):
    fh, fw = self.patches.size
    # Extracting patches and then embedding is in fact a single convolution.
    x = nn.Conv(
        self.hidden_size, (fh, fw),
        strides=(fh, fw),
        padding='VALID',
        name='embedding')(
            x)
    n, h, w, c = x.shape
    # Flatten the spatial grid into a token sequence: [n, h*w, c].
    x = jnp.reshape(x, [n, h * w, c])

    # If we want to add a class token, add it here.
    if self.classifier == 'token':
      cls = self.param('cls', nn.initializers.zeros, (1, 1, c), x.dtype)
      cls = jnp.tile(cls, [n, 1, 1])
      x = jnp.concatenate([cls, x], axis=1)
    x, auxiliary_outputs = PonderNetEncoder(
        mlp_dim=self.mlp_dim,
        num_layers=self.num_layers,
        num_heads=self.num_heads,
        dropout_rate=self.dropout_rate,
        attention_dropout_rate=self.attention_dropout_rate,
        ac_config=self.ac_config,
        stochastic_depth=self.stochastic_depth,
        parameter_sharing=self.parameter_sharing,
        dtype=self.dtype,
        name='PonderNetTransformer')(
            x, train=train)
    # NOTE(review): all_states/all_p/n_updates are only bound when
    # auxiliary_outputs is not None, yet they are used unconditionally below
    # (pooling, projection, and the return) — the no-ACT path would raise
    # NameError; it appears ac_config is always set in practice. Confirm.
    if auxiliary_outputs is not None:
      (all_states, all_p, n_updates) = auxiliary_outputs
    # Pool tokens into a single representation; the same reduction is applied
    # to every intermediate ACT state (axis shifted by the leading step dim).
    if self.classifier in ('token', '0'):
      x = x[:, 0]
      all_states = all_states[:, :, 0]
    elif self.classifier in ('gap', 'gmp', 'gsp'):
      fn = {'gap': jnp.mean, 'gmp': jnp.max, 'gsp': jnp.sum}[self.classifier]
      x = fn(x, axis=1)
      all_states = fn(all_states, axis=2)

    pre_logits_layer = nn_layers.IdentityLayer(name='pre_logits')
    x = pre_logits_layer(x)
    all_states = pre_logits_layer(all_states)
    # A single output head is shared between the final state and all
    # intermediate states.
    output_projection_layer = nn.Dense(
        self.num_classes,
        kernel_init=nn.initializers.zeros,
        name='output_projection')
    x = output_projection_layer(x)
    all_states = output_projection_layer(all_states)
    return x, (all_states, all_p, n_updates)
class PonderViTMultiLabelClassificationModel(MultiLabelClassificationModel):
  """Universal Vision Transformer model for multi-label classification task."""

  def build_flax_model(self) -> nn.Module:
    """Builds the PonderViT flax model from `self.config`."""
    model_dtype = getattr(jnp, self.config.get('model_dtype_str', 'float32'))
    return PonderViT(
        num_classes=self.dataset_meta_data['num_classes'],
        mlp_dim=self.config.model.mlp_dim,
        num_layers=self.config.model.num_layers,
        num_heads=self.config.model.num_heads,
        patches=self.config.model.patches,
        ac_config=self.config.model.get('ac_config'),
        hidden_size=self.config.model.hidden_size,
        classifier=self.config.model.classifier,
        dropout_rate=self.config.model.get('dropout_rate', 0.1),
        attention_dropout_rate=self.config.model.get('attention_dropout_rate',
                                                     0.1),
        stochastic_depth=self.config.model.get('stochastic_depth', 0.0),
        parameter_sharing=self.config.model.get('parameter_sharing', True),
        dtype=model_dtype,
    )

  def loss_function(self,
                    logits: jnp.array,
                    auxiliary_outputs: Any,
                    batch: base_model.Batch,
                    model_params: Optional[jnp.array] = None) -> float:
    """Returns sigmoid cross entropy loss with an L2 penalty on the weights.

    Args:
      logits: Output of model in shape [batch, length, num_classes].
      auxiliary_outputs: Output of model auxiliary_outputs, (ponder_times,
        remainders)
      batch: Batch of data that has 'label' and optionally 'batch_mask'.
      model_params: Parameters of the model, for optionally applying
        regularization.

    Returns:
      Total loss.
    """
    weights = batch.get('batch_mask')
    ac_config = self.config.model.get('ac_config')
    total_loss = 0.0
    if self.dataset_meta_data.get('target_is_onehot', False):
      multihot_target = batch['label']
    else:
      # This is to support running a multi-label classification model on
      # single-label classification tasks.
      multihot_target = common_utils.onehot(batch['label'], logits.shape[-1])

    # We calculate the ponder loss only when the ac_config is used.
    if ac_config is not None:
      # Unpack the auxiliary_outputs.
      all_states = auxiliary_outputs[0]
      all_p = auxiliary_outputs[1]
      # Calculate different losses for different states; each step's CE loss
      # is weighted by that step's halting probability (and the batch mask).
      for i in range(ac_config.act_max_steps):
        sig_ce_loss = model_utils.weighted_sigmoid_cross_entropy(
            all_states[i],
            multihot_target,
            all_p[i] * weights,  # weighted averaging different decisions
            label_smoothing=self.config.get('label_smoothing'))
        total_loss += sig_ce_loss

      # Init a (truncated) geometric prior distribution over halting steps.
      p_g = jnp.zeros([
          ac_config.act_max_steps,
      ])
      not_halted = 1.0
      for i in range(ac_config.act_max_steps):
        # For the last time step, we need to ensure the sum of different
        # steps equal to 1.0.
        if i < ac_config.act_max_steps - 1:
          p_g = p_g.at[i].set(ac_config.lambda_p * not_halted)
          not_halted = not_halted * (1 - ac_config.lambda_p)
        else:
          p_g = p_g.at[-1].set(1.0 - jnp.sum(p_g, axis=0))
      ponder_loss = ponder_loss_fn(all_p, p_g, weights)
      total_loss += ac_config.act_loss_weight * ponder_loss
    else:
      # We do not calculate the ponder loss as no act used in config.
      sig_ce_loss = model_utils.weighted_sigmoid_cross_entropy(
          logits,
          multihot_target,
          weights,
          label_smoothing=self.config.get('label_smoothing'))
      # BUG FIX: the cross-entropy loss was previously computed here but never
      # accumulated, so without an ac_config the returned loss was 0 (plus the
      # optional L2 term) and the model would not train.
      total_loss += sig_ce_loss

    if self.config.get('l2_decay_factor') is not None:
      l2_loss = model_utils.l2_regularization(model_params)
      total_loss = total_loss + 0.5 * self.config.l2_decay_factor * l2_loss
    return total_loss

  def init_from_train_state(
      self, train_state: Any, restored_train_state: Any,
      restored_model_cfg: ml_collections.ConfigDict) -> Any:
    """Updates the train_state with data from restored_train_state.

    This function is writen to be used for 'fine-tuning' experiments. Here, we
    do some surgery to support larger resolutions (longer sequence length) in
    the transformer block, with respect to the learned pos-embeddings.

    Args:
      train_state: A raw TrainState for the model.
      restored_train_state: A TrainState that is loaded with parameters/state of
        a  pretrained model.
      restored_model_cfg: Configuration of the model from which the
        restored_train_state come from. Usually used for some asserts.

    Returns:
      Updated train_state.
    """
    # Not supported for this model yet.
    raise NotImplementedError
|
{
"content_hash": "0c7cced0290d7d4fba812275437026bb",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 102,
"avg_line_length": 36.01146788990825,
"alnum_prop": 0.6514871664225209,
"repo_name": "google-research/scenic",
"id": "f5fe86f532f8415638ecb6218c798e7f934328a2",
"size": "15701",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scenic/projects/baselines/pondernet/pondervit/pondervit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1717873"
},
{
"name": "Python",
"bytes": "3692184"
}
],
"symlink_target": ""
}
|
"""Tests for xla_sharding.Sharding class and associated module functions."""
from absl.testing import absltest
import numpy as np
from google.protobuf.message import DecodeError
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.python.compiler.xla.experimental import xla_sharding
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
class ShardingTest(test_util.TensorFlowTestCase):
  """Tests for member functions of the class xla_sharding.Sharding."""

  def test_sharding_is_default_constructable(self):
    self.assertIsNotNone(xla_sharding.Sharding())

  def test_sharding_factory_functions_can_return_sharding_objects(self):
    """Tests the various recommended ways to construct a Sharding object.

    This is the most minimal of tests, doesn't assert anything about the
    Sharding object produced by a given factory methods other than that it
    has the correct type.
    """
    sharding_cls = xla_sharding.Sharding
    factory_results = (
        sharding_cls.replicate(),
        sharding_cls.manual(),
        sharding_cls.assign_device(0),
        sharding_cls.tile(np.ones([3], dtype=int)),
        sharding_cls.partial_tile(np.ones([3], dtype=int)),
        sharding_cls.split(
            array_ops.ones([3, 8, 7], dtype=dtypes.int32), 1, 2),
        sharding_cls.subgroup_tile(
            np.ones([2, 3, 3], dtype=int), [
                xla_data_pb2.OpSharding.REPLICATED,
                xla_data_pb2.OpSharding.MANUAL
            ]),
    )
    for candidate in factory_results:
      self.assertIsInstance(candidate, sharding_cls)
class XlaShardingTest(test_util.TensorFlowTestCase):
  """Tests for non-member functions in the module xla_sharding.py."""

  def test_replicate_annotates_tensor_correctly(self):

    @def_function.function
    def replicate_helper(tensor):
      # Note: replicate() is applied to a fresh ones tensor, not to the
      # traced `tensor` argument; `tensor` only serves as the unannotated
      # control.
      replicated_tensor = xla_sharding.replicate(
          array_ops.ones([4, 5, 6], dtype=dtypes.float32))
      self.assertIsNone(xla_sharding.get_tensor_sharding(tensor))
      replicated_sharding = xla_sharding.get_tensor_sharding(replicated_tensor)
      self.assertIsNotNone(replicated_sharding)
      # Replicated shardings carry no tile assignment shape.
      self.assertIsNone(
          xla_sharding.get_sharding_tile_shape(replicated_sharding))
      return replicated_tensor

    in_tensor = array_ops.ones([4, 5, 6], dtype=dtypes.float32)
    result = replicate_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
    # Annotation must not change the tensor's values.
    self.assertAllEqual(in_tensor, result)

  def test_tile_annotates_tensor_correctly(self):

    @def_function.function
    def tile_helper(tensor):
      self.assertIsNone(xla_sharding.get_tensor_sharding(tensor))
      tiled_tensor = xla_sharding.tile(tensor, np.array([2, 1, 6]))
      self.assertIsInstance(tiled_tensor, ops.Tensor)
      tiled_sharding = xla_sharding.get_tensor_sharding(tiled_tensor)
      tile_shape = xla_sharding.get_sharding_tile_shape(tiled_sharding)
      # This is the shape of the tile assignment [2, 1, 6]
      expected_shape = [3]
      self.assertEqual(expected_shape, tile_shape)
      return tiled_tensor

    in_tensor = array_ops.ones([4, 5, 6], dtype=dtypes.float32)
    result = tile_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
    self.assertAllEqual(in_tensor, result)

  def test_split_annotates_tensor_correctly(self):

    @def_function.function
    def split_helper(tensor):
      self.assertIsNone(xla_sharding.get_tensor_sharding(tensor))
      # Split dimension 2 into 3 pieces.
      split_tensor = xla_sharding.split(tensor, 2, 3)
      self.assertIsInstance(split_tensor, ops.Tensor)
      split_sharding = xla_sharding.get_tensor_sharding(split_tensor)
      split_shape = xla_sharding.get_sharding_tile_shape(split_sharding)
      expected_shape = [1, 1, 3]
      self.assertEqual(expected_shape, split_shape)
      return split_tensor

    in_tensor = array_ops.ones([4, 5, 6], dtype=dtypes.float32)
    result = split_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
    self.assertAllEqual(in_tensor, result)

  def test_split_raises_error_with_incommensurate_dimensions(self):

    @def_function.function
    def split_helper(tensor):
      # 8 splits cannot fit a dimension of size 4.
      split_tensor = xla_sharding.split(tensor, 0, 8)
      return split_tensor

    with self.assertRaises(ValueError):
      _ = split_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
    # TODO(drm): Modify split() so that this call raises an error since
    # 8 does not divide 9 (currently only checks that 8 is smaller than 9,
    # which it is, but this is not good for splitting).
    # with self.assertRaises(ValueError):
    #   _ = split_helper(array_ops.ones([9, 5, 6], dtype=dtypes.float32))

  def test_copy_sharding_succeeds_with_identically_shaped_tensors(self):

    @def_function.function
    def copy_helper(tensor):
      tensor_src = array_ops.identity(tensor)
      tensor_src = xla_sharding.split(tensor, 2, 3)
      sharding_src = xla_sharding.get_tensor_sharding(tensor_src)
      shape_src = xla_sharding.get_sharding_tile_shape(sharding_src)
      self.assertEqual([1, 1, 3], shape_src)
      tensor_dest = array_ops.identity(tensor)
      self.assertIsNone(xla_sharding.get_tensor_sharding(tensor_dest))
      # Copying should transfer the source's tile shape to the destination.
      xla_sharding.copy_sharding(tensor_src, tensor_dest)
      sharding_dest = xla_sharding.get_tensor_sharding(tensor_dest)
      shape_dest = xla_sharding.get_sharding_tile_shape(sharding_dest)
      self.assertEqual([1, 1, 3], shape_dest)
      return tensor_dest

    in_tensor = array_ops.ones([4, 5, 6], dtype=dtypes.float32)
    result = copy_helper(array_ops.ones([4, 5, 6], dtype=dtypes.float32))
    self.assertAllEqual(in_tensor, result)

  def test_get_sharding_tile_shape_returns_none_on_none_input(self):
    self.assertIsNone(xla_sharding.get_sharding_tile_shape(None))

  def test_get_sharding_tile_shape_raises_error_on_nonparsable_input(self):
    # A byte string that is not a valid serialized OpSharding proto.
    bad_proto_data = b'\x0f'
    with self.assertRaises(DecodeError):
      xla_sharding.get_sharding_tile_shape(bad_proto_data)
# Run the tests via absl's test runner.
if __name__ == '__main__':
  absltest.main()
|
{
"content_hash": "d66b5f991ff59ef40954f90840f50f99",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 80,
"avg_line_length": 40.98089171974522,
"alnum_prop": 0.704849238420889,
"repo_name": "yongtang/tensorflow",
"id": "2f0281e99b21de16a9548666e78faa7ab1f22d9e",
"size": "7083",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tensorflow/python/compiler/xla/experimental/xla_sharding_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1368342"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125162438"
},
{
"name": "CMake",
"bytes": "179878"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2118448"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792868"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11205807"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300198"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42642473"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621427"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14607"
},
{
"name": "Starlark",
"bytes": "7577804"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import sys
import logging
from os import path, sep, remove
from subprocess import Popen, PIPE # the default jython included in android sdk does not support check_output
from engine.Tester import Tester
from engine.TestServer import TestServer
from engine.AppTestEngine import AppTestEngine
#from engine.Android import callCmd
from engine.TestUtil import printLog, DEFAULT_TEST_SUITE, TestStatus,APP_NAME, H_LINE, \
GREEN_RECEIVERS, YELLOW_RECEIVERS, RED_RECEIVERS, \
CRITICAL_TESTCASES, CONFIG_FILE
from pyh import PyH, h3, h4, div, p, table, td, tr
## configurables
import ConfigParser

# Load the shared build configuration once at import time.
config = ConfigParser.ConfigParser()
# Use a context manager so the file handle is closed deterministically
# (the original passed an anonymous open() to readfp and leaked it).
with open(CONFIG_FILE) as _config_fp:
    config.readfp(_config_fp)

CLIENT_VERSION_PREFIX = config.get('BUILD', 'CLIENT_VERSION_PREFIX')
BUILD_VERSION = config.get('BUILD', 'BUILD_VERSION')
BUILD_FILENAME = config.get('BUILD', 'BUILD_FILENAME')
# build path info
BUILD_ROOT_PATH = config.get('BUILD', 'BUILD_ROOT_PATH')
#BUILD_LOCAL_ROOT_PATH = config.get('BUILD', 'BUILD_LOCAL_ROOT_PATH')
class AppTester(Tester):
    def __init__(self, suite_name=DEFAULT_TEST_SUITE, build_num='latest'):
        """Create an app tester for the given suite and build number.

        All real initialization is delegated to the base ``Tester``; the
        ``initOK`` flag it sets is checked here but no extra setup is
        performed yet.
        """
        Tester.__init__(self, suite_name, build_num)
        if self.initOK:
            # Placeholder: no AppTester-specific initialization yet.
            pass
def __writeHtmlTestResult(self):
content_format="%-*s%-*s%*s%*s%*s"
content_format2="%-*s%*s" # '-' means left just (right just by default) http://www.cnblogs.com/zero86/archive/2012/11/22/2783679.html
content_format3="%-*s%*d"
self.Total=self.testPool.getTotalCount()
self.Pass=self.testPool.getPassCount()
self.Fail=self.testPool.getFailCount()
## create HTML content
page = PyH('Test Result')
page << h3('Overall Result:')
# table0 = page << table(border='0',id='table_overall')
# tmpRow = table0 << tr(id='line1')
# tmpRow << td("Total:") <<td(str(self.Total))
# tmpRow = table0 << tr(id='line2')
# tmpRow << td("Pass:") <<td(str(self.Pass))
# tmpRow = table0 << tr(id='line3')
# tmpRow << td("Fail:") <<td(str(self.Fail))
# tmpRow = table0 << tr(id='line4')
# tmpRow << td("Not Run:") <<td(str(self.Total-self.Pass-self.Fail))
page << p(content_format3 % (10, 'Total:' , 5, self.Total))
page << p(content_format3 % (10, 'Pass:' , 5, self.Pass))
page << p(content_format3 % (10, 'Fail:' , 5, self.Fail))
page << p(content_format3 % (10, 'Not Run:' , 5, self.Total-self.Pass-self.Fail))
if self.Fail>0:
page << h3('Failed Testcase:',style='color:red;')
table1 = page << table(border='1',cellPadding='5',id='table_failedTest')
headtr = table1 << tr(id='headline1')
headtr << td('Test Name') << td('Failure Description') << td('Device')<< td('Start Time')<< td('End Time')
for tc in self.testPool.queue:
if tc.result == TestStatus.Fail:
tmpRow = table1 << tr(id='line1')
tmpRow << td(tc.name) <<td(tc.errormsg)<<td(tc.device)<<td(tc.start_time)<<td(tc.end_time)
# page << content_format % (25, tc.name, 45, '\t'+tc.errormsg, 30, '\t'+tc.device, 20, '\t'+tc.start_time, 20, '\t'+tc.end_time)
# page << H_LINE
if self.Pass>0:
page << h3('Passed Testcase:',style='color:green;')
table2 = page << table(border='1',cellPadding='5',id='table_passedTest')
headtr = table2 << tr(id='headline2')
headtr << td('Test Name') << td('Test Description') << td('Device')<< td('Start Time')<< td('End Time')
# page << content_format % (25, 'Test Name', 45, 'Test Description', 30, "\tDevice", 20, "\tStart Time", 20, "\tEnd Time")
# page << H_LINE
for tc in self.testPool.queue:
if tc.result == TestStatus.Pass:
tmpRow = table2 << tr(id='line2')
tmpRow << td(tc.name) <<td(tc.desc)<<td(tc.device)<<td(tc.start_time)<<td(tc.end_time)
# page << content_format % (25, tc.name, 45, tc.desc, 30, '\t'+tc.device,20, '\t'+tc.start_time, 20, '\t'+tc.end_time)
## Test time
mydiv2 = page << div(id='myDiv2')
mydiv2 << h4('Test build:')+ p(CLIENT_VERSION_PREFIX+str(self.buildnum))
mydiv2 << h4('Test start:')+ p(self.start_time)
mydiv2 << h4('Test stop: ')+ p(self.end_time)
## host info
mydiv2 << h4('Test Server: ')+ p(TestServer().getHostname())
# page << h4(content_format2 % (11, 'Test start:', 30, self.start_time), cl='left')
# page << h4(content_format2 % (11, 'Test stop: ', 30, self.end_time), cl='left')
# page << h4(content_format2 % (11, 'Build:', 30, CLIENT_VERSION_PREFIX+str(self.buildnum)), cl='left')
## Test device
mydiv2 << h4('Test Devices:')
count=0
table_device = mydiv2 << table(cellSpacing='1', cellPadding='5', border='1',borderColor='#666666', id='table_device')
table_device.attributes['cellSpacing'] = 1
headtr = table_device << tr(id='headline5')
headtr << td('No.') << td('Make') << td('Model') << td('Android Version') << td('ID')
for device in self.devicePool.queue:
count+=1
tmpRow = table_device << tr(id='line1')
tmpRow << td(str(count)) <<td(device.make)<<td(device.model)<<td(device.androidVersion)<<td(device.deviceId)
# page << h5(content_format2 % (11, 'Device'+str(count)+":\t", 50, \
# device.make+' '+device.model+' '+device.androidVersion+' ' + device.deviceId))
## write file
page.printOut(file=self.suiteName+'.html')
def getBuild(self):
'''
# implement: get build from mainline/release server
'''
result=False
## remove any existing build file
if path.isfile(BUILD_FILENAME):
remove(BUILD_FILENAME)
if self.buildnum=='latest':
self.buildnum=AppTestEngine.getLatestBuildNumber()
if self.buildnum==0:
printLog('[getBuild] invalid build number specified or build location not accessible.', logging.ERROR)
return result
#TODO: customize the target file path
target=BUILD_ROOT_PATH+sep+BUILD_VERSION+sep+APP_NAME+'-'+str(self.buildnum)+sep+BUILD_FILENAME
printLog('[getBuild] Downloading build %s from %s...' % (str(self.buildnum), target), logging.INFO)
try:
self.testServer.callShellCmd(r'cp '+target+' .')
if path.isfile(BUILD_FILENAME):
printLog('[getBuild] Build %s is downloaded.' % str(self.buildnum), logging.INFO)
result=True
except IOError, e:
printLog('[getBuild] Build %s download failed: %s' % e.message, logging.ERROR)
# self.appTestEngine.buildnum=0
return result
# def generateTestReport(self):
# # TODO: customize your own report
# self.__writeHtmlTestResult()
def main(argv):
    """Command-line entry point.

    argv (after the script name) may contain:
      1. the test suite name (optional; defaults to DEFAULT_TEST_SUITE)
      2. the build number (optional; digits or 'latest', default 'latest')

    Runs the suite, classifies the outcome as GREEN/YELLOW/RED and, when
    enabled, triggers the mail notification step.
    """
    print(H_LINE)
    buildnum = 'latest'
    suite = DEFAULT_TEST_SUITE
    if len(argv) >= 2:
        if argv[1] == '-h':
            print('Usage: AppTester.py [suite] [buildnum]')
            return
        suite = argv[1]
        print('[Main] Test suite specified: %s' % suite)
    else:
        print('[Main] Test suite not specified, use default test suite: %s' % suite)
    if len(argv) >= 3:
        buildnum = argv[2]
        print('[Main] Build number specified: %s' % buildnum)
        if not buildnum.isdigit() and buildnum != 'latest':
            print('[Main] Invalid build number! Quit...')
            return
    else:
        print('[Main] Build number not specified, use latest build.')
    print(H_LINE)
    tester = AppTester(suite, buildnum)
    if tester.initOK:
        tester.getBuild()
        ret = tester.start()
    else:
        print('[Main] Tester init failed.')
        return
    # NOTE(review): prefix uses %d for tester.buildnum, which assumes the
    # build number has been resolved to an int by getBuild() -- confirm.
    prefix = 'Automation Test - %s %s (build %s%d)' % (APP_NAME, suite, CLIENT_VERSION_PREFIX, tester.buildnum)
    subject = ''
    do_deploy = True
    DO_SEND_MAIL = False
    status = ''
    if ret < 0:
        # Negative return: the run itself aborted.
        print('Test aborted!')
        status = 'RED'
        subject = prefix + r': RED (Test aborted! PLEASE CHECK THE TEST SERVER!)'
        to = RED_RECEIVERS
    elif ret == 0 and len(tester.exception_string) <= 0:
        print('All Test PASS!')
        status = 'GREEN'
        subject = '%s: GREEN' % (prefix)
        to = GREEN_RECEIVERS
        DO_SEND_MAIL = False
    elif tester.testPool.isFatalErrorHappened(CRITICAL_TESTCASES):
        # A critical testcase failed: do not deploy this build.
        print('Test has fatal error!')
        status = 'RED'
        subject = '%s: RED (Found fatal error)' % (prefix)
        to = RED_RECEIVERS
        do_deploy = False
    elif float(ret) / float(tester.Total) >= 0.5:
        # Half or more of the suite failed.
        status = 'RED'
        print('Test has failures (Fail: %d of %d)' % (ret, tester.Total))
        subject = '%s: RED (Fail: %d of %d)' % (prefix, ret, tester.Total)
        to = RED_RECEIVERS
    else:
        status = 'YELLOW'
        print('Test has failures (Fail: %d of %d)' % (ret, tester.Total))
        subject = '%s: YELLOW (Fail: %d of %d)' % (prefix, ret, tester.Total)
        to = YELLOW_RECEIVERS
    ## do deploy
    # if do_deploy:
    #TODO: implement the deploy logic
    if DO_SEND_MAIL:
        try:
            print('[Main] mail subject: %s' % subject)
            # BUGFIX: str.join takes a single iterable; the original passed
            # five positional arguments, which raised TypeError.
            cmd = ' '.join(['python', 'mail.py', suite, buildnum, status])
            try:
                p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
                out, err = p.communicate()
                printLog("[runShellCmd] Command returns:\noutput:%s\n" % out)
                if len(err) > 0:
                    printLog('[runShellCmd] error:%s' % err, logging.ERROR)
            except Exception:
                printLog("[runShellCmd] Exception when run cmd '%s'." % cmd, logging.ERROR)
                return None
            tester.sendmail(subject, to)
        except Exception:
            # Best-effort notification: a mail failure must not fail the run.
            pass
if __name__ == "__main__":
    # Script entry point: forward the raw argv (including the script name).
    main(sys.argv)
|
{
"content_hash": "80ebef7738569a4dae529cf6d670e3ae",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 135,
"avg_line_length": 40.39148936170213,
"alnum_prop": 0.6267383059418458,
"repo_name": "xinquanking/test4u",
"id": "16d963c6e89af21a1ab096520fcfaed02b41e682",
"size": "9535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AppTester.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91521"
},
{
"name": "TypeScript",
"bytes": "77"
}
],
"symlink_target": ""
}
|
import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    """Exercise the pairwise_distances helper.

    Checks that string metric names agree with the dedicated distance
    functions and with scipy callables, for dense, tuple and sparse inputs,
    and that unsupported combinations raise.
    """
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    S = pairwise_distances(X, metric="euclidean")
    S2 = euclidean_distances(X)
    assert_array_almost_equal(S, S2)
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((2, 4))
    S = pairwise_distances(X, Y, metric="euclidean")
    S2 = euclidean_distances(X, Y)
    assert_array_almost_equal(S, S2)
    # Test with tuples as X and Y
    X_tuples = tuple([tuple([v for v in row]) for row in X])
    Y_tuples = tuple([tuple([v for v in row]) for row in Y])
    S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
    assert_array_almost_equal(S, S2)
    # "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
    S = pairwise_distances(X, metric="cityblock")
    S2 = pairwise_distances(X, metric=cityblock)
    assert_equal(S.shape[0], S.shape[1])
    assert_equal(S.shape[0], X.shape[0])
    assert_array_almost_equal(S, S2)
    # The manhattan metric should be equivalent to cityblock.
    S = pairwise_distances(X, Y, metric="manhattan")
    S2 = pairwise_distances(X, Y, metric=cityblock)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)
    # Low-level function for manhattan can divide in blocks to avoid
    # using too much memory during the broadcasting
    S3 = manhattan_distances(X, Y, size_threshold=10)
    assert_array_almost_equal(S, S3)
    # Test cosine as a string metric versus cosine callable
    # "cosine" uses sklearn metric, cosine (function) is scipy.spatial
    S = pairwise_distances(X, Y, metric="cosine")
    S2 = pairwise_distances(X, Y, metric=cosine)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)
    # Test with sparse X and Y,
    # currently only supported for Euclidean, L1 and cosine.
    X_sparse = csr_matrix(X)
    Y_sparse = csr_matrix(Y)
    S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
    S2 = euclidean_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
    S2 = cosine_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    # Mixed sparse formats (csc/bsr/coo) must all be accepted for manhattan.
    S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
    S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
    assert_array_almost_equal(S, S2)
    S2 = manhattan_distances(X, Y)
    assert_array_almost_equal(S, S2)
    # Test with scipy.spatial.distance metric, with a kwd
    kwds = {"p": 2.0}
    S = pairwise_distances(X, Y, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)
    # same with Y = None
    kwds = {"p": 2.0}
    S = pairwise_distances(X, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)
    # Test that scipy distance metrics throw an error if sparse matrix given
    assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
    assert_raises(TypeError, pairwise_distances, X, Y_sparse,
                  metric="minkowski")
    # Test that a value error is raised if the metric is unknown
    assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
    """metric='precomputed' must validate shapes, avoid needless copies and
    always return a float ndarray, for both distances and kernels."""
    for func in [pairwise_distances, pairwise_kernels]:
        # Test correct shape: a precomputed matrix must be square.
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), metric='precomputed')
        # with two args
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), np.zeros((4, 4)),
                             metric='precomputed')
        # even if shape[1] agrees (although thus second arg is spurious)
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), np.zeros((4, 3)),
                             metric='precomputed')
        # Test not copied (if appropriate dtype)
        S = np.zeros((5, 5))
        S2 = func(S, metric="precomputed")
        assert_true(S is S2)
        # with two args
        S = np.zeros((5, 3))
        S2 = func(S, np.zeros((3, 3)), metric="precomputed")
        assert_true(S is S2)
        # Test always returns float dtype
        S = func(np.array([[1]], dtype='int'), metric='precomputed')
        assert_equal('f', S.dtype.kind)
        # Test converts list to array-like
        S = func([[1]], metric='precomputed')
        assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
    """Check that n_jobs=2 gives the same result as n_jobs=1 for *func*
    (pairwise_distances or pairwise_kernels) with the given metric/kwds,
    on both dense arrays and CSR matrices."""
    rng = np.random.RandomState(0)
    for make_data in (np.array, csr_matrix):
        X = make_data(rng.random_sample((5, 4)))
        Y = make_data(rng.random_sample((3, 4)))
        try:
            S = func(X, metric=metric, n_jobs=1, **kwds)
        except (TypeError, ValueError) as exc:
            # Not all metrics support sparse input
            # ValueError may be triggered by bad callable
            if make_data is csr_matrix:
                # The parallel path must reject the same inputs with the
                # same exception type as the serial path.
                assert_raises(type(exc), func, X, metric=metric,
                              n_jobs=2, **kwds)
                continue
            else:
                raise
        S2 = func(X, metric=metric, n_jobs=2, **kwds)
        assert_array_almost_equal(S, S2)
        S = func(X, Y, metric=metric, n_jobs=1, **kwds)
        S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
        assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
    """Yield parallel-consistency checks for a mix of metrics and kernels."""
    wmink_params = dict(w=np.arange(1, 5).astype('double'), p=1)
    cases = [
        (pairwise_distances, 'euclidean', {}),
        (pairwise_distances, wminkowski, wmink_params),
        (pairwise_distances, 'wminkowski', wmink_params),
        (pairwise_kernels, 'polynomial', {'degree': 1}),
        (pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
    ]
    # Generator-style test: each yielded tuple is run as one check.
    for case in cases:
        yield (check_pairwise_parallel,) + case
def test_pairwise_callable_nonstrict_metric():
    """A callable metric may have metric(x, x) != 0, so the diagonal must be
    computed rather than assumed to be zero (as a strict metric would allow).
    """
    def constant_metric(u, v):
        return 5
    assert_equal(pairwise_distances([[1]], metric=constant_metric)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
    """Wrap pairwise.rbf_kernel so it can be passed as a callable metric;
    inputs are promoted to 2-D before the call."""
    return rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
def test_pairwise_kernels():
    """Exercise the pairwise_kernels helper: every registered kernel name
    must agree with its underlying function, for Y=None/Y given, tuple input,
    sparse input (where supported), and for a user-supplied callable."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((2, 4))
    # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
    test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
                    "chi2", "additive_chi2"]
    for metric in test_metrics:
        function = PAIRWISE_KERNEL_FUNCTIONS[metric]
        # Test with Y=None
        K1 = pairwise_kernels(X, metric=metric)
        K2 = function(X)
        assert_array_almost_equal(K1, K2)
        # Test with Y=Y
        K1 = pairwise_kernels(X, Y=Y, metric=metric)
        K2 = function(X, Y=Y)
        assert_array_almost_equal(K1, K2)
        # Test with tuples as X and Y
        X_tuples = tuple([tuple([v for v in row]) for row in X])
        Y_tuples = tuple([tuple([v for v in row]) for row in Y])
        K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
        assert_array_almost_equal(K1, K2)
        # Test with sparse X and Y
        X_sparse = csr_matrix(X)
        Y_sparse = csr_matrix(Y)
        if metric in ["chi2", "additive_chi2"]:
            # these don't support sparse matrices yet
            assert_raises(ValueError, pairwise_kernels,
                          X_sparse, Y=Y_sparse, metric=metric)
            continue
        K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
        assert_array_almost_equal(K1, K2)
    # Test with a callable function, with given keywords.
    metric = callable_rbf_kernel
    kwds = {'gamma': 0.1}
    K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=Y, **kwds)
    assert_array_almost_equal(K1, K2)
    # callable function, X=Y
    K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=X, **kwds)
    assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
    """filter_params=True must drop keyword arguments the kernel does not
    accept; without it they are forwarded and trigger a TypeError."""
    random_state = np.random.RandomState(0)
    left = random_state.random_sample((5, 4))
    right = random_state.random_sample((2, 4))
    expected = rbf_kernel(left, right, gamma=0.1)
    kwargs = {"gamma": 0.1, "blabla": ":)"}
    filtered = pairwise_kernels(left, right, metric="rbf",
                                filter_params=True, **kwargs)
    assert_array_almost_equal(expected, filtered)
    # Unknown kwarg reaches rbf_kernel when filtering is off.
    assert_raises(TypeError, pairwise_kernels, left, right, "rbf", **kwargs)
def test_paired_distances():
    """Exercise the paired_distances helper: each registered paired metric
    must agree with its dedicated function (dense and sparse), with the
    corresponding pairwise implementation, and with a callable metric."""
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((5, 4))
    for metric, func in iteritems(PAIRED_DISTANCES):
        S = paired_distances(X, Y, metric=metric)
        S2 = func(X, Y)
        assert_array_almost_equal(S, S2)
        S3 = func(csr_matrix(X), csr_matrix(Y))
        assert_array_almost_equal(S, S3)
        if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
            # gives the same value on the diagonal
            distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
            distances = np.diag(distances)
            assert_array_almost_equal(distances, S)
    # Check the callable implementation
    S = paired_distances(X, Y, metric='manhattan')
    S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
    assert_array_almost_equal(S, S2)
    # A ValueError is raised when X and Y have different numbers of samples.
    Y = rng.random_sample((3, 4))
    assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
    """Check pairwise minimum-distance computation across metric kinds.

    Covers dense and sparse inputs, sklearn and scipy metrics given as
    strings or callables, and the chunked (batched) code path against a
    naive reference implementation.
    """
    X = [[0], [1]]
    Y = [[-1], [2]]
    Xsp = dok_matrix(X)
    Ysp = csr_matrix(Y, dtype=np.float32)
    # euclidean metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
    D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
    # CLEANUP: the assertion on D was duplicated in the original; once is
    # enough.
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # sparse matrix case
    Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
    assert_array_equal(Dsp, D)
    assert_array_equal(Esp, E)
    # We don't want np.matrix here
    assert_equal(type(Dsp), np.ndarray)
    assert_equal(type(Esp), np.ndarray)
    # Non-euclidean sklearn metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
    D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
    D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (callable)
    D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (string)
    D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Compare the chunked implementation with a naive full-matrix argmin.
    rng = np.random.RandomState(0)
    X = rng.randn(97, 149)
    Y = rng.randn(111, 149)
    dist = pairwise_distances(X, Y, metric="manhattan")
    dist_orig_ind = dist.argmin(axis=0)
    dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
    dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
        X, Y, axis=0, metric="manhattan", batch_size=50)
    np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
    np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
    """Check pairwise Euclidean distances: hand-computed small cases (dense
    and sparse) and the {X,Y}_norm_squared fast-path arguments."""
    X = [[0]]
    Y = [[1], [2]]
    D = euclidean_distances(X, Y)
    assert_array_almost_equal(D, [[1., 2.]])
    X = csr_matrix(X)
    Y = csr_matrix(Y)
    D = euclidean_distances(X, Y)
    assert_array_almost_equal(D, [[1., 2.]])
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((20, 4))
    X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
    Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
    # check that we still get the right answers with {X,Y}_norm_squared
    D1 = euclidean_distances(X, Y)
    D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
    D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
    D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
                             Y_norm_squared=Y_norm_sq)
    assert_array_almost_equal(D2, D1)
    assert_array_almost_equal(D3, D1)
    assert_array_almost_equal(D4, D1)
    # check we get the wrong answer with wrong {X,Y}_norm_squared
    # (i.e. the precomputed norms are trusted, not re-derived)
    X_norm_sq *= 0.5
    Y_norm_sq *= 0.5
    wrong_D = euclidean_distances(X, Y,
                                  X_norm_squared=np.zeros_like(X_norm_sq),
                                  Y_norm_squared=np.zeros_like(Y_norm_sq))
    assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
    """Paired Euclidean distances are computed row-by-row, not pairwise."""
    lhs = [[0], [0]]
    rhs = [[1], [2]]
    assert_array_almost_equal(paired_euclidean_distances(lhs, rhs), [1., 2.])
def test_paired_manhattan_distances():
    """Paired manhattan distances are computed row-by-row, not pairwise."""
    lhs = [[0], [0]]
    rhs = [[1], [2]]
    assert_array_almost_equal(paired_manhattan_distances(lhs, rhs), [1., 2.])
def test_chi_square_kernel():
    """Check the (additive) chi-squared kernels against a direct formula,
    plus dtype handling, value ranges, and rejection of invalid input."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((10, 4))
    K_add = additive_chi2_kernel(X, Y)
    gamma = 0.1
    K = chi2_kernel(X, Y, gamma=gamma)
    # NOTE(review): np.float is the builtin-float alias (removed in newer
    # numpy); fine for the numpy version this suite targets.
    assert_equal(K.dtype, np.float)
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            # Direct per-pair evaluation of the chi^2 formulas.
            chi2 = -np.sum((x - y) ** 2 / (x + y))
            chi2_exp = np.exp(gamma * chi2)
            assert_almost_equal(K_add[i, j], chi2)
            assert_almost_equal(K[i, j], chi2_exp)
    # check diagonal is ones for data with itself
    K = chi2_kernel(Y)
    assert_array_equal(np.diag(K), 1)
    # check off-diagonal is < 1 but > 0:
    assert_true(np.all(K > 0))
    assert_true(np.all(K - np.diag(np.diag(K)) < 1))
    # check that float32 is preserved
    X = rng.random_sample((5, 4)).astype(np.float32)
    Y = rng.random_sample((10, 4)).astype(np.float32)
    K = chi2_kernel(X, Y)
    assert_equal(K.dtype, np.float32)
    # check integer type gets converted,
    # check that zeros are handled
    X = rng.random_sample((10, 4)).astype(np.int32)
    K = chi2_kernel(X, X)
    assert_true(np.isfinite(K).all())
    assert_equal(K.dtype, np.float)
    # check that kernel of similar things is greater than dissimilar ones
    X = [[.3, .7], [1., 0]]
    Y = [[0, 1], [.9, .1]]
    K = chi2_kernel(X, Y)
    assert_greater(K[0, 0], K[0, 1])
    assert_greater(K[1, 1], K[1, 0])
    # test negative input
    assert_raises(ValueError, chi2_kernel, [[0, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
    # different n_features in X and Y
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
    # sparse matrices are not supported
    assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
    assert_raises(ValueError, additive_chi2_kernel,
                  csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
    """Every valid kernel matrix K(X, X) must be symmetric."""
    data = np.random.RandomState(0).random_sample((5, 4))
    kernels = (linear_kernel, polynomial_kernel, rbf_kernel,
               laplacian_kernel, sigmoid_kernel, cosine_similarity)
    for make_kernel in kernels:
        gram = make_kernel(data, data)
        assert_array_almost_equal(gram, gram.T, 15)
def test_kernel_sparse():
    """Kernels must yield the same Gram matrix for dense and CSR input."""
    data = np.random.RandomState(0).random_sample((5, 4))
    data_sparse = csr_matrix(data)
    for make_kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
                        laplacian_kernel, sigmoid_kernel, cosine_similarity):
        dense_gram = make_kernel(data, data)
        sparse_gram = make_kernel(data_sparse, data_sparse)
        assert_array_almost_equal(dense_gram, sparse_gram)
def test_linear_kernel():
    """Diagonal of a linear kernel equals the squared norms of the rows."""
    data = np.random.RandomState(0).random_sample((5, 4))
    gram = linear_kernel(data, data)
    # gram.flat[::6] walks the main diagonal of the 5x5 matrix.
    expected = [linalg.norm(row) ** 2 for row in data]
    assert_array_almost_equal(gram.flat[::6], expected)
def test_rbf_kernel():
    """Diagonal of an RBF kernel is exactly one (zero distance to itself)."""
    data = np.random.RandomState(0).random_sample((5, 4))
    gram = rbf_kernel(data, data)
    # gram.flat[::6] walks the main diagonal of the 5x5 matrix.
    assert_array_almost_equal(gram.flat[::6], np.ones(5))
def test_laplacian_kernel():
    """Laplacian kernel: unit diagonal, off-diagonal strictly in (0, 1)."""
    data = np.random.RandomState(0).random_sample((5, 4))
    gram = laplacian_kernel(data, data)
    assert_array_almost_equal(np.diag(gram), np.ones(5))
    # Off-diagonal entries are positive but strictly below one.
    assert_true(np.all(gram > 0))
    assert_true(np.all(gram - np.diag(np.diag(gram)) < 1))
def test_cosine_similarity_sparse_output():
    """With dense_output=False and sparse input, cosine_similarity must stay
    sparse and agree with the dense pairwise_kernels computation."""
    random_state = np.random.RandomState(0)
    left = csr_matrix(random_state.random_sample((5, 4)))
    right = csr_matrix(random_state.random_sample((3, 4)))
    sparse_sim = cosine_similarity(left, right, dense_output=False)
    assert_true(issparse(sparse_sim))
    dense_sim = pairwise_kernels(left, Y=right, metric="cosine")
    assert_array_almost_equal(sparse_sim.todense(), dense_sim)
def test_cosine_similarity():
    """cosine_similarity must equal a linear kernel on L2-normalized data,
    for dense/sparse inputs and Y given or None."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((3, 4))
    Xcsr = csr_matrix(X)
    Ycsr = csr_matrix(Y)
    for X_, Y_ in ((X, None), (X, Y),
                   (Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
        # has been previously normalized by L2-norm.
        K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
        X_ = normalize(X_)
        if Y_ is not None:
            Y_ = normalize(Y_)
        K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
        assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
    """check_pairwise_arrays with XB=None must return XA for both outputs."""
    data = np.resize(np.arange(40), (5, 8))
    checked_a, checked_b = check_pairwise_arrays(data, None)
    # The second return value must alias the first, not be a copy.
    assert_true(checked_a is checked_b)
    assert_array_equal(data, checked_a)
def test_check_XB_returned():
    """When XB is supplied with a compatible shape, both pairwise and paired
    checks must return the inputs unchanged."""
    first = np.resize(np.arange(40), (5, 8))
    second = np.resize(np.arange(32), (4, 8))
    # Pairwise: second dimension of XB matches XA.
    checked_a, checked_b = check_pairwise_arrays(first, second)
    assert_array_equal(first, checked_a)
    assert_array_equal(second, checked_b)
    # Paired: both dimensions must match.
    second = np.resize(np.arange(40), (5, 8))
    checked_a, checked_b = check_paired_arrays(first, second)
    assert_array_equal(first, checked_a)
    assert_array_equal(second, checked_b)
def test_check_different_dimensions():
    """Arrays with incompatible dimensions must be rejected."""
    first = np.resize(np.arange(45), (5, 9))
    second = np.resize(np.arange(32), (4, 8))
    # Pairwise check: feature counts differ (9 vs 8).
    assert_raises(ValueError, check_pairwise_arrays, first, second)
    second = np.resize(np.arange(4 * 9), (4, 9))
    # Paired check: sample counts differ (5 vs 4).
    assert_raises(ValueError, check_paired_arrays, first, second)
def test_check_invalid_dimensions():
    """check_pairwise_arrays rejects arrays with mismatched feature counts.

    The arrays here are 2-D; the historical 1-D variants this test replaced
    were converted to 2-D internally anyway. CLEANUP: the original repeated
    the identical two-statement check twice; the duplicate is removed.
    """
    XA = np.arange(45).reshape(9, 5)
    XB = np.arange(32).reshape(4, 8)
    # 5 features vs 8 features -> must raise.
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
    """check_pairwise_arrays must return valid sparse matrices when given
    sparse input, including when the same matrix is passed twice."""
    rng = np.random.RandomState(0)
    XA = rng.random_sample((5, 4))
    XA_sparse = csr_matrix(XA)
    XB = rng.random_sample((5, 4))
    XB_sparse = csr_matrix(XB)
    XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
    # compare their difference because testing csr matrices for
    # equality with '==' does not work as expected.
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XB_checked))
    assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
    # Passing the same sparse matrix twice must also round-trip cleanly.
    XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XA_2_checked))
    assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
    """Recursively convert a numpy array (any rank) into nested tuples."""
    if len(X.shape) > 1:
        # Multi-dimensional: tuplify each sub-array.
        return tuple(tuplify(sub) for sub in X)
    # One-dimensional: a flat tuple of the elements.
    return tuple(X)
def test_check_tuple_input():
    """Tuple-of-tuples input must be converted to arrays equal to the data."""
    random_state = np.random.RandomState(0)
    first = tuplify(random_state.random_sample((5, 4)))
    second = tuplify(random_state.random_sample((5, 4)))
    checked_first, checked_second = check_pairwise_arrays(first, second)
    assert_array_equal(first, checked_first)
    assert_array_equal(second, checked_second)
def test_check_preserve_type():
    """float32 inputs stay float32; mixed dtypes are upcast to float64."""
    first = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    second = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    out_a, out_b = check_pairwise_arrays(first, None)
    assert_equal(out_a.dtype, np.float32)
    # Both arrays float32: dtype preserved on both outputs.
    out_a, out_b = check_pairwise_arrays(first, second)
    assert_equal(out_a.dtype, np.float32)
    assert_equal(out_b.dtype, np.float32)
    # First array float64: both outputs are upcast.
    out_a, out_b = check_pairwise_arrays(first.astype(np.float),
                                         second)
    assert_equal(out_a.dtype, np.float)
    assert_equal(out_b.dtype, np.float)
    # Second array float64: both outputs are upcast.
    out_a, out_b = check_pairwise_arrays(first,
                                         second.astype(np.float))
    assert_equal(out_a.dtype, np.float)
    assert_equal(out_b.dtype, np.float)
|
{
"content_hash": "c907d59c82b40c081644111742efea59",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 78,
"avg_line_length": 38.47511312217195,
"alnum_prop": 0.6369516641185464,
"repo_name": "kaichogami/scikit-learn",
"id": "ce67c0eccc9d89c9bbbb10abe568f5eaa2b8e759",
"size": "25509",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sklearn/metrics/tests/test_pairwise.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6372312"
},
{
"name": "Shell",
"bytes": "9258"
}
],
"symlink_target": ""
}
|
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1Time(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # V1Time declares no properties of its own.
    openapi_types = {}
    attribute_map = {}
    def __init__(self):  # noqa: E501
        """V1Time - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for name, _ in six.iteritems(self.openapi_types):
            attr_value = getattr(self, name)
            if isinstance(attr_value, list):
                # Recurse into model elements of list attributes.
                result[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in attr_value
                ]
            elif hasattr(attr_value, "to_dict"):
                result[name] = attr_value.to_dict()
            elif isinstance(attr_value, dict):
                # Recurse into model values of dict attributes.
                result[name] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in attr_value.items()
                }
            else:
                result[name] = attr_value
        if issubclass(V1Time, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1Time):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
{
"content_hash": "01352225875569b5519e97b7e701ac4a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 80,
"avg_line_length": 27.28235294117647,
"alnum_prop": 0.5222078482104355,
"repo_name": "kubeflow/kfserving-lts",
"id": "cf0646700c2b3920aa2b827547691defaec0f5bf",
"size": "2913",
"binary": false,
"copies": "1",
"ref": "refs/heads/release-0.6",
"path": "python/kfserving/kfserving/models/v1_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "224"
},
{
"name": "Dockerfile",
"bytes": "10549"
},
{
"name": "Go",
"bytes": "1251102"
},
{
"name": "HTML",
"bytes": "17922"
},
{
"name": "JavaScript",
"bytes": "1828"
},
{
"name": "Jsonnet",
"bytes": "2434415"
},
{
"name": "Makefile",
"bytes": "16071"
},
{
"name": "Python",
"bytes": "1860674"
},
{
"name": "SCSS",
"bytes": "1789"
},
{
"name": "Shell",
"bytes": "36788"
},
{
"name": "TypeScript",
"bytes": "78886"
}
],
"symlink_target": ""
}
|
"""Sphinx build configuration for the Asgard Blog documentation."""
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.coverage', 'sphinx.ext.autodoc']
# NOTE(review): presumably required so autodoc can import the Django apps
# added to sys.path above — confirm 'blog.testsettings' still exists.
os.environ['DJANGO_SETTINGS_MODULE'] = 'blog.testsettings'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Asgard Blog'
copyright = u'2009, Myles Braithwaite'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'basic'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'asgard_blog_doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'asgard_blog.tex', u'Asgard Blog Documentation',
   u'Myles Braithwaite', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
{
"content_hash": "de5d5eff1f80629f87c0aab2c512b23a",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 80,
"avg_line_length": 32.78688524590164,
"alnum_prop": 0.7108333333333333,
"repo_name": "myles-archive/asgard-blog",
"id": "bee533e2a1429a4f82b231dcc0509103325e7add",
"size": "6422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27416"
},
{
"name": "JavaScript",
"bytes": "498"
},
{
"name": "Python",
"bytes": "79524"
},
{
"name": "Shell",
"bytes": "2967"
}
],
"symlink_target": ""
}
|
import numpy as np
def percent_round_int(percent, x):
    """Return ``percent * x`` rounded to the nearest whole number, as int.

    Operates elementwise, so both scalars and NumPy arrays are accepted;
    rounding follows ``np.round`` (round-half-to-even).
    """
    scaled = percent * x
    return np.round(scaled).astype(int)
|
{
"content_hash": "d8259666725c6cc7396352cdc8201a1d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 20.2,
"alnum_prop": 0.7029702970297029,
"repo_name": "ntasfi/PyGame-Learning-Environment",
"id": "cf7f43aa25b5a67ac35b0b60fcb760e5d507b335",
"size": "101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ple/games/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "167222"
}
],
"symlink_target": ""
}
|
"""Extra commands for setup.py.
In addition to providing a few extra command classes in `l10n_cmdclass`,
we also modify the standard `distutils.command.build` and
`setuptools.command.install_lib` classes so that the relevant l10n commands
for compiling catalogs are issued upon install.
"""
from __future__ import with_statement
from StringIO import StringIO
from itertools import izip
import os
import re
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from distutils import log
from distutils.cmd import Command
from distutils.command.build import build as _build
from distutils.errors import DistutilsOptionError
from setuptools.command.install_lib import install_lib as _install_lib
try:
from babel.messages.catalog import TranslationError
from babel.messages.extract import extract_javascript
from babel.messages.frontend import extract_messages, init_catalog, \
compile_catalog, update_catalog
from babel.messages.pofile import read_po
from babel.support import Translations
from babel.util import parse_encoding
# Matches genshi translation markup such as "[1:" embedded in a message.
_GENSHI_MARKUP_SEARCH = re.compile(r'\[[0-9]+:').search

# Maps Trac option-constructor names to the positional index of keyword
# arguments, so extract_python() can place keyword-passed values (e.g. the
# ``doc`` string) at the right position in the extracted message tuple.
_DEFAULT_KWARGS_MAPS = {
    'Option': {'doc': 4},
    'BoolOption': {'doc': 4},
    'IntOption': {'doc': 4},
    'FloatOption': {'doc': 4},
    'ListOption': {'doc': 6},
    'ChoiceOption': {'doc': 4},
    'PathOption': {'doc': 4},
    'ExtensionOption': {'doc': 5},
    'OrderedExtensionsOption': {'doc': 6},
}

# Extraction keywords whose messages are normalized with ``cleandoc()``
# (dedent / strip, docstring style) before being yielded.
_DEFAULT_CLEANDOC_KEYWORDS = (
    'ConfigSection', 'Option', 'BoolOption', 'IntOption', 'FloatOption',
    'ListOption', 'ChoiceOption', 'PathOption', 'ExtensionOption',
    'OrderedExtensionsOption', 'cleandoc_',
)
def extract_python(fileobj, keywords, comment_tags, options):
    """Extract messages from Python source code.  This is a patched
    version of Babel's ``extract_python`` that adds keyword-argument
    mapping support.

    `kwargs_maps` option: maps names of keyword arguments to their
    positional index in the messages array.

    `cleandoc_keywords` option: a list of keywords whose extracted
    messages are cleaned up with `cleandoc`.

    Yields ``(lineno, funcname, messages, comments)`` tuples, one per
    recognized translation-function call.
    """
    from trac.util.compat import cleandoc
    # Tokenizer state: funcname is the keyword currently being parsed,
    # call_stack tracks paren nesting (-1 = outside any tracked call),
    # buf accumulates adjacent string literal pieces of one argument.
    funcname = lineno = message_lineno = None
    kwargs_maps = func_kwargs_map = None
    call_stack = -1
    buf = []
    messages = []
    messages_kwargs = {}
    translator_comments = []
    in_def = in_translator_comments = False
    comment_tag = None
    encoding = str(parse_encoding(fileobj) or
                   options.get('encoding', 'iso-8859-1'))
    kwargs_maps = _DEFAULT_KWARGS_MAPS.copy()
    if 'kwargs_maps' in options:
        kwargs_maps.update(options['kwargs_maps'])
    cleandoc_keywords = set(_DEFAULT_CLEANDOC_KEYWORDS)
    if 'cleandoc_keywords' in options:
        cleandoc_keywords.update(options['cleandoc_keywords'])
    tokens = generate_tokens(fileobj.readline)
    tok = value = None
    for _ in tokens:
        prev_tok, prev_value = tok, value
        tok, value, (lineno, _), _, _ = _
        if call_stack == -1 and tok == NAME and value in ('def', 'class'):
            in_def = True
        elif tok == OP and value == '(':
            if in_def:
                # Avoid false positives for declarations such as:
                # def gettext(arg='message'):
                in_def = False
                continue
            if funcname:
                message_lineno = lineno
                call_stack += 1
            kwarg_name = None
        elif in_def and tok == OP and value == ':':
            # End of a class definition without parens
            in_def = False
            continue
        elif call_stack == -1 and tok == COMMENT:
            # Strip the comment token from the line
            value = value.decode(encoding)[1:].strip()
            if in_translator_comments and \
                    translator_comments[-1][0] == lineno - 1:
                # We're already inside a translator comment, continue
                # appending
                translator_comments.append((lineno, value))
                continue
            # If execution reaches this point, let's see if comment line
            # starts with one of the comment tags
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    in_translator_comments = True
                    translator_comments.append((lineno, value))
                    break
        elif funcname and call_stack == 0:
            if tok == OP and value == ')':
                # Closing paren of the tracked call: flush the pending
                # argument, merge keyword args into their mapped slots,
                # then yield the completed message.
                if buf:
                    message = ''.join(buf)
                    if kwarg_name in func_kwargs_map:
                        messages_kwargs[kwarg_name] = message
                    else:
                        messages.append(message)
                    del buf[:]
                else:
                    messages.append(None)
                for name, message in messages_kwargs.iteritems():
                    if name not in func_kwargs_map:
                        continue
                    index = func_kwargs_map[name]
                    while index >= len(messages):
                        messages.append(None)
                    messages[index - 1] = message
                if funcname in cleandoc_keywords:
                    messages = [m and cleandoc(m) for m in messages]
                if len(messages) > 1:
                    messages = tuple(messages)
                else:
                    messages = messages[0]
                # Comments don't apply unless they immediately preceed the
                # message
                if translator_comments and \
                        translator_comments[-1][0] < message_lineno - 1:
                    translator_comments = []
                yield (message_lineno, funcname, messages,
                       [comment[1] for comment in translator_comments])
                # Reset all per-call state for the next candidate.
                funcname = lineno = message_lineno = None
                kwarg_name = func_kwargs_map = None
                call_stack = -1
                messages = []
                messages_kwargs = {}
                translator_comments = []
                in_translator_comments = False
            elif tok == STRING:
                # Unwrap quotes in a safe manner, maintaining the string's
                # encoding
                # https://sourceforge.net/tracker/?func=detail&atid=355470&
                # aid=617979&group_id=5470
                value = eval('# coding=%s\n%s' % (encoding, value),
                             {'__builtins__':{}}, {})
                if isinstance(value, str):
                    value = value.decode(encoding)
                buf.append(value)
            elif tok == OP and value == '=' and prev_tok == NAME:
                # ``name=`` inside the call: remember the keyword name so the
                # following string is stored as a keyword argument.
                kwarg_name = prev_value
            elif tok == OP and value == ',':
                # Argument separator: flush the buffered string pieces.
                if buf:
                    message = ''.join(buf)
                    if kwarg_name in func_kwargs_map:
                        messages_kwargs[kwarg_name] = message
                    else:
                        messages.append(message)
                    del buf[:]
                else:
                    messages.append(None)
                kwarg_name = None
                if translator_comments:
                    # We have translator comments, and since we're on a
                    # comma(,) user is allowed to break into a new line
                    # Let's increase the last comment's lineno in order
                    # for the comment to still be a valid one
                    old_lineno, old_comment = translator_comments.pop()
                    translator_comments.append((old_lineno+1, old_comment))
        elif call_stack > 0 and tok == OP and value == ')':
            call_stack -= 1
        elif funcname and call_stack == -1:
            funcname = func_kwargs_map = kwarg_name = None
        elif tok == NAME and value in keywords:
            # Start tracking a new candidate translation-function call.
            funcname = value
            func_kwargs_map = kwargs_maps.get(funcname, {})
            kwarg_name = None
def extract_javascript_script(fileobj, keywords, comment_tags, options):
    """Extract messages from JavaScript embedded in ``<script>`` tags.

    Selects ``<script type="text/javascript">`` elements from the markup
    stream and delegates the actual extraction to `extract_javascript`.
    """
    from genshi.core import Stream
    from genshi.input import XMLParser
    rendered = StringIO()
    scripts = Stream(XMLParser(fileobj)) \
        .select('//script[@type="text/javascript"]')
    scripts.render(out=rendered, encoding='utf-8')
    rendered.seek(0)
    return extract_javascript(rendered, keywords, comment_tags, options)
class generate_messages_js(Command):
    """Generating message javascripts command for use in ``setup.py`` scripts.

    Reads compiled catalogs (binary MO files) and writes one javascript
    file per locale via `write_js`.
    """

    description = 'generate message javascript files from binary MO files'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('input-dir=', 'I',
         'path to base directory containing the catalogs'),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-dir=', 'O',
         "name of the output directory"),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>.js')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
    ]

    def initialize_options(self):
        """Set the default value of every supported option."""
        self.domain = 'messages'
        self.input_dir = None
        self.input_file = None
        self.output_dir = None
        self.output_file = None
        self.locale = None

    def finalize_options(self):
        """Validate that an input and an output were specified.

        :raises DistutilsOptionError: when neither input file/dir or
            neither output file/dir was given.
        """
        if not self.input_file and not self.input_dir:
            raise DistutilsOptionError('you must specify either the input '
                                       'file or directory')
        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify either the '
                                       'output file or directory')

    def _collect_targets(self):
        """Return the parallel lists ``(mo_files, js_files)`` to convert.

        ``mo_files`` holds ``(locale, path-to-mo)`` pairs and ``js_files``
        the matching javascript output paths, in the same order.
        """
        def js_path(dir, locale):
            return os.path.join(dir, locale + '.js')

        mo_files = []
        js_files = []
        if not self.input_file:
            if self.locale:
                # Single explicit locale below the input directory.
                mo_files.append((self.locale,
                                 os.path.join(self.input_dir, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.mo')))
                js_files.append(js_path(self.output_dir, self.locale))
            else:
                # Scan the input directory for every compiled locale.
                for locale in os.listdir(self.input_dir):
                    mo_file = os.path.join(self.input_dir, locale,
                                           'LC_MESSAGES',
                                           self.domain + '.mo')
                    if os.path.exists(mo_file):
                        mo_files.append((locale, mo_file))
                        js_files.append(js_path(self.output_dir, locale))
        else:
            mo_files.append((self.locale, self.input_file))
            if self.output_file:
                js_files.append(self.output_file)
            else:
                # BUG FIX: this branch previously referenced the unbound
                # name ``locale`` (NameError); the explicitly configured
                # locale is the one that applies here.
                js_files.append(js_path(self.output_dir, self.locale))
        return mo_files, js_files

    def run(self):
        """Convert every discovered MO catalog into a javascript file."""
        mo_files, js_files = self._collect_targets()
        if not mo_files:
            raise DistutilsOptionError('no compiled catalogs found')

        # Only create/check the output directory when one was configured;
        # with --output-file alone, output_dir may legitimately be None
        # (previously this crashed in os.path.isdir(None)).
        if self.output_dir and not os.path.isdir(self.output_dir):
            os.mkdir(self.output_dir)

        for (locale, mo_file), js_file in zip(mo_files, js_files):
            log.info('generating messages javascript %r to %r',
                     mo_file, js_file)
            with open(mo_file, 'rb') as infile:
                t = Translations(infile, self.domain)
                catalog = t._catalog
            with open(js_file, 'w') as outfile:
                write_js(outfile, catalog, self.domain, locale)
class check_catalog(Command):
"""Check message catalog command for use ``setup.py`` scripts."""
description = 'check message catalog files, like `msgfmt --check`'
user_options = [
('domain=', 'D',
"domain of PO file (default 'messages')"),
('input-dir=', 'I',
'path to base directory containing the catalogs'),
('input-file=', 'i',
'name of the input file'),
('locale=', 'l',
'locale of the catalog to compile'),
]
def initialize_options(self):
self.domain = 'messages'
self.input_dir = None
self.input_file = None
self.locale = None
def finalize_options(self):
if not self.input_file and not self.input_dir:
raise DistutilsOptionError('you must specify either the input '
'file or directory')
def run(self):
for filename in self._get_po_files():
log.info('checking catalog %s', filename)
f = open(filename)
try:
catalog = read_po(f, domain=self.domain)
finally:
f.close()
for message in catalog:
for error in self._check_message(catalog, message):
log.warn('%s:%d: %s', filename, message.lineno, error)
def _get_po_files(self):
if self.input_file:
return [self.input_file]
if self.locale:
return [os.path.join(self.input_dir, self.locale,
'LC_MESSAGES', self.domain + '.po')]
files = []
for locale in os.listdir(self.input_dir):
filename = os.path.join(self.input_dir, locale, 'LC_MESSAGES',
self.domain + '.po')
if os.path.exists(filename):
files.append(filename)
return sorted(files)
def _check_message(self, catalog, message):
errors = [e for e in message.check(catalog)]
try:
check_genshi_markup(catalog, message)
except TranslationError, e:
errors.append(e)
return errors
def check_genshi_markup(catalog, message):
    """Verify the genshi markups in the translation.

    Delegates to `_validate_genshi_markup` for each (msgid, msgstr) pair
    when the original message carries genshi ``[n:]`` markup.
    """
    def as_tuple(value):
        # Plural messages arrive as lists/tuples; normalize singulars too.
        return value if isinstance(value, (list, tuple)) else (value,)

    msgids = as_tuple(message.id)
    msgstrs = as_tuple(message.string)
    # Only messages containing genshi markup need validating.
    if not _GENSHI_MARKUP_SEARCH(msgids[0]):
        return
    for msgid, msgstr in izip(msgids, msgstrs):
        if msgstr:
            _validate_genshi_markup(msgid, msgstr)
def _validate_genshi_markup(markup, alternative):
    """Raise `TranslationError` when *alternative* drops markup indices."""
    # Indices present in the original message but missing from the
    # translation indicate unbalanced genshi markup.
    missing = _parse_genshi_markup(markup) - _parse_genshi_markup(alternative)
    if missing:
        raise TranslationError(
            'genshi markups are unbalanced %s' % \
            ' '.join('[%d:]' % idx for idx in missing))
def _parse_genshi_markup(message):
from genshi.filters.i18n import parse_msg
try:
return set([idx for idx, text in parse_msg(message)
if idx > 0])
except Exception, e:
raise TranslationError('cannot parse message (%s: %s)' % \
(e.__class__.__name__, unicode(e)))
def write_js(fileobj, catalog, domain, locale):
    """Write *catalog* to *fileobj* as a ``babel.Translations`` js bundle."""
    from trac.util.presentation import to_json
    data = {'domain': domain, 'locale': locale}
    messages = {}
    for msgid, msgstr in catalog.iteritems():
        if isinstance(msgid, (list, tuple)):
            # Plural entry: keyed first by the singular id, then by index.
            plurals = messages.setdefault(msgid[0], {})
            plurals[msgid[1]] = msgstr
        elif msgid:
            messages[msgid] = msgstr
        else:
            # The empty msgid holds the PO header; extract Plural-Forms.
            for raw in msgstr.splitlines():
                raw = raw.strip()
                if not raw or ':' not in raw:
                    continue
                name, val = raw.split(':', 1)
                if name.strip().lower() == 'plural-forms':
                    data['plural_expr'] = pluralexpr(val)
                    break
    data['messages'] = messages

    fileobj.write('// Generated messages javascript file '
                  'from compiled MO file\n')
    fileobj.write('babel.Translations.load(')
    fileobj.write(to_json(data).encode('utf-8'))
    fileobj.write(').install();\n')
def pluralexpr(forms):
    """Return the ``plural=`` expression from a Plural-Forms header value.

    :raises ValueError: when *forms* contains no plural expression.
    """
    m = re.search(r'\bplural\s*=\s*([^;]+)', forms)
    if m is None:
        raise ValueError('Failed to parse plural_forms %r' % (forms,))
    return m.group(1)
def get_command_overriders():
    """Return ``(build, install_lib)`` command classes that hook catalog
    compilation into the standard build/install steps."""
    # 'bdist_wininst' runs a 'build', so make the latter
    # run a 'compile_catalog' before 'build_py'
    class build(_build):
        sub_commands = [('compile_catalog', None)] + _build.sub_commands

    # 'bdist_egg' isn't that nice, all it does is an 'install_lib'
    class install_lib(_install_lib): # playing setuptools' own tricks ;-)
        def l10n_run(self):
            self.run_command('compile_catalog')
        def run(self):
            self.l10n_run()
            # When bdist_egg is called on distribute 0.6.29 and later, the
            # egg file includes no *.mo and *.js files which are generated
            # in l10n_run() method.
            # We remove build_py.data_files property to re-compute in order
            # to avoid the issue (#11640).
            build_py = self.get_finalized_command('build_py')
            if 'data_files' in build_py.__dict__ and \
                    not any(any(name.endswith('.mo') for name in filenames)
                            for pkg, src_dir, build_dir, filenames
                            in build_py.data_files):
                del build_py.__dict__['data_files']
            _install_lib.run(self)
    return build, install_lib
def get_l10n_cmdclass():
    """Return a ``setup()`` cmdclass mapping with catalog compilation
    hooked into build and install_lib."""
    build, install_lib = get_command_overriders()
    return {'build': build,
            'install_lib': install_lib,
            'check_catalog': check_catalog}
def get_l10n_js_cmdclass():
    """Return a cmdclass mapping that also builds javascript catalogs."""
    build, base_install_lib = get_command_overriders()
    # Insert js steps before regular catalog compilation; note the inserts
    # leave 'compile_catalog_js' first, then 'generate_messages_js'.
    for subcommand in (('generate_messages_js', None),
                       ('compile_catalog_js', None)):
        build.sub_commands.insert(0, subcommand)

    class install_lib(base_install_lib):
        def l10n_run(self):
            self.run_command('compile_catalog_js')
            self.run_command('generate_messages_js')
            self.run_command('compile_catalog')

    return {
        'build': build,
        'install_lib': install_lib,
        'check_catalog': check_catalog,
        'extract_messages_js': extract_messages,
        'init_catalog_js': init_catalog,
        'compile_catalog_js': compile_catalog,
        'update_catalog_js': update_catalog,
        'generate_messages_js': generate_messages_js,
        'check_catalog_js': check_catalog,
    }
def get_l10n_trac_cmdclass():
    """Return the full cmdclass mapping used by Trac itself
    (tracini + javascript + regular catalogs)."""
    build, base_install_lib = get_command_overriders()
    # Same insert order as the original: tracini compilation ends up first.
    for subcommand in (('generate_messages_js', None),
                       ('compile_catalog_js', None),
                       ('compile_catalog_tracini', None)):
        build.sub_commands.insert(0, subcommand)

    class install_lib(base_install_lib):
        def l10n_run(self):
            self.run_command('compile_catalog_tracini')
            self.run_command('compile_catalog_js')
            self.run_command('generate_messages_js')
            self.run_command('compile_catalog')

    return {
        'build': build,
        'install_lib': install_lib,
        'check_catalog': check_catalog,
        'extract_messages_js': extract_messages,
        'init_catalog_js': init_catalog,
        'compile_catalog_js': compile_catalog,
        'update_catalog_js': update_catalog,
        'generate_messages_js': generate_messages_js,
        'check_catalog_js': check_catalog,
        'extract_messages_tracini': extract_messages,
        'init_catalog_tracini': init_catalog,
        'compile_catalog_tracini': compile_catalog,
        'update_catalog_tracini': update_catalog,
        'check_catalog_tracini': check_catalog,
    }
except ImportError:
    # Babel is not installed: provide stub factories so setup.py can still
    # import these names; each returns None, which disables catalog handling.
    def get_l10n_cmdclass():
        return
    def get_l10n_js_cmdclass():
        return
    def get_l10n_trac_cmdclass():
        return
|
{
"content_hash": "edc6af443602f3232c715544ecb604ea",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 79,
"avg_line_length": 41.026365348399246,
"alnum_prop": 0.5121413816846454,
"repo_name": "jun66j5/trac-ja",
"id": "ddb586e48520a1bedb86ec992716b82d973e0227",
"size": "22277",
"binary": false,
"copies": "2",
"ref": "refs/heads/trac-ja/1.0.2",
"path": "trac/dist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "11618"
},
{
"name": "JavaScript",
"bytes": "52431"
},
{
"name": "Python",
"bytes": "2570150"
},
{
"name": "Shell",
"bytes": "11226"
}
],
"symlink_target": ""
}
|
import re
from gitdb.util import hex_to_bin
from .objects.blob import Blob
from .objects.util import mode_str_to_int
from git.compat import (
    defenc,
    PY3
)
# Names exported by ``from git.diff import *``
__all__ = ('Diffable', 'DiffIndex', 'Diff')
class Diffable(object):
    """Common interface for all object that can be diffed against another object of compatible type.
    :note:
        Subclasses require a repo member as it is the case for Object instances, for practical
        reasons we do not derive from Object."""
    __slots__ = tuple()
    # standin indicating you want to diff against the index
    class Index(object):
        pass
    def _process_diff_args(self, args):
        """
        :return:
            possibly altered version of the given args list.
            Method is called right before git command execution.
            Subclasses can use it to alter the behaviour of the superclass"""
        return args
    def diff(self, other=Index, paths=None, create_patch=False, **kwargs):
        """Creates diffs between two items being trees, trees and index or an
        index and the working tree. It will detect renames automatically.
        :param other:
            Is the item to compare us with.
            If None, we will be compared to the working tree.
            If Treeish, it will be compared against the respective tree
            If Index ( type ), it will be compared against the index.
            It defaults to Index to assure the method will not by-default fail
            on bare repositories.
        :param paths:
            is a list of paths or a single path to limit the diff to.
            It will only include at least one of the given path or paths.
        :param create_patch:
            If True, the returned Diff contains a detailed patch that if applied
            makes the self to other. Patches are somewhat costly as blobs have to be read
            and diffed.
        :param kwargs:
            Additional arguments passed to git-diff, such as
            R=True to swap both sides of the diff.
        :return: git.DiffIndex
        :note:
            On a bare repository, 'other' needs to be provided as Index or as
            as Tree/Commit, or a git command error will occur"""
        args = list()
        args.append("--abbrev=40")        # we need full shas
        args.append("--full-index")       # get full index paths, not only filenames
        args.append("-M")                 # check for renames, in both formats
        if create_patch:
            args.append("-p")
        else:
            args.append("--raw")
        # in any way, assure we don't see colored output,
        # fixes https://github.com/gitpython-developers/GitPython/issues/172
        args.append('--no-color')
        if paths is not None and not isinstance(paths, (tuple, list)):
            paths = [paths]
        # Ordering matters: the comparison target (or --cached) goes first,
        # then self, so git sees "<self> <other> [-- <paths>]".
        if other is not None and other is not self.Index:
            args.insert(0, other)
        if other is self.Index:
            args.insert(0, "--cached")
        args.insert(0, self)
        # paths is list here or None
        if paths:
            args.append("--")
            args.extend(paths)
        # END paths handling
        kwargs['as_process'] = True
        proc = self.repo.git.diff(*self._process_diff_args(args), **kwargs)
        # Parse the streamed git output in the matching format.
        diff_method = Diff._index_from_raw_format
        if create_patch:
            diff_method = Diff._index_from_patch_format
        index = diff_method(self.repo, proc.stdout)
        proc.wait()
        return index
class DiffIndex(list):
    """A list of Diff instances that can be queried by the diff properties.

    The class improves the diff handling convenience by offering filtered
    iteration on top of an ordinary list."""
    # change type invariant identifying possible ways a blob can have changed
    # A = Added
    # D = Deleted
    # R = Renamed
    # M = modified
    change_type = ("A", "D", "R", "M")

    def iter_change_type(self, change_type):
        """
        :return:
            iterator yielding Diff instances that match the given change_type
        :param change_type:
            Member of DiffIndex.change_type, namely:

            * 'A' for added paths
            * 'D' for deleted paths
            * 'R' for renamed paths
            * 'M' for paths with modified data"""
        if change_type not in self.change_type:
            raise ValueError("Invalid change type: %s" % change_type)
        # Map each change type onto the predicate that recognizes it.
        matches = {
            "A": lambda d: d.new_file,
            "D": lambda d: d.deleted_file,
            "R": lambda d: d.renamed,
            "M": lambda d: d.a_blob and d.b_blob and d.a_blob != d.b_blob,
        }[change_type]
        for candidate in self:
            if matches(candidate):
                yield candidate
class Diff(object):
"""A Diff contains diff information between two Trees.
It contains two sides a and b of the diff, members are prefixed with
"a" and "b" respectively to inidcate that.
Diffs keep information about the changed blob objects, the file mode, renames,
deletions and new files.
There are a few cases where None has to be expected as member variable value:
``New File``::
a_mode is None
a_blob is None
a_path is None
``Deleted File``::
b_mode is None
b_blob is None
b_path is None
``Working Tree Blobs``
When comparing to working trees, the working tree blob will have a null hexsha
as a corresponding object does not yet exist. The mode will be null as well.
But the path will be available though.
If it is listed in a diff the working tree version of the file must
be different to the version in the index or tree, and hence has been modified."""
# precompiled regex
re_header = re.compile(r"""
^diff[ ]--git
[ ](?:a/)?(?P<a_path>.+?)[ ](?:b/)?(?P<b_path>.+?)\n
(?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
^rename[ ]from[ ](?P<rename_from>\S+)\n
^rename[ ]to[ ](?P<rename_to>\S+)(?:\n|$))?
(?:^old[ ]mode[ ](?P<old_mode>\d+)\n
^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
(?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
(?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
(?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
\.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
""".encode('ascii'), re.VERBOSE | re.MULTILINE)
# can be used for comparisons
NULL_HEX_SHA = "0" * 40
NULL_BIN_SHA = b"\0" * 20
__slots__ = ("a_blob", "b_blob", "a_mode", "b_mode", "a_path", "b_path",
"new_file", "deleted_file", "rename_from", "rename_to", "diff")
def __init__(self, repo, a_path, b_path, a_blob_id, b_blob_id, a_mode,
b_mode, new_file, deleted_file, rename_from,
rename_to, diff):
self.a_mode = a_mode
self.b_mode = b_mode
self.a_path = a_path
self.b_path = b_path
if self.a_mode:
self.a_mode = mode_str_to_int(self.a_mode)
if self.b_mode:
self.b_mode = mode_str_to_int(self.b_mode)
if a_blob_id is None:
self.a_blob = None
else:
assert self.a_mode
self.a_blob = Blob(repo, hex_to_bin(a_blob_id), mode=self.a_mode, path=a_path)
if b_blob_id is None:
self.b_blob = None
else:
assert self.b_mode
self.b_blob = Blob(repo, hex_to_bin(b_blob_id), mode=self.b_mode, path=b_path)
self.new_file = new_file
self.deleted_file = deleted_file
# be clear and use None instead of empty strings
self.rename_from = rename_from or None
self.rename_to = rename_to or None
self.diff = diff
def __eq__(self, other):
for name in self.__slots__:
if getattr(self, name) != getattr(other, name):
return False
# END for each name
return True
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(tuple(getattr(self, n) for n in self.__slots__))
    def __str__(self):
        """Return a human-readable, multi-line description of this diff:
        a path headline, per-side mode/sha lines, rename/add/delete notes
        and the raw diff text if present."""
        # Headline: prefer the a-side path, fall back to the b-side.
        h = "%s"
        if self.a_blob:
            h %= self.a_blob.path
        elif self.b_blob:
            h %= self.b_blob.path
        msg = ''
        l = None  # temp line
        ll = 0  # line length
        # One "side: mode | sha" line per blob; track the widest line so the
        # '=' separator under the headline spans the full width.
        for b, n in zip((self.a_blob, self.b_blob), ('lhs', 'rhs')):
            if b:
                l = "\n%s: %o | %s" % (n, b.mode, b.hexsha)
            else:
                l = "\n%s: None" % n
            # END if blob is not None
            ll = max(len(l), ll)
            msg += l
        # END for each blob
        # add headline
        h += '\n' + '=' * ll
        if self.deleted_file:
            msg += '\nfile deleted in rhs'
        if self.new_file:
            msg += '\nfile added in rhs'
        if self.rename_from:
            msg += '\nfile renamed from %r' % self.rename_from
        if self.rename_to:
            msg += '\nfile renamed to %r' % self.rename_to
        if self.diff:
            msg += '\n---'
            try:
                # self.diff holds raw bytes; binary payloads may not decode.
                msg += self.diff.decode(defenc)
            except UnicodeDecodeError:
                msg += 'OMITTED BINARY DATA'
            # end handle encoding
            msg += '\n---'
        # END diff info
        # Python2 silliness: have to assure we convert our likely to be unicode object to a string with the
        # right encoding. Otherwise it tries to convert it using ascii, which may fail ungracefully
        res = h + msg
        if not PY3:
            res = res.encode(defenc)
        # end
        return res
@property
def renamed(self):
""":returns: True if the blob of our diff has been renamed"""
return self.rename_from != self.rename_to
    @classmethod
    def _index_from_patch_format(cls, repo, stream):
        """Create a new DiffIndex from the given text which must be in patch format
        :param repo: is the repository we are operating on - it is required
        :param stream: result of 'git diff' as a stream (supporting file protocol)
        :return: git.DiffIndex """
        # for now, we have to bake the stream
        text = stream.read()
        index = DiffIndex()
        previous_header = None
        # Each re_header match marks the start of one file's diff; the
        # patch body lies between consecutive header matches.
        for header in cls.re_header.finditer(text):
            a_path, b_path, similarity_index, rename_from, rename_to, \
                old_mode, new_mode, new_file_mode, deleted_file_mode, \
                a_blob_id, b_blob_id, b_mode = header.groups()
            new_file, deleted_file = bool(new_file_mode), bool(deleted_file_mode)
            # Our only means to find the actual text is to see what has not been matched by our regex,
            # and then retro-actively assign it to our index
            if previous_header is not None:
                index[-1].diff = text[previous_header.end():header.start()]
            # end assign actual diff
            # Make sure the mode is set if the path is set. Otherwise the resulting blob is invalid
            # We just use the one mode we should have parsed
            a_mode = old_mode or deleted_file_mode or (a_path and (b_mode or new_mode or new_file_mode))
            b_mode = b_mode or new_mode or new_file_mode or (b_path and a_mode)
            # All captured groups are bytes; decode them for the Diff ctor.
            index.append(Diff(repo,
                              a_path and a_path.decode(defenc),
                              b_path and b_path.decode(defenc),
                              a_blob_id and a_blob_id.decode(defenc),
                              b_blob_id and b_blob_id.decode(defenc),
                              a_mode and a_mode.decode(defenc),
                              b_mode and b_mode.decode(defenc),
                              new_file, deleted_file,
                              rename_from and rename_from.decode(defenc),
                              rename_to and rename_to.decode(defenc),
                              None))
            previous_header = header
        # end for each header we parse
        # The last entry's body runs to the end of the text.
        if index:
            index[-1].diff = text[header.end():]
        # end assign last diff
        return index
@classmethod
def _index_from_raw_format(cls, repo, stream):
"""Create a new DiffIndex from the given stream which must be in raw format.
:return: git.DiffIndex"""
# handles
# :100644 100644 687099101... 37c5e30c8... M .gitignore
index = DiffIndex()
for line in stream.readlines():
line = line.decode(defenc)
if not line.startswith(":"):
continue
# END its not a valid diff line
old_mode, new_mode, a_blob_id, b_blob_id, change_type, path = line[1:].split(None, 5)
path = path.strip()
a_path = path
b_path = path
deleted_file = False
new_file = False
rename_from = None
rename_to = None
# NOTE: We cannot conclude from the existance of a blob to change type
# as diffs with the working do not have blobs yet
if change_type == 'D':
b_blob_id = None
deleted_file = True
elif change_type == 'A':
a_blob_id = None
new_file = True
elif change_type[0] == 'R': # parses RXXX, where XXX is a confidence value
a_path, b_path = path.split('\t', 1)
rename_from, rename_to = a_path, b_path
# END add/remove handling
diff = Diff(repo, a_path, b_path, a_blob_id, b_blob_id, old_mode, new_mode,
new_file, deleted_file, rename_from, rename_to, '')
index.append(diff)
# END for each line
return index
|
{
"content_hash": "489d4e67fa0e923e8b88b5676a31a27c",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 107,
"avg_line_length": 36.55128205128205,
"alnum_prop": 0.5351104875482287,
"repo_name": "bwrsandman/GitPython",
"id": "dc53f3f7113abab8196b12745d03d06265fc5941",
"size": "14471",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "git/diff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "963"
},
{
"name": "Python",
"bytes": "710650"
},
{
"name": "Ruby",
"bytes": "5705"
}
],
"symlink_target": ""
}
|
import sys, os
# Absolute directory containing this script, used to anchor sys.path entries
# so sibling packages import regardless of the current working directory.
foo_dir = os.path.dirname(os.path.join(os.getcwd(), __file__))
# NOTE(review): each join of '../<pkg>' followed by '..' normalizes back to
# the parent directory; the three appends look redundant — verify intent.
sys.path.append(os.path.normpath(os.path.join(foo_dir, '../DataGathering', '..')))
sys.path.append(os.path.normpath(os.path.join(foo_dir, '../Classification', '..')))
sys.path.append(os.path.normpath(os.path.join(foo_dir, '../TextCleaning', '..')))
# Relative path entry: only works when run from this script's directory.
sys.path.insert(0, '../DataGathering/')
from svm_classifier import train_and_predict
import matplotlib.pyplot as plt
import datetime
import csv
def classify():
    """Classify every tweet in the merged CSV and write the rows back out
    with the predicted label appended as an extra column.

    Reads ``../../480k_trump_merged.csv`` (column 1 holds the tweet text),
    runs the SVM classifier over all texts, and writes
    ``../../480k_trump_classified.csv``.
    """
    tweet_texts = []
    tweets = []
    # newline='' is the csv-module requirement for csv files.
    with open('../../480k_trump_merged.csv', 'r', encoding='utf-8',
              newline='') as csvfile:
        csv_reader = csv.reader(csvfile, delimiter=',')
        # Skip the header row; next(..., None) is safe on an empty file,
        # unlike the previous `del rows[0]` which raised IndexError.
        next(csv_reader, None)
        for row in csv_reader:
            tweet_texts.append(row[1])
            tweets.append(row)
    results = train_and_predict(tweet_texts)
    with open('../../480k_trump_classified.csv', 'w+', encoding='utf-8',
              newline='') as csvfile:
        csv_writer = csv.writer(csvfile, delimiter=',')
        # zip pairs each original row with its prediction.
        for row, label in zip(tweets, results):
            csv_writer.writerow(row + [label])
def plot():
    """Plot the mean predicted sentiment per hour for the classified tweets.

    Reads ``../../480k_trump_classified.csv`` (column 6: creation date,
    column 7: predicted label), buckets labels into hourly bins, and plots
    the mean of every bin with at least 4 samples.
    """
    data = []
    with open('../../480k_trump_classified.csv', 'r', encoding='utf-8') as csvfile:
        csv_reader = csv.reader(csvfile, delimiter=',')
        for row in csv_reader:
            # Dates look like 'Wed Nov 09 01:23:45 +0000 2016'.
            ts = datetime.datetime.strptime(
                row[6], "%a %b %d %H:%M:%S %z %Y").timestamp()
            data.append((ts, int(row[7])))
    if not data:
        return  # nothing to plot
    data.sort(key=lambda pair: pair[0])
    min_ts = data[0][0]
    max_ts = data[-1][0]
    # datetime.timestamp() yields SECONDS, so an hour is 3600 — the former
    # 1000 * 3600 treated the values as milliseconds and produced huge bins.
    bin_size = 3600
    # +1 so the latest timestamp always has a bin (truncation-safe).
    hours = int((max_ts - min_ts) / bin_size) + 1
    hour_bins = [[] for _ in range(hours)]
    for ts, label in data:
        # No `-1` here: the earliest point belongs in bin 0, not bin -1
        # (which silently wrapped around to the last bucket before).
        hour_bins[int((ts - min_ts) / bin_size)].append(label)
    # Average only bins with enough samples to be meaningful.
    averages = [sum(b) / len(b) for b in hour_bins if len(b) >= 4]
    plt.plot(range(len(averages)), averages, 'ro')
    plt.show()
if __name__ == '__main__':
    # classify() only needs to run once to produce the classified CSV.
    # classify()
    plot()
|
{
"content_hash": "c5add6cc45a1fbf98068e70be7cf1831",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 93,
"avg_line_length": 26.8375,
"alnum_prop": 0.6297158826269212,
"repo_name": "steinnp/Big-Data-Final",
"id": "60e88a82cb8cb28171fdbcd4e7adc1a270766bfa",
"size": "2148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Classification/trump_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3214"
},
{
"name": "Python",
"bytes": "51193"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.